repository_name: string, lengths 7 to 107
function_path: string, lengths 4 to 190
function_identifier: string, lengths 1 to 236
language: string, 1 class
function: string, lengths 9 to 647k
docstring: string, lengths 5 to 488k
function_url: string, lengths 71 to 285
context: string, lengths 0 to 2.51M
license: string, 5 classes
bootphon/abxpy
ABXpy/misc/tinytree.py
Tree.addChildrenFromList
python
def addChildrenFromList(self, children):
    skip = True
    v = list(zip(
            itertools.chain([None], children),
            itertools.chain(children, [None])
        ))
    for i in v:
        if skip:
            skip = False
            continue
        self.addChild(i[0])
        if _isSequenceLike(i[1]):
            i[0].addChildrenFromList(i[1])
            skip = True
Add children to this node. :children A nested list specifying a tree of children
https://github.com/bootphon/abxpy/blob/8d9f4f824169f49ab363cbb2d9d2950df77f097d/ABXpy/misc/tinytree.py#L64-L82
import sys
import itertools
import unicodedata


def _isStringLike(anobj):
    try:
        anobj[:0] + ''
    except:
        return 0
    else:
        return 1


def _isSequenceLike(anobj):
    if not hasattr(anobj, "next"):
        if _isStringLike(anobj):
            return 0
        try:
            anobj[:0]
        except:
            return 0
    return 1


class Tree(object):
    def __init__(self, children=None):
        self.children = []
        if children:
            self.addChildrenFromList(children)
        self.parent = None
MIT License
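A minimal construction sketch for the function above, assuming the module is importable under the path shown for this row; the nested list is purely illustrative.

from ABXpy.misc.tinytree import Tree

# A node followed by a list attaches that list as the node's children,
# so this builds: root -> child -> (leaf1, leaf2).
leaf1, leaf2, child = Tree(), Tree(), Tree()
root = Tree(children=[child, [leaf1, leaf2]])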
onshape-public/onshape-clients
python/onshape_client/oas/models/bt_parameter_visibility_on_equal180.py
BTParameterVisibilityOnEqual180.openapi_types
python
def openapi_types():
    return {
        "bt_type": (str,),
        "parameter_id": (str,),
        "value": (str,),
    }
This must be a class method so a model may have properties that are of type self; this ensures that we don't create a cyclic import. Returns openapi_types (dict): The key is the attribute name and the value is the attribute type.
https://github.com/onshape-public/onshape-clients/blob/20843a00c628e516e7219e17a23ec4ef2bf9f16f/python/onshape_client/oas/models/bt_parameter_visibility_on_equal180.py#L85-L98
from __future__ import absolute_import

import re
import sys

import six
import nulltype

from onshape_client.oas.model_utils import (
    ModelComposed,
    ModelNormal,
    ModelSimple,
    date,
    datetime,
    file_type,
    int,
    none_type,
    str,
    validate_get_composed_info,
)

try:
    from onshape_client.oas.models import bt_parameter_visibility_condition177
except ImportError:
    bt_parameter_visibility_condition177 = sys.modules[
        "onshape_client.oas.models.bt_parameter_visibility_condition177"
    ]
try:
    from onshape_client.oas.models import bt_parameter_visibility_on_equal180_all_of
except ImportError:
    bt_parameter_visibility_on_equal180_all_of = sys.modules[
        "onshape_client.oas.models.bt_parameter_visibility_on_equal180_all_of"
    ]
try:
    from onshape_client.oas.models import bt_parameter_visibility_on_mate_dof_type2114
except ImportError:
    bt_parameter_visibility_on_mate_dof_type2114 = sys.modules[
        "onshape_client.oas.models.bt_parameter_visibility_on_mate_dof_type2114"
    ]


class BTParameterVisibilityOnEqual180(ModelComposed):
    allowed_values = {}
    validations = {}
    additional_properties_type = None

    @staticmethod
MIT License
okpy/ok-client
client/protocols/analytics.py
AnalyticsProtocol.run
python
def run(self, messages):
    statistics = {}
    statistics['time'] = str(datetime.now())
    statistics['time-utc'] = str(datetime.utcnow())
    statistics['unlock'] = self.args.unlock
    if self.args.question:
        statistics['question'] = [t.name for t in self.assignment.specified_tests]
        statistics['requested-questions'] = self.args.question
    if self.args.suite:
        statistics['requested-suite'] = self.args.suite
    if self.args.case:
        statistics['requested-case'] = self.args.case
    messages['analytics'] = statistics
    self.log_run(messages)
Collects some analytics about this autograder run and stores them under the 'analytics' key of messages.
https://github.com/okpy/ok-client/blob/3c5eca17100eed808023a815654cfe1c95179080/client/protocols/analytics.py#L35-L52
import logging
import os
import pickle
import re

from client.protocols.common import models
from datetime import datetime

log = logging.getLogger(__name__)


class AnalyticsProtocol(models.Protocol):
    ANALYTICS_FILE = ".ok_history"

    RE_DEFAULT_CODE = re.compile(r"""
    ^\"\*\*\*\sREPLACE\sTHIS\sLINE\s\*\*\*\"$
    """, re.X | re.I)

    RE_SCHEME_DEFAULT_CODE = re.compile(r"""
    ^\'REPLACE-THIS-LINE$
    """, re.X | re.I)

    RE_REPLACE_MARK = re.compile(r"""
    [\#\;][ ]Replace[ ]
    """, re.X | re.I | re.M)
Apache License 2.0
zwicker-group/py-pde
pde/grids/spherical.py
SphericalSymGridBase.from_state
python
def from_state(cls, state: Dict[str, Any]) -> "SphericalSymGridBase":
    state_copy = state.copy()
    obj = cls(radius=state_copy.pop("radius"), shape=state_copy.pop("shape"))
    if state_copy:
        raise ValueError(f"State items {state_copy.keys()} were not used")
    return obj
Create a grid from a stored `state`. Args: state (dict): The state from which the grid is reconstructed.
https://github.com/zwicker-group/py-pde/blob/0549f7c74a52705e1d29e62d27b5578251c2054c/pde/grids/spherical.py#L101-L112
import warnings
from abc import ABCMeta
from typing import TYPE_CHECKING, Any, Dict, Generator, Tuple, Union

import numpy as np

from ..tools.cache import cached_property
from ..tools.docstrings import fill_in_docstring
from ..tools.plotting import plot_on_axes
from ..tools.spherical import volume_from_radius
from .base import DimensionError, GridBase, _check_shape, discretize_interval
from .cartesian import CartesianGrid

if TYPE_CHECKING:
    from .boundaries.axes import Boundaries

PI_4 = 4 * np.pi
PI_43 = 4 / 3 * np.pi


class SphericalSymGridBase(GridBase, metaclass=ABCMeta):
    periodic = [False]
    num_axes = 1

    def __init__(
        self, radius: Union[float, Tuple[float, float]], shape: Union[Tuple[int], int]
    ):
        super().__init__()
        shape_list = _check_shape(shape)
        if not len(shape_list) == 1:
            raise ValueError(f"`shape` must be a single number, not {shape_list}")
        self._shape: Tuple[int] = (int(shape_list[0]),)
        try:
            r_inner, r_outer = radius
        except TypeError:
            r_inner, r_outer = 0, float(radius)
        if r_inner < 0:
            raise ValueError("Inner radius must be positive")
        if r_inner >= r_outer:
            raise ValueError("Outer radius must be larger than inner radius")
        rs, dr = discretize_interval(r_inner, r_outer, self.shape[0])
        self._axes_coords = (rs,)
        self._axes_bounds = ((r_inner, r_outer),)
        self._discretization = np.array((dr,))

    @property
    def state(self) -> Dict[str, Any]:
        return {"radius": self.radius, "shape": self.shape}

    @property
    def has_hole(self) -> bool:
        return self.axes_bounds[0][0] > 0

    @classmethod
MIT License
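A short round-trip sketch for the method above; it assumes the concrete SphericalSymGrid subclass that py-pde defines alongside this base class, and the parameter values are arbitrary.

from pde import SphericalSymGrid   # concrete subclass; an assumption for this sketch

grid = SphericalSymGrid(radius=2.0, shape=16)
restored = SphericalSymGrid.from_state(grid.state)   # round-trips {"radius": ..., "shape": ...}
# any unexpected key left in the state dict raises ValueError, per the check above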
fisco-bcos/python-sdk
utils/encoding.py
text_if_str
python
def text_if_str(to_type, text_or_primitive):
    if isinstance(text_or_primitive, str):
        (primitive, text) = (None, text_or_primitive)
    else:
        (primitive, text) = (text_or_primitive, None)
    return to_type(primitive, text=text)
Convert to a type, assuming that strings can only be unicode text (not a hexstr). @param to_type is a function that takes the arguments (primitive, hexstr=hexstr, text=text), e.g. to_bytes, to_text, to_hex, to_int, etc. @param text_or_primitive is bytes, str, or int.
https://github.com/fisco-bcos/python-sdk/blob/1932e1d80439155a9ec1b4b6d940fec12d8ec98e/utils/encoding.py#L188-L200
import json import re from eth_abi.encoding import ( BaseArrayEncoder, ) from eth_utils import ( add_0x_prefix, big_endian_to_int, decode_hex, encode_hex, int_to_big_endian, is_boolean, is_bytes, is_hex, is_integer, is_list_like, remove_0x_prefix, to_hex, ) from eth_utils.toolz import ( curry, ) from hexbytes import ( HexBytes, ) from utils.abi import ( is_address_type, is_array_type, is_bool_type, is_bytes_type, is_int_type, is_string_type, is_uint_type, size_of_type, sub_type_of_array_type, ) from utils.validation import ( assert_one_val, validate_abi_type, validate_abi_value, ) from utils.datastructures import ( AttributeDict, ) def hex_encode_abi_type(abi_type, value, force_size=None): validate_abi_type(abi_type) validate_abi_value(abi_type, value) data_size = force_size or size_of_type(abi_type) if is_array_type(abi_type): sub_type = sub_type_of_array_type(abi_type) return "".join([remove_0x_prefix(hex_encode_abi_type(sub_type, v, 256)) for v in value]) elif is_bool_type(abi_type): return to_hex_with_size(value, data_size) elif is_uint_type(abi_type): return to_hex_with_size(value, data_size) elif is_int_type(abi_type): return to_hex_twos_compliment(value, data_size) elif is_address_type(abi_type): return pad_hex(value, data_size) elif is_bytes_type(abi_type): if is_bytes(value): return encode_hex(value) else: return value elif is_string_type(abi_type): return to_hex(text=value) else: raise ValueError( "Unsupported ABI type: {0}".format(abi_type) ) def to_hex_twos_compliment(value, bit_size): if value >= 0: return to_hex_with_size(value, bit_size) value = (1 << bit_size) + value hex_value = hex(value) hex_value = hex_value.rstrip("L") return hex_value def to_hex_with_size(value, bit_size): return pad_hex(to_hex(value), bit_size) def pad_hex(value, bit_size): value = remove_0x_prefix(value) return add_0x_prefix(value.zfill(int(bit_size / 4))) def trim_hex(hexstr): if hexstr.startswith('0x0'): hexstr = re.sub('^0x0+', '0x', hexstr) if hexstr == '0x': hexstr = '0x0' return hexstr def to_int(value=None, hexstr=None, text=None): assert_one_val(value, hexstr=hexstr, text=text) if hexstr is not None: return int(hexstr, 16) elif text is not None: return int(text) elif isinstance(value, bytes): return big_endian_to_int(value) elif isinstance(value, str): raise TypeError("Pass in strings with keyword hexstr or text") else: return int(value) @curry def pad_bytes(fill_with, num_bytes, unpadded): return unpadded.rjust(num_bytes, fill_with) zpad_bytes = pad_bytes(b'\0') def to_bytes(primitive=None, hexstr=None, text=None): assert_one_val(primitive, hexstr=hexstr, text=text) if is_boolean(primitive): return b'\x01' if primitive else b'\x00' elif isinstance(primitive, bytes): return primitive elif is_integer(primitive): return to_bytes(hexstr=to_hex(primitive)) elif hexstr is not None: if len(hexstr) % 2: hexstr = '0x0' + remove_0x_prefix(hexstr) return decode_hex(hexstr) elif text is not None: return text.encode('utf-8') raise TypeError("expected an int in first arg, or keyword of hexstr or text") def to_text(primitive=None, hexstr=None, text=None): assert_one_val(primitive, hexstr=hexstr, text=text) if hexstr is not None: return to_bytes(hexstr=hexstr).decode('utf-8') elif text is not None: return text elif isinstance(primitive, str): return to_text(hexstr=primitive) elif isinstance(primitive, bytes): return primitive.decode('utf-8') elif is_integer(primitive): byte_encoding = int_to_big_endian(primitive) return to_text(byte_encoding) raise TypeError("Expected an int, bytes or hexstr.") @curry
MIT License
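A small sketch of how the dispatch works, reusing the to_bytes and to_text converters defined later in this module; the import path is assumed from the row's function_path.

from utils.encoding import text_if_str, to_bytes, to_text

assert text_if_str(to_bytes, 'abc') == b'abc'   # str is routed through the text= keyword
assert text_if_str(to_bytes, 7) == b'\x07'      # non-str is passed as the primitive argument
assert text_if_str(to_text, b'abc') == 'abc'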
improbable-ai/airobot
src/airobot/utils/transform_util.py
interpolate_pose
python
def interpolate_pose(pose_initial, pose_final, N):
    frame_id = pose_initial.header.frame_id
    pose_initial_list = pose_stamped2list(pose_initial)
    pose_final_list = pose_stamped2list(pose_final)
    trans_initial = pose_initial_list[:3]
    quat_initial = pose_initial_list[3:]
    trans_final = pose_final_list[:3]
    quat_final = pose_final_list[3:]
    trans_interp_total = [np.linspace(trans_initial[0], trans_final[0], num=N),
                          np.linspace(trans_initial[1], trans_final[1], num=N),
                          np.linspace(trans_initial[2], trans_final[2], num=N)]
    key_rots = R.from_quat([quat_initial, quat_final])
    slerp = Slerp(np.arange(2), key_rots)
    interp_rots = slerp(np.linspace(0, 1, N))
    quat_interp_total = interp_rots.as_quat()
    pose_interp = []
    for counter in range(N):
        pose_tmp = [
            trans_interp_total[0][counter],
            trans_interp_total[1][counter],
            trans_interp_total[2][counter],
            quat_interp_total[counter][0],
            quat_interp_total[counter][1],
            quat_interp_total[counter][2],
            quat_interp_total[counter][3],
        ]
        pose_interp.append(list2pose_stamped(pose_tmp, frame_id=frame_id))
    return pose_interp
Function to interpolate between two poses using a combination of linear position interpolation and quaternion spherical-linear interpolation (SLERP) Args: pose_initial (PoseStamped): Initial pose pose_final (PoseStamped): Final pose N (int): Number of intermediate points. Returns: list: List of poses that interpolates between initial and final pose. Each element is PoseStamped.
https://github.com/improbable-ai/airobot/blob/4cef868ed7ab1c8ecd3f63b1290b5f1202bebda3/src/airobot/utils/transform_util.py#L277-L322
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from scipy.spatial.transform import Rotation as R from scipy.spatial.transform import Slerp from airobot.utils import common class Position: def __init__(self): self.x = 0. self.y = 0. self.z = 0. class Orientation: def __init__(self): self.x = 0. self.y = 0. self.z = 0. self.w = 0. class Pose: def __init__(self, position, orientation): self.position = position self.orientation = orientation class Header: def __init__(self): self.frame_id = "world" class PoseStamped(): def __init__(self): position = Position() orientation = Orientation() pose = Pose(position, orientation) header = Header() self.pose = pose self.header = header def pose_stamped2list(msg): return [float(msg.pose.position.x), float(msg.pose.position.y), float(msg.pose.position.z), float(msg.pose.orientation.x), float(msg.pose.orientation.y), float(msg.pose.orientation.z), float(msg.pose.orientation.w), ] def list2pose_stamped(pose, frame_id="world"): msg = PoseStamped() msg.header.frame_id = frame_id msg.pose.position.x = pose[0] msg.pose.position.y = pose[1] msg.pose.position.z = pose[2] msg.pose.orientation.x = pose[3] msg.pose.orientation.y = pose[4] msg.pose.orientation.z = pose[5] msg.pose.orientation.w = pose[6] return msg def unit_pose(): return list2pose_stamped([0, 0, 0, 0, 0, 0, 1]) def matrix_from_pose(pose): pose_list = pose_stamped2list(pose) trans, quat = pose_list[:3], pose_list[3:] T = np.eye(4) T[:-1, :-1] = common.quat2rot(quat) T[0:3, 3] = trans return T def pose_from_matrix(matrix, frame_id="world"): quat = common.rot2quat(matrix[:-1, :-1]) trans = matrix[:-1, -1] pose = list(trans) + list(quat) pose = list2pose_stamped(pose, frame_id=frame_id) return pose def get_transform(pose_frame_target, pose_frame_source): T_target_world = matrix_from_pose(pose_frame_target) T_source_world = matrix_from_pose(pose_frame_source) T_relative_world = np.matmul(T_target_world, np.linalg.inv(T_source_world)) pose_relative_world = pose_from_matrix( T_relative_world, frame_id=pose_frame_source.header.frame_id) return pose_relative_world def convert_reference_frame(pose_source, pose_frame_target, pose_frame_source, frame_id="world"): T_pose_source = matrix_from_pose(pose_source) pose_transform_target2source = get_transform( pose_frame_source, pose_frame_target) T_pose_transform_target2source = matrix_from_pose( pose_transform_target2source) T_pose_target = np.matmul(T_pose_transform_target2source, T_pose_source) pose_target = pose_from_matrix(T_pose_target, frame_id=frame_id) return pose_target def transform_pose(pose_source, pose_transform): T_pose_source = matrix_from_pose(pose_source) T_transform_source = matrix_from_pose(pose_transform) T_pose_final_source = np.matmul(T_transform_source, T_pose_source) pose_final_source = pose_from_matrix( T_pose_final_source, frame_id=pose_source.header.frame_id) return pose_final_source def transform_body(pose_source_world, pose_transform_target_body): pose_source_body = convert_reference_frame(pose_source_world, pose_source_world, unit_pose(), frame_id="body_frame") pose_source_rotated_body = transform_pose(pose_source_body, pose_transform_target_body) pose_source_rotated_world = convert_reference_frame(pose_source_rotated_body, unit_pose(), pose_source_world, frame_id="yumi_body") return pose_source_rotated_world
MIT License
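An illustrative call, assuming the airobot module path shown for this row; the start and goal poses are arbitrary.

from airobot.utils.transform_util import (
    interpolate_pose, list2pose_stamped, pose_stamped2list)

start = list2pose_stamped([0, 0, 0, 0, 0, 0, 1])                    # identity pose
goal = list2pose_stamped([1.0, 0, 0, 0, 0, 0.7071068, 0.7071068])   # 1 m along x, 90 deg yaw
waypoints = interpolate_pose(start, goal, N=5)
print([pose_stamped2list(p)[:3] for p in waypoints])                # x goes 0.0, 0.25, ..., 1.0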
cjdrake/pyeda
pyeda/boolalg/expr.py
Expression.depth
python
def depth(self):
    return self.node.depth()
Return the depth of the expression. Expression depth is defined recursively: 1. An atom node (constant or literal) has zero depth. 2. A branch node (operator) has depth equal to the maximum depth of its children (arguments) plus one.
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/expr.py#L791-L800
import itertools import os import random import pyeda.parsing.boolexpr from pyeda.boolalg import boolfunc from pyeda.util import bit_on, cached_property, clog2 if os.getenv('READTHEDOCS') == 'True': from unittest.mock import MagicMock exprnode = MagicMock() else: from pyeda.boolalg import exprnode from pyeda.boolalg import picosat _LITS = dict() _ASSUMPTIONS = set() def _assume2point(): point = dict() for lit in _ASSUMPTIONS: if isinstance(lit, Complement): point[~lit] = 0 elif isinstance(lit, Variable): point[lit] = 1 return point def exprvar(name, index=None): bvar = boolfunc.var(name, index) try: var = _LITS[bvar.uniqid] except KeyError: var = _LITS[bvar.uniqid] = Variable(bvar) return var def _exprcomp(node): try: comp = _LITS[node.data()] except KeyError: comp = _LITS[node.data()] = Complement(node) return comp _KIND2EXPR = { exprnode.ZERO : lambda node: Zero, exprnode.ONE : lambda node: One, exprnode.COMP : lambda node: _exprcomp(node), exprnode.VAR : lambda node: _LITS[node.data()], exprnode.OP_OR : lambda node: OrOp(node), exprnode.OP_AND : lambda node: AndOp(node), exprnode.OP_XOR : lambda node: XorOp(node), exprnode.OP_EQ : lambda node: EqualOp(node), exprnode.OP_NOT : lambda node: NotOp(node), exprnode.OP_IMPL : lambda node: ImpliesOp(node), exprnode.OP_ITE : lambda node: IfThenElseOp(node), } def _expr(node): return _KIND2EXPR[node.kind()](node) def expr(obj, simplify=True): if isinstance(obj, Expression): return obj elif isinstance(obj, int) and obj in {0, 1}: return _CONSTS[obj] elif isinstance(obj, str): ast = pyeda.parsing.boolexpr.parse(obj) ex = ast2expr(ast) if simplify: ex = ex.simplify() return ex else: return One if bool(obj) else Zero def ast2expr(ast): if ast[0] == 'const': return _CONSTS[ast[1]] elif ast[0] == 'var': return exprvar(ast[1], ast[2]) else: xs = [ast2expr(x) for x in ast[1:]] return ASTOPS[ast[0]](*xs, simplify=False) def expr2dimacscnf(ex): litmap, nvars, clauses = ex.encode_cnf() return litmap, DimacsCNF(nvars, clauses) def expr2dimacssat(ex): if not ex.simple: raise ValueError("expected ex to be simplified") litmap, nvars = ex.encode_inputs() formula = _expr2sat(ex, litmap) if 'xor' in formula: if '=' in formula: fmt = 'satex' else: fmt = 'satx' elif '=' in formula: fmt = 'sate' else: fmt = 'sat' return "p {} {}\n{}".format(fmt, nvars, formula) def _expr2sat(ex, litmap): if isinstance(ex, Literal): return str(litmap[ex]) elif isinstance(ex, NotOp): return "-(" + _expr2sat(ex.x, litmap) + ")" elif isinstance(ex, OrOp): return "+(" + " ".join(_expr2sat(x, litmap) for x in ex.xs) + ")" elif isinstance(ex, AndOp): return "*(" + " ".join(_expr2sat(x, litmap) for x in ex.xs) + ")" elif isinstance(ex, XorOp): return ("xor(" + " ".join(_expr2sat(x, litmap) for x in ex.xs) + ")") elif isinstance(ex, EqualOp): return "=(" + " ".join(_expr2sat(x, litmap) for x in ex.xs) + ")" else: fstr = ("expected ex to be a Literal or Not/Or/And/Xor/Equal op, " "got {0.__name__}") raise ValueError(fstr.format(type(ex))) def upoint2exprpoint(upoint): point = dict() for uniqid in upoint[0]: point[_LITS[uniqid]] = 0 for uniqid in upoint[1]: point[_LITS[uniqid]] = 1 return point def Not(x, simplify=True): x = Expression.box(x).node y = exprnode.not_(x) if simplify: y = y.simplify() return _expr(y) def Or(*xs, simplify=True): xs = [Expression.box(x).node for x in xs] y = exprnode.or_(*xs) if simplify: y = y.simplify() return _expr(y) def And(*xs, simplify=True): xs = [Expression.box(x).node for x in xs] y = exprnode.and_(*xs) if simplify: y = y.simplify() return _expr(y) def 
Xor(*xs, simplify=True): xs = [Expression.box(x).node for x in xs] y = exprnode.xor(*xs) if simplify: y = y.simplify() return _expr(y) def Equal(*xs, simplify=True): xs = [Expression.box(x).node for x in xs] y = exprnode.eq(*xs) if simplify: y = y.simplify() return _expr(y) def Implies(p, q, simplify=True): p = Expression.box(p).node q = Expression.box(q).node y = exprnode.impl(p, q) if simplify: y = y.simplify() return _expr(y) def ITE(s, d1, d0, simplify=True): s = Expression.box(s).node d1 = Expression.box(d1).node d0 = Expression.box(d0).node y = exprnode.ite(s, d1, d0) if simplify: y = y.simplify() return _expr(y) def Nor(*xs, simplify=True): xs = [Expression.box(x).node for x in xs] y = exprnode.not_(exprnode.or_(*xs)) if simplify: y = y.simplify() return _expr(y) def Nand(*xs, simplify=True): xs = [Expression.box(x).node for x in xs] y = exprnode.not_(exprnode.and_(*xs)) if simplify: y = y.simplify() return _expr(y) def Xnor(*xs, simplify=True): xs = [Expression.box(x).node for x in xs] y = exprnode.not_(exprnode.xor(*xs)) if simplify: y = y.simplify() return _expr(y) def Unequal(*xs, simplify=True): xs = [Expression.box(x).node for x in xs] y = exprnode.not_(exprnode.eq(*xs)) if simplify: y = y.simplify() return _expr(y) def OneHot0(*xs, simplify=True, conj=True): xs = [Expression.box(x).node for x in xs] terms = list() if conj: for x0, x1 in itertools.combinations(xs, 2): terms.append(exprnode.or_(exprnode.not_(x0), exprnode.not_(x1))) y = exprnode.and_(*terms) else: for _xs in itertools.combinations(xs, len(xs) - 1): terms.append(exprnode.and_(*[exprnode.not_(x) for x in _xs])) y = exprnode.or_(*terms) if simplify: y = y.simplify() return _expr(y) def OneHot(*xs, simplify=True, conj=True): xs = [Expression.box(x).node for x in xs] terms = list() if conj: for x0, x1 in itertools.combinations(xs, 2): terms.append(exprnode.or_(exprnode.not_(x0), exprnode.not_(x1))) terms.append(exprnode.or_(*xs)) y = exprnode.and_(*terms) else: for i, xi in enumerate(xs): zeros = [exprnode.not_(x) for x in xs[:i] + xs[i+1:]] terms.append(exprnode.and_(xi, *zeros)) y = exprnode.or_(*terms) if simplify: y = y.simplify() return _expr(y) def NHot(n, *xs, simplify=True): if not isinstance(n, int): raise TypeError("expected n to be an int") if not 0 <= n <= len(xs): fstr = "expected 0 <= n <= {}, got {}" raise ValueError(fstr.format(len(xs), n)) xs = [Expression.box(x).node for x in xs] num = len(xs) terms = list() for hot_idxs in itertools.combinations(range(num), n): hot_idxs = set(hot_idxs) _xs = [xs[i] if i in hot_idxs else exprnode.not_(xs[i]) for i in range(num)] terms.append(exprnode.and_(*_xs)) y = exprnode.or_(*terms) if simplify: y = y.simplify() return _expr(y) def Majority(*xs, simplify=True, conj=False): xs = [Expression.box(x).node for x in xs] if conj: terms = list() for _xs in itertools.combinations(xs, (len(xs) + 1) // 2): terms.append(exprnode.or_(*_xs)) y = exprnode.and_(*terms) else: terms = list() for _xs in itertools.combinations(xs, len(xs) // 2 + 1): terms.append(exprnode.and_(*_xs)) y = exprnode.or_(*terms) if simplify: y = y.simplify() return _expr(y) def AchillesHeel(*xs, simplify=True): nargs = len(xs) if nargs & 1: fstr = "expected an even number of arguments, got {}" raise ValueError(fstr.format(nargs)) xs = [Expression.box(x).node for x in xs] y = exprnode.and_(*[exprnode.or_(xs[2*i], xs[2*i+1]) for i in range(nargs // 2)]) if simplify: y = y.simplify() return _expr(y) def Mux(fs, sel, simplify=True): if isinstance(sel, Expression): sel = [sel] if len(sel) < clog2(len(fs)): 
fstr = "expected at least {} select bits, got {}" raise ValueError(fstr.format(clog2(len(fs)), len(sel))) it = boolfunc.iter_terms(sel) y = exprnode.or_(*[exprnode.and_(f.node, *[lit.node for lit in next(it)]) for f in fs]) if simplify: y = y.simplify() return _expr(y) def ForAll(vs, ex): return And(*ex.cofactors(vs)) def Exists(vs, ex): return Or(*ex.cofactors(vs)) class _Clause: def _lits(self): raise NotImplementedError() def _encode_clause(self, litmap): raise NotImplementedError() class _DNF: def _encode_dnf(self): raise NotImplementedError() @cached_property def _cover(self): raise NotImplementedError() class _CNF: def _encode_cnf(self): raise NotImplementedError() class Expression(boolfunc.Function): ASTOP = NotImplemented def __init__(self, node): self.node = node def __repr__(self): return self.__str__() def __enter__(self): raise ValueError("expected assumption to be a literal") def __exit__(self, exc_type, exc_val, exc_tb): raise ValueError("expected assumption to be a literal") def __invert__(self): return _expr(exprnode.not_(self.node)) def __or__(self, other): other_node = self.box(other).node return _expr(exprnode.or_(self.node, other_node)) def __and__(self, other): other_node = self.box(other).node return _expr(exprnode.and_(self.node, other_node)) def __xor__(self, other): other_node = self.box(other).node return _expr(exprnode.xor(self.node, other_node)) def eq(self, other): other_node = self.box(other).node return _expr(exprnode.eq(self.node, other_node)) def __rshift__(self, other): other_node = self.box(other).node return _expr(exprnode.impl(self.node, other_node)) def __rrshift__(self, other): other_node = self.box(other).node return _expr(exprnode.impl(other_node, self.node)) @cached_property def support(self): s = set() for ex in self.iter_dfs(): if isinstance(ex, Complement): s.add(~ex) elif isinstance(ex, Variable): s.add(ex) return frozenset(s) @cached_property def inputs(self): return tuple(sorted(self.support, key=lambda ex: ex.node.data())) def restrict(self, point): d = dict() for key, val in point.items(): if not isinstance(key, Variable): raise TypeError("expected point keys to be variables") val = _expr(self.box(val).node) if not isinstance(val, Constant): raise TypeError("expected point values to be constants") d[key.node] = val.node return _expr(self.node.restrict(d)) def compose(self, mapping): d = dict() for key, val in mapping.items(): if not isinstance(key, Variable): raise TypeError("expected mapping keys to be variables") d[key.node] = self.box(val).node return _expr(self.node.compose(d)) def satisfy_one(self): if self.is_cnf(): litmap, cnf = expr2dimacscnf(self) assumptions = [litmap[lit] for lit in _ASSUMPTIONS] soln = cnf.satisfy_one(assumptions) if soln is None: return None else: return cnf.soln2point(soln, litmap) else: if _ASSUMPTIONS: aupnt = _assume2point() soln = _backtrack(self.restrict(aupnt)) soln.update(aupnt) return soln else: return _backtrack(self) def satisfy_all(self): if self.is_cnf(): litmap, cnf = expr2dimacscnf(self) for soln in cnf.satisfy_all(): yield cnf.soln2point(soln, litmap) else: yield from _iter_backtrack(self) def is_zero(self): return False def is_one(self): return False @staticmethod def box(obj): if isinstance(obj, Expression): return obj elif isinstance(obj, int) and obj in {0, 1}: return _CONSTS[obj] elif isinstance(obj, str): ast = pyeda.parsing.boolexpr.parse(obj) return ast2expr(ast) else: return One if bool(obj) else Zero def to_ast(self): return self.node.to_ast() def iter_dfs(self): for node in self.node: 
yield _expr(node) @cached_property
BSD 2-Clause Simplified License
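A quick illustration of the depth rules from the docstring, assuming pyeda is installed and that depth is exposed as a cached property (the context above ends with the @cached_property decorator that precedes this method).

from pyeda.inter import exprvar

a, b, c = map(exprvar, 'abc')
f = a | b & c                 # Or(a, And(b, c))
print(a.depth, f.depth)       # expected 0 and 2 by the rules in the docstring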
symjax/symjax
symjax/rl/utils.py
discount_cumsum
python
def discount_cumsum(x, discount):
    return lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
Compute discounted cumulative sums of vectors. Input: vector x = [x0, x1, x2]. Output: [x0 + discount * x1 + discount^2 * x2, x1 + discount * x2, x2].
https://github.com/symjax/symjax/blob/d8778c2eb3254b478cef4f45d934bf921e695619/symjax/rl/utils.py#L77-L91
from collections import deque
import random

from scipy.signal import lfilter
import numpy as np
from collections import deque


class NStepRewarder(object):
    def __init__(self, factor, gamma=None, n=8, normalize=False, eps=1e-6):
        self.factor = factor
        self.n = n
        self.gamma = gamma
        self.normalize = normalize
        self.eps = eps
        self.ptr, self.path_start_idx = 0, 0

    def total(self, ep_batch, tot_reward):
        for step in ep_batch:
            step[2] = tot_reward * self.factor
        return ep_batch

    def discount(self, ep_batch):
        if self.n == np.inf:
            rewards = []
            for reward, is_terminal in zip(
                reversed(ep_batch[:, 2]), reversed(ep_batch[:, 4])
            ):
                if is_terminal:
                    discounted_reward = 0
                discounted_reward = reward + (self.gamma * discounted_reward)
                rewards.insert(0, discounted_reward)
            ep_batch[:, 2] = np.asarray(rewards)
        elif self.n != 1:
            x = ep_batch[:, 2]
            if self.n == np.inf:
                b = [1]
                a = [1, -self.gamma]
            else:
                b = self.gamma ** np.arange(self.n)
                a = [1]
            ep_batch[:, 2] = lfilter(b=b, a=a, x=x[::-1], axis=0)[::-1]
        if self.normalize:
            std = np.std(ep_batch[:, 2]) + self.eps
            ep_batch[:, 2] = (ep_batch[:, 2] - np.mean(ep_batch[:, 2])) / std
        ep_batch[:, 2] *= self.factor
        return ep_batch
Apache License 2.0
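A self-contained numeric check of the documented output; the function is repeated here so the snippet runs on its own.

import numpy as np
from scipy.signal import lfilter

def discount_cumsum(x, discount):
    return lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

x = np.array([1.0, 2.0, 3.0])
print(discount_cumsum(x, 0.5))   # [1 + 0.5*2 + 0.25*3, 2 + 0.5*3, 3] == [2.75, 3.5, 3.0]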
som-shahlab/trove
trove/models/model_search.py
grid_search_span
python
def grid_search_span(model_class,
                     model_class_init,
                     param_grid,
                     train=None,
                     dev=None,
                     n_model_search=5,
                     val_metric='f1',
                     seed=1234,
                     verbose=True):
    L_train, Y_train = train if len(train) == 2 else (train[0], None)
    L_dev, Y_dev = dev
    params = sample_param_grid(param_grid, seed)[:n_model_search]
    defaults = {'optimizer': 'adam', 'seed': seed}
    best_score, best_config = 0.0, None
    average = 'binary' if np.unique(Y_dev).shape[0] == 2 else 'micro'
    print(f"Grid search over {len(params)} configs")
    print(f'Averaging: {average}')
    for i, config in enumerate(params):
        print(f'[{i}] Label Model')
        config = dict(zip(param_grid.keys(), config))
        config.update({param: value for param, value in defaults.items()
                       if param not in config})
        model = model_class(**model_class_init)
        model.fit(L_train, Y_dev, **config)
        y_pred = model.predict(L_dev)
        y_gold = Y_dev
        if -1 in y_pred:
            continue
        mask = []
        for i in range(L_dev.shape[0]):
            if not np.all(L_dev[i] == -1):
                mask.append(i)
        mask = np.array(mask)
        metrics = compute_metrics(Y_dev[mask], model.predict(L_dev[mask]))
        msgs = []
        if not best_score or metrics[val_metric] > best_score[val_metric]:
            print(config)
            best_score = metrics
            best_config = config
            mask = [i for i in range(L_train.shape[0])
                    if not np.all(L_train[i] == -1)]
            msgs.append(
                f'Coverage: {(len(mask) / L_train.shape[0] * 100):2.1f}%'
            )
            if Y_train is not None:
                y_mask = [i for i in range(len(Y_train)) if Y_train[i] != -1]
                mask = np.array(sorted(list(set(y_mask).intersection(mask))))
                metrics = compute_metrics(Y_train[mask],
                                          model.predict(L_train[mask]))
                msgs.append(
                    'TRAIN {}'.format(' | '.join(
                        [f'{m}: {v * 100:2.2f}' for m, v in metrics.items()]))
                )
            msgs.append(
                'DEV {}'.format(' | '.join(
                    [f'{m}: {v * 100:2.2f}' for m, v in best_score.items()]))
            )
        if verbose and msgs:
            print('\n'.join(msgs) + ('\n' + '-' * 80))
        if i % 50 == 0:
            print(f'[{i}] Label Model')
    if verbose:
        print('BEST')
        print(best_config)
    model = model_class(**model_class_init)
    model.fit(L_train, Y_dev, **best_config)
    return model, best_config
Simple grid search helper function
https://github.com/som-shahlab/trove/blob/8249def4d86d0da9e14db3f81918c3e0ac65bd89/trove/models/model_search.py#L43-L139
import numpy as np
from itertools import product
from trove.metrics import score_sequences, tokens_to_sequences
from trove.models.voting import mv
from sklearn.metrics import (
    precision_score,
    recall_score,
    f1_score,
    accuracy_score,
    precision_recall_fscore_support
)


def sample_param_grid(param_grid, seed):
    rstate = np.random.get_state()
    np.random.seed(seed)
    params = list(product(*[param_grid[name] for name in param_grid]))
    np.random.shuffle(params)
    np.random.set_state(rstate)
    return params


def compute_metrics(y_gold, y_pred, average='binary'):
    return {
        'accuracy': accuracy_score(y_gold, y_pred),
        'precision': precision_score(y_gold, y_pred, average=average),
        'recall': recall_score(y_gold, y_pred, average=average),
        'f1': f1_score(y_gold, y_pred, average=average)
    }
Apache License 2.0
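A runnable sketch of the expected call shape. MajorityVoteModel is a hypothetical stand-in whose fit(L, Y_dev, **config) and predict(L) signatures match how the search loop uses the model; the label matrices are toy data with -1 marking abstains.

import numpy as np
from trove.models.model_search import grid_search_span

class MajorityVoteModel:
    # Hypothetical stand-in; only the interface used by the search loop matters.
    def __init__(self, cardinality=2):
        self.cardinality = cardinality
    def fit(self, L, Y_dev, **config):
        pass
    def predict(self, L):
        # majority vote per row over labeling-function outputs, ignoring -1 abstains
        preds = []
        for row in L:
            votes = row[row != -1]
            preds.append(np.bincount(votes).argmax() if len(votes) else 0)
        return np.array(preds)

L_train = np.array([[1, 1, -1], [0, 0, 1], [1, -1, 1], [0, 0, -1]])
Y_train = np.array([1, 0, 1, 0])
L_dev, Y_dev = L_train.copy(), Y_train.copy()
model, best_config = grid_search_span(
    MajorityVoteModel, {'cardinality': 2}, param_grid={'lr': [0.01, 0.001]},
    train=(L_train, Y_train), dev=(L_dev, Y_dev), n_model_search=2)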
citrineinformatics/citrine-python
src/citrine/resources/project.py
Project.predictor_evaluation_executions
python
def predictor_evaluation_executions(self) -> PredictorEvaluationExecutionCollection:
    return PredictorEvaluationExecutionCollection(project_id=self.uid, session=self.session)
Return a collection representing all visible predictor evaluation executions.
https://github.com/citrineinformatics/citrine-python/blob/d164744c99b17f935a09935c2e57d2f056675477/src/citrine/resources/project.py#L132-L134
from typing import Optional, Dict, List, Union, Iterable, Tuple, Iterator from uuid import UUID from warnings import warn from gemd.entity.base_entity import BaseEntity from gemd.entity.link_by_uid import LinkByUID from citrine._rest.collection import Collection from citrine._rest.resource import Resource, ResourceTypeEnum from citrine._serialization import properties from citrine._utils.functions import format_escaped_url from citrine._session import Session from citrine.resources.api_error import ApiError from citrine.resources.condition_template import ConditionTemplateCollection from citrine.resources.dataset import DatasetCollection from citrine.resources.delete import _async_gemd_batch_delete from citrine.resources.descriptors import DescriptorMethods from citrine.resources.design_space import DesignSpaceCollection from citrine.resources.design_workflow import DesignWorkflowCollection from citrine.resources.gemtables import GemTableCollection from citrine.resources.gemd_resource import GEMDResourceCollection from citrine.resources.ingredient_run import IngredientRunCollection from citrine.resources.ingredient_spec import IngredientSpecCollection from citrine.resources.material_run import MaterialRunCollection from citrine.resources.material_spec import MaterialSpecCollection from citrine.resources.material_template import MaterialTemplateCollection from citrine.resources.measurement_run import MeasurementRunCollection from citrine.resources.measurement_spec import MeasurementSpecCollection from citrine.resources.measurement_template import MeasurementTemplateCollection from citrine.resources.module import ModuleCollection from citrine.resources.parameter_template import ParameterTemplateCollection from citrine.resources.predictor import PredictorCollection from citrine.resources.predictor_evaluation_execution import PredictorEvaluationExecutionCollection from citrine.resources.predictor_evaluation_workflow import PredictorEvaluationWorkflowCollection from citrine.resources.process_run import ProcessRunCollection from citrine.resources.process_spec import ProcessSpecCollection from citrine.resources.process_template import ProcessTemplateCollection from citrine.resources.processor import ProcessorCollection from citrine.resources.project_member import ProjectMember from citrine.resources.project_roles import MEMBER, ROLES, ACTIONS from citrine.resources.property_template import PropertyTemplateCollection from citrine.resources.response import Response from citrine.resources.table_config import TableConfigCollection from citrine.resources.user import User class Project(Resource['Project']): _response_key = 'project' _resource_type = ResourceTypeEnum.PROJECT name = properties.String('name') description = properties.Optional(properties.String(), 'description') uid = properties.Optional(properties.UUID(), 'id') status = properties.Optional(properties.String(), 'status') created_at = properties.Optional(properties.Datetime(), 'created_at') def __init__(self, name: str, *, description: Optional[str] = None, session: Optional[Session] = None): self.name: str = name self.description: Optional[str] = description self.session: Session = session def __str__(self): return '<Project {!r}>'.format(self.name) def _path(self): return format_escaped_url('/projects/{project_id}', project_id=self.uid) @property def modules(self) -> ModuleCollection: return ModuleCollection(self.uid, self.session) @property def design_spaces(self) -> DesignSpaceCollection: return DesignSpaceCollection(self.uid, 
self.session) @property def processors(self) -> ProcessorCollection: return ProcessorCollection(self.uid, self.session) @property def predictors(self) -> PredictorCollection: return PredictorCollection(self.uid, self.session) @property def descriptors(self) -> DescriptorMethods: return DescriptorMethods(self.uid, self.session) @property def predictor_evaluation_workflows(self) -> PredictorEvaluationWorkflowCollection: return PredictorEvaluationWorkflowCollection(self.uid, self.session) @property
Apache License 2.0
jansel/opentuner
opentuner/api.py
TuningRunManager.get_best_result
python
def get_best_result(self):
    try:
        return self.search_driver.best_result
    except AttributeError:
        return None
The best result found so far, from the current tuning run only.
https://github.com/jansel/opentuner/blob/070c5cef6d933eb760a2f9cd5cd08c95f27aee75/opentuner/api.py#L65-L72
from datetime import datetime

from opentuner import tuningrunmain


class TuningRunManager(tuningrunmain.TuningRunMain):
    def __init__(self, measurement_interface, args, **kwargs):
        super(TuningRunManager, self).__init__(measurement_interface, args, **kwargs)
        self.init()
        self.tuning_run.state = 'RUNNING'
        self.commit(force=True)
        self.search_driver.external_main_begin()

    def get_next_desired_result(self):
        dr = self.measurement_driver.query_pending_desired_results().first()
        if dr is None:
            self.search_driver.external_main_generation()
            dr = self.measurement_driver.query_pending_desired_results().first()
            if dr is None:
                return None
        self.measurement_driver.claim_desired_result(dr)
        dr.limit = self.measurement_driver.run_time_limit(dr)
        return dr

    def get_desired_results(self):
        drs = self.measurement_driver.query_pending_desired_results().all()
        if len(drs) == 0:
            self.search_driver.external_main_generation()
            drs = self.measurement_driver.query_pending_desired_results().all()
            if len(drs) == 0:
                return []
        for dr in drs:
            self.measurement_driver.claim_desired_result(dr)
            dr.limit = self.measurement_driver.run_time_limit(dr)
        return drs

    def report_result(self, desired_result, result, result_input=None):
        self.measurement_driver.report_result(desired_result, result, result_input)

    def get_best_configuration(self):
        try:
            return self.search_driver.best_result.configuration.data
        except AttributeError:
            return None
MIT License
magenta/magenta
magenta/models/score2perf/music_encoders.py
MidiPerformanceEncoder.decode
python
def decode(self, ids, strip_extraneous=False):
    ns = self.decode_to_note_sequence(ids, strip_extraneous=strip_extraneous)
    _, tmp_file_path = tempfile.mkstemp('_decode.mid')
    note_seq.sequence_proto_to_midi_file(ns, tmp_file_path)
    return tmp_file_path
Transform a sequence of event indices into a performance MIDI file. Args: ids: List of performance event indices. strip_extraneous: Whether to strip EOS and padding from the end of `ids`. Returns: Path to the temporary file where the MIDI was saved.
https://github.com/magenta/magenta/blob/be6558f1a06984faff6d6949234f5fe9ad0ffdb5/magenta/models/score2perf/music_encoders.py#L180-L195
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tempfile import note_seq from note_seq import performance_lib import pygtrie from tensor2tensor.data_generators import text_encoder CHORD_SYMBOL = note_seq.NoteSequence.TextAnnotation.CHORD_SYMBOL class MidiPerformanceEncoder(object): def __init__(self, steps_per_second, num_velocity_bins, min_pitch, max_pitch, add_eos=False, ngrams=None): self._steps_per_second = steps_per_second self._num_velocity_bins = num_velocity_bins self._add_eos = add_eos self._ngrams = ngrams or [] for ngram in self._ngrams: if len(ngram) < 2: raise ValueError('All n-grams must have length at least 2.') if any(i < self.num_reserved_ids for i in ngram): raise ValueError('N-grams cannot contain reserved IDs.') self._encoding = note_seq.PerformanceOneHotEncoding( num_velocity_bins=num_velocity_bins, max_shift_steps=steps_per_second, min_pitch=min_pitch, max_pitch=max_pitch) ngram_ids = range(self.unigram_vocab_size, self.unigram_vocab_size + len(self._ngrams)) self._ngrams_trie = pygtrie.Trie(zip(self._ngrams, ngram_ids)) self._ngrams_trie.update(zip([(i,) for i in range(self.unigram_vocab_size)], range(self.unigram_vocab_size))) @property def num_reserved_ids(self): return text_encoder.NUM_RESERVED_TOKENS def encode_note_sequence(self, ns): performance = note_seq.Performance( note_seq.quantize_note_sequence_absolute(ns, self._steps_per_second), num_velocity_bins=self._num_velocity_bins) event_ids = [self._encoding.encode_event(event) + self.num_reserved_ids for event in performance] ids = [] j = 0 while j < len(event_ids): ngram = () for i in range(j, len(event_ids)): ngram += (event_ids[i],) if self._ngrams_trie.has_key(ngram): best_ngram = ngram if not self._ngrams_trie.has_subtrie(ngram): break ids.append(self._ngrams_trie[best_ngram]) j += len(best_ngram) if self._add_eos: ids.append(text_encoder.EOS_ID) return ids def encode(self, s): if s: ns = note_seq.midi_file_to_sequence_proto(s) else: ns = note_seq.NoteSequence() return self.encode_note_sequence(ns) def decode_to_note_sequence(self, ids, strip_extraneous=False): if strip_extraneous: ids = text_encoder.strip_ids(ids, list(range(self.num_reserved_ids))) event_ids = [] for i in ids: if i >= self.unigram_vocab_size: event_ids += self._ngrams[i - self.unigram_vocab_size] else: event_ids.append(i) performance = note_seq.Performance( quantized_sequence=None, steps_per_second=self._steps_per_second, num_velocity_bins=self._num_velocity_bins) for i in event_ids: performance.append(self._encoding.decode_event(i - self.num_reserved_ids)) ns = performance.to_sequence() return ns
Apache License 2.0
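An end-to-end sketch of encode followed by decode; the constructor arguments are typical score2perf-style settings chosen for illustration, and 'input.mid' is a placeholder path for an existing MIDI file.

from magenta.models.score2perf.music_encoders import MidiPerformanceEncoder

encoder = MidiPerformanceEncoder(
    steps_per_second=100, num_velocity_bins=32, min_pitch=21, max_pitch=108)
ids = encoder.encode('input.mid')   # placeholder path: MIDI file -> performance event indices
midi_path = encoder.decode(ids)     # event indices -> temporary .mid file
print(midi_path)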
yangheng95/lcf-atepc
utils/data_utils.py
convert_examples_to_features
python
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    label_map = {label: i for i, label in enumerate(label_list, 1)}
    features = []
    for (ex_index, example) in enumerate(examples):
        text_spc_tokens = example.text_a
        aspect_tokens = example.text_b
        sentence_label = example.sentence_label
        aspect_label = example.aspect_label
        polaritiylist = example.polarity
        tokens = []
        labels = []
        polarities = []
        valid = []
        label_mask = []
        text_spc_tokens.extend(['[SEP]'])
        text_spc_tokens.extend(aspect_tokens)
        enum_tokens = text_spc_tokens
        sentence_label.extend(['[SEP]'])
        sentence_label.extend(aspect_label)
        label_lists = sentence_label
        for i, word in enumerate(enum_tokens):
            token = tokenizer.tokenize(word)
            tokens.extend(token)
            label_1 = label_lists[i]
            polarity_1 = polaritiylist[i]
            for m in range(len(token)):
                if m == 0:
                    labels.append(label_1)
                    polarities.append(polarity_1)
                    valid.append(1)
                    label_mask.append(1)
                else:
                    valid.append(0)
        if len(tokens) >= max_seq_length - 1:
            tokens = tokens[0:(max_seq_length - 2)]
            polarities = polarities[0:(max_seq_length - 2)]
            labels = labels[0:(max_seq_length - 2)]
            valid = valid[0:(max_seq_length - 2)]
            label_mask = label_mask[0:(max_seq_length - 2)]
        ntokens = []
        segment_ids = []
        label_ids = []
        ntokens.append("[CLS]")
        segment_ids.append(0)
        valid.insert(0, 1)
        label_mask.insert(0, 1)
        label_ids.append(label_map["[CLS]"])
        for i, token in enumerate(tokens):
            ntokens.append(token)
            segment_ids.append(0)
            if len(labels) > i:
                label_ids.append(label_map[labels[i]])
        ntokens.append("[SEP]")
        segment_ids.append(0)
        valid.append(1)
        label_mask.append(1)
        label_ids.append(label_map["[SEP]"])
        input_ids_spc = tokenizer.convert_tokens_to_ids(ntokens)
        input_mask = [1] * len(input_ids_spc)
        label_mask = [1] * len(label_ids)
        while len(input_ids_spc) < max_seq_length:
            input_ids_spc.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            label_ids.append(0)
            valid.append(1)
            label_mask.append(0)
        while len(label_ids) < max_seq_length:
            label_ids.append(0)
            label_mask.append(0)
        while len(polarities) < max_seq_length:
            polarities.append(-1)
        assert len(input_ids_spc) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        assert len(valid) == max_seq_length
        assert len(label_mask) == max_seq_length
        features.append(
            InputFeatures(input_ids_spc=input_ids_spc,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_ids,
                          polarities=polarities,
                          valid_ids=valid,
                          label_mask=label_mask))
    return features
Loads a data file into a list of `InputBatch`s.
https://github.com/yangheng95/lcf-atepc/blob/4d05004a3ceabaa48753e55d458015dfc8f5ab17/utils/data_utils.py#L188-L300
import os class InputExample(object): def __init__(self, guid, text_a, text_b=None, sentence_label=None, aspect_label=None, polarity=None): self.guid = guid self.text_a = text_a self.text_b = text_b self.sentence_label = sentence_label self.aspect_label = aspect_label self.polarity = polarity class InputFeatures(object): def __init__(self, input_ids_spc, input_mask, segment_ids, label_id, polarities=None, valid_ids=None, label_mask=None): self.input_ids_spc = input_ids_spc self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.valid_ids = valid_ids self.label_mask = label_mask self.polarities = polarities def readfile(filename): f = open(filename, encoding='utf8') data = [] sentence = [] tag= [] polarity = [] for line in f: if len(line)==0 or line.startswith('-DOCSTART') or line[0 ]=="\n": if len(sentence) > 0: data.append((sentence, tag, polarity)) sentence = [] tag = [] polarity = [] continue splits = line.split(' ') if len(splits) != 3: print('warning! detected error line(s) in input file:{}'.format(line)) sentence.append(splits[0]) tag.append(splits[-2]) polarity.append(int(splits[-1][:-1])) if len(sentence) > 0: data.append((sentence, tag, polarity)) return data class DataProcessor(object): def get_train_examples(self, data_dir): raise NotImplementedError() def get_dev_examples(self, data_dir): raise NotImplementedError() def get_labels(self): raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): return readfile(input_file) class ATEPCProcessor(DataProcessor): def get_train_examples(self, data_dir): if 'laptop' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "Laptops.atepc.train.dat")), "train") elif 'rest' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "Restaurants.atepc.train.dat")), "train") elif 'twitter' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "twitter.atepc.train.dat")), "train") elif 'car' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "car.atepc.train.dat")), "train") elif 'phone' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "phone.atepc.train.dat")), "train") elif 'camera' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "camera.atepc.train.dat")), "train") elif 'notebook' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "notebook.atepc.train.dat")), "train") elif 'mixed' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "mixed.atepc.train.dat")), "train") def get_test_examples(self, data_dir): if 'laptop' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "Laptops.atepc.test.dat")), "test") elif 'rest' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "Restaurants.atepc.test.dat")), "test") elif 'twitter' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "twitter.atepc.test.dat")), "test") elif 'car' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "car.atepc.test.dat")), "test") elif 'phone' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "phone.atepc.test.dat")), "test") elif 'camera' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "camera.atepc.test.dat")), "test") elif 'notebook' in data_dir: return self._create_examples( 
self._read_tsv(os.path.join(data_dir, "notebook.atepc.test.dat")), "test") elif 'mixed' in data_dir: return self._create_examples( self._read_tsv(os.path.join(data_dir, "mixed.atepc.test.dat")), "test") def get_labels(self): return ["O", "B-ASP", "I-ASP", "[CLS]", "[SEP]"] def _create_examples(self, lines, set_type): examples = [] for i, (sentence, tag, polarity) in enumerate(lines): aspect = [] aspect_tag = [] aspect_polarity = [-1] for w, t, p in zip(sentence, tag, polarity): if p != -1: aspect.append(w) aspect_tag.append(t) aspect_polarity.append(-1) guid = "%s-%s" % (set_type, i) text_a = sentence text_b = aspect polarity.extend(aspect_polarity) examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, sentence_label=tag, aspect_label=aspect_tag, polarity=polarity)) return examples
MIT License
inmanta/inmanta-core
src/inmanta/server/protocol.py
Server.add_slice
python
def add_slice(self, slice: "ServerSlice") -> None:
    self._slices[slice.name] = slice
    self._slice_sequence = None
Add new endpoints to this REST transport.
https://github.com/inmanta/inmanta-core/blob/7e57295314e30276204b74ddcb8e2402c0a50b19/src/inmanta/server/protocol.py#L102-L107
import asyncio import logging import socket import time import uuid from collections import defaultdict from typing import TYPE_CHECKING, Callable, Coroutine, Dict, List, Optional, Sequence, Set, Tuple, Union import importlib_metadata from tornado import gen, queues, routing, web from tornado.ioloop import IOLoop import inmanta.protocol.endpoints from inmanta import config as inmanta_config from inmanta.data.model import ExtensionStatus from inmanta.protocol import Client, common, endpoints, handle, methods from inmanta.protocol.exceptions import ShutdownInProgress from inmanta.protocol.rest import server from inmanta.server import SLICE_SESSION_MANAGER, SLICE_TRANSPORT from inmanta.server import config as opt from inmanta.types import ArgumentTypes, JsonType from inmanta.util import CycleException, Scheduler, TaskHandler, stable_depth_first if TYPE_CHECKING: from inmanta.server.extensions import Feature, FeatureManager LOGGER = logging.getLogger(__name__) class ServerStartFailure(Exception): pass class SliceStartupException(ServerStartFailure): def __init__(self, slice_name: str, cause: Exception): super(SliceStartupException, self).__init__() self.__cause__ = cause self.in_slice = slice_name def __str__(self) -> str: return f"Slice {self.in_slice} failed to start because: {str(self.__cause__)}" class ReturnClient(Client): def __init__(self, name: str, session: "Session") -> None: super().__init__(name, with_rest_client=False) self.session = session async def _call( self, method_properties: common.MethodProperties, args: List[object], kwargs: Dict[str, object] ) -> common.Result: call_spec = method_properties.build_call(args, kwargs) try: if method_properties.timeout: return_value = await self.session.put_call(call_spec, timeout=method_properties.timeout) else: return_value = await self.session.put_call(call_spec) except asyncio.CancelledError: return common.Result(code=500, result={"message": "Call timed out"}) return common.Result(code=return_value["code"], result=return_value["result"]) class Server(endpoints.Endpoint): def __init__(self, connection_timout: int = 120) -> None: super().__init__("server") self._slices: Dict[str, ServerSlice] = {} self._slice_sequence: Optional[List[ServerSlice]] = None self._handlers: List[routing.Rule] = [] self.token: Optional[str] = inmanta_config.Config.get(self.id, "token", None) self.connection_timout = connection_timout self.sessions_handler = SessionManager() self.add_slice(self.sessions_handler) self._transport = server.RESTServer(self.sessions_handler, self.id) self.add_slice(TransportSlice(self)) self.running = False
Apache License 2.0
yfauser/planespotter
app-server/app/lib/python2.7/site-packages/sqlalchemy/testing/requirements.py
SuiteRequirements.check_constraint_reflection
python
def check_constraint_reflection(self):
    return exclusions.closed()
target dialect supports reflection of check constraints
https://github.com/yfauser/planespotter/blob/d400216502b6b5592a4889eb9fa277b2ddb75f9b/app-server/app/lib/python2.7/site-packages/sqlalchemy/testing/requirements.py#L433-L435
import sys from . import exclusions from .. import util class Requirements(object): pass class SuiteRequirements(Requirements): @property def create_table(self): return exclusions.open() @property def drop_table(self): return exclusions.open() @property def foreign_keys(self): return exclusions.open() @property def on_update_cascade(self): return exclusions.open() @property def non_updating_cascade(self): return exclusions.closed() @property def deferrable_fks(self): return exclusions.closed() @property def on_update_or_deferrable_fks(self): return exclusions.only_if( lambda: self.on_update_cascade.enabled or self.deferrable_fks.enabled ) @property def self_referential_foreign_keys(self): return exclusions.open() @property def foreign_key_ddl(self): return exclusions.open() @property def named_constraints(self): return exclusions.open() @property def subqueries(self): return exclusions.open() @property def offset(self): return exclusions.open() @property def bound_limit_offset(self): return exclusions.open() @property def parens_in_union_contained_select_w_limit_offset(self): return exclusions.open() @property def parens_in_union_contained_select_wo_limit_offset(self): return exclusions.open() @property def boolean_col_expressions(self): return exclusions.closed() @property def nullsordering(self): return exclusions.closed() @property def standalone_binds(self): return exclusions.closed() @property def intersect(self): return exclusions.closed() @property def except_(self): return exclusions.closed() @property def window_functions(self): return exclusions.closed() @property def ctes(self): return exclusions.closed() @property def ctes_on_dml(self): return exclusions.closed() @property def autoincrement_insert(self): return exclusions.open() @property def fetch_rows_post_commit(self): return exclusions.open() @property def sane_rowcount(self): return exclusions.skip_if( lambda config: not config.db.dialect.supports_sane_rowcount, "driver doesn't support 'sane' rowcount" ) @property def sane_multi_rowcount(self): return exclusions.fails_if( lambda config: not config.db.dialect.supports_sane_multi_rowcount, "driver %(driver)s %(doesnt_support)s 'sane' multi row count" ) @property def sane_rowcount_w_returning(self): return exclusions.fails_if( lambda config: not config.db.dialect.supports_sane_rowcount_returning, "driver doesn't support 'sane' rowcount when returning is on" ) @property def empty_inserts(self): return exclusions.only_if( lambda config: config.db.dialect.supports_empty_insert or config.db.dialect.supports_default_values, "empty inserts not supported" ) @property def insert_from_select(self): return exclusions.open() @property def returning(self): return exclusions.only_if( lambda config: config.db.dialect.implicit_returning, "%(database)s %(does_support)s 'returning'" ) @property def tuple_in(self): return exclusions.closed() @property def duplicate_names_in_cursor_description(self): return exclusions.open() @property def denormalized_names(self): return exclusions.skip_if( lambda config: not config.db.dialect.requires_name_normalize, "Backend does not require denormalized names." ) @property def multivalues_inserts(self): return exclusions.skip_if( lambda config: not config.db.dialect.supports_multivalues_insert, "Backend does not support multirow inserts." 
) @property def implements_get_lastrowid(self): return exclusions.open() @property def emulated_lastrowid(self): return exclusions.closed() @property def dbapi_lastrowid(self): return exclusions.closed() @property def views(self): return exclusions.closed() @property def schemas(self): return exclusions.closed() @property def server_side_cursors(self): return exclusions.only_if([ lambda config: config.db.dialect.supports_server_side_cursors ], "no server side cursors support") @property def sequences(self): return exclusions.only_if([ lambda config: config.db.dialect.supports_sequences ], "no sequence support") @property def sequences_optional(self): return exclusions.only_if([ lambda config: config.db.dialect.supports_sequences and config.db.dialect.sequences_optional ], "no sequence support, or sequences not optional") @property def reflects_pk_names(self): return exclusions.closed() @property def table_reflection(self): return exclusions.open() @property def comment_reflection(self): return exclusions.closed() @property def view_column_reflection(self): return self.views @property def view_reflection(self): return self.views @property def schema_reflection(self): return self.schemas @property def primary_key_constraint_reflection(self): return exclusions.open() @property def foreign_key_constraint_reflection(self): return exclusions.open() @property def foreign_key_constraint_option_reflection(self): return exclusions.closed() @property def temp_table_reflection(self): return exclusions.open() @property def temp_table_names(self): return exclusions.closed() @property def temporary_tables(self): return exclusions.open() @property def temporary_views(self): return exclusions.closed() @property def index_reflection(self): return exclusions.open() @property def unique_constraint_reflection(self): return exclusions.open() @property
MIT License
astooke/rlpyt
rlpyt/spaces/composite.py
Composite.null_value
python
def null_value(self):
    return self._NamedTupleCls(*(s.null_value() for s in self._spaces))
Return a null value which is a named tuple composed of null values from all sub-spaces.
https://github.com/astooke/rlpyt/blob/f04f23db1eb7b5915d88401fca67869968a07a37/rlpyt/spaces/composite.py#L24-L27
from rlpyt.spaces.base import Space


class Composite(Space):

    def __init__(self, spaces, NamedTupleCls):
        self._spaces = spaces
        self._NamedTupleCls = NamedTupleCls

    def sample(self):
        return self._NamedTupleCls(*(s.sample() for s in self._spaces))
MIT License
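A minimal usage sketch for the Composite.null_value record above, assuming rlpyt is importable. The ScalarSpace stub is hypothetical (not part of rlpyt); it only supplies the sample()/null_value() methods that Composite delegates to, so the sketch runs without the rest of the library.

from collections import namedtuple

from rlpyt.spaces.composite import Composite


class ScalarSpace:
    """Hypothetical stand-in for an rlpyt Space with fixed sample/null values."""

    def __init__(self, value):
        self._value = value

    def sample(self):
        return self._value

    def null_value(self):
        return 0


Observation = namedtuple("Observation", ["position", "velocity"])

space = Composite([ScalarSpace(1.0), ScalarSpace(2.0)], Observation)
print(space.sample())      # Observation(position=1.0, velocity=2.0)
print(space.null_value())  # Observation(position=0, velocity=0)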
terrycain/aioboto3
aioboto3/s3/cse.py
AsymmetricCryptoContext.from_der_private_key
python
def from_der_private_key(data: bytes, password: Optional[str] = None) -> _RSAPrivateKey:
    return serialization.load_der_private_key(data, password, default_backend())
Convert private key in DER encoding to a Private key object

:param data: private key bytes
:param password: password the private key is encrypted with
https://github.com/terrycain/aioboto3/blob/c413766fe9cd95a0889d80eb5099d4296ebeaa1a/aioboto3/s3/cse.py#L149-L156
import asyncio import base64 import json import inspect import os import re import sys import struct from io import BytesIO from typing import Dict, Union, IO, Optional, Any, Tuple import aioboto3 from cryptography.hazmat.primitives.ciphers.aead import AESGCM from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import Cipher from cryptography.hazmat.primitives.ciphers.algorithms import AES from cryptography.hazmat.primitives.ciphers.modes import CBC, CTR, ECB from cryptography.hazmat.primitives.padding import PKCS7 from cryptography.exceptions import InvalidTag from cryptography.hazmat.backends.openssl.rsa import _RSAPrivateKey, _RSAPublicKey from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives import serialization RANGE_REGEX = re.compile(r'bytes=(?P<start>\d+)-(?P<end>\d+)*') AES_BLOCK_SIZE = 128 AES_BLOCK_SIZE_BYTES = 16 JAVA_LONG_MAX_VALUE = 9223372036854775807 class DummyAIOFile(object): def __init__(self, data: bytes): self.file = BytesIO(data) async def read(self, n=-1): return self.file.read(n) async def readany(self): return self.file.read() async def readexactly(self, n): return self.file.read(n) async def readchunk(self): return self.file.read(), True class DecryptError(Exception): pass class CryptoContext(object): async def setup(self): pass async def close(self): async def get_decryption_aes_key(self, key: bytes, material_description: Dict[str, Any]) -> bytes: raise NotImplementedError() async def get_encryption_aes_key(self) -> Tuple[bytes, Dict[str, str], str]: raise NotImplementedError() class AsymmetricCryptoContext(CryptoContext): def __init__(self, public_key: Optional[_RSAPublicKey] = None, private_key: Optional[_RSAPrivateKey] = None, loop: Optional[asyncio.AbstractEventLoop] = None): self.public_key = public_key self.private_key = private_key self._loop = loop if not loop: self._loop = asyncio.get_event_loop() async def get_decryption_aes_key(self, key: bytes, material_description: Dict[str, Any]) -> bytes: if self.private_key is None: raise ValueError('Private key not provided during initialisation, cannot decrypt key encrypting key') plaintext = await self._loop.run_in_executor(None, lambda: (self.private_key.decrypt(key, padding.PKCS1v15()))) return plaintext async def get_encryption_aes_key(self) -> Tuple[bytes, Dict[str, str], str]: if self.public_key is None: raise ValueError('Public key not provided during initialisation, cannot encrypt key encrypting key') random_bytes = os.urandom(32) ciphertext = await self._loop.run_in_executor( None, lambda: (self.public_key.encrypt(random_bytes, padding.PKCS1v15()))) return random_bytes, {}, base64.b64encode(ciphertext).decode() @staticmethod def from_der_public_key(data: bytes) -> _RSAPublicKey: return serialization.load_der_public_key(data, default_backend()) @staticmethod
Apache License 2.0
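A small sketch exercising the static from_der_private_key helper in the record above, assuming aioboto3 and a cryptography version compatible with that module are installed. The RSA key is generated locally only to produce DER bytes; nothing touches S3.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

from aioboto3.s3.cse import AsymmetricCryptoContext

# Generate a throwaway RSA key and serialize it to unencrypted DER (PKCS8).
key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                               backend=default_backend())
der_bytes = key.private_bytes(
    encoding=serialization.Encoding.DER,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)

# Load it back through the helper; password stays None for an unencrypted key.
private_key = AsymmetricCryptoContext.from_der_private_key(der_bytes)
print(private_key.key_size)  # 2048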
ngageoint/sarpy
sarpy/io/complex/gff.py
_BlockHeader_2.__init__
python
def __init__(self, fi, estr):
    self.name = _get_string(fi.read(16))
    self.major_version, self.minor_version = struct.unpack(estr+'HH', fi.read(2*2))
    what0 = fi.read(4)
    self.size = struct.unpack(estr+'I', fi.read(4))[0]
    what1 = fi.read(4)
    if (self.version == '2.0' and self.size == 64) or (self.version == '1.0' and self.size == 52):
        self.name = 'RADARINFO'
Parameters
----------
fi : BinaryIO
estr : str
    The endianness string for format interpretation, one of `['<', '>']`
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/io/complex/gff.py#L456-L472
__classification__ = "UNCLASSIFIED" __author__ = "Thomas McCullough" import logging import os import struct from typing import Union, BinaryIO from datetime import datetime from tempfile import mkstemp import zlib import gc import numpy from scipy.constants import speed_of_light from sarpy.compliance import int_func, string_types from sarpy.io.general.base import BaseReader, BIPChipper, BSQChipper, is_file_like, SarpyIOError from sarpy.io.general.nitf import MemMap from sarpy.geometry.geocoords import geodetic_to_ecf, wgs_84_norm, ned_to_ecf from sarpy.io.complex.base import SICDTypeReader from sarpy.io.complex.sicd_elements.SICD import SICDType from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType, RadarModeType from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType from sarpy.io.complex.sicd_elements.ImageData import ImageDataType from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, WaveformParametersType, ChanParametersType from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, RcvChanProcType from sarpy.io.complex.sicd_elements.Radiometric import RadiometricType, NoiseLevelType_ try: import PIL except ImportError: PIL = None logger = logging.getLogger(__name__) def is_a(file_name): if is_file_like(file_name): return None try: gff_details = GFFDetails(file_name) logger.info('File {} is determined to be a GFF version {} file.'.format( file_name, gff_details.version)) return GFFReader(gff_details) except SarpyIOError: return None def _get_string(bytes_in): bytes_in = bytes_in.replace(b'\x00', b'') return bytes_in.decode('utf-8') def _rescale_float(int_in, scale): return float(int_in)/scale class _GFFHeader_1_6(object): def __init__(self, fi, estr): self.file_object = fi self.estr = estr self.version = '1.6' fi.seek(12, os.SEEK_SET) self.header_length = struct.unpack(estr+'I', fi.read(4))[0] if self.header_length < 952: raise ValueError( 'The provided header is apparently too short to be a version 1.6 GFF header') fi.read(2) self.creator = _get_string(fi.read(24)) self.date_time = struct.unpack(estr+'6H', fi.read(6*2)) fi.read(2) self.bytes_per_pixel, self.frame_count, self.image_type, self.row_major, self.range_count, self.azimuth_count = struct.unpack(estr+'6I', fi.read(6*4)) self.scale_exponent, self.scale_mantissa, self.offset_exponent, self.offset_mantissa = struct.unpack(estr+'4i', fi.read(4*4)) fi.read(2) self.comment = _get_string(fi.read(166)) self.image_plane = struct.unpack(estr+'I', fi.read(4))[0] range_pixel_size, azimuth_pixel_size, azimuth_overlap = struct.unpack(estr+'3I', fi.read(3*4)) self.range_pixel_size = _rescale_float(range_pixel_size, 1 << 16) self.azimuth_pixel_size = _rescale_float(azimuth_pixel_size, 1 << 16) self.azimuth_overlap = _rescale_float(azimuth_overlap, 1 << 16) srp_lat, srp_lon, srp_alt, rfoa, x_to_srp = struct.unpack(estr+'5i', fi.read(5*4)) self.srp_lat = _rescale_float(srp_lat, 1 << 23) self.srp_lon = _rescale_float(srp_lon, 1 << 23) self.srp_alt = _rescale_float(srp_alt, 1 << 16) self.rfoa = _rescale_float(rfoa, 1 << 23) self.x_to_srp = _rescale_float(x_to_srp, 1 << 16) fi.read(2) self.phase_name = _get_string(fi.read(128)) fi.read(2) self.image_name = 
_get_string(fi.read(128)) self.look_count, self.param_ref_ap, self.param_ref_pos = struct.unpack(estr+'3I', fi.read(3*4)) graze_angle, squint, gta, range_beam_ctr, flight_time = struct.unpack(estr + 'I2i2I', fi.read(5*4)) self.graze_angle = _rescale_float(graze_angle, 1 << 23) self.squint = _rescale_float(squint, 1 << 23) self.gta = _rescale_float(gta, 1 << 23) self.range_beam_ctr = _rescale_float(range_beam_ctr, 1 << 8) self.flight_time = _rescale_float(flight_time, 1000) self.range_chirp_rate, x_to_start, self.mo_comp_mode, v_x = struct.unpack(estr+'fi2I', fi.read(4*4)) self.x_to_start = _rescale_float(x_to_start, 1 << 16) self.v_x = _rescale_float(v_x, 1 << 16) apc_lat, apc_lon, apc_alt = struct.unpack(estr+'3i', fi.read(3*4)) self.apc_lat = _rescale_float(apc_lat, 1 << 23) self.apc_lon = _rescale_float(apc_lon, 1 << 23) self.apc_alt = _rescale_float(apc_alt, 1 << 16) cal_parm, self.logical_block_address = struct.unpack(estr+'2I', fi.read(2*4)) self.cal_parm = _rescale_float(cal_parm, 1 << 24) az_resolution, range_resolution = struct.unpack(estr+'2I', fi.read(2*4)) self.az_resolution = _rescale_float(az_resolution, 1 << 16) self.range_resolution = _rescale_float(range_resolution, 1 << 16) des_sigma_n, des_graze, des_squint, des_range, scene_track_angle = struct.unpack(estr+'iIiIi', fi.read(5*4)) self.des_sigma_n = _rescale_float(des_sigma_n, 1 << 23) self.des_graze = _rescale_float(des_graze, 1 << 23) self.des_squint = _rescale_float(des_squint, 1 << 23) self.des_range = _rescale_float(des_range, 1 << 8) self.scene_track_angle = _rescale_float(scene_track_angle, 1 << 23) self.user_param = fi.read(48) self.coarse_snr, self.coarse_azimuth_sub, self.coarse_range_sub, self.max_azimuth_shift, self.max_range_shift, self.coarse_delta_azimuth, self.coarse_delta_range = struct.unpack(estr+'7i', fi.read(7*4)) self.tot_procs, self.tpt_box_cmode, self.snr_thresh, self.range_size, self.map_box_size, self.box_size, self.box_spc, self.tot_tpts, self.good_tpts, self.range_seed, self.range_shift, self.azimuth_shift = struct.unpack(estr+'12i', fi.read(12*4)) self.sum_x_ramp, self.sum_y_ramp = struct.unpack(estr+'2i', fi.read(2*4)) self.cy9k_tape_block, self.nominal_center_frequency = struct.unpack(estr+'If', fi.read(2*4)) self.image_flags, self.line_number, self.patch_number = struct.unpack(estr+'3I', fi.read(3*4)) self.lambda0, self.srange_pix_space = struct.unpack(estr+'2f', fi.read(2*4)) self.dopp_pix_space, self.dopp_offset, self.dopp_range_scale, self.mux_time_delay = struct.unpack(estr+'4f', fi.read(4*4)) self.apc_ecef = struct.unpack(estr+'3d', fi.read(3*8)) self.vel_ecef = struct.unpack(estr+'3f', fi.read(3*4)) self.phase_cal = struct.unpack(estr+'f', fi.read(4))[0] self.srp_ecef = struct.unpack(estr+'3d', fi.read(3*8)) self.res5 = fi.read(64) class _Radar_1_8(object): def __init__(self, the_bytes, estr): if not (isinstance(the_bytes, bytes) and len(the_bytes) == 76): raise ValueError('Incorrect length input') self.platform = _get_string(the_bytes[:24]) self.proc_id = _get_string(the_bytes[24:36]) self.radar_model = _get_string(the_bytes[36:48]) self.radar_id = struct.unpack(estr+'I', the_bytes[48:52])[0] self.swid = _get_string(the_bytes[52:76]) class _GFFHeader_1_8(object): def __init__(self, fi, estr): self.file_object = fi self.estr = estr self.version = '1.8' fi.seek(12, os.SEEK_SET) self.header_length = struct.unpack(estr+'I', fi.read(4))[0] if self.header_length < 2040: raise ValueError( 'The provided header is apparently too short to be a version 1.8 GFF header') fi.read(2) self.creator = 
_get_string(fi.read(24)) self.date_time = struct.unpack(estr+'6H', fi.read(6*2)) fi.read(2) self.bytes_per_pixel = int_func(struct.unpack(estr+'f', fi.read(4))[0]) self.frame_count, self.image_type, self.row_major, self.range_count, self.azimuth_count = struct.unpack(estr+'5I', fi.read(5*4)) self.scale_exponent, self.scale_mantissa, self.offset_exponent, self.offset_mantissa = struct.unpack(estr+'4i', fi.read(4*4)) self.res1 = fi.read(32) fi.read(2) self.comment = _get_string(fi.read(166)) self.image_plane = struct.unpack(estr+'I', fi.read(4))[0] range_pixel_size, azimuth_pixel_size, azimuth_overlap = struct.unpack(estr+'3I', fi.read(3*4)) self.range_pixel_size = _rescale_float(range_pixel_size, 1 << 16) self.azimuth_pixel_size = _rescale_float(azimuth_pixel_size, 1 << 16) self.azimuth_overlap = _rescale_float(azimuth_overlap, 1 << 16) srp_lat, srp_lon, srp_alt, rfoa, x_to_srp = struct.unpack(estr+'5i', fi.read(5*4)) self.srp_lat = _rescale_float(srp_lat, 1 << 23) self.srp_lon = _rescale_float(srp_lon, 1 << 23) self.srp_alt = _rescale_float(srp_alt, 1 << 16) self.rfoa = _rescale_float(rfoa, 1 << 23) self.x_to_srp = _rescale_float(x_to_srp, 1 << 16) self.res2 = fi.read(32) fi.read(2) self.phase_name = _get_string(fi.read(128)) fi.read(2) self.image_name = _get_string(fi.read(128)) self.look_count, self.param_ref_ap, self.param_ref_pos = struct.unpack(estr + '3I', fi.read(3*4)) graze_angle, squint, gta, range_beam_ctr, flight_time = struct.unpack(estr + 'I2i2I', fi.read(5*4)) self.graze_angle = _rescale_float(graze_angle, 1 << 23) self.squint = _rescale_float(squint, 1 << 23) self.gta = _rescale_float(gta, 1 << 23) self.range_beam_ctr = _rescale_float(range_beam_ctr, 1 << 8) self.flight_time = _rescale_float(flight_time, 1000) self.range_chirp_rate, x_to_start, self.mo_comp_mode, v_x = struct.unpack(estr + 'fi2I', fi.read(4*4)) self.x_to_start = _rescale_float(x_to_start, 1 << 16) self.v_x = _rescale_float(v_x, 1 << 16) apc_lat, apc_lon, apc_alt = struct.unpack(estr + '3i', fi.read(3*4)) self.apc_lat = _rescale_float(apc_lat, 1 << 23) self.apc_lon = _rescale_float(apc_lon, 1 << 23) self.apc_alt = _rescale_float(apc_alt, 1 << 16) cal_parm, self.logical_block_address = struct.unpack(estr + '2I', fi.read(2*4)) self.cal_parm = _rescale_float(cal_parm, 1 << 24) az_resolution, range_resolution = struct.unpack(estr + '2I', fi.read(2*4)) self.az_resolution = _rescale_float(az_resolution, 1 << 16) self.range_resolution = _rescale_float(range_resolution, 1 << 16) des_sigma_n, des_graze, des_squint, des_range, scene_track_angle = struct.unpack(estr + 'iIiIi', fi.read(5*4)) self.des_sigma_n = _rescale_float(des_sigma_n, 1 << 23) self.des_graze = _rescale_float(des_graze, 1 << 23) self.des_squint = _rescale_float(des_squint, 1 << 23) self.des_range = _rescale_float(des_range, 1 << 8) self.scene_track_angle = _rescale_float(scene_track_angle, 1 << 23) self.user_param = fi.read(48) self.coarse_snr, self.coarse_azimuth_sub, self.coarse_range_sub, self.max_azimuth_shift, self.max_range_shift, self.coarse_delta_azimuth, self.coarse_delta_range = struct.unpack(estr + '7i', fi.read(7*4)) self.tot_procs, self.tpt_box_cmode, self.snr_thresh, self.range_size, self.map_box_size, self.box_size, self.box_spc, self.tot_tpts, self.good_tpts, self.range_seed, self.range_shift, self.azimuth_shift = struct.unpack(estr + '12i', fi.read(12*4)) self.sum_x_ramp, self.sum_y_ramp = struct.unpack(estr + '2i', fi.read(2*4)) self.cy9k_tape_block, self.nominal_center_frequency = struct.unpack(estr + 'If', fi.read(2*4)) 
self.image_flags, self.line_number, self.patch_number = struct.unpack(estr + '3I', fi.read(3*4)) self.lambda0, self.srange_pix_space = struct.unpack(estr + '2f', fi.read(2*4)) self.dopp_pix_space, self.dopp_offset, self.dopp_range_scale, self.mux_time_delay = struct.unpack(estr + '4f', fi.read(4*4)) self.apc_ecef = struct.unpack(estr+'3d', fi.read(3*8)) self.vel_ecef = struct.unpack(estr+'3f', fi.read(3*4)) self.phase_cal = struct.unpack(estr+'f', fi.read(4))[0] self.srp_ecef = struct.unpack(estr+'3d', fi.read(3*8)) self.res5 = fi.read(64) self.header_length1 = struct.unpack(estr+'I', fi.read(4))[0] self.image_date = struct.unpack(estr+'6H', fi.read(6*2)) self.comp_file_name = _get_string(fi.read(128)) self.ref_file_name = _get_string(fi.read(128)) self.IE = _Radar_1_8(fi.read(76), estr) self.IF = _Radar_1_8(fi.read(76), estr) self.if_algo = _get_string(fi.read(8)) self.PH = _Radar_1_8(fi.read(76), estr) self.ph_data_rcd, self.proc_product = struct.unpack(estr+'2i', fi.read(2*4)) self.mission_text = _get_string(fi.read(8)) self.ph_source, self.gps_week = struct.unpack(estr+'iI', fi.read(2*4)) self.data_collect_reqh = _get_string(fi.read(14)) self.res6 = fi.read(2) self.grid_name = _get_string(fi.read(24)) self.pix_val_linearity, self.complex_or_real, self.bits_per_magnitude, self.bits_per_phase = struct.unpack(estr+'2i2H', fi.read(2*4+2*2)) self.complex_order_type, self.pix_data_type, self.image_length, self.image_cmp_scheme = struct.unpack(estr+'4i', fi.read(4*4)) self.apbo, self.asa_pitch, self.asa_squint, self.dsa_pitch, self.ira = struct.unpack(estr+'5f', fi.read(5*4)) self.rx_polarization = struct.unpack(estr+'2f', fi.read(2*4)) self.tx_polarization = struct.unpack(estr+'2f', fi.read(2*4)) self.v_avg = struct.unpack(estr+'3f', fi.read(3*4)) self.apc_avg = struct.unpack(estr+'3f', fi.read(3*4)) self.averaging_time, self.dgta = struct.unpack(estr+'2f', fi.read(2*4)) velocity_y, velocity_z = struct.unpack(estr+'2I', fi.read(2*4)) self.velocity_y = _rescale_float(velocity_y, 1 << 16) self.velocity_z = _rescale_float(velocity_z, 1 << 16) self.ba, self.be = struct.unpack(estr+'2f', fi.read(2*4)) self.az_geom_corr, self.range_geom_corr, self.az_win_fac_bw, self.range_win_fac_bw = struct.unpack(estr+'2i2f', fi.read(4*4)) self.az_win_id = _get_string(fi.read(48)) self.range_win_id = _get_string(fi.read(48)) self.keep_out_viol_prcnt = struct.unpack(estr+'f', fi.read(4))[0] self.az_coeff = struct.unpack(estr+'6f', fi.read(6*4)) self.pos_uncert = struct.unpack(estr+'3f', fi.read(3*4)) self.nav_aiding_type = struct.unpack(estr+'i', fi.read(4))[0] self.two_dnl_phase_coeffs = struct.unpack(estr+'10f', fi.read(10*4)) self.clutter_snr_thresh = struct.unpack(estr+'f', fi.read(4))[0] self.elevation_coeff = struct.unpack(estr+'9f', fi.read(9*4)) self.monopulse_coeff = struct.unpack(estr+'12f', fi.read(12*4)) self.twist_pt_err_prcnt, self.tilt_pt_err_prcnt, self.az_pt_err_prcnt = struct.unpack(estr+'3f', fi.read(3*4)) sigma_n, self.take_num = struct.unpack(estr+'Ii', fi.read(2*4)) self.sigma_n = _rescale_float(sigma_n, 1 << 23) self.if_sar_flags = struct.unpack(estr+'5i', fi.read(5*4)) self.mu_threshold, self.gff_app_type = struct.unpack(estr+'fi', fi.read(2*4)) self.res7 = fi.read(8) class _BlockHeader_2(object):
MIT License
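The _BlockHeader_2.__init__ record above just walks a fixed binary layout with struct. A standalone sketch of the same parsing pattern over an in-memory buffer instead of a real GFF file; the field values and the 32-byte layout below are made up for illustration.

import io
import struct

estr = '<'  # little-endian, as the real reader would select from the file header

# Build a fake block header: 16-byte name, two uint16 versions,
# 4 reserved bytes, a uint32 size, 4 more reserved bytes.
buf = io.BytesIO()
buf.write(b'RADARINFO'.ljust(16, b'\x00'))
buf.write(struct.pack(estr + 'HH', 2, 0))
buf.write(b'\x00' * 4)
buf.write(struct.pack(estr + 'I', 64))
buf.write(b'\x00' * 4)
buf.seek(0)

name = buf.read(16).replace(b'\x00', b'').decode('utf-8')
major, minor = struct.unpack(estr + 'HH', buf.read(4))
buf.read(4)  # skip reserved bytes
size = struct.unpack(estr + 'I', buf.read(4))[0]
buf.read(4)  # skip reserved bytes
print(name, major, minor, size)  # RADARINFO 2 0 64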
opentoontowntools/openleveleditor
toontown/toontowngui/ToontownLoadingBlocker.py
ToontownLoadingBlocker.__createLoadingText
python
def __createLoadingText(self):
    self.loadingText = DirectLabel(
        parent = self,
        relief = None,
        guiId = 'BlockerLoadingText',
        pos = (0, 0, -0.2357),
        text = "Loading...",
        text_fg = (1, 1, 1, 1),
        text_scale = 0.055,
        textMayChange = 1,
        text_align = TextNode.ACenter,
        sortOrder = 50,
    )
    self.loadingTextList = TTLocalizer.BlockerLoadingTexts
    self.__changeLoadingText()
    taskMgr.doMethodLater(self.loadingTextChangeTimer,
                          self.__changeLoadingTextTask,
                          "changeLoadingTextTask")
Create a loading text. This will have crazy toony loading texts, irrelevant to what is being loaded.
https://github.com/opentoontowntools/openleveleditor/blob/250c0c9b8e4dde406a7a9b921db59deb7b960ca9/toontown/toontowngui/ToontownLoadingBlocker.py#L153-L178
from direct.directnotify import DirectNotifyGlobal from direct.gui.DirectGui import * from panda3d.core import TextNode from toontown.toonbase import ToontownGlobals from toontown.toonbase import TTLocalizer from toontown.toontowngui import TTDialog from otp.otpgui.OTPDialog import * from direct.interval.LerpInterval import LerpPosInterval, LerpScaleInterval, LerpFunc from direct.interval.IntervalGlobal import Sequence, Parallel, Func, Wait from direct.task import Task import random class ToontownLoadingBlocker(TTDialog.TTDialog): notify = DirectNotifyGlobal.directNotify.newCategory("ToontownLoadingBlocker") def __init__(self, avList): if not self.__shouldShowBlocker(avList): return TTDialog.TTDialog.__init__(self) gui = loader.loadModel("phase_3/models/gui/tt_m_gui_pat_mainGui") img = gui.find("**/tt_t_gui_pat_loadingPopup") self['image'] = img self['image_scale'] = (1, 0, 1) self['image_pos'] = (0, 0, -0.4) gui.removeNode() self.loadingTextChangeTimer = 10.0 self.loadingTextTimerVariant = 3.0 self.loadingTextFreezeTime = 3.0 self.hideBlockerIval = None self.canChangeLoadingText = True self.__setupLoadingBar() self.__createTitleText() self.__createToonTip() self.__createLoadingText() self.__showBlocker() self.accept("phaseComplete-4", self.__shrinkLoadingBar) self.accept("launcherPercentPhaseComplete", self.__update) def destroy(self): taskMgr.remove("changeLoadingTextTask") taskMgr.remove("canChangeLoadingTextTask") self.ignore("phaseComplete-4") self.ignore("launcherPercentPhaseComplete") self.__cleanupHideBlockerIval() self.title.destroy() self.title = None self.loadingText.destroy() self.loadingText = None self.loadingTextList = None self.toonTipText.destroy() self.toonTipText = None self.bar.destroy() self.bar = None TTDialog.TTDialog.destroy(self) def __hideBlocker(self): self.hide() if self.__isValidDownloadBar(): base.downloadWatcher.text.show() def __showBlocker(self): self.show() if self.__isValidDownloadBar(): base.downloadWatcher.text.hide() def __setupLoadingBar(self): self.bar = DirectWaitBar( parent = self, guiId = 'DownloadBlockerBar', pos = (0, 0, -0.3138), relief = DGG.SUNKEN, frameSize = (-0.6,0.6,-0.1,0.1), borderWidth = (0.02,0.02), scale = (0.8, 0.8, 0.5), range = 100, sortOrder = 5000, frameColor = (0.5,0.5,0.5,0.5), barColor = (0.2,0.7,0.2,0.5), text = "0%", text_scale = (0.08, 0.128), text_fg = (1, 1, 1, 1), text_align = TextNode.ACenter, text_pos = (0, -0.035), ) self.bar.setBin('gui-popup', 1) if self.__isValidDownloadBar(): base.downloadWatcher.bar.hide() def __resetLoadingBar(self): self.bar.clearBin() if self.__isValidDownloadBar(): base.downloadWatcher.bar.show() def __isValidDownloadBar(self): if hasattr(base, "downloadWatcher") and base.downloadWatcher: if hasattr(base.downloadWatcher, "bar") and base.downloadWatcher.bar: return True return False def __createTitleText(self): self.title = DirectLabel( parent = self, relief = None, guiId = 'BlockerTitle', pos = (0, 0, 0.38), text = TTLocalizer.BlockerTitle, text_font = ToontownGlobals.getSignFont(), text_fg = (1,0.9,0.1,1), text_align = TextNode.ACenter, text_scale = 0.1, textMayChange = 1, sortOrder = 50, )
MIT License
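Running the ToontownLoadingBlocker record above needs Panda3D and the game assets. The core idea of its text rotation, picking a new loading string on a timer while avoiding an immediate repeat, can be sketched with the standard library alone; the text list below is made up, not TTLocalizer data.

import random

# Hypothetical stand-in for TTLocalizer.BlockerLoadingTexts.
LOADING_TEXTS = ["Reticulating splines...", "Waking up the Toons...", "Painting the sky..."]


def next_loading_text(current, texts=LOADING_TEXTS):
    """Pick a new loading string, avoiding an immediate repeat."""
    choices = [t for t in texts if t != current] or texts
    return random.choice(choices)


text = None
for _ in range(5):
    text = next_loading_text(text)
    print(text)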
bastipaeltz/codedict
source/database.py
Database.retrieve_links
python
def retrieve_links(self, values, selection_type):
    try:
        with self._db_instance:
            if selection_type == 'open':
                selection = self._db_instance.execute('''
                    SELECT url from Links WHERE name = ? AND language = ?
                ''', (values['searchpattern'], values['language']))
                return selection.fetchone()
            else:
                if selection_type == 'display':
                    selection = self._db_instance.execute('''
                        SELECT name, url, language from Links WHERE name LIKE ?
                    ''', (values['searchpattern'] + '%', ))
                elif selection_type == 'lang-display':
                    selection = self._db_instance.execute('''
                        SELECT name, url from Links WHERE name LIKE ? AND language = ?
                    ''', (values['searchpattern'] + '%', values['language']))
                selection_list = selected_rows_to_list(selection)
                return selection_list
    except sqlite3.Error as error:
        print "A database error has ocurred: ", error
        sys.exit(1)
Retrieves links into Link table.
https://github.com/bastipaeltz/codedict/blob/5830cc277c5d0dbcd62d5d86217383fad4d0f207/source/database.py#L404-L435
from collections import namedtuple import sqlite3 import sys import os import shutil DumpEntry = namedtuple('DumpEntry', ['language', 'tags', 'problem', 'solution']) class Database(object): def __init__(self): self.db_path = determine_db_path() if not os.path.isdir(self.db_path): print "Building database." os.makedirs(self.db_path) self._db_instance = establish_db_connection(self.db_path + '/codedict_db.DB') self._setup_database() def _setup_database(self): if not self._db_instance: print "Error while reaching DB." sys.exit(1) if not self._create_tables(): print "Error while creating DB tables." sys.exit(1) def _create_tables(self): try: with self._db_instance: self._db_instance.execute(''' CREATE table IF NOT EXISTS Languages (id INTEGER PRIMARY KEY, language TEXT UNIQUE, suffix TEXT) ''') self._db_instance.execute(''' CREATE table IF NOT EXISTS Tags (id INTEGER PRIMARY KEY, name TEXT, language TEXT) ''') self._db_instance.execute(''' CREATE table IF NOT EXISTS ItemsToTags (id INTEGER PRIMARY KEY, tagID INTEGER, dictID INTEGER) ''') self._db_instance.execute(''' CREATE table IF NOT EXISTS Dictionary (id INTEGER PRIMARY KEY, language TEXT, problem TEXT, solution TEXT) ''') self._db_instance.execute(''' CREATE table IF NOT EXISTS Config (configItem TEXT PRIMARY KEY, value TEXT) ''') self._db_instance.execute(''' CREATE table IF NOT EXISTS Links (id INTEGER PRIMARY KEY, name TEXT, URL text, language TEXT) ''') return True except sqlite3.Error as error: print "A database error has ocurred: ", error return False def rollback(self): try: shutil.copy2(self.db_path + "/BACKUP_codedict_db.DB", self.db_path + "/codedict_db.DB") except (shutil.Error, IOError, OSError) as error: print "Error while rolling back database.\n", error sys.exit(1) def get_config_item(self, config_item): try: with self._db_instance: value = self._db_instance.execute(''' SELECT value from Config where configItem = ? ''', (config_item, )) return value.fetchone() except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def set_config_item(self, config_item, value): try: with self._db_instance: self._db_instance.execute(''' INSERT or IGNORE INTO Config (configItem, value) VALUES (?, ?) ''', (config_item, value, )) self._db_instance.execute(''' UPDATE Config SET value = ? WHERE configItem = ? ''', (value, config_item, )) except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def retrieve_suffix(self, lang_name): try: with self._db_instance: suffix = self._db_instance.execute(''' SELECT suffix from Languages where language = ? ''', (lang_name, )) return suffix.fetchone() except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def set_suffix(self, lang_name, suffix): try: with self._db_instance: self._db_instance.execute(''' INSERT or IGNORE into Languages (language, suffix) VALUES(?,?) ''', (lang_name, suffix)) self._db_instance.execute(''' UPDATE Languages SET suffix = ? WHERE language = ? ''', (suffix, lang_name, )) except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def delete_content(self, values): try: with self._db_instance: self._db_instance.execute(''' DELETE from Dictionary WHERE problem = ? AND language = (SELECT language from Languages where language = ?) 
''', (values['problem'], values['language'])) except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def update_content(self, values): try: with self._db_instance: if values['attribute'] != 'link': self._db_instance.execute(''' UPDATE Dictionary SET {0} = ? WHERE problem = ? AND language = (SELECT language from Languages where language = ?) '''.format(values['attribute']), (values['data'], values['problem'], values['language'])) else: self._db_instance.execute(''' UPDATE Links SET {0} = ? WHERE problem = ? AND language = (SELECT id from Languages where language = ?) '''.format(values['attribute']), (values['data'], values['problem'], values['language'])) except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def get_full_dump(self, language, required_tags): required_tags = set(required_tags) try: with self._db_instance: all_problems = self._db_instance.execute( ''' SELECT id, language, problem, solution FROM Dictionary ''') results = [] for dict_id, language, problem, solution in all_problems.fetchall(): tag_results = self._db_instance.execute( ''' SELECT name FROM Tags INNER JOIN ItemsToTags ON ItemsToTags.tagID = Tags.id WHERE ItemsToTags.dictID = ? ''', (dict_id,)) tags = set(tag for (tag,) in tag_results) if required_tags <= tags: results.append(DumpEntry(language, tags, problem, solution)) return results except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def get_tags(self, values): try: with self._db_instance: if 'problem' in values: results = self._db_instance.execute( ''' SELECT name FROM Tags INNER JOIN ItemsToTags ON Tags.id = ItemsToTags.tagID WHERE dictID = (SELECT id from Dictionary where language = ? and problem = ?) and language = ? ''', (values['language'], values['problem'], values['language'])) else: results = self._db_instance.execute( ''' SELECT name FROM Tags where language = ? ''', (values['language'], )) return results.fetchall() except sqlite3.Error as error: print " A database error has ocurred.", error sys.exit(1) def update_tags(self, values, update_type='add'): try: with self._db_instance: if update_type == 'add': self._db_instance.execute(''' INSERT or IGNORE into Tags (name, language) VALUES (?, ?) ''', (values['tag_name'], values['language'])) self._db_instance.execute(''' INSERT or REPLACE into ItemsToTags (tagID, dictID) VALUES ( (SELECT id from Tags WHERE name = ? AND language = ?), (SELECT id from Dictionary WHERE problem = ? and language = ?) )''', (values['tag_name'], values['language'], values['problem'], values['language'])) else: self._db_instance.execute(''' DELETE from ItemsToTags WHERE dictID = (SELECT id from Dictionary WHERE problem = ? and language = ?) AND tagID = (SELECT id from Tags WHERE name = ? AND language = ?) ''', (values['problem'], values['language'], values['tag_name'], values['language'])) except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def delete_tag(self, values): try: with self._db_instance: self._db_instance.execute( ''' DELETE from Tags WHERE name = ? AND language = (SELECT language from Languages where language = ?) 
''', (values['tag_name'], values['language'])) except sqlite3.Error as error: print "A database error has ocurred ", error sys.exit(1) def retrieve_dict_per_tags(self, values): try: with self._db_instance: results = self._db_instance.execute( ''' SELECT DISTINCT problem, solution FROM Dictionary INNER JOIN ItemsToTags On Dictionary.id = ItemsToTags.dictID INNER JOIN Tags On ItemsToTags.tagID = Tags.id WHERE Tags.language = ? and Tags.name LIKE ? ''', (values['language'], values['searchpattern'] + '%')) return selected_rows_to_list(results) except sqlite3.Error as error: print "A database error has ocurred ", error sys.exit(1) def upsert_links(self, values, operation_type='add'): try: with self._db_instance: self._db_instance.execute(''' INSERT OR IGNORE INTO Links (id, name, url, language) VALUES ((SELECT id from Links WHERE name = ? AND language = ?), ?, ?, ?) ''', (values['link_name'], values['original-lang'], values['link_name'], values['url'], values['language'])) if operation_type == 'upsert': self._db_instance.execute(''' UPDATE Links SET {0} = ? WHERE name = ? AND url = ? AND language = ? '''.format(values['attribute']), (values['data'], values['link_name'], values['url'], values['original-lang'])) except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1) def delete_links(self, values): try: with self._db_instance: self._db_instance.execute(''' DELETE from Links WHERE url = ? ''', (values['url'], )) except sqlite3.Error as error: print "A database error has ocurred: ", error sys.exit(1)
MIT License
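A self-contained sketch of the parameterized LIKE queries used by retrieve_links in the record above, run against an in-memory SQLite database. The table layout follows the record's context; the rows and search values are made up.

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE Links (id INTEGER PRIMARY KEY, name TEXT, url TEXT, language TEXT)')
conn.executemany('INSERT INTO Links (name, url, language) VALUES (?, ?, ?)', [
    ('docs', 'https://docs.python.org', 'python'),
    ('docker-docs', 'https://docs.docker.com', 'shell'),
])

searchpattern, language = 'doc', 'python'

# 'lang-display' style query: prefix match restricted to one language.
rows = conn.execute(
    'SELECT name, url FROM Links WHERE name LIKE ? AND language = ?',
    (searchpattern + '%', language),
).fetchall()
print(rows)  # [('docs', 'https://docs.python.org')]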
tensorflow/transform
tensorflow_transform/annotators.py
object_tracker_scope
python
def object_tracker_scope(object_tracker: ObjectTracker):
    global _OBJECT_TRACKER
    assert _OBJECT_TRACKER is None
    _OBJECT_TRACKER = object_tracker
    try:
        yield
    finally:
        _OBJECT_TRACKER = None
A context to manage trackable objects.

Collects all trackable objects annotated using `track_object` within the body of its scope.

Args:
  object_tracker: The passed in ObjectTracker object

Yields:
  A scope in which the object_tracker is active.
https://github.com/tensorflow/transform/blob/6349d7f6d847cb8979f31b9b315981d79ffba3e5/tensorflow_transform/annotators.py#L76-L95
import contextlib import os from typing import Callable, List, Optional import tensorflow as tf from tensorflow_transform.graph_context import TFGraphContext from tensorflow.python.framework import func_graph from tensorflow.python.framework import ops from tensorflow.python.training.tracking import base __all__ = ['annotate_asset', 'make_and_track_object'] _ASSET_KEY_COLLECTION = 'tft_asset_key_collection' _ASSET_FILENAME_COLLECTION = 'tft_asset_filename_collection' _OBJECT_TRACKER = None class ObjectTracker: __slots__ = ['_trackable_objects'] def __init__(self): self._trackable_objects = [] @property def trackable_objects(self) -> List[base.Trackable]: return self._trackable_objects def add_trackable_object(self, trackable_object: base.Trackable, name: Optional[str]): if name is None: self._trackable_objects.append(trackable_object) else: module = TFGraphContext.get_module_to_export() if module is None: raise RuntimeError( f'No module found to track {name} with. Check that the ' '`preprocessing_fn` is invoked within a `TFGraphContext` with a ' 'valid `TFGraphContext.module_to_export`.') if hasattr(module, name): raise ValueError( f'An object with name {name} is already being tracked. Check that a ' 'unique name was passed.') setattr(module, name, trackable_object) @contextlib.contextmanager
Apache License 2.0
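object_tracker_scope in the record above is the usual "module-level slot plus context manager" pattern. A generic, library-free sketch of that pattern follows; the names are illustrative, not the tensorflow_transform internals.

import contextlib

_TRACKER = None  # module-level slot, analogous to _OBJECT_TRACKER


class Tracker:
    def __init__(self):
        self.objects = []


@contextlib.contextmanager
def tracker_scope(tracker):
    global _TRACKER
    assert _TRACKER is None, "nested scopes are not allowed"
    _TRACKER = tracker
    try:
        yield
    finally:
        _TRACKER = None  # always cleared, even on error


def track(obj):
    """Register an object with the active tracker, if any."""
    if _TRACKER is not None:
        _TRACKER.objects.append(obj)


t = Tracker()
with tracker_scope(t):
    track("some trackable object")
print(t.objects)  # ['some trackable object']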
maobubu/stock-prediction
scripts/ELSTM_v2/elstm.py
days
python
def days(emb, sequence_mask, news_mask, keep_prob, is_training, options,
         elmo_input, elmo_length, elmo):
    batch = tf.shape(emb)[0]
    day = tf.shape(emb)[1]
    new_s = tf.shape(emb)[2]
    word = tf.shape(emb)[3]
    word_level_inputs = tf.reshape(emb, [batch * day * new_s, word, options['dim_word']])
    word_level_mask = tf.reshape(sequence_mask, [batch * day * new_s, word])
    news_level_mask = tf.reshape(news_mask, [batch * day, new_s])
    elmo_input = tf.reshape(elmo_input, [batch * day * new_s, word])
    elmo_length = tf.reshape(elmo_length, [batch * day * new_s])
    elmo_embedding = elmo(inputs={"tokens": elmo_input, "sequence_len": elmo_length},
                          signature="tokens", as_dict=True)["elmo"]
    word_encoder_out = bilstm_filter(word_level_inputs, word_level_mask, keep_prob,
                                     prefix='sequence_encode', dim=options['dim'],
                                     is_training=is_training)
    word_encoder_out = tf.concat([tf.concat(word_encoder_out, 2), elmo_embedding], 2) * tf.expand_dims(word_level_mask, -1)
    word_level_output = attention_v2(word_encoder_out, word_level_mask, name='word_attention',
                                     keep=keep_prob, r=10, is_training=is_training)
    '''
    word_level_output = tf.reduce_sum(word_encoder_out * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
        tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)
    '''
    if options['use_dropout']:
        word_level_output = layers.dropout(word_level_output, keep_prob=keep_prob,
                                           is_training=is_training, seed=None)
    news_level_input = tf.reshape(word_level_output, [batch * day, new_s, 2 * options['dim'] + 1024])
    news_level_input = news_level_input * tf.expand_dims(news_level_mask, -1)
    news_level_output = attention_v2(news_level_input, news_level_mask, name='news_attention',
                                     keep=keep_prob, r=10, is_training=is_training)
    day_level_output = tf.reshape(news_level_output, [batch, day, 2 * options['dim'] + 1024])
    return day_level_output
word_level_output = tf.reduce_sum(word_encoder_out * tf.expand_dims(word_level_mask, -1), 1) / tf.expand_dims(
    tf.reduce_sum(word_level_mask, 1) + 1e-8, 1)
https://github.com/maobubu/stock-prediction/blob/b2442ccb027c25809a33a610f010cdec077bf61a/scripts/ELSTM_v2/elstm.py#L191-L236
import os from collections import defaultdict os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1" import numpy numpy.random.seed(1) import tensorflow as tf import logging import math from tensorflow import logging as log from tensorflow.python import debug as tf_debug from collections import OrderedDict from data_iterator_tensor import TextIterator from tensorflow.contrib import rnn import tensorflow.contrib.layers as layers import warnings import pickle as pkl import sys import pprint import pdb import os import copy import time import pickle import tensorflow_hub as hub logger = logging.getLogger(__name__) def _s(pp, name): return '{}_{}'.format(pp, name) def load_params(path, params): pp = numpy.load(path) for kk, vv in params.iteritems(): if kk not in pp: warnings.warn('{} is not in the archive'.format(kk)) continue params[kk] = pp[kk] return params def xavier_init(fan_in, fan_out, constant=1): low = -constant * numpy.sqrt(6.0 / (fan_in + fan_out)) high = constant * numpy.sqrt(6.0 / (fan_in + fan_out)) W = numpy.random.uniform(low=low, high=high, size=(fan_in, fan_out)) return W.astype('float32') def ortho_weight(ndim): W = numpy.random.randn(ndim, ndim) u, s, v = numpy.linalg.svd(W) return u.astype('float32') def norm_weight(nin, nout=None, scale=0.01, ortho=True): if nout is None: nout = nin if nout == nin and ortho: W = ortho_weight(nin) else: W = scale * numpy.random.randn(nin, nout) return W.astype('float32') def prepare_data(sequence, sequence_d1, sequence_d2, labels, options, maxlen=None, max_word=100): length, length_d1, length_d2 = [], [], [] for i, d1, d2 in zip(sequence, sequence_d1, sequence_d2): dd1, dd2 = list(), list() length.append(len(i)) for day in d1: dd1.append(len(day)) length_d1.append(dd1) for day in d2: dd2.append(len(day)) length_d2.append(dd2) if maxlen is not None: new_sequence = [] new_lengths = [] new_sequence_d1 = [] new_lengths_d1 = [] new_sequence_d2 = [] new_lengths_d2 = [] for l, s, ld1, sd1, ld2, sd2 in zip(length, sequence, length_d1, sequence_d1, length_d2, sequence_d2): dd1, lld1, dd2, lld2 = list(), list(), list(), list() if l < maxlen: new_sequence.append(s) new_lengths.append(l) for i, j in zip(ld1, sd1): if i < maxlen: dd1.append(j) lld1.append(i) new_sequence_d1.append(dd1) new_lengths_d1.append(lld1) for i, j in zip(ld2, sd2): if i < maxlen: dd2.append(j) lld2.append(i) new_sequence_d2.append(dd2) new_lengths_d2.append(lld2) length = new_lengths sequence = new_sequence length_d1 = new_lengths_d1 sequence_d1 = new_sequence_d1 length_d2 = new_lengths_d2 sequence_d2 = new_sequence_d2 if len(length) < 1: return None, None, None, None, None, None, None, None day1 = options['delay1'] - 1 day2 = options['delay2'] - options['delay1'] maxlen_x = numpy.max(length) try: maxlen_xd1 = numpy.max([numpy.max(i) for i in length_d1]) maxlen_xd2 = numpy.max([numpy.max(i) for i in length_d2]) except ValueError as e: print(str(e)) maxlen_xd1 = 100 maxlen_xd2 = 100 n_samples = len(sequence) max_sequence = max(len(j) for i in sequence for j in i) max_sequence_d1 = max(len(j) for i in sequence_d1 for z in i for j in z) max_sequence_d2 = max(len(j) for i in sequence_d2 for z in i for j in z) max_sequence = max_word if max_sequence > max_word else max_sequence max_sequence_d1 = max_word if max_sequence_d1 > max_word else max_sequence_d1 max_sequence_d2 = max_word if max_sequence_d2 > max_word else max_sequence_d2 x = numpy.zeros((n_samples, maxlen_x, max_sequence)).astype('int64') x_mask = numpy.zeros((n_samples, 
maxlen_x)).astype('float32') x_d1 = numpy.zeros((n_samples, day1, maxlen_xd1, max_sequence_d1)).astype('int64') x_d1_mask = numpy.zeros((n_samples, day1, maxlen_xd1)).astype('float32') x_d2 = numpy.zeros((n_samples, day2, maxlen_xd2, max_sequence_d2)).astype('int64') x_d2_mask = numpy.zeros((n_samples, day2, maxlen_xd2)).astype('float32') final_mask = numpy.ones((n_samples, 1 + day1 + day2)).astype('float32') l = numpy.zeros((n_samples,)).astype('int64') for index, (i, j, k, ll) in enumerate(zip(sequence, sequence_d1, sequence_d2, labels)): l[index] = ll for idx, ss in enumerate(i): if len(ss) < max_sequence: x[index, idx, :len(ss)] = ss else: x[index, idx, :max_sequence] = ss[:max_sequence] x_mask[index, idx] = 1. for jj, day in enumerate(j): for idx, ss in enumerate(day): if len(ss) < max_sequence_d1: x_d1[index, jj, idx, :len(ss)] = ss else: x_d1[index, jj, idx, :max_sequence_d1] = ss[:max_sequence_d1] x_d1_mask[index, jj, idx] = 1. for jj, day in enumerate(k): for idx, ss in enumerate(day): if len(ss) < max_sequence_d2: x_d2[index, jj, idx, :len(ss)] = ss else: x_d2[index, jj, idx, :max_sequence_d2] = ss[:max_sequence_d2] x_d2_mask[index, jj, idx] = 1. ''' haha = numpy.absolute(numpy.sign(x)) hehe = numpy.absolute(numpy.sign(x_d1)) jiji = numpy.absolute(numpy.sign(x_d2)) ''' return x, x_mask, x_d1, x_d1_mask, x_d2, x_d2_mask, l, final_mask
Apache License 2.0
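The commented-out block inside days() (also echoed in the record's docstring field) is masked mean pooling, the simpler alternative to the attention_v2 call. A small NumPy sketch of that pooling step, with made-up shapes:

import numpy as np

# (batch*day*news, words, features) encoder outputs and a 0/1 word mask.
rng = np.random.default_rng(0)
word_encoder_out = rng.normal(size=(4, 7, 16))
word_level_mask = (rng.random((4, 7)) > 0.3).astype(np.float32)

# Masked mean over the word axis: zero out padding, then divide by the
# number of real words (the 1e-8 guards against all-padding rows).
summed = (word_encoder_out * word_level_mask[..., None]).sum(axis=1)
counts = word_level_mask.sum(axis=1, keepdims=True) + 1e-8
word_level_output = summed / counts
print(word_level_output.shape)  # (4, 16)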
zmcddn/data-science-helper
dshelper/data/utils.py
reduce_mem_usage
python
def reduce_mem_usage(df):
    for col in df.columns:
        col_type = df[col].dtype
        if col_type != object:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == "int":
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if (
                    c_min > np.finfo(np.float16).min
                    and c_max < np.finfo(np.float16).max
                ):
                    df[col] = df[col].astype(np.float16)
                elif (
                    c_min > np.finfo(np.float32).min
                    and c_max < np.finfo(np.float32).max
                ):
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    return df
Iterate through all the columns of a dataframe and modify the data type to reduce memory usage.
https://github.com/zmcddn/data-science-helper/blob/99a876e0926b6af123207969c6f4ca1fce5369da/dshelper/data/utils.py#L4-L39
import numpy as np
MIT License
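A quick before/after check of the downcasting idea in reduce_mem_usage, on a tiny made-up DataFrame (assumes pandas and numpy are installed; the column values comfortably fit in int8 and float32).

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "small_int": np.arange(100, dtype=np.int64),  # values 0..99 fit in int8
    "small_float": np.linspace(0, 1, 100),        # float64 by default
})
before = df.memory_usage(deep=True).sum()

# Same effect as the int8/float32 branches of reduce_mem_usage.
df["small_int"] = df["small_int"].astype(np.int8)
df["small_float"] = df["small_float"].astype(np.float32)
after = df.memory_usage(deep=True).sum()

print(before, "->", after, "bytes")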
ioshchepkov/pygeoid
pygeoid/constants/iers2010.py
l2_shida_number
python
def l2_shida_number(lat: _u.deg = None) -> _u.dimensionless_unscaled:
    if lat is not None:
        return l2 + 0.0002 * (3 * _np.sin(lat)**2 - 1) / 2
    else:
        return l2
Return degree 2 Shida number (l2,0).

If `lat` is None, the nominal degree 2 Shida number l2=0.0847
will be returned.

Parameters
----------
lat : ~astropy.units.Quantity, optional
    Geocentric (spherical) latitude. If given, a small latitude
    dependence will be considered.

Returns
-------
l2 : ~astropy.units.Quantity
    Nominal degree 2 Shida number.

Notes
-----

References
----------
.. [1] IERS Conventions(2010), section 7.1.1, page 105.
https://github.com/ioshchepkov/pygeoid/blob/86b29333cb9f3c5543983da6fbfc7923da8818d8/pygeoid/constants/iers2010.py#L209-L237
import numpy as _np import astropy.units as _u from astropy.constants import Constant as _Constant G = _Constant( abbrev='G', name='Constant of gravitation', value=6.67428e-11, unit='m**3 / (kg * s**2)', uncertainty=6.7e-15, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') L_G = _Constant( abbrev='L_G', name='1 - d(TT)/d(TCG)', value=6.969290134e-10, unit='', uncertainty=0, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') GM_sun = _Constant( abbrev='GM_sun', name='Heliocentric gravitational constant', value=1.32712442099e20, unit='m**3 / s**2', uncertainty=1e10, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') GM_earth_tcg = _Constant( abbrev='GM_earth', name='Geocentric gravitational constant (TCG-compatible)', value=3.986004418e14, unit='m**3 / s**2', uncertainty=8e15, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') GM_earth_tt = _Constant( abbrev='GM_earth', name='Geocentric gravitational constant (TT-compatible)', value=3.986004415e14, unit='m**3 / s**2', uncertainty=8e15, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') a = _Constant( abbrev='a', name='Equatorial radius of the Earth', value=6378136.6, unit='m', uncertainty=0.1, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') J2_earth = _Constant( abbrev='J2_earth', name='Dynamical form factor of the Earth', value=0.0010826359, unit='', uncertainty=1e-10, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') rf = _Constant( abbrev='rf', name='Flattening factor of the Earth', value=298.25642, unit='', uncertainty=0.00001, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') ge = _Constant( abbrev='ge', name='Mean equatorial gravity', value=9.7803278, unit='m / s**2', uncertainty=0.00001, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') W0 = _Constant( abbrev='W0', name='Potential of the geoid', value=62636856.0, unit='m**2 / s**2', uncertainty=0.5, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') R0 = _Constant( abbrev='R0', name='Geopotential scale factor (GM/W0)', value=6363672.6, unit='m', uncertainty=0.1, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') H = _Constant( abbrev='H', name='Dynamical flattening', value=3273795e-9, unit='', uncertainty=1e-9, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') def tcg_to_tt(x): return x * (1 - L_G) k2 = _Constant( abbrev='k2', name='Nominal degree 2 Love number k2', value=0.29525, unit='', uncertainty=0, 
reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') h2 = _Constant( abbrev='h2', name='Nominal degree 2 Love number h2', value=0.6078, unit='', uncertainty=0, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') l2 = _Constant( abbrev='l2', name='Nominal degree 2 Shida number l2', value=0.0847, unit='', uncertainty=0, reference='IERS Conventions(2010), ' 'IERS Technical Note 36, ' 'Verlagdes Bundesamts für Kartographie und Geodäsie, ' 'Frankfurt am Main, Germany.') DEGREE2_LOVE_NUMBERS = {'k': k2, 'l': l2, 'h': h2}
MIT License
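A worked check of the latitude correction in l2_shida_number: l2(phi) = 0.0847 + 0.0002 * (3*sin(phi)**2 - 1) / 2. The sketch below recomputes it with plain NumPy rather than the astropy Quantity machinery the package uses.

import numpy as np

L2_NOMINAL = 0.0847  # nominal degree 2 Shida number from the record above


def l2_at(lat_deg):
    """Latitude-dependent degree 2 Shida number (lat in degrees, geocentric)."""
    phi = np.radians(lat_deg)
    return L2_NOMINAL + 0.0002 * (3 * np.sin(phi) ** 2 - 1) / 2


print(l2_at(0.0))   # 0.0846 (equator: correction is -0.0001)
print(l2_at(90.0))  # 0.0849 (pole: correction is +0.0002)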
openstack/watcher-dashboard
watcher_dashboard/api/watcher.py
Audit.create
python
def create(cls, request, audit_template_uuid, audit_type,
           name=None, auto_trigger=False, interval=None):
    if interval:
        return watcherclient(request).audit.create(
            audit_template_uuid=audit_template_uuid,
            audit_type=audit_type,
            auto_trigger=auto_trigger,
            interval=interval,
            name=name)
    else:
        return watcherclient(request).audit.create(
            audit_template_uuid=audit_template_uuid,
            audit_type=audit_type,
            auto_trigger=auto_trigger,
            name=name)
Create an audit in Watcher

:param request: request object
:type request: django.http.HttpRequest
:param audit_template_uuid: related audit template UUID
:type audit_template_uuid: string
:param audit_type: audit type
:type audit_type: string
:param interval: Audit interval (default: None)
:type interval: int
:param name: Name for this audit
:type name: string
:return: the created Audit object
:rtype: :py:class:`~.Audit`
https://github.com/openstack/watcher-dashboard/blob/4192182302a9f81b2cac5aff484161afc776c750/watcher_dashboard/api/watcher.py#L65-L96
import logging from django.conf import settings from django.utils.translation import ugettext_lazy as _ from openstack_dashboard.api import base from watcherclient import client as wc from watcher_dashboard.utils import errors as errors_utils LOG = logging.getLogger(__name__) WATCHER_SERVICE = 'infra-optim' def watcherclient(request, password=None): api_version = "1" insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False) ca_file = getattr(settings, 'OPENSTACK_SSL_CACERT', None) insert_watcher_policy_file() endpoint = base.url_for(request, WATCHER_SERVICE) LOG.debug('watcherclient connection created using token "%s" and url "%s"' % (request.user.token.id, endpoint)) client = wc.get_client( api_version, watcher_url=endpoint, insecure=insecure, ca_file=ca_file, username=request.user.username, password=password, os_auth_token=request.user.token.id ) return client def insert_watcher_policy_file(): policy_files = getattr(settings, 'POLICY_FILES', {}) policy_files['infra-optim'] = 'watcher_policy.json' setattr(settings, 'POLICY_FILES', policy_files) class Audit(base.APIDictWrapper): _attrs = ('uuid', 'name', 'created_at', 'modified_at', 'deleted_at', 'state', 'audit_type', 'audit_template_uuid', 'audit_template_name', 'interval') def __init__(self, apiresource, request=None): super(Audit, self).__init__(apiresource) self._request = request @classmethod
Apache License 2.0
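The if/else in Audit.create above exists only to drop the interval keyword when it is falsy. A small, library-free sketch of the same call-forwarding shape, with a stub in place of the watcherclient audit manager; everything here is illustrative, not the watcherclient API.

def create_audit(client_create, audit_template_uuid, audit_type,
                 name=None, auto_trigger=False, interval=None):
    """Build kwargs once and only include interval when it is set."""
    kwargs = dict(
        audit_template_uuid=audit_template_uuid,
        audit_type=audit_type,
        auto_trigger=auto_trigger,
        name=name,
    )
    if interval:
        kwargs["interval"] = interval
    return client_create(**kwargs)


# Stub standing in for watcherclient(request).audit.create.
def fake_create(**kwargs):
    return kwargs


print(create_audit(fake_create, "tmpl-uuid", "ONESHOT", name="nightly"))
print(create_audit(fake_create, "tmpl-uuid", "CONTINUOUS", interval=3600))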
eigenmagic/twitforget
likesforget.py
augment_args
python
def augment_args(args):
    cp = ConfigParser.SafeConfigParser()
    cp.read(os.path.expanduser(args.config))
    try:
        keeplist = cp.get('twitter', 'keeplikes')
        keeplist = [int(x) for x in keeplist.split()]
        log.debug('keeplist: %s', keeplist)
        if args.keeplist is not None:
            args.keeplist.extend(keeplist)
        else:
            args.keeplist = keeplist
        log.debug('args: %s', args.keeplist)
    except ConfigParser.NoOptionError:
        log.debug("No such option.")
        pass
    return args
Augment commandline arguments with config file parameters
https://github.com/eigenmagic/twitforget/blob/fb9ec481238837d380b191da6e1eeaf663719b06/likesforget.py#L541-L560
import sys import os.path import argparse import ConfigParser from more_itertools import chunked import arrow import zipfile import json import twitter import time import sqlite3 import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') log = logging.getLogger('likesforget') import pprint SQLDATE_FMT = 'ddd MMM DD hh:mm:ss Z YYYY' LIKES_DATAFILE = 'data/like.js' class TweetCache(object): def __init__(self, filename): log.debug("Opening database file: %s", filename) self.conn = sqlite3.connect(filename) self.conn.row_factory = sqlite3.Row c = self.conn.cursor() c.execute("""SELECT name FROM sqlite_master WHERE type='table' AND name='likes'""") result = c.fetchone() if result is None: self.create_schema() def create_schema(self): c = self.conn.cursor() c.execute("""CREATE TABLE likes (id integer UNIQUE, screen_name text, created_at datetime, content_text text, deleted bool)""") self.conn.commit() def __len__(self): c = self.conn.cursor() c.execute("SELECT count(*) FROM likes") result = c.fetchone()[0] log.debug("There are %d likes in the cache.", result) return result def get_deleted_count(self): c = self.conn.cursor() c.execute("SELECT count(*) FROM likes WHERE deleted = ?", (True,)) result = c.fetchone()[0] log.debug("There are %d deleted likes in the cache.", result) return result def __delitem__(self, tweetid): log.debug("Deleting like id %d from cache...", tweetid) c = self.conn.cursor() c.execute("DELETE FROM likes WHERE id = ?", (tweetid,)) self.conn.commit() return def mark_deleted(self, tweetid): log.debug("Marking like id %id as deleted in cache...", tweetid) c = self.conn.cursor() c.execute("UPDATE likes SET deleted = ? WHERE id = ?", (True, tweetid,)) self.conn.commit() return def save_likes(self, username, likes): c = self.conn.cursor() valset = [] for item in likes: if 'fullText' in item: if 'created_at' not in item: valset.append( (item['tweetId'], username, None, item['fullText'], False, ) ) else: valset.append( (item['tweetId'], username, item['created_at'], item['fullText'], False, ) ) else: valset.append( (item['id'], username, item['created_at'], item['text'], False, ) ) c.executemany("""INSERT OR IGNORE INTO likes (id, screen_name, created_at, content_text, deleted) VALUES (?, ?, ?, ?, ?)""", valset) self.conn.commit() log.debug("Saved %d likes into database.", len(valset)) def get_min_id(self, undeleted=False, ignoreids=None): c = self.conn.cursor() if undeleted: query = "SELECT min(id) FROM likes WHERE deleted = False" if ignoreids is not None: log.debug("Ignoring likes: %s", ignoreids) idlist = ','.join([ '%d' % x for x in ignoreids ]) log.debug(idlist) query += " AND id NOT IN (%s)" % idlist c.execute(query) else: c.execute("SELECT min(id) FROM likes") res = c.fetchone()[0] log.debug("minimum id is: %d", res) return res def get_max_id(self, undeleted=False): c = self.conn.cursor() if undeleted: c.execute("SELECT max(id) FROM likes WHERE deleted = ?", (True,)) else: c.execute("SELECT max(id) FROM likes") res = c.fetchone()[0] log.debug("max id is: %d", res) return res def get_destroy_set_keepnum(self, keepnum, deletemax=None): c = self.conn.cursor() QUERY = """SELECT * FROM likes WHERE id NOT IN (SELECT id FROM likes ORDER BY id DESC LIMIT ?) AND deleted IS NOT ? ORDER BY id ASC """ PARAMS = [keepnum, True] if deletemax is not None: QUERY += " LIMIT ?" 
PARAMS.append(deletemax) c.execute(QUERY, PARAMS) result = c.fetchall() return result def get_destroy_set_beforedays(self, beforedays, deletemax=None): c = self.conn.cursor() QUERY = """SELECT * FROM likes WHERE deleted IS NOT ? AND created_at IS NOT NULL ORDER BY id ASC """ PARAMS = [True] c.execute(QUERY, PARAMS) result = c.fetchall() beforedate = arrow.now().shift(days=-beforedays) destroy_set = [] for i, row in enumerate(result): if arrow.get(row['created_at'], SQLDATE_FMT) < beforedate: destroy_set.append(row) if deletemax and i+1 >= deletemax: break return destroy_set def get_destroy_set_dates(self, date_before, date_after=None, deletemax=None): c = self.conn.cursor() QUERY = """SELECT * FROM likes WHERE deleted IS NOT ? AND created_at IS NOT NULL ORDER BY id ASC """ PARAMS = [True] c.execute(QUERY, PARAMS) result = c.fetchall() destroy_set = [] for i, row in enumerate(result): tweet_date = arrow.get(row['created_at'], SQLDATE_FMT) if tweet_date < date_before: if date_after: if tweet_date > date_after: destroy_set.append(row) else: destroy_set.append(row) if deletemax and i+1 >= deletemax: break return destroy_set def get_destroy_nodate(self, keepnum, deletemax=None): c = self.conn.cursor() QUERY = """SELECT * FROM likes WHERE created_at IS NULL AND deleted IS NOT ? ORDER BY id ASC """ PARAMS = [True,] if deletemax is not None: QUERY += " LIMIT ?" PARAMS.append(deletemax) c.execute(QUERY, PARAMS) result = c.fetchall() return result def load_tweetcache(args): tweetcache = TweetCache(os.path.expanduser(args.tweetcache)) return tweetcache def fetch_all_likes(tw, args, tweetcache): username = args.userids[0] tweetcache = get_old_likes(tw, username, args, tweetcache) tweetcache = get_new_likes(tw, username, args, tweetcache) return tweetcache def get_new_likes(tw, username, args, tweetcache): fetching = True log.debug("Fetching new likes...") while(fetching): known_max_id = tweetcache.get_max_id(undeleted=False) log.debug("Getting likes since %s ...", known_max_id) likes = tw.favorites.list(screen_name=username, count=args.batchsize, since_id=known_max_id, include_entities=False, ) log.debug("Fetched %d likes.", len(likes)) if likes == []: log.debug("No more recent likes to fetch.") fetching = False break else: tweetcache.save_likes(username, likes) sleeptime = 60 / args.searchlimit log.debug("sleeping for %s seconds...", sleeptime) time.sleep(sleeptime) return tweetcache def get_old_likes(tw, username, args, tweetcache): fetching = True known_min_id = None while(fetching): if len(tweetcache) == 0: log.debug("Fetching first set of %d likes...", args.batchsize) likes = tw.favorites.list(screen_name=username, count=args.batchsize, include_entities=False, ) else: log.debug("There are %d likes, %d deleted", len(tweetcache), tweetcache.get_deleted_count()) min_id = tweetcache.get_min_id(undeleted=True, ignoreids=args.keeplist) if known_min_id == min_id: log.debug("Didn't find any new likes. 
All done.") break known_min_id = min_id log.debug("Fetching %d likes before tweet id: %s ...", args.batchsize, known_min_id - 1) likes = tw.favorites.list(screen_name=username, count=args.batchsize, max_id=known_min_id - 1, include_entities=False, ) log.debug("Fetched %d likes.", len(likes)) if likes == []: log.debug("No more old likes to fetch.") fetching = False break else: tweetcache.save_likes(username, likes) sleeptime = 60 / args.searchlimit log.debug("sleeping for %s seconds...", sleeptime) time.sleep(sleeptime) return tweetcache def get_destroy_set(args): if args.delete_nodate: log.debug("Using NULL created_at mode.") destroyset = tweetcache.get_destroy_nodate(args.deletemax) elif args.date_before is not None: log.debug("Using date based mode.") destroyset = tweetcache.get_destroy_set_dates(args.date_before, args.date_after, args.deletemax) elif args.beforedays is not None: log.debug("Using days before mode.") destroyset = tweetcache.get_destroy_set_beforedays(args.beforedays, args.deletemax) else: log.debug("Using number to keep mode. Keeping %d.", args.keepnum) destroyset = tweetcache.get_destroy_set_keepnum(args.keepnum, args.deletemax) return destroyset def destroy_likes(tw, args, tweetcache): destroyset = get_destroy_set(args) log.debug("Need to destroy %d likes.", len(destroyset)) for idx, item in enumerate(destroyset): log.debug("Destroying like id %s: %s", item['id'], item['content_text']) try: if args.keeplist is not None and item['id'] in args.keeplist: log.debug("Not deleting like: %d", item['id']) continue if not args.dryrun: gone_item = tw.favorites.destroy(_id=item['id']) tweetcache.mark_deleted(item['id']) log.debug("Gone like %s: %s", gone_item['id'], gone_item['text']) else: log.debug("Like not actually deleted.") except twitter.api.TwitterHTTPError, e: log.debug("Response: %s", e.response_data) errors = e.response_data['errors'] log.debug("errors: %s", errors) if len(errors) == 1: if errors[0]['code'] == 144: log.warn("Tweet with this id doesn't exist. Possibly stale cache entry. Removing.") tweetcache.mark_deleted(item['id']) elif errors[0]['code'] == 179: log.warn("Not authorised to delete like: [%s] %s", item['id'], item['content_text']) log.info("Probably a tweet that got deleted by original author. Stale cache entry. Removing.") tweetcache.mark_deleted(item['id']) elif errors[0]['code'] == 34: log.warn("Page doesn't exist for: [%s] %s", item['id'], item['content_text']) log.info("Probably a tweet that got deleted by original author. Stale cache entry. Removing.") tweetcache.mark_deleted(item['id']) elif errors[0]['code'] == 63: log.warn("User you retweeted got suspended. 
Removing cache entry.") tweetcache.mark_deleted(item['id']) else: log.critical("Unhandled response from Twitter for: [%s] %s", item['id'], item['content_text']) raise else: log.critical("Unhandled response from Twitter for: [%s] %s", item['id'], item['content_text']) raise log.info("Tweet %d of %d destroyed.", idx+1, len(destroyset)) del_sleeptime = 60 / args.deletelimit log.debug("sleeping for %s seconds...", del_sleeptime) time.sleep(del_sleeptime) return tweetcache def import_twitter_archive(tw, args, tweetcache): log.info("Importing twitter archive from %s", args.importfile) with zipfile.ZipFile(args.importfile) as ark: with ark.open(LIKES_DATAFILE, 'r') as twdf: log.debug("Importing likes from archive...") jsdata = twdf.readlines() jsdata[0] = jsdata[0][ jsdata[0].index('['): ] likeset = [ x['like'] for x in json.loads(''.join(jsdata)) ] decorate_with_tweetdate(tw, args, tweetcache, likeset) return tweetcache def decorate_with_tweetdate(tw, args, tweetcache, likeset): log.info("Decorating likes with created time for %d likes...", len(likeset)) sleeptime = 15 * 60 / 900 for likebatch in chunked(likeset, 100): log.debug("Fetching data for batch of %d...", len(likebatch)) likedict = {x['tweetId']: x for x in likebatch} tweet_ids = ','.join([x['tweetId'] for x in likebatch]) result = tw.statuses.lookup(_id=tweet_ids) for res in result: likedict[res['id_str']]['created_at'] = res['created_at'] tweetcache.save_likes(args.userids[0], likedict.values()) log.debug("sleeping for %s seconds...", sleeptime) time.sleep(sleeptime) log.debug("Completed data fetching.") return likedict.values()
MIT License
kuri65536/python-for-android
python-modules/twisted/twisted/internet/_threadedselect.py
ThreadedSelectReactor.addWriter
python
def addWriter(self, writer):
    self._sendToThread(self.writes.__setitem__, writer, 1)
    self.wakeUp()
Add a FileDescriptor for notification of data available to write.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/internet/_threadedselect.py#L311-L315
from __future__ import generators from threading import Thread from Queue import Queue, Empty from time import sleep import sys from zope.interface import implements from twisted.internet.interfaces import IReactorFDSet from twisted.internet import error from twisted.internet import posixbase from twisted.python import log, failure, threadable from twisted.persisted import styles from twisted.python.runtime import platformType import select from errno import EINTR, EBADF from twisted.internet.selectreactor import _select _NO_FILENO = error.ConnectionFdescWentAway('Handler has no fileno method') _NO_FILEDESC = error.ConnectionFdescWentAway('Filedescriptor went away') def dictRemove(dct, value): try: del dct[value] except KeyError: pass def raiseException(e): raise e class ThreadedSelectReactor(posixbase.PosixReactorBase): implements(IReactorFDSet) def __init__(self): threadable.init(1) self.reads = {} self.writes = {} self.toThreadQueue = Queue() self.toMainThread = Queue() self.workerThread = None self.mainWaker = None posixbase.PosixReactorBase.__init__(self) self.addSystemEventTrigger('after', 'shutdown', self._mainLoopShutdown) def wakeUp(self): self.waker.wakeUp() def callLater(self, *args, **kw): tple = posixbase.PosixReactorBase.callLater(self, *args, **kw) self.wakeUp() return tple def _sendToMain(self, msg, *args): self.toMainThread.put((msg, args)) if self.mainWaker is not None: self.mainWaker() def _sendToThread(self, fn, *args): self.toThreadQueue.put((fn, args)) def _preenDescriptorsInThread(self): log.msg("Malformed file descriptor found. Preening lists.") readers = self.reads.keys() writers = self.writes.keys() self.reads.clear() self.writes.clear() for selDict, selList in ((self.reads, readers), (self.writes, writers)): for selectable in selList: try: select.select([selectable], [selectable], [selectable], 0) except: log.msg("bad descriptor %s" % selectable) else: selDict[selectable] = 1 def _workerInThread(self): try: while 1: fn, args = self.toThreadQueue.get() fn(*args) except SystemExit: pass except: f = failure.Failure() self._sendToMain('Failure', f) def _doSelectInThread(self, timeout): reads = self.reads writes = self.writes while 1: try: r, w, ignored = _select(reads.keys(), writes.keys(), [], timeout) break except ValueError, ve: log.err() self._preenDescriptorsInThread() except TypeError, te: log.err() self._preenDescriptorsInThread() except (select.error, IOError), se: if se.args[0] in (0, 2): if (not reads) and (not writes): return else: raise elif se.args[0] == EINTR: return elif se.args[0] == EBADF: self._preenDescriptorsInThread() else: raise self._sendToMain('Notify', r, w) def _process_Notify(self, r, w): reads = self.reads writes = self.writes _drdw = self._doReadOrWrite _logrun = log.callWithLogger for selectables, method, dct in ((r, "doRead", reads), (w, "doWrite", writes)): for selectable in selectables: if selectable not in dct: continue _logrun(selectable, _drdw, selectable, method, dct) def _process_Failure(self, f): f.raiseException() _doIterationInThread = _doSelectInThread def ensureWorkerThread(self): if self.workerThread is None or not self.workerThread.isAlive(): self.workerThread = Thread(target=self._workerInThread) self.workerThread.start() def doThreadIteration(self, timeout): self._sendToThread(self._doIterationInThread, timeout) self.ensureWorkerThread() msg, args = self.toMainThread.get() getattr(self, '_process_' + msg)(*args) doIteration = doThreadIteration def _interleave(self): while self.running: self.runUntilCurrent() t2 = 
self.timeout() t = self.running and t2 self._sendToThread(self._doIterationInThread, t) yield None msg, args = self.toMainThread.get_nowait() getattr(self, '_process_' + msg)(*args) def interleave(self, waker, *args, **kw): self.startRunning(*args, **kw) loop = self._interleave() def mainWaker(waker=waker, loop=loop): waker(loop.next) self.mainWaker = mainWaker loop.next() self.ensureWorkerThread() def _mainLoopShutdown(self): self.mainWaker = None if self.workerThread is not None: self._sendToThread(raiseException, SystemExit) self.wakeUp() try: while 1: msg, args = self.toMainThread.get_nowait() except Empty: pass self.workerThread.join() self.workerThread = None try: while 1: fn, args = self.toThreadQueue.get_nowait() if fn is self._doIterationInThread: log.msg('Iteration is still in the thread queue!') elif fn is raiseException and args[0] is SystemExit: pass else: fn(*args) except Empty: pass def _doReadOrWrite(self, selectable, method, dict): try: why = getattr(selectable, method)() handfn = getattr(selectable, 'fileno', None) if not handfn: why = _NO_FILENO elif handfn() == -1: why = _NO_FILEDESC except: why = sys.exc_info()[1] log.err() if why: self._disconnectSelectable(selectable, why, method == "doRead") def addReader(self, reader): self._sendToThread(self.reads.__setitem__, reader, 1) self.wakeUp()
Apache License 2.0
victorca25/basicsr
codes/scripts/color_transfer.py
SOTransfer
python
def SOTransfer(source, target, steps=10, batch_size=5, reg_sigmaXY=16.0, reg_sigmaV=5.0, clip=False):
    source = read_image(source).astype("float32")
    target = read_image(target).astype("float32")
    if not np.issubdtype(source.dtype, np.floating):
        raise ValueError("source value must be float")
    if not np.issubdtype(target.dtype, np.floating):
        raise ValueError("target value must be float")
    target = expand_img(image=target)
    source = expand_img(image=source)
    if source.shape != target.shape:
        source = scale_img(source, target)
    target_dtype = target.dtype
    h, w, c = target.shape
    new_target = target.copy()
    for step in range(steps):
        advect = np.zeros((h * w, c), dtype=target_dtype)
        for batch in range(batch_size):
            dir = np.random.normal(size=c).astype(target_dtype)
            dir /= np.linalg.norm(dir)
            projsource = np.sum(new_target * dir, axis=-1).reshape((h * w))
            projtarget = np.sum(source * dir, axis=-1).reshape((h * w))
            idSource = np.argsort(projsource)
            idTarget = np.argsort(projtarget)
            a = projtarget[idTarget] - projsource[idSource]
            for i_c in range(c):
                advect[idSource, i_c] += a * dir[i_c]
        new_target += advect.reshape((h, w, c)) / batch_size
        new_target = _scale_array(new_target, clip=clip)
    if reg_sigmaXY != 0.0:
        target_diff = new_target - target
        new_target = target + cv2.bilateralFilter(target_diff, 0, reg_sigmaV, reg_sigmaXY)
    return new_target.astype("uint8")
Color Transform via Sliced Optimal Transfer, ported by @iperov
https://dcoeurjo.github.io/OTColorTransfer

source      - any float range any channel image
target      - any float range any channel image, same shape as src
steps       - number of solver steps
batch_size  - solver batch size
reg_sigmaXY - apply regularization and sigmaXY of filter, otherwise set to 0.0
reg_sigmaV  - sigmaV of filter

return value
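A minimal usage sketch for this function, assuming two BGR images readable by OpenCV exist at the placeholder paths below (file names are illustrative, not from the source):

import cv2

# First argument: color reference image; second: image whose colors are adjusted toward it.
# Both paths are placeholders for any images that cv2.imread can load.
recolored = SOTransfer("palette.jpg", "photo.jpg", steps=10, batch_size=5, clip=True)
cv2.imwrite("photo_recolored.png", recolored)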
https://github.com/victorca25/basicsr/blob/62cf668ebe35b1b0c9d573b500e129f94430ab5a/codes/scripts/color_transfer.py#L547-L605
import cv2 import numpy as np import argparse import os def read_image(image): if isinstance(image, str): return cv2.imread(image, cv2.IMREAD_COLOR) elif isinstance(image, np.ndarray): return image else: raise ValueError("Unexpected image type. Either a path or a np.ndarray are supported") def scale_img(source=None, target=None): width = int(target.shape[1]) height = int(target.shape[0]) dim = (width, height) return cv2.resize(source, dim, interpolation = cv2.INTER_AREA) def expand_img(image=None): if len(image.shape) < 3: return image[:,:,np.newaxis] else: return image def _imstats(image, calc='direct'): if calc == 'reshape': image = image.astype("float32").reshape(-1, 3).T mu = np.mean(image, axis=1, keepdims=False) sigma = np.std(image, axis=1, keepdims=False) elif calc == 'direct': mu = np.mean(image, axis=(0, 1), keepdims=True) sigma = np.std(image, axis=(0, 1), keepdims=True) elif calc == 'split': (l, a, b) = cv2.split(image) (lMean, lStd) = (l.mean(), l.std()) (aMean, aStd) = (a.mean(), a.std()) (bMean, bStd) = (b.mean(), b.std()) mu = [lMean, aMean, bMean] sigma = [lStd, aStd, bStd] return (mu, sigma) def _scale_array(arr, clip=True, new_range=(0, 255)): if clip: scaled = np.clip(arr, new_range[0], new_range[1]) else: scale_range = (max([arr.min(), new_range[0]]), min([arr.max(), new_range[1]])) scaled = _min_max_scale(arr, new_range=new_range) return scaled def _min_max_scale(arr, new_range=(0, 255)): mn = arr.min() mx = arr.max() if mn < new_range[0] or mx > new_range[1]: scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0] else: scaled = arr return scaled def im2double(im): if im.dtype == 'uint8': out = im.astype('float') / 255 elif im.dtype == 'uint16': out = im.astype('float') / 65535 elif im.dtype == 'float': out = im else: assert False out = np.clip(out, 0, 1) return out def bgr2ycbcr(img, only_y=True): in_img_type = img.dtype img_ = img.astype(np.float32) if in_img_type != np.uint8: img_ *= 255. if only_y: rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0 else: rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128] if in_img_type == np.uint8: rlt = rlt.round() else: rlt /= 255. return rlt.astype(in_img_type) def ycbcr2rgb_(img): in_img_type = img.dtype img_ = img.astype(np.float32) if in_img_type != np.uint8: img_ *= 255. rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] np.putmask(rlt, rlt > 255, 255) np.putmask(rlt, rlt < 0, 0) if in_img_type == np.uint8: rlt = rlt.round() else: rlt /= 255. return rlt.astype(in_img_type) def ycbcr2rgb(img, only_y=True): in_img_type = img.dtype img_ = img.astype(np.float32) if in_img_type != np.uint8: img_ *= 255. mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]]) mat = np.linalg.inv(mat.T) * 255 offset = np.array([[[16, 128, 128]]]) rlt = np.dot((img_ - offset), mat) rlt = np.clip(rlt, 0, 255) if in_img_type == np.uint8: rlt = rlt.round() else: rlt /= 255. 
return rlt.astype(in_img_type) def replace_channels(source=None, target=None, ycbcr = True, hsv = False, transfersv = False): target = read_image(target) source = read_image(source) if source.shape != target.shape: source = scale_img(source, target) if ycbcr: ycbcr_in = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB) y_in, _, _ = cv2.split(ycbcr_in) ycbcr_ref = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB) _, cb_out, cr_out = cv2.split(ycbcr_ref) ycbcr_out = cv2.merge([y_in, cb_out, cr_out]) target = cv2.cvtColor(ycbcr_out, cv2.COLOR_YCR_CB2BGR) if hsv: hsv_in = cv2.cvtColor(target, cv2.COLOR_BGR2HSV) _, s_in, v_in = cv2.split(hsv_in) hsv_ref = cv2.cvtColor(source, cv2.COLOR_BGR2HSV) h_out, _, _ = cv2.split(hsv_ref) if transfersv: hsv_out = stats_transfer(target=hsv_in, source=hsv_ref) _, s_out, v_out = cv2.split(hsv_out) hsv_out = cv2.merge([h_out, s_out, v_out]) else: hsv_out = cv2.merge([h_out, s_in, v_in]) target = cv2.cvtColor(hsv_out, cv2.COLOR_HSV2BGR) return target.astype('uint8') def hue_transfer(source=None, target=None): target = read_image(target) source = read_image(source) hsv_in = cv2.cvtColor(target, cv2.COLOR_BGR2HSV) _, s_in, v_in = cv2.split(hsv_in) hsv_ref = cv2.cvtColor(source, cv2.COLOR_BGR2HSV) hsv_out = stats_transfer(target=hsv_in, source=hsv_ref) h_out, _, _ = cv2.split(hsv_out) hsv_out = cv2.merge([h_out, s_in, v_in]) img_arr_out = cv2.cvtColor(hsv_out, cv2.COLOR_HSV2BGR) return img_arr_out.astype('uint8') def luminance_transfer(source=None, target=None): target = read_image(target) source = read_image(source) ycbcr_in = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB) _, cb_in, cr_in = cv2.split(ycbcr_in) ycbcr_ref = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB) ycbcr_out = stats_transfer(target=ycbcr_in, source=ycbcr_ref) y_out, _, _ = cv2.split(ycbcr_out) ycbcr_out = cv2.merge([y_out, cb_in, cr_in]) img_arr_out = cv2.cvtColor(ycbcr_out, cv2.COLOR_YCR_CB2BGR) return img_arr_out.astype('uint8') def ycbcr_transfer(source=None, target=None, keep_y=True, histo_match=False): target = read_image(target) source = read_image(source) ycbcr_in = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB) if keep_y: y_in, _, _ = cv2.split(ycbcr_in) ycbcr_ref = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB) if histo_match: ycbcr_ref = histogram_matching(reference=ycbcr_ref, image=ycbcr_in) ycbcr_out = stats_transfer(target=ycbcr_in, source=ycbcr_ref) if keep_y: _, cb_out, cr_out = cv2.split(ycbcr_out) ycbcr_out = cv2.merge([y_in, cb_out, cr_out]) img_arr_out = cv2.cvtColor(ycbcr_out, cv2.COLOR_YCR_CB2BGR) return img_arr_out.astype('uint8') def lab_transfer(source=None, target=None): target = read_image(target) source = read_image(source) lab_in = cv2.cvtColor(target, cv2.COLOR_BGR2LAB) lab_ref = cv2.cvtColor(source, cv2.COLOR_BGR2LAB) lab_out = stats_transfer(target=lab_in, source=lab_ref) img_arr_out = cv2.cvtColor(lab_out, cv2.COLOR_LAB2BGR) return img_arr_out.astype('uint8') def stats_transfer(source=None, target=None): target = read_image(target) source = read_image(source) mean_in, std_in = _imstats(target) mean_ref, std_ref = _imstats(source) img_arr_out = (target - mean_in) / std_in * std_ref + mean_ref img_arr_out = _scale_array(img_arr_out) return img_arr_out.astype('uint8') def _match_cumulative_cdf(source, template): src_values, src_unique_indices, src_counts = np.unique(source.ravel(), return_inverse=True, return_counts=True) tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True) src_quantiles = np.cumsum(src_counts) / source.size tmpl_quantiles = np.cumsum(tmpl_counts) / 
template.size interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values) return interp_a_values[src_unique_indices].reshape(source.shape) def histogram_matching(reference=None, image=None, clip=None): image = read_image(image) reference = read_image(reference) image = expand_img(image) reference = expand_img(reference) if image.ndim != reference.ndim: raise ValueError('Image and reference must have the same number ' 'of channels.') if image.shape[-1] != reference.shape[-1]: raise ValueError('Number of channels in the input image and ' 'reference image must match!') matched = np.empty(image.shape, dtype=image.dtype) for channel in range(image.shape[-1]): matched_channel = _match_cumulative_cdf(image[..., channel], reference[..., channel]) matched[..., channel] = matched_channel if clip: matched = _scale_array(matched, clip=clip) return matched.astype("uint8")
Apache License 2.0
kcl-bmeis/exetera
exetera/core/dataset.py
HDF5Dataset.__iter__
python
def __iter__(self):
    return iter(self._dataframes)
Iteration through the dataframes stored in this dataset.
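A small sketch of how this iteration is typically used. It assumes an existing ExeTera HDF5 file at a placeholder path and that Session.open_dataset accepts (path, mode, name); both are assumptions, not taken from this record:

from exetera.core.session import Session

# 'example.hdf5' is a placeholder for an existing ExeTera dataset file.
with Session() as s:
    ds = s.open_dataset('example.hdf5', 'r', 'ds')
    # Iterating the dataset yields the dataframe names (keys of the internal dict).
    for name in ds:
        print(name, ds[name])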
https://github.com/kcl-bmeis/exetera/blob/2149a386b79ae2e6538edee54361614d025ee3b3/exetera/core/dataset.py#L258-L260
from typing import Optional import h5py from exetera.core.abstract_types import DataFrame, Dataset from exetera.core import dataframe as edf class HDF5Dataset(Dataset): def __init__(self, session, dataset_path, mode, name): self.name = name self._session = session self._file = h5py.File(dataset_path, mode) self._dataframes = dict() for group in self._file.keys(): if group not in ('trash',): h5group = self._file[group] dataframe = edf.HDF5DataFrame(self, group, h5group=h5group) self._dataframes[group] = dataframe @property def session(self): return self._session def create_group(self, name: str): return self.create_dataframe(name) def create_dataframe(self, name: str, dataframe: Optional[DataFrame] = None): if dataframe is not None: if not isinstance(dataframe, DataFrame): raise ValueError("If set, 'dataframe' must be of type DataFrame " "but is of type {}".format(type(dataframe))) self._file.create_group(name) h5group = self._file[name] _dataframe = edf.HDF5DataFrame(self, name, h5group) if dataframe is not None: for k, v in dataframe.items(): f = v.create_like(_dataframe, k) if f.indexed: f.indices.write(v.indices[:]) f.values.write(v.values[:]) else: f.data.write(v.data[:]) self._dataframes[name] = _dataframe return _dataframe def require_dataframe(self, name): if self.__contains__(name): return self._dataframes[name] else: return self.create_dataframe(name) def close(self): self._file.close() def copy(self, dataframe, name): copy(dataframe, self, name) def __contains__(self, name: str): return name in self._dataframes def contains_dataframe(self, dataframe: DataFrame): if not isinstance(dataframe, DataFrame): raise TypeError("The field must be a DataFrame object") else: for v in self._dataframes.values(): if id(dataframe) == id(v): return True return False def __getitem__(self, name: str): if not isinstance(name, str): raise TypeError("The name must be a str object.") elif not self.__contains__(name): raise ValueError("Can not find the name from this dataset.") else: return self._dataframes[name] def get_dataframe(self, name: str): return self.__getitem__(name) def __setitem__(self, name: str, dataframe: DataFrame): if not isinstance(name, str): raise TypeError("The name must be a str object.") if not isinstance(dataframe, edf.DataFrame): raise TypeError("The field must be a DataFrame object.") if dataframe.dataset == self: del self._dataframes[dataframe.name] dataframe.name = name self._file.move(dataframe.h5group.name, name) else: copy(dataframe, self, name) def __delitem__(self, name: str): if not self.__contains__(name): raise ValueError("This dataframe does not contain the name to delete.") else: del self._dataframes[name] del self._file[name] return True def delete_dataframe(self, dataframe: DataFrame): name = dataframe.name if name is None: raise ValueError("This dataframe does not contain the field to delete.") else: self.__delitem__(name) def drop(self, name: str): del self._dataframes[name] del self._file[name] def keys(self): return self._dataframes.keys() def values(self): return self._dataframes.values() def items(self): return self._dataframes.items()
Apache License 2.0
neoacheron/midea-ac-py
midea.py
MideaClimateACDevice.__init__
python
def __init__(self, device, temp_step: float, include_off_as_state: bool):
    from midea.device import air_conditioning_device as ac
    self._operation_list = ac.operational_mode_enum.list()
    self._fan_list = ac.fan_speed_enum.list()
    self._swing_list = ac.swing_mode_enum.list()
    support_flags = SUPPORT_FLAGS
    if not include_off_as_state:
        support_flags |= SUPPORT_ON_OFF
    else:
        self._operation_list.append("off")
    self._support_flags = support_flags
    self._device = device
    self._unit_of_measurement = TEMP_CELSIUS
    self._target_temperature_step = temp_step
    self._include_off_as_state = include_off_as_state
    self._changed = False
Initialize the climate device.
https://github.com/neoacheron/midea-ac-py/blob/d8cac1fed8e32abefbf7a486b4edf359a31bc516/midea.py#L69-L89
import logging import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA from homeassistant.components.climate.const import ( SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE_HIGH, SUPPORT_TARGET_TEMPERATURE_LOW, SUPPORT_AWAY_MODE, SUPPORT_FAN_MODE, SUPPORT_OPERATION_MODE, SUPPORT_SWING_MODE, SUPPORT_ON_OFF) from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, TEMP_CELSIUS, TEMP_FAHRENHEIT, ATTR_TEMPERATURE REQUIREMENTS = ['midea==0.1.7', 'pycryptodome==3.7.0'] VERSION = '0.1.7' _LOGGER = logging.getLogger(__name__) CONF_APP_KEY = 'app_key' CONF_TEMP_STEP = 'temp_step' CONF_INCLUDE_OFF_AS_STATE = 'include_off_as_state' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_APP_KEY): cv.string, vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_TEMP_STEP, default=1.0): vol.Coerce(float), vol.Optional(CONF_INCLUDE_OFF_AS_STATE, default=True): vol.Coerce(bool) }) SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_AWAY_MODE | SUPPORT_FAN_MODE | SUPPORT_OPERATION_MODE | SUPPORT_SWING_MODE | SUPPORT_TARGET_TEMPERATURE_HIGH | SUPPORT_TARGET_TEMPERATURE_LOW async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): from midea.client import client as midea_client app_key = config.get(CONF_APP_KEY) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) temp_step = config.get(CONF_TEMP_STEP) include_off_as_state = config.get(CONF_INCLUDE_OFF_AS_STATE) client = midea_client(app_key, username, password) devices = client.devices() entities = [] for device in devices: if(device.type == 0xAC): entities.append(MideaClimateACDevice( device, temp_step, include_off_as_state)) else: _LOGGER.error( "Unsupported device type: 0x{:02x}".format(device.type)) async_add_entities(entities) class MideaClimateACDevice(ClimateDevice):
MIT License
chrisyounger/git_for_splunk
bin/git_for_splunk/aob_py2/splunktaucclib/rest_handler/credentials.py
RestCredentialsContext.realm
python
def realm(self):
    return self.REALM.format(
        base_app=get_base_app_name(),
        endpoint=self._endpoint.internal_endpoint.strip('/'),
    )
RestCredentials context ``realm``. :return:
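A plain-Python sketch of the string this property builds, using a hypothetical add-on name and endpoint path that are not taken from the source:

# REALM template: '__REST_CREDENTIAL__#{base_app}#{endpoint}'
realm = '__REST_CREDENTIAL__#{base_app}#{endpoint}'.format(
    base_app='Splunk_TA_example',                         # hypothetical base app name
    endpoint='configs/conf-example_account'.strip('/'),   # hypothetical internal endpoint
)
print(realm)  # __REST_CREDENTIAL__#Splunk_TA_example#configs/conf-example_account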
https://github.com/chrisyounger/git_for_splunk/blob/c450f32069b5d1087d4e4ebb0803bf7a0f25c60d/bin/git_for_splunk/aob_py2/splunktaucclib/rest_handler/credentials.py#L40-L48
from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import object import json from urllib.parse import urlparse from solnlib.credentials import ( CredentialManager, CredentialNotExistException, ) from .util import get_base_app_name from .error import RestError __all__ = [ 'RestCredentialsContext', 'RestCredentials', ] class RestCredentialsContext(object): REALM = '__REST_CREDENTIAL__#{base_app}#{endpoint}' def __init__(self, endpoint, name, *args, **kwargs): self._endpoint = endpoint self._name = name self._args = args self._kwargs = kwargs
Apache License 2.0
joinemm/miso-bot
modules/util.py
activities_string
python
def activities_string(activities, markdown=True, show_emoji=True):
    if not activities:
        return None
    custom_activity = None
    base_activity = None
    spotify_activity = None
    for act in activities:
        if isinstance(act, discord.CustomActivity):
            custom_activity = act
        elif isinstance(act, discord.BaseActivity):
            base_activity = act
        elif isinstance(act, discord.Spotify):
            spotify_activity = act
        else:
            print(act)
            return "Unknown activity"
    emoji = custom_activity.emoji if custom_activity else None
    message = None
    if message is None and spotify_activity is not None:
        message = "Listening to " + ("**Spotify**" if markdown else "Spotify")
    if custom_activity:
        emoji = custom_activity.emoji
        message = custom_activity.name
    if message is None and base_activity is not None:
        if base_activity.type == discord.ActivityType.playing:
            prefix = "Playing"
        elif base_activity.type == discord.ActivityType.streaming:
            prefix = "Streaming"
        elif base_activity.type == discord.ActivityType.listening:
            prefix = "Listening"
        elif base_activity.type == discord.ActivityType.watching:
            prefix = "Watching"
        message = prefix + " " + (f"**{base_activity.name}**" if markdown else base_activity.name)
    text = ""
    if emoji is not None and show_emoji:
        text += f"{emoji} "
    if message is not None:
        text += message
    return text if text != "" else None
Print user activity as it shows up on the sidebar.
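A usage sketch, assuming discord.py is installed and activities_string is imported from this module; the activity list is built by hand here instead of reading member.activities:

import discord

acts = [
    discord.Game("chess"),                    # a BaseActivity subclass
    discord.CustomActivity(name="studying"),  # a custom status takes precedence as the message
]
print(activities_string(acts))                                      # "studying"
print(activities_string([discord.Game("chess")], markdown=False))   # "Playing chess"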
https://github.com/joinemm/miso-bot/blob/7509273e135a5be07903207b9449f6012dd82fa3/modules/util.py#L727-L777
import asyncio import copy import io import math import os import re import aiohttp import arrow import colorgram import discord import regex from discord.ext import commands from durations_nlp import Duration from durations_nlp.exceptions import InvalidTokenError from PIL import Image, UnidentifiedImageError from libraries import emoji_literals from modules import emojis, exceptions, queries IMAGE_SERVER_HOST = os.environ.get("IMAGE_SERVER_HOST") class ErrorMessage(Exception): pass class PatronCheckFailure(commands.CheckFailure): pass def displayname(member, escape=True): if member is None: return None name = member.name if isinstance(member, discord.Member): name = member.nick or member.name if escape: return escape_md(name) return name async def send_success(ctx, message): await ctx.send( embed=discord.Embed(description=":white_check_mark: " + message, color=int("77b255", 16)) ) async def determine_prefix(bot, message): if message.guild: prefix = bot.cache.prefixes.get(str(message.guild.id), bot.default_prefix) return commands.when_mentioned_or(prefix)(bot, message) return commands.when_mentioned_or(bot.default_prefix)(bot, message) async def is_blacklisted(ctx): if ctx.guild is not None and ctx.guild.id in ctx.bot.cache.blacklist["global"]["guild"]: raise exceptions.BlacklistedGuild() if ctx.channel.id in ctx.bot.cache.blacklist["global"]["channel"]: raise exceptions.BlacklistedChannel() if ctx.author.id in ctx.bot.cache.blacklist["global"]["user"]: raise exceptions.BlacklistedUser() if ctx.guild is not None and ctx.bot.cache.blacklist.get(str(ctx.guild.id)) is not None: if ctx.author.id in ctx.bot.cache.blacklist[str(ctx.guild.id)]["member"]: raise exceptions.BlacklistedMember() if ( ctx.command.qualified_name.lower() in ctx.bot.cache.blacklist[str(ctx.guild.id)]["command"] ): raise exceptions.BlacklistedCommand() return True def flags_to_badges(user): result = [] for flag, value in iter(user.public_flags): if value: result.append(emojis.Badge[flag].value) if isinstance(user, discord.Member) and user.premium_since is not None: result.append(emojis.Badge["boosting"].value) return result or ["-"] def region_flag(region: discord.VoiceRegion): if region in [ discord.VoiceRegion.eu_central, discord.VoiceRegion.eu_west, discord.VoiceRegion.europe, ]: return ":flag_eu:" if region in [ discord.VoiceRegion.us_central, discord.VoiceRegion.us_east, discord.VoiceRegion.us_south, discord.VoiceRegion.us_west, discord.VoiceRegion.vip_us_east, discord.VoiceRegion.vip_us_west, ]: return ":flag_us:" if region in [ discord.VoiceRegion.amsterdam, discord.VoiceRegion.vip_amsterdam, ]: return ":flag_nl:" if region is discord.VoiceRegion.dubai: return "flag_ae" if region is discord.VoiceRegion.frankfurt: return ":flag_de:" if region is discord.VoiceRegion.hongkong: return ":flag_hk:" if region is discord.VoiceRegion.india: return ":flag_in:" if region is discord.VoiceRegion.japan: return ":flag_jp:" if region is discord.VoiceRegion.london: return ":flag_gb:" if region is discord.VoiceRegion.russia: return ":flag_ru:" if region is discord.VoiceRegion.singapore: return ":flag_sg:" if region is discord.VoiceRegion.south_korea: return ":flag_kr:" if region is discord.VoiceRegion.southafrica: return ":flag_za:" if region is discord.VoiceRegion.sydney: return ":flag_au:" if region is discord.VoiceRegion.brazil: return ":flag_br:" return ":woman_shrugging:" async def send_as_pages(ctx, content, rows, maxrows=15, maxpages=10): pages = create_pages(content, rows, maxrows, maxpages) if len(pages) > 1: await 
page_switcher(ctx, pages) else: await ctx.send(embed=pages[0]) async def text_based_page_switcher(ctx, pages, prefix="```", suffix="```", numbers=True): total_rows = len("\n".join(pages).split("\n")) if numbers: seen_rows = 0 for i, page in enumerate(pages, start=1): seen_rows += len(page.split("\n")) page += f"\n{i}/{len(pages)} | {seen_rows}/{total_rows}{suffix}" page = prefix + "\n" + page pages[i - 1] = page pages = TwoWayIterator(pages) msg = await ctx.send(pages.current()) async def switch_page(new_page): await msg.edit(content=new_page) async def previous_page(): content = pages.previous() if content is not None: await switch_page(content) async def next_page(): content = pages.next() if content is not None: await switch_page(content) functions = {"⬅": previous_page, "➡": next_page} asyncio.ensure_future(reaction_buttons(ctx, msg, functions)) async def page_switcher(ctx, pages): if len(pages) == 1: return await ctx.send(embed=pages[0]) pages = TwoWayIterator(pages) for i, page in enumerate(pages.items, start=1): old_footer = page.footer.text if old_footer == discord.Embed.Empty: old_footer = None page.set_footer( text=f"{i}/{len(pages.items)}" + (f" | {old_footer}" if old_footer is not None else "") ) msg = await ctx.send(embed=pages.current()) async def switch_page(content): await msg.edit(embed=content) async def previous_page(): content = pages.previous() if content is not None: await switch_page(content) async def next_page(): content = pages.next() if content is not None: await switch_page(content) functions = {"⬅": previous_page, "➡": next_page} asyncio.ensure_future(reaction_buttons(ctx, msg, functions)) def create_pages(content, rows, maxrows=15, maxpages=10): pages = [] content.description = "" thisrow = 0 rowcount = len(rows) for row in rows: thisrow += 1 if len(content.description) + len(row) < 2000 and thisrow < maxrows + 1: content.description += f"\n{row}" rowcount -= 1 else: thisrow = 1 if len(pages) == maxpages - 1: content.description += f"\n*+ {rowcount} more entries...*" pages.append(content) content = None break pages.append(content) content = copy.deepcopy(content) content.description = f"{row}" rowcount -= 1 if content is not None and not content.description == "": pages.append(content) return pages async def paginate_list(ctx, items, use_locking=False, only_author=False, index_entries=True): pages = TwoWayIterator(items) if index_entries: msg = await ctx.send(f"`{pages.index + 1}.` {pages.current()}") else: msg = await ctx.send(pages.current()) async def next_result(): new_content = pages.next() if new_content is None: return if index_entries: await msg.edit(content=f"`{pages.index + 1}.` {new_content}", embed=None) else: await msg.edit(content=new_content, embed=None) async def previous_result(): new_content = pages.previous() if new_content is None: return await msg.edit(content=new_content, embed=None) async def done(): await msg.edit(content=f"{pages.current()}") return True functions = {"⬅": previous_result, "➡": next_result} if use_locking: functions["🔒"] = done asyncio.ensure_future(reaction_buttons(ctx, msg, functions, only_author=only_author)) async def reaction_buttons( ctx, message, functions, timeout=300.0, only_author=False, single_use=False, only_owner=False ): try: for emojiname in functions: await message.add_reaction(emojiname) except discord.errors.Forbidden: return def check(payload): return ( payload.message_id == message.id and str(payload.emoji) in functions and not payload.member == ctx.bot.user and ( (payload.member.id == 
ctx.bot.owner_id) if only_owner else (payload.member == ctx.author or not only_author) ) ) while True: try: payload = await ctx.bot.wait_for("raw_reaction_add", timeout=timeout, check=check) except asyncio.TimeoutError: break else: try: exits = await functions[str(payload.emoji)]() except discord.errors.NotFound: return try: await message.remove_reaction(payload.emoji, payload.member) except discord.errors.NotFound: pass except discord.errors.Forbidden: await ctx.send( "`error: I'm missing required discord permission [ manage messages ]`" ) if single_use or exits is True: break for emojiname in functions: try: await message.clear_reactions() except (discord.errors.NotFound, discord.errors.Forbidden): pass def message_embed(message): content = discord.Embed() content.set_author(name=f"{message.author}", icon_url=message.author.avatar_url) content.description = message.content content.set_footer(text=f"{message.guild.name} | #{message.channel.name}") content.timestamp = message.created_at content.colour = message.author.color if message.attachments: content.set_image(url=message.attachments[0].proxy_url) return content def timefromstring(s): s = s.removeprefix("for") try: return int(Duration(s).to_seconds()) except InvalidTokenError: return None def stringfromtime(t, accuracy=4): m, s = divmod(t, 60) h, m = divmod(m, 60) d, h = divmod(h, 24) components = [] if d > 0: components.append(f"{int(d)} day" + ("s" if d != 1 else "")) if h > 0: components.append(f"{int(h)} hour" + ("s" if h != 1 else "")) if m > 0: components.append(f"{int(m)} minute" + ("s" if m != 1 else "")) if s > 0: components.append(f"{int(s)} second" + ("s" if s != 1 else "")) return " ".join(components[:accuracy]) def get_xp(level): return math.ceil(math.pow((level - 1) / (0.05 * (1 + math.sqrt(5))), 2)) def get_level(xp): return math.floor(0.05 * (1 + math.sqrt(5)) * math.sqrt(xp)) + 1 def xp_to_next_level(level): return get_xp(level + 1) - get_xp(level) def xp_from_message(message): words = message.content.split(" ") eligible_words = 0 for x in words: if len(x) > 1: eligible_words += 1 xp = eligible_words + (10 * len(message.attachments)) if xp == 0: xp = 1 return min(xp, 50) async def get_user(ctx, argument, fallback=None): if argument is None: return fallback try: return await commands.UserConverter().convert(ctx, argument) except commands.errors.BadArgument: return fallback async def get_member(ctx, argument, fallback=None, try_user=False): if argument is None: return fallback try: return await commands.MemberConverter().convert(ctx, argument) except commands.errors.BadArgument: if try_user: return await get_user(ctx, argument, fallback) return fallback async def get_textchannel(ctx, argument, fallback=None, guildfilter=None): if argument is None: return fallback if guildfilter is None: try: return await commands.TextChannelConverter().convert(ctx, argument) except commands.errors.BadArgument: return fallback else: result = discord.utils.find( lambda m: argument in (m.name, m.id), guildfilter.text_channels ) return result or fallback async def get_role(ctx, argument, fallback=None): if argument is None: return fallback try: return await commands.RoleConverter().convert(ctx, argument) except commands.errors.BadArgument: return fallback async def get_color(ctx, argument, fallback=None): if argument is None: return fallback try: return await commands.ColourConverter().convert(ctx, argument) except commands.errors.BadArgument: return fallback async def get_emoji(ctx, argument, fallback=None): if argument is None: return 
fallback try: return await commands.EmojiConverter().convert(ctx, argument) except commands.errors.BadArgument: try: return await commands.PartialEmojiConverter().convert(ctx, argument) except commands.errors.BadArgument: return fallback async def get_guild(ctx, argument, fallback=None): result = discord.utils.find(lambda m: argument in (m.name, m.id), ctx.bot.guilds) return result or fallback async def command_group_help(ctx): if ctx.invoked_subcommand is None: await ctx.bot.help_command.group_help_brief(ctx, ctx.command) async def send_command_help(ctx): await ctx.send_help(ctx.invoked_subcommand or ctx.command) def escape_md(s): transformations = {regex.escape(c): "\\" + c for c in ("*", "`", "_", "~", "\\", "||")} def replace(obj): return transformations.get(regex.escape(obj.group(0)), "") pattern = regex.compile("|".join(transformations.keys())) return pattern.sub(replace, s) def map_to_range(input_value, input_start, input_end, output_start, output_end): return output_start + ((output_end - output_start) / (input_end - input_start)) * ( input_value - input_start ) def rgb_to_hex(rgb): r, g, b = rgb def clamp(x): return max(0, min(x, 255)) return "{0:02x}{1:02x}{2:02x}".format(clamp(r), clamp(g), clamp(b)) async def color_from_image_url(url, fallback="E74C3C", return_color_object=False): if url.strip() == "": return fallback try: async with aiohttp.ClientSession() as session: async with session.get(url) as response: image = Image.open(io.BytesIO(await response.read())) colors = colorgram.extract(image, 1) dominant_color = colors[0].rgb if return_color_object: return dominant_color return rgb_to_hex(dominant_color) except Exception as e: print(e) return fallback def find_unicode_emojis(text): emoji_list = set() data = regex.findall( r"(?:\U0001f1e6[\U0001f1e8-\U0001f1ec\U0001f1ee\U0001f1f1\U0001f1f2\U0001f1f4\U0001f1f6-\U0001f1fa\U0001f1fc\U0001f1fd\U0001f1ff])\|(?:\U0001f1e7[\U0001f1e6\U0001f1e7\U0001f1e9-\U0001f1ef\U0001f1f1-\U0001f1f4\U0001f1f6-\U0001f1f9\U0001f1fb\U0001f1fc\U0001f1fe\U0001f1ff])|(?:\U0001f1e8[\U0001f1e6\U0001f1e8\U0001f1e9\U0001f1eb-\U0001f1ee\U0001f1f0-\U0001f1f5\U0001f1f7\U0001f1fa-\U0001f1ff])|(?:\U0001f1e9[\U0001f1ea\U0001f1ec\U0001f1ef\U0001f1f0\U0001f1f2\U0001f1f4\U0001f1ff])|(?:\U0001f1ea[\U0001f1e6\U0001f1e8\U0001f1ea\U0001f1ec\U0001f1ed\U0001f1f7-\U0001f1fa])|(?:\U0001f1eb[\U0001f1ee-\U0001f1f0\U0001f1f2\U0001f1f4\U0001f1f7])|(?:\U0001f1ec[\U0001f1e6\U0001f1e7\U0001f1e9-\U0001f1ee\U0001f1f1-\U0001f1f3\U0001f1f5-\U0001f1fa\U0001f1fc\U0001f1fe])|(?:\U0001f1ed[\U0001f1f0\U0001f1f2\U0001f1f3\U0001f1f7\U0001f1f9\U0001f1fa])|(?:\U0001f1ee[\U0001f1e8-\U0001f1ea\U0001f1f1-\U0001f1f4\U0001f1f6-\U0001f1f9])|(?:\U0001f1ef[\U0001f1ea\U0001f1f2\U0001f1f4\U0001f1f5])|(?:\U0001f1f0[\U0001f1ea\U0001f1ec-\U0001f1ee\U0001f1f2\U0001f1f3\U0001f1f5\U0001f1f7\U0001f1fc\U0001f1fe\U0001f1ff])|(?:\U0001f1f1[\U0001f1e6-\U0001f1e8\U0001f1ee\U0001f1f0\U0001f1f7-\U0001f1fb\U0001f1fe])|(?:\U0001f1f2[\U0001f1e6\U0001f1e8-\U0001f1ed\U0001f1f0-\U0001f1ff])|(?:\U0001f1f3[\U0001f1e6\U0001f1e8\U0001f1ea-\U0001f1ec\U0001f1ee\U0001f1f1\U0001f1f4\U0001f1f5\U0001f1f7\U0001f1fa\U0001f1ff])|\U0001f1f4\U0001f1f2|(?:\U0001f1f4[\U0001f1f2])|(?:\U0001f1f5[\U0001f1e6\U0001f1ea-\U0001f1ed\U0001f1f0-\U0001f1f3\U0001f1f7-\U0001f1f9\U0001f1fc\U0001f1fe])|\U0001f1f6\U0001f1e6|(?:\U0001f1f6[\U0001f1e6])|(?:\U0001f1f7[\U0001f1ea\U0001f1f4\U0001f1f8\U0001f1fa\U0001f1fc])|(?:\U0001f1f8[\U0001f1e6-\U0001f1ea\U0001f1ec-\U0001f1f4\U0001f1f7-\U0001f1f9\U0001f1fb\U0001f1fd-\U0001f1ff])|(?:\U0001f1f9[\U0001f1e6\U0001f
1e8\U0001f1e9\U0001f1eb-\U0001f1ed\U0001f1ef-\U0001f1f4\U0001f1f7\U0001f1f9\U0001f1fb\U0001f1fc\U0001f1ff])|(?:\U0001f1fa[\U0001f1e6\U0001f1ec\U0001f1f2\U0001f1f8\U0001f1fe\U0001f1ff])|(?:\U0001f1fb[\U0001f1e6\U0001f1e8\U0001f1ea\U0001f1ec\U0001f1ee\U0001f1f3\U0001f1fa])|(?:\U0001f1fc[\U0001f1eb\U0001f1f8])|\U0001f1fd\U0001f1f0|(?:\U0001f1fd[\U0001f1f0])|(?:\U0001f1fe[\U0001f1ea\U0001f1f9])|(?:\U0001f1ff[\U0001f1e6\U0001f1f2\U0001f1fc])|(?:\U0001f3f3\ufe0f\u200d\U0001f308)|(?:\U0001f441\u200d\U0001f5e8)|(?:[\U0001f468\U0001f469]\u200d\u2764\ufe0f\u200d(?:\U0001f48b\u200d)?[\U0001f468\U0001f469])|(?:(?:(?:\U0001f468\u200d[\U0001f468\U0001f469])|(?:\U0001f469\u200d\U0001f469))(?:(?:\u200d\U0001f467(?:\u200d[\U0001f467\U0001f466])?)|(?:\u200d\U0001f466\u200d\U0001f466)))|(?:(?:(?:\U0001f468\u200d\U0001f468)|(?:\U0001f469\u200d\U0001f469))\u200d\U0001f466)|[\u2194-\u2199]|[\u23e9-\u23f3]|[\u23f8-\u23fa]|[\u25fb-\u25fe]|[\u2600-\u2604]|[\u2638-\u263a]|[\u2648-\u2653]|[\u2692-\u2694]|[\u26f0-\u26f5]|[\u26f7-\u26fa]|[\u2708-\u270d]|[\u2753-\u2755]|[\u2795-\u2797]|[\u2b05-\u2b07]|[\U0001f191-\U0001f19a]|[\U0001f1e6-\U0001f1ff]|[\U0001f232-\U0001f23a]|[\U0001f300-\U0001f321]|[\U0001f324-\U0001f393]|[\U0001f399-\U0001f39b]|[\U0001f39e-\U0001f3f0]|[\U0001f3f3-\U0001f3f5]|[\U0001f3f7-\U0001f3fa]|[\U0001f400-\U0001f4fd]|[\U0001f4ff-\U0001f53d]|[\U0001f549-\U0001f54e]|[\U0001f550-\U0001f567]|[\U0001f573-\U0001f57a]|[\U0001f58a-\U0001f58d]|[\U0001f5c2-\U0001f5c4]|[\U0001f5d1-\U0001f5d3]|[\U0001f5dc-\U0001f5de]|[\U0001f5fa-\U0001f64f]|[\U0001f680-\U0001f6c5]|[\U0001f6cb-\U0001f6d2]|[\U0001f6e0-\U0001f6e5]|[\U0001f6f3-\U0001f6f6]|[\U0001f910-\U0001f91e]|[\U0001f920-\U0001f927]|[\U0001f933-\U0001f93a]|[\U0001f93c-\U0001f93e]|[\U0001f940-\U0001f945]|[\U0001f947-\U0001f94b]|[\U0001f950-\U0001f95e]|[\U0001f980-\U0001f991]|\u00a9|\u00ae|\u203c|\u2049|\u2122|\u2139|\u21a9|\u21aa|\u231a|\u231b|\u2328|\u23cf|\u24c2|\u25aa|\u25ab|\u25b6|\u25c0|\u260e|\u2611|\u2614|\u2615|\u2618|\u261d|\u2620|\u2622|\u2623|\u2626|\u262a|\u262e|\u262f|\u2660|\u2663|\u2665|\u2666|\u2668|\u267b|\u267f|\u2696|\u2697|\u2699|\u269b|\u269c|\u26a0|\u26a1|\u26aa|\u26ab|\u26b0|\u26b1|\u26bd|\u26be|\u26c4|\u26c5|\u26c8|\u26ce|\u26cf|\u26d1|\u26d3|\u26d4|\u26e9|\u26ea|\u26fd|\u2702|\u2705|\u270f|\u2712|\u2714|\u2716|\u271d|\u2721|\u2728|\u2733|\u2734|\u2744|\u2747|\u274c|\u274e|\u2757|\u2763|\u2764|\u27a1|\u27b0|\u27bf|\u2934|\u2935|\u2b1b|\u2b1c|\u2b50|\u2b55|\u3030|\u303d|\u3297|\u3299|\U0001f004|\U0001f0cf|\U0001f170|\U0001f171|\U0001f17e|\U0001f17f|\U0001f18e|\U0001f201|\U0001f202|\U0001f21a|\U0001f22f|\U0001f250|\U0001f251|\U0001f396|\U0001f397|\U0001f56f|\U0001f570|\U0001f587|\U0001f590|\U0001f595|\U0001f596|\U0001f5a4|\U0001f5a5|\U0001f5a8|\U0001f5b1|\U0001f5b2|\U0001f5bc|\U0001f5e1|\U0001f5e3|\U0001f5e8|\U0001f5ef|\U0001f5f3|\U0001f6e9|\U0001f6eb|\U0001f6ec|\U0001f6f0|\U0001f930|\U0001f9c0|[#|0-9]\u20e3", text, ) for word in data: name = emoji_literals.UNICODE_TO_NAME.get(word) if name is not None: emoji_list.add(name) return emoji_list def find_custom_emojis(text): emoji_list = set() data = regex.findall(r"<(a?):([a-zA-Z0-9\_]+):([0-9]+)>", text) for _a, emoji_name, emoji_id in data: emoji_list.add((emoji_name, emoji_id)) return emoji_list async def image_info_from_url(url): async with aiohttp.ClientSession() as session: async with session.get(str(url)) as response: filesize = int(response.headers.get("Content-Length")) / 1024 filetype = response.headers.get("Content-Type") try: image = Image.open(io.BytesIO(await response.read())) 
except UnidentifiedImageError: return None dimensions = image.size if filesize > 1024: filesize = f"{filesize/1024:.2f}MB" else: filesize = f"{filesize:.2f}KB" return { "filesize": filesize, "filetype": filetype, "dimensions": f"{dimensions[0]}x{dimensions[1]}", } class OptionalSubstitute(dict): def __missing__(self, key): return "{" + key + "}" def create_welcome_embed(user, guild, messageformat): if messageformat is None: messageformat = "Welcome **{username}** {mention} to **{server}**" content = discord.Embed(title="New member! :wave:", color=int("5dadec", 16)) content.set_thumbnail(url=user.avatar_url) content.timestamp = arrow.utcnow().datetime content.set_footer(text=f"👤#{len(guild.members)}") substitutes = OptionalSubstitute( { "mention": user.mention, "user": user, "id": user.id, "server": guild.name, "guild": guild.name, "username": user.name, } ) content.description = messageformat.format_map(substitutes) return content def create_goodbye_message(user, guild, messageformat): if messageformat is None: messageformat = "Goodbye **{username}** {mention}" substitutes = OptionalSubstitute( { "mention": user.mention, "user": user, "id": user.id, "server": guild.name, "guild": guild.name, "username": user.name, } ) return messageformat.format_map(substitutes)
MIT License
jonzia/recurrent_autoencoder
network.py
DecoderNetwork.__init__
python
def __init__(self, batch_size, num_steps, input_features):
    self.batch_size = batch_size
    self.num_steps = num_steps
    self.num_lstm_hidden = 15
    self.input_features = input_features
    self.i_keep_prob = 1.0
    self.o_keep_prob = 1.0
Initialize network attributes
https://github.com/jonzia/recurrent_autoencoder/blob/fa30471dbae1fcf96a5e949bbb76ddea63086f5f/network.py#L30-L39
class EncoderNetwork(): def __init__(self): self.batch_size = 5 self.num_steps = 100 self.num_lstm_hidden = 15 self.input_features = 9 self.i_keep_prob = 1.0 self.o_keep_prob = 1.0 self.latent = 10 class DecoderNetwork():
MIT License
bloomberg/phabricator-tools
py/abd/abdt_git.py
Repo.get_remote_branches
python
def get_remote_branches(self):
    return phlgit_branch.get_remote(self, self._remote)
Return a list of string names of remote branches. :returns: list of string names
https://github.com/bloomberg/phabricator-tools/blob/09bd1587fe8945d93a891162fd4c89640c6fada7/py/abd/abdt_git.py#L147-L153
from __future__ import absolute_import from __future__ import division from __future__ import print_function import phlgit_branch import phlgit_checkout import phlgit_commit import phlgit_diff import phlgit_fetch import phlgit_log import phlgit_merge import phlgit_push import phlgit_showref import phlgitu_ref import abdt_branch import abdt_lander import abdt_logging import abdt_naming _ARCYD_REFSPACE = 'refs/arcyd' _PRIVATE_ARCYD_BRANCHSPACE = '__private_arcyd' _LANDED_ARCHIVE_BRANCH_MESSAGE = """ Create an archive branch for landed branches Landed branches will be automatically merged here by Arcyd for your reference. This branch is useful for: o: cleaning up branches contained by the landed branch (see 'git branch --merged') o: finding the pre-landed version of a commit (see 'git log --grep' - you can search for the landed sha1) o: keeping track of Arcyd's landing activity (see 'git log --first-parent') """.strip() ARCYD_LANDED_REF = "{}/landed".format(_ARCYD_REFSPACE) _ARCYD_LANDED_BRANCH = "{}/landed".format(_PRIVATE_ARCYD_BRANCHSPACE) ARCYD_LANDED_BRANCH_FQ = "refs/heads/" + _ARCYD_LANDED_BRANCH _ABANDONED_ARCHIVE_BRANCH_MESSAGE = """ Create an archive branch for abandoned branches Abandoned branches will be automatically merged here by Arcyd for your reference. This branch is useful for: o: keeping track of Arcyd's abandoning activity (see 'git log --first-parent') o: recovering abandoned branches (use 'git branch <branch name> <commit hash>') """.strip() ARCYD_ABANDONED_REF = "{}/abandoned".format(_ARCYD_REFSPACE) _ARCYD_ABANDONED_BRANCH = "{}/abandoned".format(_PRIVATE_ARCYD_BRANCHSPACE) ARCYD_ABANDONED_BRANCH_FQ = "refs/heads/" + _ARCYD_ABANDONED_BRANCH class Repo(object): def __init__( self, refcache_repo, differ_cache, remote, description): super(Repo, self).__init__() self._repo = refcache_repo self._remote = remote self._description = description self._is_landing_archive_enabled = None self._differ_cache = differ_cache def is_identical(self, branch1, branch2): return phlgit_branch.is_identical(self, branch1, branch2) def _is_ref(self, ref): ref_names = phlgit_showref.names(self) return ref in ref_names
Apache License 2.0
zapatacomputing/z-quantum-core
src/python/zquantum/core/utils.py
get_ordered_list_of_bitstrings
python
def get_ordered_list_of_bitstrings(num_qubits: int) -> List[str]:
    bitstrings = []
    for i in range(2 ** num_qubits):
        bitstring = "{0:b}".format(i)
        while len(bitstring) < num_qubits:
            bitstring = "0" + bitstring
        bitstrings.append(bitstring)
    return bitstrings
Create list of binary strings corresponding to 2^num_qubits integers and save them in ascending order.

Args:
    num_qubits: number of binary digits in each bitstring

Returns:
    The ordered bitstring representations of the integers
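A quick illustration of the ordering, assuming the function is imported from zquantum.core.utils:

print(get_ordered_list_of_bitstrings(2))      # ['00', '01', '10', '11']
print(get_ordered_list_of_bitstrings(3)[:4])  # ['000', '001', '010', '011']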
https://github.com/zapatacomputing/z-quantum-core/blob/5fa4fd5d8682bbae696f8c2c2d386133ccf7f378/src/python/zquantum/core/utils.py#L645-L661
import collections import copy import importlib import inspect import json import sys import warnings from functools import partial from types import FunctionType from typing import Any, Dict, Iterable, List, Optional, Tuple import lea import numpy as np import sympy from openfermion import InteractionRDM, hermitian_conjugated from .typing import AnyPath, LoadSource, Specs SCHEMA_VERSION = "zapata-v1" RNDSEED = 12345 def convert_dict_to_array(dictionary: dict) -> np.ndarray: array = np.array(dictionary["real"]) if dictionary.get("imag"): array = array + 1j * np.array(dictionary["imag"]) return array def convert_array_to_dict(array: np.ndarray) -> dict: dictionary = {} if np.iscomplexobj(array): dictionary["real"] = array.real.tolist() dictionary["imag"] = array.imag.tolist() else: dictionary["real"] = array.tolist() return dictionary def dec2bin(number: int, length: int) -> List[int]: if pow(2, length) < number: sys.exit( "Insufficient number of bits for representing the number {}".format(number) ) bit_str = bin(number) bit_str = bit_str[2 : len(bit_str)] bit_string = [int(x) for x in list(bit_str)] if len(bit_string) < length: len_zeros = length - len(bit_string) bit_string = [int(x) for x in list(np.zeros(len_zeros))] + bit_string return bit_string def bin2dec(x: List[int]) -> int: dec = 0 coeff = 1 for i in range(len(x)): dec = dec + coeff * x[len(x) - 1 - i] coeff = coeff * 2 return dec pauli_x = np.array([[0.0, 1.0], [1.0, 0.0]]) pauli_y = np.array([[0.0, -1.0j], [1.0j, 0.0]]) pauli_z = np.array([[1.0, 0.0], [0.0, -1.0]]) identity = np.array([[1.0, 0.0], [0.0, 1.0]]) def is_identity(u: np.ndarray, tol=1e-15) -> bool: dims = np.array(u).shape if dims[0] != dims[1]: raise Exception("Input matrix is not square.") return np.allclose(u, np.eye(u.shape[0]), atol=tol) def is_unitary(u: np.ndarray, tol=1e-15) -> bool: dims = np.array(u).shape if dims[0] != dims[1]: raise Exception("Input matrix is not square.") test_matrix = np.dot(hermitian_conjugated(np.array(u)), u) return is_identity(test_matrix, tol) def compare_unitary(u1: np.ndarray, u2: np.ndarray, tol: float = 1e-15) -> bool: if not is_unitary(u1, tol): raise Exception("The first input matrix is not unitary.") if not is_unitary(u2, tol): raise Exception("The second input matrix is not unitary.") test_matrix = np.dot(u1.conj().T, u2) phase = test_matrix.item((0, 0)) ** -1 return is_identity(phase * test_matrix, tol) def sample_from_probability_distribution( probability_distribution: dict, n_samples: int ) -> collections.Counter: if isinstance(probability_distribution, dict): prob_pmf = lea.pmf(probability_distribution) sampled_dict: collections.Counter = collections.Counter( prob_pmf.random(n_samples) ) return sampled_dict else: raise RuntimeError( "Probability distribution should be a dictionary with key value \ being the thing being sampled and the value being probability of getting \ sampled " ) def convert_bitstrings_to_tuples(bitstrings: Iterable[str]) -> List[Tuple[int, ...]]: measurements: List[Tuple[int, ...]] = [] for bitstring in bitstrings: measurement: Tuple[int, ...] 
= () for char in bitstring: measurement = measurement + (int(char),) measurements.append(measurement) return measurements def convert_tuples_to_bitstrings(tuples: List[Tuple[int]]) -> List[str]: bitstrings = ["".join(map(str, tup)) for tup in tuples] return bitstrings class ValueEstimate(float): def __init__(self, value, precision: Optional[float] = None): super().__init__() self.precision = precision def __new__(cls, value, precision=None): return super().__new__(cls, value) @property def value(self): warnings.warn( "The value attribute is deprecated. Use ValueEstimate object directly " "instead.", DeprecationWarning, ) return float(self) def __eq__(self, other): super_eq = super().__eq__(other) if super_eq is NotImplemented: return super_eq return super_eq and self.precision == getattr(other, "precision", None) def __ne__(self, other): return not self == other def __str__(self): value_str = super().__str__() if self.precision is not None: return f"{value_str} ± {self.precision}" else: return f"{value_str}" def to_dict(self): data = {"schema": SCHEMA_VERSION + "-value_estimate"} if type(self.value).__module__ == np.__name__: data["value"] = self.value.item() else: data["value"] = self.value if type(self.precision).__module__ == np.__name__: data["precision"] = self.precision.item() else: data["precision"] = self.precision return data @classmethod def from_dict(cls, dictionary): value = dictionary["value"] if "precision" in dictionary: precision = dictionary["precision"] return cls(value, precision) else: return cls(value) def load_value_estimate(file: LoadSource) -> ValueEstimate: if isinstance(file, str): with open(file, "r") as f: data = json.load(f) else: data = json.load(file) return ValueEstimate.from_dict(data) def save_value_estimate(value_estimate: ValueEstimate, filename: AnyPath): dictionary = value_estimate.to_dict() dictionary["schema"] = SCHEMA_VERSION + "-value_estimate" with open(filename, "w") as f: f.write(json.dumps(dictionary, indent=2)) def load_list(file: LoadSource) -> List: if isinstance(file, str): with open(file, "r") as f: data = json.load(f) else: data = json.load(file) return data["list"] def save_list(array: List, filename: AnyPath, artifact_name: str = ""): dictionary: Dict[str, Any] = {} dictionary["schema"] = SCHEMA_VERSION + "-" + artifact_name + "-list" dictionary["list"] = array with open(filename, "w") as f: f.write(json.dumps(dictionary, indent=2)) def save_generic_dict(dictionary: Dict, filename: AnyPath): dictionary_stored = {"schema": SCHEMA_VERSION + "-dict"} dictionary_stored.update(dictionary) with open(filename, "w") as f: f.write(json.dumps(dictionary_stored, indent=2)) def get_func_from_specs(specs: Dict): warnings.warn( "zquantum.core.utils.get_func_from_specs will be deprecated. 
Please use " "zquantum.core.utils.create_object instead", DeprecationWarning, ) return create_object(specs) def create_object(specs: Dict, **kwargs): specs = copy.copy(specs) module_name = specs.pop("module_name") module = importlib.import_module(module_name) creator_name = specs.pop("function_name") creator = getattr(module, creator_name) for key in specs.keys(): if key in kwargs.keys(): raise ValueError("Cannot have same parameter assigned to multiple values") if isinstance(creator, FunctionType): if kwargs != {} or specs != {}: function_parameter_names = inspect.signature(creator).parameters.keys() function_args = { key: value for key, value in {**specs, **kwargs}.items() if key in function_parameter_names } return partial(creator, **function_args) else: return creator else: return creator(**specs, **kwargs) def load_noise_model(file: LoadSource): if isinstance(file, str): with open(file, "r") as f: specs = json.load(f) else: specs = json.load(file) noise_model_data = specs.pop("data", None) func = create_object(specs) return func(noise_model_data) def save_noise_model( noise_model_data: dict, module_name: str, function_name: str, filename ): data = { "module_name": module_name, "function_name": function_name, "data": noise_model_data, } with open(filename, "w") as f: f.write(json.dumps(data, indent=2)) def create_symbols_map( symbols: List[sympy.Symbol], params: np.ndarray ) -> Dict[sympy.Symbol, float]: if len(symbols) != len(params): raise ( ValueError( "Length of symbols: {0} doesn't match length of params: {1}".format( len(symbols), len(params) ) ) ) return {symbol: param for symbol, param in zip(symbols, params.tolist())} def save_timing(walltime: float, filename: AnyPath) -> None: with open(filename, "w") as f: f.write( json.dumps({"schema": SCHEMA_VERSION + "-timing", "walltime": walltime}) ) def save_nmeas_estimate( nmeas: float, nterms: int, filename: AnyPath, frame_meas: np.ndarray = None ) -> None: data: Dict[str, Any] = {} data["schema"] = SCHEMA_VERSION + "-hamiltonian_analysis" data["K"] = nmeas data["nterms"] = nterms if frame_meas is not None: data["frame_meas"] = convert_array_to_dict(frame_meas) with open(filename, "w") as f: f.write(json.dumps(data, indent=2)) def load_nmeas_estimate(filename: AnyPath) -> Tuple[float, int, np.ndarray]: with open(filename, "r") as f: data = json.load(f) frame_meas = convert_dict_to_array(data["frame_meas"]) K_coeff = data["K"] nterms = data["nterms"] return K_coeff, nterms, frame_meas def scale_and_discretize(values: Iterable[float], total: int) -> List[int]: value_sum = sum(values) scale_factor = total / value_sum result = [np.floor(value * scale_factor) for value in values] remainders = [ value * scale_factor - np.floor(value * scale_factor) for value in values ] indexes_sorted_by_remainder = np.argsort(remainders)[::-1] for index in range(int(round(total - sum(result)))): result[indexes_sorted_by_remainder[index]] += 1 result = [int(value) for value in result] assert sum(result) == total, "The scaled list does not sum to the desired total." 
return result def hf_rdm(n_alpha: int, n_beta: int, n_orbitals: int) -> InteractionRDM: occ = np.zeros(2 * n_orbitals) occ[: (2 * n_alpha) : 2] = 1 occ[1 : (2 * n_beta + 1) : 2] = 1 one_body_tensor = np.diag(occ) two_body_tensor = np.zeros([2 * n_orbitals for i in range(4)]) for i in range(2 * n_orbitals): for j in range(2 * n_orbitals): if i != j and occ[i] and occ[j]: two_body_tensor[i, j, j, i] = 1 two_body_tensor[i, j, i, j] = -1 return InteractionRDM(one_body_tensor, two_body_tensor) def load_from_specs(specs: Specs): if isinstance(specs, str): specs = json.loads(specs) return create_object(specs)
Apache License 2.0
sammchardy/python-binance-chain
binance_chain/http.py
HttpApiClient.get_block_exchange_fee
python
def get_block_exchange_fee(
        self, address: Optional[str] = None, offset: Optional[int] = 0,
        total: Optional[int] = 0, limit: Optional[int] = 500,
        start_time: Optional[int] = None, end_time: Optional[int] = None
):
    data = {}
    if address is not None:
        data['address'] = address
    if offset is not None:
        data['offset'] = offset
    if limit is not None:
        data['limit'] = limit
    if start_time is not None:
        data['start'] = start_time
    if end_time is not None:
        data['end'] = end_time
    if total is not None:
        data['total'] = total

    return self._get("block-exchange-fee", data=data)
Trading fee of the address, grouped by block

https://docs.binance.org/api-reference/dex-api/paths.html#apiv1block-exchange-fee

:param address:
:param offset:
:param limit:
:param start_time:
:param end_time:
:param total:

.. code:: python

    fees = client.get_block_exchange_fee('<address>')

:return: API Response
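A minimal usage sketch (not taken from the repository's docs): it assumes a client pointed at the production environment, and the address string is an illustrative placeholder.

from binance_chain.http import HttpApiClient
from binance_chain.environment import BinanceEnvironment

# Fees charged to one address, grouped by block (placeholder address below).
client = HttpApiClient(env=BinanceEnvironment.get_production_env())
fees = client.get_block_exchange_fee(address='<bech32 address>', limit=100)
print(fees)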
https://github.com/sammchardy/python-binance-chain/blob/19d7d639cc912a27ec86831338c2a2dc96289d50/binance_chain/http.py#L690-L726
import logging import ujson from typing import Optional, Dict import asyncio import aiohttp import requests import binance_chain.messages from binance_chain.environment import BinanceEnvironment from binance_chain.constants import PeerType, KlineInterval, OrderSide, OrderStatus, TransactionSide, TransactionType from binance_chain.exceptions import ( BinanceChainAPIException, BinanceChainRequestException, BinanceChainBroadcastException ) requests.models.json = ujson class BaseApiClient: API_VERSION = 'v1' def __init__(self, env: Optional[BinanceEnvironment] = None, requests_params: Optional[Dict] = None, **kwargs): self._env = env or BinanceEnvironment.get_production_env() self._requests_params = requests_params self.session = self._init_session(**kwargs) def _init_session(self, **kwargs): session = requests.session() headers = self._get_headers() session.headers.update(headers) return session @property def env(self): return self._env def _create_uri(self, path): full_path = '/api/{}/{}'.format(self.API_VERSION, path) return '{}{}'.format(self._env.api_url, full_path) def _get_headers(self): return { 'Accept': 'application/json', 'User-Agent': 'python-binance-chain', } def _get_request_kwargs(self, method, **kwargs): kwargs['timeout'] = 10 if self._requests_params: kwargs.update(self._requests_params) kwargs['data'] = kwargs.get('data', {}) kwargs['headers'] = kwargs.get('headers', {}) if kwargs['data'] and method == 'get': kwargs['params'] = kwargs['data'] del(kwargs['data']) if method == 'post': kwargs['headers']['content-type'] = 'text/plain' return kwargs class HttpApiClient(BaseApiClient): def _request(self, method, path, **kwargs): uri = self._create_uri(path) kwargs = self._get_request_kwargs(method, **kwargs) response = getattr(self.session, method)(uri, **kwargs) return self._handle_response(response) @staticmethod def _handle_response(response): if not str(response.status_code).startswith('2'): raise BinanceChainAPIException(response, response.status_code) try: res = response.json() if 'code' in res and res['code'] not in [0, "200000"]: raise BinanceChainAPIException(response, response.status_code) if 'success' in res and not res['success']: raise BinanceChainAPIException(response, response.status_code) if 'data' in res: res = res['data'] return res except ValueError: raise BinanceChainRequestException('Invalid Response: %s' % response.text) def _get(self, path, **kwargs): return self._request('get', path, **kwargs) def _post(self, path, **kwargs): return self._request('post', path, **kwargs) def _put(self, path, **kwargs): return self._request('put', path, **kwargs) def _delete(self, path, **kwargs): return self._request('delete', path, **kwargs) def get_time(self): return self._get("time") def get_node_info(self): return self._get("node-info") def get_validators(self): return self._get("validators") def get_peers(self, peer_type: Optional[PeerType] = None): peers = self._get("peers") if peer_type: peers = [p for p in peers if peer_type in p['capabilities']] return peers def get_node_peers(self): return self.get_peers(peer_type=PeerType.NODE) def get_websocket_peers(self): return self.get_peers(peer_type=PeerType.WEBSOCKET) def get_account(self, address: str): return self._get(f"account/{address}") def get_account_sequence(self, address: str): return self._get(f"account/{address}/sequence") def get_transaction(self, transaction_hash: str): return self._get(f"tx/{transaction_hash}?format=json") def get_tokens(self): return self._get("tokens") def get_markets(self): return 
self._get("markets?limit=1000") def get_fees(self): return self._get("fees") def get_order_book(self, symbol: str): data = { 'symbol': symbol } return self._get("depth", data=data) def broadcast_msg(self, msg: binance_chain.messages.Msg, sync: bool = False): if self._env != msg.wallet.env: raise BinanceChainBroadcastException("Wallet environment doesn't match HttpApiClient environment") msg.wallet.initialise_wallet() data = msg.to_hex_data() req_path = 'broadcast' if sync: req_path += f'?sync=1' res = self._post(req_path, data=data) msg.wallet.increment_account_sequence() return res def broadcast_hex_msg(self, hex_msg: str, sync: bool = False): req_path = 'broadcast' if sync: req_path += f'?sync=1' res = self._post(req_path, data=hex_msg) return res def get_klines(self, symbol: str, interval: KlineInterval, limit: Optional[int] = 300, start_time: Optional[int] = None, end_time: Optional[int] = None): data = { 'symbol': symbol, 'interval': interval.value } if limit is not None: data['limit'] = limit if start_time is not None: data['startTime'] = start_time if end_time is not None: data['endTime'] = end_time return self._get("klines", data=data) def get_closed_orders( self, address: str, symbol: Optional[str] = None, status: Optional[OrderStatus] = None, side: Optional[OrderSide] = None, offset: Optional[int] = 0, limit: Optional[int] = 500, start_time: Optional[int] = None, end_time: Optional[int] = None, total: Optional[int] = 0 ): data = { 'address': address } if symbol is not None: data['symbol'] = symbol if status is not None: data['status'] = status.value if side is not None: data['side'] = side.value if offset is not None: data['offset'] = offset if limit is not None: data['limit'] = limit if start_time is not None: data['start'] = start_time if end_time is not None: data['end'] = end_time if total is not None: data['total'] = total return self._get("orders/closed", data=data) def get_open_orders( self, address: str, symbol: Optional[str] = None, offset: Optional[int] = 0, limit: Optional[int] = 500, total: Optional[int] = 0 ): data = { 'address': address } if symbol is not None: data['symbol'] = symbol if offset is not None: data['offset'] = offset if limit is not None: data['limit'] = limit if total is not None: data['total'] = total return self._get("orders/open", data=data) def get_order(self, order_id: str): return self._get(f"orders/{order_id}") def get_ticker(self, symbol: str): data = { 'symbol': symbol } return self._get("ticker/24hr", data=data) def get_trades( self, address: Optional[str] = None, symbol: Optional[str] = None, side: Optional[OrderSide] = None, quote_asset: Optional[str] = None, buyer_order_id: Optional[str] = None, seller_order_id: Optional[str] = None, height: Optional[str] = None, offset: Optional[int] = 0, limit: Optional[int] = 500, start_time: Optional[int] = None, end_time: Optional[int] = None, total: Optional[int] = 0 ): data = {} if address is not None: data['address'] = address if symbol is not None: data['symbol'] = symbol if side is not None: data['side'] = side.value if quote_asset is not None: data['quoteAsset'] = quote_asset if buyer_order_id is not None: data['buyerOrderId'] = buyer_order_id if seller_order_id is not None: data['sellerOrderId'] = seller_order_id if height is not None: data['height'] = height if offset is not None: data['offset'] = offset if limit is not None: data['limit'] = limit if start_time is not None: data['start'] = start_time if end_time is not None: data['end'] = end_time if total is not None: data['total'] = total 
return self._get("trades", data=data) def get_transactions( self, address: str, symbol: Optional[str] = None, side: Optional[TransactionSide] = None, tx_asset: Optional[str] = None, tx_type: Optional[TransactionType] = None, height: Optional[str] = None, offset: Optional[int] = 0, limit: Optional[int] = 500, start_time: Optional[int] = None, end_time: Optional[int] = None ): data = { 'address': address } if symbol is not None: data['symbol'] = symbol if side is not None: data['side'] = side.value if tx_asset is not None: data['txAsset'] = tx_asset if tx_type is not None: data['txType'] = tx_type.value if height is not None: data['blockHeight'] = height if offset is not None: data['offset'] = offset if limit is not None: data['limit'] = limit if start_time is not None: data['startTime'] = start_time if end_time is not None: data['endTime'] = end_time return self._get("transactions", data=data)
MIT License
sulab/wikidataintegrator
wikidataintegrator/wdi_core.py
WDFunctionsEngine.check_shex_conformance
python
def check_shex_conformance(qid=None, data=None, eid=None, entity_schema_repo=None, output='confirm'):
    if not bool(qid):
        raise ValueError('Please provide a QID even with a json object of a Wikidata item')

    rdfdata = Graph()
    if not bool(data):
        rdfdata.parse(config["CONCEPT_BASE_URI"] + qid + ".ttl")
    else:
        rdfdata.parse(data=data)

    entity_schema_repo = config["ENTITY_SCHEMA_REPO"] if entity_schema_repo is None else entity_schema_repo
    schema = requests.get(entity_schema_repo + eid).text

    for result in ShExEvaluator(rdf=rdfdata, schema=schema,
                                focus=config["CONCEPT_BASE_URI"] + qid).evaluate():
        shex_result = dict()
        if result.result:
            shex_result["result"] = True
        else:
            shex_result["result"] = False
        shex_result["reason"] = result.reason
        shex_result["focus"] = result.focus

    if output == "confirm":
        return shex_result["result"]
    elif output == "reason":
        return shex_result["reason"]
    else:
        return shex_result
Static method which can be used to check a Wikidata item for conformance to an EntitySchema

:param qid: The QID of the Wikidata item to check
:param data: Optional RDF serialization of the item; if omitted, the item is fetched from Wikidata
:param eid: The EntitySchema identifier from Wikidata
:param entity_schema_repo: Base URL of the EntitySchema repository; defaults to the configured ENTITY_SCHEMA_REPO
:param output: "confirm" for a boolean result, "reason" for the failure reason, anything else for the full result dict
:return: The result of the conformance check, in the form selected by `output`
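A minimal usage sketch; "Q42" and "E37" are illustrative identifiers, and the call fetches both the item and the schema over the network using the configured CONCEPT_BASE_URI / ENTITY_SCHEMA_REPO.

from wikidataintegrator import wdi_core

# True/False depending on whether the item conforms to the EntitySchema.
conforms = wdi_core.WDFunctionsEngine.check_shex_conformance(qid="Q42", eid="E37", output="confirm")
print(conforms)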
https://github.com/sulab/wikidataintegrator/blob/5feff88d7e97ffad1713a086202efa93ebe49516/wikidataintegrator/wdi_core.py#L209-L244
import copy import datetime import json import logging import os import re import time import warnings from collections import defaultdict from typing import List import pandas as pd import requests from pyshex import ShExEvaluator from rdflib import Graph from shexer.shaper import Shaper from wikidataintegrator.wdi_backoff import wdi_backoff from wikidataintegrator.wdi_config import config from wikidataintegrator.wdi_fastrun import FastRunContainer from wikidataintegrator.wdi_helpers import MappingRelationHelper from wikidataintegrator import wdi_rdf __author__ = 'Andra Waagmeester, Gregory Stupp, Sebastian Burgstaller ' __license__ = 'MIT' class WDFunctionsEngine(object): def __init__(self, mediawiki_api_url=None, sparql_endpoint_url=None,): self.mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url self.sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if sparql_endpoint_url is None else sparql_endpoint_url @staticmethod def get_rdf(wd_item_id='', format="turtle", mediawiki_api_url=None): localcopy = Graph() localcopy.parse(config["CONCEPT_BASE_URI"] + wd_item_id + ".ttl") return localcopy.serialize(format=format) @staticmethod def get_linked_by(qid, mediawiki_api_url=None): mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url linkedby = [] whatlinkshere = json.loads(requests.get( mediawiki_api_url + "?action=query&list=backlinks&format=json&bllimit=500&bltitle=" + qid).text) for link in whatlinkshere["query"]["backlinks"]: if link["title"].startswith("Q"): linkedby.append(link["title"]) while 'continue' in whatlinkshere.keys(): whatlinkshere = json.loads(requests.get( mediawiki_api_url + "?action=query&list=backlinks&blcontinue=" + whatlinkshere['continue']['blcontinue'] + "&format=json&bllimit=500&bltitle=" + qid).text) for link in whatlinkshere["query"]["backlinks"]: if link["title"].startswith("Q"): linkedby.append(link["title"]) return linkedby @staticmethod @wdi_backoff() def execute_sparql_query(query, prefix=None, endpoint=None, user_agent=None, as_dataframe=False, max_retries=1000, retry_after=60): sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if endpoint is None else endpoint user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent if prefix: query = prefix + '\n' + query params = { 'query': '#Tool: wdi_core fastrun\n' + query, 'format': 'json' } headers = { 'Accept': 'application/sparql-results+json', 'User-Agent': user_agent } response = None for n in range(max_retries): try: response = requests.post(sparql_endpoint_url, params=params, headers=headers) except requests.exceptions.ConnectionError as e: print("Connection error: {}. Sleeping for {} seconds.".format(e, retry_after)) time.sleep(retry_after) continue if response.status_code == 503: print("service unavailable. sleeping for {} seconds".format(retry_after)) time.sleep(retry_after) continue if response.status_code == 429: if "retry-after" in response.headers.keys(): retry_after = response.headers["retry-after"] print("service unavailable. 
sleeping for {} seconds".format(retry_after)) time.sleep(retry_after) continue response.raise_for_status() results = response.json() if as_dataframe: return WDItemEngine._sparql_query_result_to_df(results) else: return results @staticmethod def _sparql_query_result_to_df(results): def parse_value(item): if item.get("datatype") == "http://www.w3.org/2001/XMLSchema#decimal": return float(item['value']) if item.get("datatype") == "http://www.w3.org/2001/XMLSchema#integer": return int(item['value']) if item.get("datatype") == "http://www.w3.org/2001/XMLSchema#dateTime": return datetime.datetime.strptime(item['value'], '%Y-%m-%dT%H:%M:%SZ') return item['value'] results = results['results']['bindings'] results = [{k: parse_value(v) for k, v in item.items()} for item in results] df = pd.DataFrame(results) return df @staticmethod def delete_item(item, reason, login, mediawiki_api_url=None, user_agent=None): mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url user_agent = config['USER_AGENT_DEFAULT'] if user_agent is None else user_agent params = { 'action': 'delete', 'title': 'Item:' + item, 'reason': reason, 'token': login.get_edit_token(), 'format': 'json' } headers = { 'User-Agent': user_agent } r = requests.post(url=mediawiki_api_url, data=params, cookies=login.get_edit_cookie(), headers=headers) print(r.json()) @staticmethod def delete_statement(statement_id, revision, login, mediawiki_api_url='https://www.wikidata.org/w/api.php', user_agent=config['USER_AGENT_DEFAULT']): params = { 'action': 'wbremoveclaims', 'claim': statement_id, 'token': login.get_edit_token(), 'baserevid': revision, 'bot': True, 'format': 'json' } headers = { 'User-Agent': user_agent } r = requests.post(url=mediawiki_api_url, data=params, cookies=login.get_edit_cookie(), headers=headers) print(r.json()) @staticmethod
MIT License
probml/pyprobml
scripts/hmm_discrete_em_lib.py
init_random_params_numpy
python
def init_random_params_numpy(sizes, random_state):
    num_hidden, num_obs = sizes
    np.random.seed(random_state)
    return hmm.HMMNumpy(softmax(np.random.randn(num_hidden, num_hidden), axis=1),
                        softmax(np.random.randn(num_hidden, num_obs), axis=1),
                        softmax(np.random.randn(num_hidden)))
Initializes the components of HMM from a normal distribution

Parameters
----------
sizes: List
    Consists of the number of hidden states and observable events, respectively
random_state : int
    Seed value

Returns
-------
* HMMNumpy
    Hidden Markov Model
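A quick sketch of how the initializer might be called, with arbitrary sizes chosen for illustration (two hidden states, four observable symbols).

# Returns an hmm.HMMNumpy whose transition and emission matrices are row-stochastic
# (softmax of Gaussian draws), together with an initial-state distribution.
hmm_params = init_random_params_numpy(sizes=(2, 4), random_state=0)
print(hmm_params)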
https://github.com/probml/pyprobml/blob/9d5a94449ee76c0ca37ca953c502864315ae7724/scripts/hmm_discrete_em_lib.py#L30-L52
import superimport

from scipy.special import softmax
import numpy as np
import jax
import jax.numpy as jnp
from jax import vmap, jit
from jax.ops import index_update, index
from jax.random import PRNGKey
import hmm_discrete_lib as hmm

from dataclasses import dataclass


@dataclass
class PriorsNumpy:
    trans_pseudo_counts: np.array
    obs_pseudo_counts: np.array
    init_pseudo_counts: np.array


@dataclass
class PriorsJax:
    trans_pseudo_counts: jnp.array
    obs_pseudo_counts: jnp.array
    init_pseudo_counts: jnp.array
MIT License
qfgaohao/pytorch-ssd
vision/utils/model_book.py
ModelBook.num_of_conv2d_filters
python
def num_of_conv2d_filters(self):
    num_filters = 0
    for _, m in self.conv2d_modules():
        num_filters += m.out_channels
    return num_filters
Return the sum of out_channels over all conv2d layers. Each sub-weight of size
[in_channels, h, w] (one per output channel) is treated as a single filter.
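A small sketch of the counting behaviour on a toy network; the layer sizes are arbitrary.

import torch.nn as nn

# Two conv layers with 16 and 32 output channels -> 16 + 32 = 48 filters in total.
model = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3),
                      nn.ReLU(),
                      nn.Conv2d(16, 32, kernel_size=3))
book = ModelBook(model)
print(book.num_of_conv2d_filters())  # 48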
https://github.com/qfgaohao/pytorch-ssd/blob/f61ab424d09bf3d4bb3925693579ac0a92541b0d/vision/utils/model_book.py#L45-L53
from collections import OrderedDict import torch.nn as nn class ModelBook: def __init__(self, model): self._model = model self._modules = OrderedDict() self._paths = OrderedDict() path = [] self._construct(self._model, path) def _construct(self, module, path): if not module._modules: return for name, m in module._modules.items(): cur_path = tuple(path + [name]) self._paths[m] = cur_path self._modules[cur_path] = m self._construct(m, path + [name]) def conv2d_modules(self): return self.modules(nn.Conv2d) def linear_modules(self): return self.modules(nn.Linear) def modules(self, module_type=None): for p, m in self._modules.items(): if not module_type or isinstance(m, module_type): yield p, m def num_of_conv2d_modules(self): return self.num_of_modules(nn.Conv2d)
MIT License
bokulich-lab/rescript
rescript/trim_alignment.py
_locate_primer_positions
python
def _locate_primer_positions(
        alignment_with_primers: AlignedDNAFASTAFormat) -> dict:
    primers_aligned = dict()
    for aln_seq in alignment_with_primers.view(DNAIterator):
        if aln_seq.metadata["id"] in ["forward", "reverse"]:
            primers_aligned[aln_seq.metadata["id"]] = (str(aln_seq))

    primer_positions = dict()
    for primer_id, primer_seq in primers_aligned.items():
        primer_positions[primer_id] = {
            'start': next(
                (i for i, nt in enumerate(primer_seq) if nt != "-")),
            'end': len(primer_seq) - next(
                (i for i, nt in enumerate(primer_seq[::-1]) if nt != "-"))
        }

    pos_start, pos_end = _find_terminal_positions(primer_positions)

    return {"start": pos_start, "end": pos_end}
Identify position of each primer within the alignment.

Arguments:
    alignment_with_primers (AlignedDNAFASTAFormat): sequence alignment
        containing at least one aligned primer

Returns:
    (dict): dictionary containing trimming positions using 0-based indexing
https://github.com/bokulich-lab/rescript/blob/b1549a12f11ae4c42a905af8502fcc5f146a3948/rescript/trim_alignment.py#L83-L117
from typing import Union import qiime2 from q2_types.feature_data import ( AlignedDNAFASTAFormat, DNAFASTAFormat, DNAIterator, AlignedDNAIterator) from skbio import DNA def _trim_sequence(sequence: DNA, position_start: int, position_end: int) -> DNA: return DNA(sequence[position_start:position_end]) def _trim_all_sequences(aligned_sequences: AlignedDNAFASTAFormat, trim_positions: dict) -> AlignedDNAFASTAFormat: result = AlignedDNAFASTAFormat() with result.open() as out_fasta: for seq in aligned_sequences.view(AlignedDNAIterator): seq_trimmed = _trim_sequence( seq, trim_positions["start"], trim_positions["end"]) seq_trimmed.write(out_fasta) return result def _find_terminal_positions(primer_positions: dict) -> (int, int): pos_start, pos_end = None, None if len(primer_positions) == 2: if primer_positions["reverse"]["start"] < primer_positions["forward"]["end"]: raise ValueError("Reverse primer overlaps or aligned upstream the " "forward primer. Are the primers correct?") pos_start = min([x["start"] for x in primer_positions.values()]) pos_end = max([x["end"] for x in primer_positions.values()]) elif "forward" in primer_positions.keys(): pos_start = primer_positions["forward"]["start"] elif "reverse" in primer_positions.keys(): pos_end = primer_positions["reverse"]["end"] return pos_start, pos_end
BSD 3-Clause New or Revised License
yadage/yadage
yadage/controllers.py
PersistentController.sync_backend
python
def sync_backend(self):
    log.debug("transaction to sync but (without sync in tx)")
    with self.transaction(sync=False):
        super(PersistentController, self).sync_backend()
synchronize node data with backend
https://github.com/yadage/yadage/blob/cbb26515f02265800b4f5156e6c099211121d6a8/yadage/controllers.py#L142-L148
import contextlib import importlib import logging import os from adage.wflowcontroller import BaseController from packtivity.syncbackends import defaultsyncbackend from .reset import collective_downstream, remove_rules, reset_steps, undo_rules from .wflow import YadageWorkflow from .handlers.utils import handler_decorator log = logging.getLogger(__name__) ctrlhandlers, controller = handler_decorator() class YadageController(BaseController): def __init__(self, *args, **kwargs): self.prepublishing_backend = defaultsyncbackend() self.disable_backend = False self.disable_prepublishing = kwargs.pop("disable_prepub", False) super(YadageController, self).__init__(*args, **kwargs) def sync_expected(self): for n in self.adageobj.dag.nodes(): if ( "YADAGE_IGNORE_PREPUBLISHING" in os.environ or self.disable_prepublishing ): continue node = self.adageobj.dag.getNode(n) node.expected_result = self.prepublishing_backend.prepublish( node.task.spec, node.task.parameters.json(), node.task.state ) def sync_backend(self): self.sync_expected() if not self.disable_backend: super(YadageController, self).sync_backend() @controller("frommodel") def frommodel_controller(ctrlstring, ctrlopts, model=None): if isinstance(model, YadageWorkflow): return YadageController(model, **ctrlopts) else: return PersistentController(model, **ctrlopts) @controller("http") def http_controller(ctrlstring, ctrlopts, model=None): try: from yadagehttpctrl.clientcontroller import YadageHTTPController ctrl = YadageHTTPController(server=ctrlstring, **ctrlopts) return ctrl except ImportError: log.exception("try installing yadagehttpctrl") @controller("py:") def frompython_controller(ctrlstring, ctrlopts, model=None): _, module, ctrlclass = ctrlstring.split(":") module = importlib.import_module(module) ctrlclass = getattr(module, ctrlclass) if ctrlopts.pop("pass_model", False): ctrlopts["model"] = model return ctrlclass(**ctrlopts) def setup_controller(model=None, controller="frommodel", ctrlopts=None): ctrlopts = ctrlopts or {} for k in ctrlhandlers.keys(): if controller.startswith(k): return ctrlhandlers[k](controller, ctrlopts, model) raise RuntimeError("unknown controller type %s" % controller) class PersistentController(YadageController): def __init__(self, model, backend=None): self.model = model super(PersistentController, self).__init__(self.model.load(), backend) @contextlib.contextmanager def transaction(self, sync=True): self.adageobj = self.model.load() if sync: log.debug("syncing to setup tx %s", self) super(PersistentController, self).sync_backend() yield isvalid = self.validate() if not isvalid: log.warning("commit is invalid %s", isvalid) if sync: log.debug("syncing to teardown tx %s", self) super(PersistentController, self).sync_backend() self.model.commit(self.adageobj) def submit_nodes(self, nodeids): log.debug("transaction to submit") with self.transaction(): nodes = [self.adageobj.dag.getNode(nodeid) for nodeid in nodeids] super(PersistentController, self).submit_nodes(nodes) def apply_rules(self, ruleids): log.debug("transaction to apply") with self.transaction(): rules = [r for r in self.adageobj.rules if r.identifier in ruleids] super(PersistentController, self).apply_rules(rules)
MIT License
xarray-contrib/xpublish
xpublish/utils/api.py
normalize_datasets
python
def normalize_datasets(datasets) -> Dict[str, xr.Dataset]:
    error_msg = 'Can only publish a xarray.Dataset object or a mapping of Dataset objects'

    if isinstance(datasets, xr.Dataset):
        return {}
    elif isinstance(datasets, Mapping):
        if not all([isinstance(obj, xr.Dataset) for obj in datasets.values()]):
            raise TypeError(error_msg)
        return {str(k): ds.assign_attrs({DATASET_ID_ATTR_KEY: k})
                for k, ds in datasets.items()}
    else:
        raise TypeError(error_msg)
Normalize the given collection of datasets.

- raise TypeError if objects other than xarray.Dataset are found
- return an empty dictionary in the special case where a single dataset is given
- convert all keys (dataset ids) to strings
- add dataset ids to their corresponding dataset object as global attribute
  (so that it can be easily retrieved within path operation functions).
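A short sketch of the two accepted inputs; the variable names and toy data are illustrative.

import xarray as xr

single = xr.Dataset({"t": ("x", [1, 2, 3])})
normalize_datasets(single)                 # -> {} (single-dataset special case)

many = {"air": single, 42: xr.Dataset({"p": ("x", [4, 5, 6])})}
normalized = normalize_datasets(many)
print(list(normalized))                               # ['air', '42'] -- keys coerced to str
print(normalized["air"].attrs[DATASET_ID_ATTR_KEY])   # 'air'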
https://github.com/xarray-contrib/xpublish/blob/b3c20ba6121355a3bebf0a961f2e3d587a368317/xpublish/utils/api.py#L11-L30
from collections.abc import Mapping
from typing import Dict, List, Tuple

import xarray as xr
from fastapi import APIRouter
from fastapi.openapi.utils import get_openapi

DATASET_ID_ATTR_KEY = '_xpublish_id'
MIT License
atomlinter/linter-pylama
bin/deps/pylint/checkers/utils.py
has_known_bases
python
def has_known_bases(klass, context=None):
    try:
        return klass._all_bases_known
    except AttributeError:
        pass
    for base in klass.bases:
        result = safe_infer(base, context=context)
        if (not isinstance(result, astroid.ClassDef) or result is klass
                or not has_known_bases(result, context=context)):
            klass._all_bases_known = False
            return False
    klass._all_bases_known = True
    return True
Return true if all base classes of a class could be inferred.
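A minimal sketch using astroid to build class nodes from source text; it assumes astroid.extract_node returns the last top-level node when no node is marked (current astroid behaviour).

import astroid

klass = astroid.extract_node("""
class Base: pass
class Child(Base): pass
""")
# klass is the ClassDef for Child; Base is inferable, so this should print True.
print(has_known_bases(klass))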
https://github.com/atomlinter/linter-pylama/blob/9157f7f84083007161814c93b537a712984f3c86/bin/deps/pylint/checkers/utils.py#L834-L849
import collections import functools try: from functools import singledispatch as singledispatch except ImportError: from singledispatch import singledispatch as singledispatch try: from functools import lru_cache except ImportError: from backports.functools_lru_cache import lru_cache import itertools import re import sys import string import warnings import six from six.moves import map, builtins import astroid from astroid import bases as _bases from astroid import scoped_nodes BUILTINS_NAME = builtins.__name__ COMP_NODE_TYPES = (astroid.ListComp, astroid.SetComp, astroid.DictComp, astroid.GeneratorExp) PY3K = sys.version_info[0] == 3 if not PY3K: EXCEPTIONS_MODULE = "exceptions" else: EXCEPTIONS_MODULE = "builtins" ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod', 'abc.abstractclassmethod', 'abc.abstractstaticmethod')) ITER_METHOD = '__iter__' NEXT_METHOD = 'next' if six.PY2 else '__next__' GETITEM_METHOD = '__getitem__' SETITEM_METHOD = '__setitem__' DELITEM_METHOD = '__delitem__' CONTAINS_METHOD = '__contains__' KEYS_METHOD = 'keys' _SPECIAL_METHODS_PARAMS = { None: ('__new__', '__init__', '__call__'), 0: ('__del__', '__repr__', '__str__', '__bytes__', '__hash__', '__bool__', '__dir__', '__len__', '__length_hint__', '__iter__', '__reversed__', '__neg__', '__pos__', '__abs__', '__invert__', '__complex__', '__int__', '__float__', '__neg__', '__pos__', '__abs__', '__complex__', '__int__', '__float__', '__index__', '__enter__', '__aenter__', '__getnewargs_ex__', '__getnewargs__', '__getstate__', '__reduce__', '__copy__', '__unicode__', '__nonzero__', '__await__', '__aiter__', '__anext__', '__fspath__'), 1: ('__format__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', '__getattr__', '__getattribute__', '__delattr__', '__delete__', '__instancecheck__', '__subclasscheck__', '__getitem__', '__missing__', '__delitem__', '__contains__', '__add__', '__sub__', '__mul__', '__truediv__', '__floordiv__', '__mod__', '__divmod__', '__lshift__', '__rshift__', '__and__', '__xor__', '__or__', '__radd__', '__rsub__', '__rmul__', '__rtruediv__', '__rmod__', '__rdivmod__', '__rpow__', '__rlshift__', '__rrshift__', '__rand__', '__rxor__', '__ror__', '__iadd__', '__isub__', '__imul__', '__itruediv__', '__ifloordiv__', '__imod__', '__ilshift__', '__irshift__', '__iand__', '__ixor__', '__ior__', '__ipow__', '__setstate__', '__reduce_ex__', '__deepcopy__', '__cmp__', '__matmul__', '__rmatmul__', '__div__'), 2: ('__setattr__', '__get__', '__set__', '__setitem__', '__set_name__'), 3: ('__exit__', '__aexit__'), (0, 1): ('__round__', ), } SPECIAL_METHODS_PARAMS = { name: params for params, methods in _SPECIAL_METHODS_PARAMS.items() for name in methods } PYMETHODS = set(SPECIAL_METHODS_PARAMS) class NoSuchArgumentError(Exception): pass def is_inside_except(node): current = node while current and not isinstance(current.parent, astroid.ExceptHandler): current = current.parent return current and current is current.parent.name def get_all_elements(node): if isinstance(node, (astroid.Tuple, astroid.List)): for child in node.elts: for e in get_all_elements(child): yield e else: yield node def clobber_in_except(node): if isinstance(node, astroid.AssignAttr): return (True, (node.attrname, 'object %r' % (node.expr.as_string(),))) elif isinstance(node, astroid.AssignName): name = node.name if is_builtin(name): return (True, (name, 'builtins')) else: stmts = node.lookup(name)[1] if (stmts and not isinstance(stmts[0].assign_type(), (astroid.Assign, astroid.AugAssign, astroid.ExceptHandler))): return 
(True, (name, 'outer scope (line %s)' % stmts[0].fromlineno)) return (False, None) def is_super(node): if getattr(node, 'name', None) == 'super' and node.root().name == BUILTINS_NAME: return True return False def is_error(node): for child_node in node.get_children(): if isinstance(child_node, astroid.Raise): return True return False def is_raising(body): for node in body: if isinstance(node, astroid.Raise): return True return False builtins = builtins.__dict__.copy() SPECIAL_BUILTINS = ('__builtins__',) def is_builtin_object(node): return node and node.root().name == BUILTINS_NAME def is_builtin(name): return name in builtins or name in SPECIAL_BUILTINS def is_defined_before(var_node): varname = var_node.name _node = var_node.parent while _node: if isinstance(_node, COMP_NODE_TYPES): for ass_node in _node.nodes_of_class(astroid.AssignName): if ass_node.name == varname: return True elif isinstance(_node, astroid.For): for ass_node in _node.target.nodes_of_class(astroid.AssignName): if ass_node.name == varname: return True elif isinstance(_node, astroid.With): for expr, ids in _node.items: if expr.parent_of(var_node): break if (ids and isinstance(ids, astroid.AssignName) and ids.name == varname): return True elif isinstance(_node, (astroid.Lambda, astroid.FunctionDef)): if _node.args.is_argument(varname): if _node.args.parent_of(var_node): try: _node.args.default_value(varname) _node = _node.parent continue except astroid.NoDefault: pass return True if getattr(_node, 'name', None) == varname: return True break elif isinstance(_node, astroid.ExceptHandler): if isinstance(_node.name, astroid.AssignName): ass_node = _node.name if ass_node.name == varname: return True _node = _node.parent stmt = var_node.statement() _node = stmt.previous_sibling() lineno = stmt.fromlineno while _node and _node.fromlineno == lineno: for ass_node in _node.nodes_of_class(astroid.AssignName): if ass_node.name == varname: return True for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)): if varname in [name[1] or name[0] for name in imp_node.names]: return True _node = _node.previous_sibling() return False def is_func_default(node): parent = node.scope() if isinstance(parent, astroid.FunctionDef): for default_node in parent.args.defaults: for default_name_node in default_node.nodes_of_class(astroid.Name): if default_name_node is node: return True return False def is_func_decorator(node): parent = node.parent while parent is not None: if isinstance(parent, astroid.Decorators): return True if (parent.is_statement or isinstance(parent, (astroid.Lambda, scoped_nodes.ComprehensionScope, scoped_nodes.ListComp))): break parent = parent.parent return False def is_ancestor_name(frame, node): try: bases = frame.bases except AttributeError: return False for base in bases: if node in base.nodes_of_class(astroid.Name): return True return False def assign_parent(node): while node and isinstance(node, (astroid.AssignName, astroid.Tuple, astroid.List)): node = node.parent return node def overrides_a_method(class_node, name): for ancestor in class_node.ancestors(): if name in ancestor and isinstance(ancestor[name], astroid.FunctionDef): return True return False def check_messages(*messages): def store_messages(func): func.checks_msgs = messages return func return store_messages class IncompleteFormatString(Exception): pass class UnsupportedFormatCharacter(Exception): def __init__(self, index): Exception.__init__(self, index) self.index = index def parse_format_string(format_string): keys = set() num_args = 0 def 
next_char(i): i += 1 if i == len(format_string): raise IncompleteFormatString return (i, format_string[i]) i = 0 while i < len(format_string): char = format_string[i] if char == '%': i, char = next_char(i) key = None if char == '(': depth = 1 i, char = next_char(i) key_start = i while depth != 0: if char == '(': depth += 1 elif char == ')': depth -= 1 i, char = next_char(i) key_end = i - 1 key = format_string[key_start:key_end] while char in '#0- +': i, char = next_char(i) if char == '*': num_args += 1 i, char = next_char(i) else: while char in string.digits: i, char = next_char(i) if char == '.': i, char = next_char(i) if char == '*': num_args += 1 i, char = next_char(i) else: while char in string.digits: i, char = next_char(i) if char in 'hlL': i, char = next_char(i) if PY3K: flags = 'diouxXeEfFgGcrs%a' else: flags = 'diouxXeEfFgGcrs%' if char not in flags: raise UnsupportedFormatCharacter(i) if key: keys.add(key) elif char != '%': num_args += 1 i += 1 return keys, num_args def is_attr_protected(attrname): return attrname[0] == '_' and attrname != '_' and not ( attrname.startswith('__') and attrname.endswith('__')) def node_frame_class(node): klass = node.frame() while klass is not None and not isinstance(klass, astroid.ClassDef): if klass.parent is None: klass = None else: klass = klass.parent.frame() return klass def is_attr_private(attrname): regex = re.compile('^_{2,}.*[^_]+_?$') return regex.match(attrname) def get_argument_from_call(call_node, position=None, keyword=None): if position is None and keyword is None: raise ValueError('Must specify at least one of: position or keyword.') if position is not None: try: return call_node.args[position] except IndexError: pass if keyword and call_node.keywords: for arg in call_node.keywords: if arg.arg == keyword: return arg.value raise NoSuchArgumentError def inherit_from_std_ex(node): if node.name in ('Exception', 'BaseException') and node.root().name == EXCEPTIONS_MODULE: return True if not hasattr(node, 'ancestors'): return False return any(inherit_from_std_ex(parent) for parent in node.ancestors(recurs=True)) def error_of_type(handler, error_type): def stringify_error(error): if not isinstance(error, six.string_types): return error.__name__ return error if not isinstance(error_type, tuple): error_type = (error_type, ) expected_errors = {stringify_error(error) for error in error_type} if not handler.type: return False return handler.catch(expected_errors) def decorated_with_property(node): if not node.decorators: return False for decorator in node.decorators.nodes: if not isinstance(decorator, astroid.Name): continue try: if _is_property_decorator(decorator): return True except astroid.InferenceError: pass return False def _is_property_decorator(decorator): for infered in decorator.infer(): if isinstance(infered, astroid.ClassDef): if infered.root().name == BUILTINS_NAME and infered.name == 'property': return True for ancestor in infered.ancestors(): if ancestor.name == 'property' and ancestor.root().name == BUILTINS_NAME: return True return None def decorated_with(func, qnames): decorators = func.decorators.nodes if func.decorators else [] for decorator_node in decorators: try: if any(i is not None and i.qname() in qnames for i in decorator_node.infer()): return True except astroid.InferenceError: continue return False @lru_cache(maxsize=1024) def unimplemented_abstract_methods(node, is_abstract_cb=None): if is_abstract_cb is None: is_abstract_cb = functools.partial( decorated_with, qnames=ABC_METHODS) visited = {} try: mro = 
reversed(node.mro()) except NotImplementedError: return {} except astroid.ResolveError: return {} for ancestor in mro: for obj in ancestor.values(): infered = obj if isinstance(obj, astroid.AssignName): infered = safe_infer(obj) if not infered: if obj.name in visited: del visited[obj.name] continue if not isinstance(infered, astroid.FunctionDef): if obj.name in visited: del visited[obj.name] if isinstance(infered, astroid.FunctionDef): abstract = is_abstract_cb(infered) if abstract: visited[obj.name] = infered elif not abstract and obj.name in visited: del visited[obj.name] return visited def _import_node_context(node): current = node ignores = (astroid.ExceptHandler, astroid.TryExcept) while current and not isinstance(current.parent, ignores): current = current.parent if current and isinstance(current.parent, ignores): return current.parent return None def is_from_fallback_block(node): context = _import_node_context(node) if not context: return False if isinstance(context, astroid.ExceptHandler): other_body = context.parent.body handlers = context.parent.handlers else: other_body = itertools.chain.from_iterable( handler.body for handler in context.handlers) handlers = context.handlers has_fallback_imports = any(isinstance(import_node, (astroid.ImportFrom, astroid.Import)) for import_node in other_body) ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError) return ignores_import_error or has_fallback_imports def _except_handlers_ignores_exception(handlers, exception): func = functools.partial(error_of_type, error_type=(exception, )) return any(map(func, handlers)) def get_exception_handlers(node, exception): context = _import_node_context(node) if isinstance(context, astroid.TryExcept): return (_handler for _handler in context.handlers if error_of_type(_handler, exception)) return None def is_node_inside_try_except(node): context = _import_node_context(node) return isinstance(context, astroid.TryExcept) def node_ignores_exception(node, exception): managing_handlers = get_exception_handlers(node, exception) if not managing_handlers: return False return any(managing_handlers) def class_is_abstract(node): for method in node.methods(): if method.parent.frame() is node: if method.is_abstract(pass_is_abstract=False): return True return False def _supports_protocol_method(value, attr): try: attributes = value.getattr(attr) except astroid.NotFoundError: return False first = attributes[0] if isinstance(first, astroid.AssignName): if isinstance(first.parent.value, astroid.Const): return False return True def is_comprehension(node): comprehensions = (astroid.ListComp, astroid.SetComp, astroid.DictComp, astroid.GeneratorExp) return isinstance(node, comprehensions) def _supports_mapping_protocol(value): return ( _supports_protocol_method(value, GETITEM_METHOD) and _supports_protocol_method(value, KEYS_METHOD) ) def _supports_membership_test_protocol(value): return _supports_protocol_method(value, CONTAINS_METHOD) def _supports_iteration_protocol(value): return ( _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(value, GETITEM_METHOD) ) def _supports_getitem_protocol(value): return _supports_protocol_method(value, GETITEM_METHOD) def _supports_setitem_protocol(value): return _supports_protocol_method(value, SETITEM_METHOD) def _supports_delitem_protocol(value): return _supports_protocol_method(value, DELITEM_METHOD) def _is_abstract_class_name(name): lname = name.lower() is_mixin = lname.endswith('mixin') is_abstract = lname.startswith('abstract') 
is_base = lname.startswith('base') or lname.endswith('base') return is_mixin or is_abstract or is_base def is_inside_abstract_class(node): while node is not None: if isinstance(node, astroid.ClassDef): if class_is_abstract(node): return True name = getattr(node, 'name', None) if name is not None and _is_abstract_class_name(name): return True node = node.parent return False def _supports_protocol(value, protocol_callback): if isinstance(value, astroid.ClassDef): if not has_known_bases(value): return True meta = value.metaclass() if meta is not None: if protocol_callback(meta): return True if isinstance(value, astroid.BaseInstance): if not has_known_bases(value): return True if value.has_dynamic_getattr(): return True if protocol_callback(value): return True if (isinstance(value, _bases.Proxy) and isinstance(value._proxied, astroid.BaseInstance) and has_known_bases(value._proxied)): value = value._proxied return protocol_callback(value) return False def is_iterable(value): return _supports_protocol(value, _supports_iteration_protocol) def is_mapping(value): return _supports_protocol(value, _supports_mapping_protocol) def supports_membership_test(value): supported = _supports_protocol(value, _supports_membership_test_protocol) return supported or is_iterable(value) def supports_getitem(value): return _supports_protocol(value, _supports_getitem_protocol) def supports_setitem(value): return _supports_protocol(value, _supports_setitem_protocol) def supports_delitem(value): return _supports_protocol(value, _supports_delitem_protocol) @lru_cache(maxsize=1024) def safe_infer(node, context=None): try: inferit = node.infer(context=context) value = next(inferit) except astroid.InferenceError: return None try: next(inferit) return None except astroid.InferenceError: return None except StopIteration: return value
MIT License
microsoft/pointersql
model/rnn.py
bidirectional_static_rnn
python
def bidirectional_static_rnn(fw_cell, bw_cell, inputs, masks,
                             init_fw_state=None, keep_all_states=False,
                             dtype=tf.float32, scope=None):
    with tf.variable_scope(scope or "bidirectional_rnn_encoder"):
        with tf.variable_scope("fw") as fw_scope:
            fw_outs, fw_state = static_rnn(fw_cell, inputs, init_state=init_fw_state,
                                           dtype=dtype, scope=fw_scope,
                                           keep_all_states=keep_all_states)
        with tf.variable_scope("bw") as bw_scope:
            reversed_inputs = inputs[::-1]
            reversed_masks = masks[::-1]
            init_bw_state = fw_state if not keep_all_states else fw_state[-1]
            bw_outs, bw_state = static_rnn(bw_cell, reversed_inputs, init_state=init_bw_state,
                                           dtype=dtype, scope=bw_scope,
                                           keep_all_states=keep_all_states)
            reversed_bw_outs = bw_outs[::-1]
        outs = tf.concat([fw_outs, reversed_bw_outs], -1)
        return outs, fw_state, bw_state
The bidirectional rnn

Args:
    fw_cell: an RNN cell for the forward pass
    bw_cell: another RNN cell for the backward pass
    inputs: inputs to the rnn, a list of tensors of shape (batch_size, hidden_size),
        with length equal to the rnn length
    masks: per-step masks (kept for API symmetry with the dynamic variant)
    init_fw_state: the initial state of the forward RNN; if not specified,
        it defaults to all zeros, of shape cell.state.get_shape()

Returns:
    outs: the outputs emitted by the network, with forward and backward outputs
        concatenated along the last axis, i.e. (batch_size, hidden_size * 2)
        per step, with length equal to the rnn length
    fw_state: the last state of the forward rnn
    bw_state: the last state of the backward rnn
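A graph-construction sketch assuming a TensorFlow 1.x environment with tf.contrib available (as the surrounding module expects); all sizes below are arbitrary.

import tensorflow as tf

seq_len, batch, in_dim, hidden = 3, 2, 4, 8
inputs = [tf.placeholder(tf.float32, [batch, in_dim]) for _ in range(seq_len)]
masks = [tf.placeholder(tf.bool, [batch]) for _ in range(seq_len)]  # unused by the static variant
fw_cell = tf.contrib.rnn.LSTMCell(hidden)
bw_cell = tf.contrib.rnn.LSTMCell(hidden)
outs, fw_state, bw_state = bidirectional_static_rnn(fw_cell, bw_cell, inputs, masks)
# outs stacks the time steps and concatenates forward/backward outputs on the last axis.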
https://github.com/microsoft/pointersql/blob/3688dab3ee67b005e858722cd33844c3d0bd6292/model/rnn.py#L127-L162
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import numpy as np from tensorflow.python.ops import rnn_cell_impl from pprint import pprint def static_rnn(cell, inputs, init_state=None, dtype=tf.float32, keep_all_states=False, scope=None): with tf.variable_scope(scope or "static_rnn") as scope: batch_size = tf.shape(inputs[0])[0] if init_state is None: init_state = cell.zero_state(batch_size, dtype) step_num = len(inputs) outs = [] if keep_all_states: all_states = [] state = init_state for i in range(step_num): (output, state) = cell(inputs[i], state) outs.append(output) if keep_all_states: all_states.append(state) if keep_all_states: return outs, all_states else: return outs, state def dynamic_rnn(cell, inputs, masks, init_state=None, keep_all_states=False, dtype=tf.float32, scope=None): with tf.variable_scope(scope or "dynamic_rnn") as scope: batch_size = tf.shape(inputs[0])[0] if init_state is None: init_state = cell.zero_state(batch_size, dtype) step_num = len(inputs) outs = [] if keep_all_states: all_states = [] state = init_state for i in range(step_num): (output, new_state) = cell(inputs[i], state) if isinstance(cell, tf.contrib.rnn.LSTMCell): state = f_apply_lstm_state(new_state, state, lambda s1, s2: tf.where(masks[i], s1, s2)) elif isinstance(cell, tf.contrib.rnn.MultiRNNCell): state = f_apply_multirnn_lstm_state(new_state, state, lambda s1, s2: tf.where(masks[i], s1, s2)) output = tf.where(masks[i], output, tf.zeros(tf.shape(output), dtype)) outs.append(output) if keep_all_states: all_states.append(state) if keep_all_states: return outs, all_states else: return outs, state def bidirectional_dynamic_rnn(fw_cell, bw_cell, inputs, masks, init_fw_state=None, keep_all_states=False, dtype=tf.float32, scope=None): with tf.variable_scope(scope or "bidirectional_rnn_encoder"): with tf.variable_scope("fw") as fw_scope: fw_outs, fw_state = dynamic_rnn(fw_cell, inputs, masks, init_state=init_fw_state, keep_all_states=keep_all_states, dtype=dtype, scope=fw_scope) with tf.variable_scope("bw") as bw_scope: reversed_inputs = inputs[::-1] reversed_masks = masks[::-1] init_bw_state = fw_state if not keep_all_states else fw_state[-1] bw_outs, bw_state = dynamic_rnn(bw_cell, reversed_inputs, reversed_masks, init_state=init_bw_state, keep_all_states=keep_all_states, dtype=dtype, scope=bw_scope) reversed_bw_outs = bw_outs[::-1] outs = tf.concat([fw_outs, reversed_bw_outs], -1) return outs, fw_state, bw_state
MIT License
tum-pbs/phiflow
phi/physics/_world.py
World.remove
python
def remove(self, obj):
    if inspect.isclass(obj):
        states = self.state.all_instances(obj)
        self.remove(states)
    elif isinstance(obj, (tuple, list)):
        for state in obj:
            self.remove(state)
    else:
        key = obj if isinstance(obj, str) else obj.name
        self.state = self.state.state_removed(key)
        self.physics.remove(key)
Remove a system or collection of systems from the world.

Args:
    obj: one of the following: a State, a state name, a subclass of State,
        or a tuple or list thereof
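A small sketch, assuming the StateCollection helpers that remove() relies on (all_instances, state_removed) behave as their names suggest.

from phi.physics._world import World
from phi.physics._effect import Gravity

world = World()          # the default world already holds a Gravity() state
world.remove(Gravity)    # remove by class: drops every Gravity instance and its physics
# A single state can also be removed by name or by passing the state object itself.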
https://github.com/tum-pbs/phiflow/blob/4a85f8a5029aa4e30a791daa659f2c8e1536e37e/phi/physics/_world.py#L296-L315
import inspect import warnings from typing import TypeVar from phi import geom from phi.field import GeometryMask from phi.physics._effect import Gravity from ._physics import Physics, State, struct, _as_physics, Static class StateProxy(object): def __init__(self, enclosing_world, state_name): self.world = enclosing_world self.state_name = state_name @property def state(self): state = self.world.state[self.state_name] assert state is not None return state @state.setter def state(self, state): assert state.name == self.state_name self.world.state = self.world.state.state_replaced(state) @property def physics(self): physics = self.world.physics.for_(self.state) assert physics is not None return physics @physics.setter def physics(self, physics): assert isinstance(physics, Physics) self.world.physics.add(self.state_name, physics) def step(self, dt=1.0, physics=None): self.world.step(self, dt=dt, physics=physics) def __getattr__(self, item): assert item not in ('world', 'state_name', 'physics', 'state') return getattr(self.state, item) def __setattr__(self, key, value): if key in ('world', 'state_name', 'physics', 'state'): object.__setattr__(self, key, value) else: self.state = self.state.copied_with(**{key:value}) S = TypeVar('S', bound=State) class World(object): def __init__(self, batch_size=None, add_default_objects=True): self._state = self.physics = self.observers = self.batch_size = None self.reset(batch_size, add_default_objects) def reset(self, batch_size=None, add_default_objects=True): self._state = StateCollection() self.physics = self._state.default_physics() self.observers = set() self.batch_size = batch_size if add_default_objects: self.add(Gravity()) @property def state(self): return self._state @property def age(self): return self._state.age @state.setter def state(self, state): assert state is not None assert isinstance(state, StateCollection) self._state = state for observer in self.observers: observer(self) def step(self, state=None, dt=1.0, physics=None): if state is None: if physics is None: physics = self.physics self.state = physics.step(self._state, dt) return self.state else: if isinstance(state, StateProxy): state = state.state s = self.physics.substep(state, self._state, dt, override_physics=physics) self.state = self._state.state_replaced(s) return s def stepped(self, state=None, dt=1.0, physics=None): if state is None: if physics is None: physics = self.physics return physics.step(self._state, None, dt) else: if isinstance(state, StateProxy): state = state.state return self.physics.substep(state, self._state, dt, override_physics=physics) def add(self, state, physics=None): if isinstance(state, dict): raise ValueError('Cannot add dict to world. Maybe you meant world.add(**dict)?') if isinstance(state, (tuple, list)): assert isinstance(physics, (tuple, list)) assert len(state) == len(physics) return [self.add(s, p) for s, p in zip(state, physics)] else: if physics is not None: self.physics.add(state.name, physics) elif state.default_physics() is not None and not isinstance(state.default_physics(), Static): warnings.warn('No physics provided to world.add(%s). In the future this will default to static physics' % state) self.state = self.state.state_added(state) return StateProxy(self, state.name) def add_all(self, *states): warnings.warn('World.add_all() is deprecated. Use World.add(list_of_states) instead.', DeprecationWarning) for state in states: self.add(state)
MIT License
rpryzant/deconfounded_lexicon_induction
text-performance-attribution/src/analysis/evaluator.py
evaluate
python
def evaluate(config, dataset, predictions, model_dir, eval_variable_name,
             eval_level_name=''):
    print('EVALUATOR: evaluating words for %s, level %s' % (
        eval_variable_name, eval_level_name))

    pre_eval_split = dataset.current_split
    dataset.set_active_split(config.test_suffix)

    all_features = sorted(
        predictions.feature_importance.items(), key=lambda x: x[1])[::-1]
    top_features = all_features[:config.num_eval_features]
    top_features = [x[0] for x in top_features if x[0] in dataset.features]

    with open(
        os.path.join(
            model_dir,
            '%s|%s_top_words.txt' % (eval_variable_name, eval_level_name)),
            'w') as f:
        f.write('\n'.join([
            '%s\t%.4f' % (f, predictions.feature_importance[f])
            for f in top_features
        ]))

    outcome = next(
        (variable for variable in config.data_spec[1:]
         if variable['name'] == eval_variable_name and eval_level_name))
    confounds = [
        variable for variable in config.data_spec[1:]
        if not variable['skip'] and variable['control']
    ]

    text_xentropy, confound_xentropy, both_xentropy = run_model(
        config, dataset, top_features, eval_variable_name, eval_level_name,
        confounds)

    target_log_odds = []
    with open(
        os.path.join(
            model_dir,
            '%s|%s_log_odds.txt' % (eval_variable_name, eval_level_name)),
            'a') as debug_file:
        for f in top_features:
            result = stats.log_odds(f, outcome['name'], dataset, config)
            debug_file.write('%s\t%s\n' % (f, str(result)))
            target_log_odds.append(result[eval_level_name])
    mean_target_log_odds = np.mean(target_log_odds)

    dataset.set_active_split(pre_eval_split)

    return {
        'mu_target_log_odds': mean_target_log_odds,
        'confound_correlations': -1,
        'target_correlatoins': -1,
        'mu_confound_corr': -1,
        'mu_target_corr': -1,
        'performance': -1,
        'mu_reg_perf': text_xentropy,
        'mu_fixed_perf': both_xentropy,
        'mu_confound_perf': confound_xentropy
    }
Evaluates the predictions of a trained model.

Evaluation consists of the following:
(1) Harvest the best (and worst) words from the predictions.
(2) Train regressions with (a) just these words, (b) these words and
    confounds, and (c) just confounds.
(3) Get the correlation between these words and each outcome variable we
    are "controlling for".
(4) Get the performance of models from step (2) on each outcome variable
    we are *not* controlling for.

Args:
    config: A config.yaml file which has been parsed into an object.
    dataset: A data.dataset.Dataset object.
    predictions: An instance of models.abstract_model.Prediction, which holds
        per-example predictions, as well as an "importance value" for each feature.
    model_dir: string, a path to the model's working directory.
    eval_variable_name: string, the name of the variable we are evaluating.
    eval_level_name: string, the name of the categorical level we are evaluating
        "against". This is optional. If not provided, the system will assume
        that `eval_variable_name` corresponds to a continuous variable.

Returns:
    results: A dictionary that maps metric names to their values.
https://github.com/rpryzant/deconfounded_lexicon_induction/blob/47d22ca7743f798a4169c6c949f90abddce5a0d0/text-performance-attribution/src/analysis/evaluator.py#L75-L179
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from sklearn import linear_model from sklearn import model_selection import sklearn.metrics import sys; sys.path.append('../..') import src.analysis.stats as stats def run_model(config, dataset, features, eval_variable_name, eval_level_name, confounds): m = linear_model.LogisticRegression() dataset.set_active_split(config.train_suffix) kfold = model_selection.KFold(n_splits=10, random_state=7) def scorer(estimator, x, y): yhats = estimator.predict_proba(x) return sklearn.metrics.log_loss(y, yhats, labels=[0, 1]) eval_level_index = dataset.class_to_id_map[eval_variable_name][ eval_level_name] y = np.squeeze(dataset.np_data[config.train_suffix][eval_variable_name] [:, eval_level_index].toarray()) retain_indices = [dataset.features[f] for f in features] x_text = dataset.np_data[config.train_suffix]['text-input'].toarray() x_text = x_text[:, retain_indices] x_confound = None for variable in confounds: variable_data = dataset.np_data[config.train_suffix][variable[ 'name']].toarray() if x_confound is None: x_confound = variable_data else: x_confound = np.column_stack([x_confound, variable_data]) x_both = np.column_stack([x_text, x_confound]) print('EVALUATOR: running linear model with selected features.') text_xentropy = model_selection.cross_val_score( m, x_text, y, cv=kfold, scoring=scorer).mean() print('EVALUATOR: running confound model.') confound_xentropy = model_selection.cross_val_score( m, x_confound, y, cv=kfold, scoring=scorer).mean() print('EVALUATOR: running fixed model with selected features.') both_xentropy = model_selection.cross_val_score( m, x_both, y, cv=kfold, scoring=scorer).mean() return text_xentropy, confound_xentropy, both_xentropy
MIT License
nbottenus/refocus
Python/kSpaceSimulations/KSpaceFunctions.py
kspace2wavefield
python
def kspace2wavefield(kx, kz, kspace, c, t):
    Kx, Kz, T = np.meshgrid(kx, kz, t);
    delayFactors = np.exp(-1j*2*np.pi*c*np.sqrt(Kx**2+Kz**2)*T);
    psf_kx_kz_t = np.tile(kspace[:,:,np.newaxis], (1, 1, t.size)) * delayFactors;
    nx = kx.size;
    nz = kz.size;
    psf_t = np.zeros((nz, nx, t.size), dtype=np.complex);
    for t_idx in np.arange(t.size):
        psf_t[:,:,t_idx] = np.fft.ifft2(np.fft.ifftshift(psf_kx_kz_t[:,:,t_idx], axes=(0,1)));
    dx = 1/(2*np.max(np.abs(kx)));
    dz = 1/(2*np.max(np.abs(kz)));
    x = dx*np.arange((-(nx-1)/2),((nx-1)/2)+1);
    z = dz*np.arange(nz);
    return x, z, psf_t;
x, z, psf_t = kspace2wavefield(kx, kz, kspace, c, t)

KSPACE2WAVEFIELD - Convert K-Space Representation to Physical Wavefield

This function converts the k-space of the transmitted wavefield (over lateral
and axial wavenumbers k_x and k_z) into the physical wavefield (as a function
of lateral and axial grid positions x and z) over time.

INPUTS:
    kx     - 1 x M array of lateral spatial frequencies [1/m]
    kz     - 1 x P array of axial spatial frequencies [1/m]
    kspace - P x M array of k-space for transmit wavefield
    c      - speed of sound [m/s] (default 1540 m/s)
    t      - 1 x T array of times [s]

OUTPUTS:
    x      - 1 x M array of lateral positions on computational grid [m]
    z      - 1 x P array of axial positions on computational grid [m]
    psf_t  - P x M x T array of the transmit wavefield over time
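A toy sketch of the call with made-up grid sizes and a Gaussian blob standing in for a real transmit k-space; it assumes a NumPy version where np.complex (used inside the function) is still available.

import numpy as np

kx = np.linspace(-1000.0, 1000.0, 65)     # lateral spatial frequencies [1/m]
kz = np.linspace(0.0, 2000.0, 65)         # axial spatial frequencies [1/m]
KX, KZ = np.meshgrid(kx, kz)
kspace = np.exp(-((KX / 200.0) ** 2 + ((KZ - 1000.0) / 200.0) ** 2))
t = np.arange(20) * 1e-6                  # 20 time samples, 1 us apart
x, z, psf_t = kspace2wavefield(kx, kz, kspace, c=1540.0, t=t)
print(x.shape, z.shape, psf_t.shape)      # (65,) (65,) (65, 65, 20)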
https://github.com/nbottenus/refocus/blob/7c00cb2a32ed5e71e7695a5c083333626cd4337c/Python/kSpaceSimulations/KSpaceFunctions.py#L85-L119
import numpy as np import pdb def pwResp(x, elemSpace, apod, delay, P_f, f, c): Nelem = apod.size; xpos = elemSpace*np.arange(-(Nelem-1)/2, 1+(Nelem-1)/2); apod_x = np.interp(x, xpos, apod, left=0, right=0); delayIdeal = np.interp(x, xpos, delay, left=0, right=0); apod_x_cmpx = np.dot(np.diag(apod_x), np.exp(-1j*2*np.pi*np.outer(delayIdeal,f))); apodPulse = apod_x_cmpx * np.tile(P_f/(4*np.pi*f/c+np.finfo(np.float32).eps), (apod_x.size, 1)); tx_pwResp = np.fft.fftshift(np.fft.fft(apodPulse, axis=0), axes=0); dx = np.mean(np.diff(x)); nxFFT = x.size; kx = np.mod(np.fft.fftshift(np.arange(nxFFT)/(dx*nxFFT))+1/(2*dx), 1/dx)-1/(2*dx); return kx, tx_pwResp; def pwResp2kSpace(kx, f, tx_pwResp, z, c): dz = np.mean(np.diff(z)); nzFFT = z.size; nxFFT = kx.size; kz = np.mod(np.fft.fftshift(np.arange(nzFFT)/(dz*nzFFT))+1/(2*dz), 1/dz)-1/(2*dz); apodPulse_kx_kz = np.zeros((nzFFT, nxFFT), dtype=np.complex); for i in np.arange(kx.size): apodPulse_kx_kz[:,i] = np.interp(c*np.sqrt(kx[i]**2+kz**2), np.abs(f), tx_pwResp[i,:], left=0, right=0); Kx, Kz = np.meshgrid(kx, kz); tx_kspace = apodPulse_kx_kz * np.abs(np.cos(np.arctan2(Kx, Kz))); return kz, tx_kspace;
MIT License
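A rough end-to-end usage sketch chaining pwResp, pwResp2kSpace and kspace2wavefield, assuming the script runs alongside KSpaceFunctions.py; every numeric value (grid spacing, element count, pulse spectrum, time axis) is an illustrative assumption, not taken from the repository's example scripts.

import numpy as np
from KSpaceFunctions import pwResp, pwResp2kSpace, kspace2wavefield

c = 1540.0                                  # speed of sound [m/s]
f = np.arange(1, 128) * 0.05e6              # frequency axis [Hz]
P_f = np.exp(-((f - 3e6) / 1e6) ** 2)       # toy pulse spectrum centred at 3 MHz
x = np.arange(-128, 128) * 0.2e-3           # lateral grid [m]
z = np.arange(256) * 0.2e-3                 # axial grid [m]
apod = np.ones(64)                          # uniform apodization over 64 elements
delay = np.zeros(64)                        # unsteered plane wave

kx, tx_pwResp = pwResp(x, 0.3e-3, apod, delay, P_f, f, c)
kz, tx_kspace = pwResp2kSpace(kx, f, tx_pwResp, z, c)
t = np.arange(0, 20e-6, 1e-6)               # time axis [s]
x_out, z_out, psf_t = kspace2wavefield(kx, kz, tx_kspace, c, t)
print(psf_t.shape)                          # (256, 256, 20)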
dojot/device-manager
DeviceManager/utils.py
format_response
python
def format_response(status, message=None): if message: payload = {'message': message, 'status': status} elif 200 <= status < 300: payload = {'message': 'ok', 'status': status} else: payload = {'message': 'Request failed', 'status': status} return make_response(jsonify(payload), status)
Utility helper to generate default status responses
https://github.com/dojot/device-manager/blob/31b630fe0969f6666f07db59a489772d7f0639d6/DeviceManager/utils.py#L14-L23
import base64 import json import random from flask import make_response, jsonify from Crypto.Cipher import AES from DeviceManager.conf import CONFIG BS = AES.block_size pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS) unpad = lambda s : s[:-ord(s[len(s)-1:])]
Apache License 2.0
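A hedged usage sketch of format_response inside a Flask view; the route and the device-lookup condition are placeholders, not dojot's actual handler.

from flask import Flask
from DeviceManager.utils import format_response

app = Flask(__name__)

@app.route('/device/<device_id>')
def get_device(device_id):
    if device_id == 'unknown':
        return format_response(404, 'No such device')   # {"message": "No such device", "status": 404}
    return format_response(200)                          # {"message": "ok", "status": 200}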
marcharper/stationary
stationary/processes/incentive_process.py
multivariate_transitions_gen
python
def multivariate_transitions_gen(N, incentive, num_types=3, mu=0.001, no_boundary=False): d = num_types - 1 one_step_indicies = list(one_step_indicies_generator(d)) if no_boundary: lower, upper = 1, N-1 else: lower, upper = 0, N for state in simplex_generator(N, d): if no_boundary: is_boundary = False for i in state: if i == 0: is_boundary = True break if is_boundary: continue s = 0. inc = incentive(state) denom = float(sum(inc)) for plus_index, minus_index in one_step_indicies: target_state = list(state) target_state[plus_index] += 1 target_state[minus_index] -= 1 target_state = tuple(target_state) if not is_valid_state(target_state, lower, upper): continue mutations = [mu / d] * num_types mutations[plus_index] = 1. - mu r = dot_product(inc, mutations) / denom transition = r * state[minus_index] / float(N) yield (state, target_state, transition) s += transition yield (state, state, 1. - s)
Computes transition probabilities for the Incentive process (generator). Parameters ---------- N: int Population size / simplex divisor incentive: function An incentive function from incentives.py num_types: int, 3 Number of types in population mu: float, 0.001 The mutation rate of the process no_boundary: bool, False Exclude the boundary states
https://github.com/marcharper/stationary/blob/c62d7d4ca98d43b8aa4c1805fdc25fc1da0801fd/stationary/processes/incentive_process.py#L52-L107
from ..utils.math_helpers import ( simplex_generator, one_step_indicies_generator, logsumexp, log_factorial, log_inc_factorial, factorial, inc_factorial) from ..utils.edges import ( edge_func_to_edges, states_from_edges, power_transitions) from numpy import log, exp from .incentives import * def is_valid_state(state, lower, upper): for i in state: if i < lower or i > upper: return False return True def multivariate_transitions(N, incentive, num_types=3, mu=0.001, no_boundary=False): return list(multivariate_transitions_gen( N, incentive, num_types=num_types, mu=mu, no_boundary=no_boundary))
MIT License
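A small illustration with a hand-rolled incentive callable; the real package ships incentive constructors in stationary.processes.incentives, which are not reproduced here. The check confirms that the outgoing probabilities of every state (self-loop included) sum to one.

from collections import defaultdict

def neutral_incentive(state):
    # Fitness proportional to current type counts (plus a tiny epsilon to avoid a zero denominator).
    return [i + 1e-10 for i in state]

N = 10
edges = list(multivariate_transitions_gen(N, neutral_incentive, num_types=3, mu=0.01))

totals = defaultdict(float)
for source, target, prob in edges:
    totals[source] += prob
assert all(abs(total - 1.0) < 1e-9 for total in totals.values())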
leonhard-s/auraxium
auraxium/ps2/_character.py
Character.stat
python
async def stat(self, results: int = 1, **kwargs: Any) -> List[CensusData]: collection: Final[str] = 'characters_stat' query = Query(collection, service_id=self._client.service_id, **kwargs) query.add_term(field=self.id_field, value=self.id) query.limit(results) payload = await self._client.request(query) data = extract_payload(payload, collection) return data
Return global statistics for this character. Any keyword arguments passed are forwarded to :class:`auraxium.census.Query`. .. warning:: This method is part of a provisional API and may be removed or altered in upcoming versions.
https://github.com/leonhard-s/auraxium/blob/8a1b7fb6e6e1b11334d69875df032ccc6da330bf/auraxium/ps2/_character.py#L494-L511
import logging from typing import Any, ClassVar, Final, List, Optional, Tuple, Type, Union, cast from ..base import Named, NamedT from .._cache import TLRUCache from ..census import Query from ..errors import NotFoundError from ..models import (CharacterAchievement, CharacterData, CharacterDirective, CharacterDirectiveObjective, CharacterDirectiveTier, CharacterDirectiveTree, TitleData) from .._proxy import InstanceProxy, SequenceProxy from .._rest import RequestClient, extract_payload, extract_single from ..types import CensusData, LocaleData from .._support import deprecated from ._faction import Faction from ._item import Item from ._outfit import Outfit, OutfitMember from ._profile import Profile from ._world import World __all__ = [ 'Character', 'Title' ] log = logging.getLogger('auraxium.ps2') class Title(Named, cache_size=300, cache_ttu=300.0): collection = 'title' data: TitleData id_field = 'title_id' _model = TitleData id: int name: LocaleData class Character(Named, cache_size=256, cache_ttu=30.0): _cache: ClassVar[TLRUCache[Union[int, str], 'Character']] collection = 'character' data: CharacterData id_field = 'character_id' _model = CharacterData id: int faction_id: int head_id: int title_id: int times: CharacterData.Times certs: CharacterData.Certs battle_rank: CharacterData.BattleRank profile_id: int prestige_level: int async def achievements(self, **kwargs: Any) -> List[CharacterAchievement]: collection: Final[str] = 'characters_achievement' query = Query(collection, service_id=self._client.service_id, **kwargs) query.limit(5000) query.add_term(field=self.id_field, value=self.id) payload = await self._client.request(query) data = extract_payload(payload, collection) return [CharacterAchievement(**d) for d in data] async def currency(self) -> Tuple[int, int]: collection: Final[str] = 'characters_currency' query = Query(collection, service_id=self._client.service_id) query.add_term(field=self.id_field, value=self.id) payload = await self._client.request(query) data = extract_single(payload, collection) return int(str(data['quantity'])), int(str(data['prestige_currency'])) async def directive(self, results: int = 1, **kwargs: Any) -> List[CharacterDirective]: collection: Final[str] = 'characters_directive' query = Query(collection, service_id=self._client.service_id, **kwargs) query.add_term(field=self.id_field, value=self.id) query.limit(results) payload = await self._client.request(query) data = extract_payload(payload, collection) return [CharacterDirective(**d) for d in data] async def directive_objective(self, results: int = 1, **kwargs: Any ) -> List[CharacterDirectiveObjective]: collection: Final[str] = 'characters_directive_objective' query = Query(collection, service_id=self._client.service_id, **kwargs) query.add_term(field=self.id_field, value=self.id) query.limit(results) payload = await self._client.request(query) data = extract_payload(payload, collection) return [CharacterDirectiveObjective(**d) for d in data] async def directive_tier(self, results: int = 1, **kwargs: Any) -> List[CharacterDirectiveTier]: collection: Final[str] = 'characters_directive_tier' query = Query(collection, service_id=self._client.service_id, **kwargs) query.add_term(field=self.id_field, value=self.id) query.limit(results) payload = await self._client.request(query) data = extract_payload(payload, collection) return [CharacterDirectiveTier(**d) for d in data] async def directive_tree(self, results: int = 1, **kwargs: Any) -> List[CharacterDirectiveTree]: collection: Final[str] = 
'characters_directive_tree' query = Query(collection, service_id=self._client.service_id, **kwargs) query.add_term(field=self.id_field, value=self.id) query.limit(results) payload = await self._client.request(query) data = extract_payload(payload, collection) return [CharacterDirectiveTree(**d) for d in data] async def events(self, **kwargs: Any) -> List[CensusData]: collection: Final[str] = 'characters_event' query = Query(collection, service_id=self._client.service_id, **kwargs) query.add_term(field=self.id_field, value=self.id) query.limit(1000) payload = await self._client.request(query) data = extract_payload(payload, collection=collection) return data async def events_grouped(self, **kwargs: Any) -> List[CensusData]: collection: Final[str] = 'characters_event_grouped' query = Query(collection, service_id=self._client.service_id, **kwargs) query.add_term(field=self.id_field, value=self.id) query.limit(100_000) payload = await self._client.request(query) data = extract_payload(payload, collection=collection) return data def faction(self) -> InstanceProxy[Faction]: query = Query(Faction.collection, service_id=self._client.service_id) query.add_term(field=Faction.id_field, value=self.data.faction_id) return InstanceProxy(Faction, query, client=self._client) async def friends(self) -> List['Character']: collection: Final[str] = 'characters_friend' query = Query(collection, service_id=self._client.service_id) query.add_term(field=Character.id_field, value=self.id) join = query.create_join(self.collection) join.set_list(True) payload = await self._client.request(query) data = extract_single(payload, collection) character_ids: List[str] = [ str(d['character_id']) for d in cast(List[CensusData], data['friend_list'])] characters = await Character.find( results=len(character_ids), client=self._client, character_id=','.join(character_ids)) return characters @classmethod @deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.get`') async def get_by_name(cls: Type[NamedT], name: str, *, locale: str = 'en', client: RequestClient) -> Optional[NamedT]: log.debug('%s "%s"[%s] requested', cls.__name__, name, locale) if (instance := cls._cache.get(f'_{name.lower()}')) is not None: log.debug('%r restored from cache', instance) return instance log.debug('%s "%s"[%s] not cached, generating API query...', cls.__name__, name, locale) query = Query(cls.collection, service_id=client.service_id, name__first_lower=name.lower()).limit(1) data = await client.request(query) try: payload = extract_single(data, cls.collection) except NotFoundError: return None return cls(payload, client=client) @classmethod async def get_online(cls, id_: int, *args: int, client: RequestClient ) -> List['Character']: char_ids = [id_] char_ids.extend(args) log.debug('Retrieving online status for %s characters', len(char_ids)) query = Query(cls.collection, service_id=client.service_id, character_id=','.join(str(c) for c in char_ids)) query.limit(len(char_ids)).resolve('online_status') data = await client.request(query) payload = extract_payload(data, cls.collection) return [cls(c, client=client) for c in payload if int(str(c['online_status']))] def items(self) -> SequenceProxy[Item]: collection: Final[str] = 'characters_item' query = Query(collection, service_id=self._client.service_id) query.add_term(field=self.id_field, value=self.id) query.limit(5000) join = query.create_join(Item.collection) join.set_fields(Item.id_field) return SequenceProxy(Item, query, client=self._client) async def is_online(self) -> bool: return 
bool(int(await self.online_status())) async def name_long(self, locale: str = 'en') -> str: if self.title_id != 0: title = await self.title() if title is not None: title_name = getattr(title.name, locale, None) if title_name is not None: return f'{title_name} {self.name.first}' return self.name.first async def online_status(self) -> int: collection: Final[str] = 'characters_online_status' query = Query(collection, service_id=self._client.service_id) query.add_term(field=self.id_field, value=self.id) payload = await self._client.request(query) data = extract_single(payload, collection) return int(str(data['online_status'])) def outfit(self) -> InstanceProxy[Outfit]: collection: Final[str] = 'outfit_member_extended' query = Query(collection, service_id=self._client.service_id) query.add_term(field=self.id_field, value=self.id) return InstanceProxy(Outfit, query, client=self._client) def outfit_member(self) -> InstanceProxy[OutfitMember]: query = Query( OutfitMember.collection, service_id=self._client.service_id) query.add_term(field=self.id_field, value=self.id) return InstanceProxy(OutfitMember, query, client=self._client) def profile(self) -> InstanceProxy[Profile]: query = Query(Profile.collection, service_id=self._client.service_id) query.add_term(field=Profile.id_field, value=self.data.profile_id) return InstanceProxy(Profile, query, client=self._client) async def skill(self, results: int = 1, **kwargs: Any) -> List[CensusData]: collection: Final[str] = 'characters_skill' query = Query(collection, service_id=self._client.service_id, **kwargs) query.add_term(field=self.id_field, value=self.id) query.limit(results) payload = await self._client.request(query) data = extract_payload(payload, collection) return data
MIT License
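A rough async usage sketch for Character.stat; the service ID and character name are placeholders, and the exact client lookup helper may differ between auraxium versions.

import asyncio
import auraxium
from auraxium.ps2 import Character

async def main():
    async with auraxium.Client(service_id='s:example') as client:
        char = await client.get_by_name(Character, 'SomeCharacter')
        if char is not None:
            for row in await char.stat(results=5):
                print(row)   # raw CensusData dictionaries

asyncio.run(main())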
ucfopen/canvasapi
canvasapi/discussion_topic.py
DiscussionTopic.mark_as_unread
python
def mark_as_unread(self, **kwargs): response = self._requester.request( "DELETE", "{}s/{}/discussion_topics/{}/read".format( self._parent_type, self._parent_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) return response.status_code == 204
Mark the initial text of the discussion topic as unread. :calls: `DELETE /api/v1/courses/:course_id/discussion_topics/:topic_id/read \ <https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics_api.mark_topic_unread>`_ or `DELETE /api/v1/groups/:group_id/discussion_topics/:topic_id/read \ <https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics_api.mark_topic_unread>`_ :rtype: bool
https://github.com/ucfopen/canvasapi/blob/2ac9979d17979932a3f43eb8737b7648566c1c68/canvasapi/discussion_topic.py#L175-L194
from canvasapi.canvas_object import CanvasObject from canvasapi.paginated_list import PaginatedList from canvasapi.util import combine_kwargs, obj_or_id class DiscussionTopic(CanvasObject): def __str__(self): return "{} ({})".format(self.title, self.id) @property def _parent_id(self): if hasattr(self, "course_id"): return self.course_id elif hasattr(self, "group_id"): return self.group_id elif hasattr(self, "context_code"): if self.context_code.startswith("course_"): self.course_id = self.context_code.split("_")[1] return self.course_id elif self.context_code.startswith("group_"): self.group_id = self.context_code.split("_")[1] return self.group_id else: raise ValueError("Discussion Topic does not have a course_id or group_id") @property def _parent_type(self): if hasattr(self, "course_id"): return "course" elif hasattr(self, "group_id"): return "group" elif hasattr(self, "context_code"): if self.context_code.startswith("course"): return "course" elif self.context_code.startswith("group"): return "group" else: raise ValueError("Discussion Topic does not have a course_id or group_id") def delete(self, **kwargs): response = self._requester.request( "DELETE", "{}s/{}/discussion_topics/{}".format( self._parent_type, self._parent_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) return "deleted_at" in response.json() def get_entries(self, ids, **kwargs): entry_ids = [obj_or_id(item, "ids", (DiscussionEntry,)) for item in ids] kwargs.update(ids=entry_ids) return PaginatedList( DiscussionEntry, self._requester, "GET", "{}s/{}/discussion_topics/{}/entry_list".format( self._parent_type, self._parent_id, self.id ), { "discussion_id": self.id, "{}_id".format(self._parent_type): self._parent_id, }, _kwargs=combine_kwargs(**kwargs), ) def get_parent(self, **kwargs): from canvasapi.course import Course from canvasapi.group import Group response = self._requester.request( "GET", "{}s/{}".format(self._parent_type, self._parent_id), _kwargs=combine_kwargs(**kwargs), ) if self._parent_type == "group": return Group(self._requester, response.json()) elif self._parent_type == "course": return Course(self._requester, response.json()) def get_topic_entries(self, **kwargs): return PaginatedList( DiscussionEntry, self._requester, "GET", "{}s/{}/discussion_topics/{}/entries".format( self._parent_type, self._parent_id, self.id ), { "discussion_id": self.id, "{}_id".format(self._parent_type): self._parent_id, }, _kwargs=combine_kwargs(**kwargs), ) def mark_as_read(self, **kwargs): response = self._requester.request( "PUT", "{}s/{}/discussion_topics/{}/read".format( self._parent_type, self._parent_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) return response.status_code == 204
MIT License
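A hedged example of reaching mark_as_unread through the public canvasapi entry points; the URL, key and IDs are placeholders.

from canvasapi import Canvas

canvas = Canvas('https://canvas.example.edu', 'API_KEY')
course = canvas.get_course(1234)
topic = course.get_discussion_topic(5678)
if topic.mark_as_unread():
    print('Topic marked as unread')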
vforwater/hydrobox
hydrobox/utils/decorators.py
accept
python
def accept(**types): def decorator(f): _f = f while hasattr(_f, '__wrapped__'): _f = getattr(_f, '__wrapped__') code = _f.__code__ fname = _f.__name__ names = code.co_varnames[:code.co_argcount] @wraps(f) def decorated(*args, **kwargs): for argname, argtype in types.items(): if argname in kwargs: argval = kwargs.get(argname) else: try: argval = args[names.index(argname)] except IndexError: continue if argtype == 'callable': if not callable(argval): raise TypeError('%s(...): arg %s: type is %s, but shall be callable.' % (fname, argname, type(argval))) else: continue elif argval is None: if not (argtype == 'None' or (isinstance(argtype, (list, tuple)) and 'None' in argtype)): raise TypeError('%s(...): arg %s: is None, must be %s.' %(fname, argname, argtype)) else: continue if isinstance(argtype, (tuple, list)) and 'None' in argtype: argtype = list(argtype) argtype.remove('None') argtype = tuple(argtype) if isinstance(argtype, (tuple, list)) and 'callable' in argtype: argtype = list(argtype) argtype.remove('callable') argtype = tuple(argtype) if not isinstance(argval, argtype) and not callable(argval): raise TypeError("{0}(...); arg {1}: is not callable or of type {2}".format(fname, argname, argtype)) else: continue if not isinstance(argval, argtype): raise TypeError("%s(...): arg %s: type is %s, must be %s." % (fname, argname, type(argval), argtype)) return f(*args, **kwargs) return decorated return decorator
Decorator used to define accepted argument types for the toolbox functions. Usage ----- .. code-block:: python @accept(foo=str, bar=(int,float)) def f(foo, bar): pass :param types: arguments to the decorated function and the allowed types :return: decorated function
https://github.com/vforwater/hydrobox/blob/ae7d10bf5aa48bf7daf3d1094e6bb66f0a7ce96b/hydrobox/utils/decorators.py#L8-L86
from functools import wraps
MIT License
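An example (not from the hydrobox docs) exercising the special 'None' and 'callable' string markers that the decorator body handles:

@accept(window=(int, 'None'), func=('callable', str))
def smooth(series, window=None, func='mean'):
    return series

smooth([1, 2, 3])                        # ok: window defaults to None, func is a str
smooth([1, 2, 3], window=5, func=max)    # ok: func may also be any callable
# smooth([1, 2, 3], window='5')          # raises TypeError: window must be int or None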
gsong/python-epo-ops-client
tests/middlewares/throttle/conftest.py
generate_sample_throttle_snapshot_reprs
python
def generate_sample_throttle_snapshot_reprs(throttle_snapshot): sample_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sample") makedirs(sample_path) fheader = os.path.join(sample_path, "throttle_snapshot.header") fdict = os.path.join(sample_path, "throttle_snapshot.dict") with open(fheader, "wb+", encoding="utf-8") as of: of.write(throttle_snapshot.as_header()) with open(fdict, "wb+", encoding="utf-8") as of: of.write(pformat(throttle_snapshot.as_dict()))
Generate sample header and dict representations
https://github.com/gsong/python-epo-ops-client/blob/a36fe38202f0aa5a3e81b067d9cbcdacfdcb0b78/tests/middlewares/throttle/conftest.py#L176-L185
import os from codecs import open from datetime import timedelta from pprint import pformat from random import choice, shuffle import pytest from requests.structures import CaseInsensitiveDict from epo_ops.middlewares.throttle.storages import SQLite from epo_ops.utils import makedirs, now from .helpers.conftest_helpers import ServiceSnapshot, ThrottleSnapshot def generate_timestamps(deltas): return [now() - timedelta(minutes=d) for d in deltas] def make_throttle_snapshot(system_status, services): snapshots = [ ServiceSnapshot(service, status, limit) for service, (status, limit) in services.items() ] return ThrottleSnapshot(system_status, snapshots) def make_header(control, retry=None): h = CaseInsensitiveDict({"X-Throttling-Control": control}) if retry: h["Retry-After"] = retry return h @pytest.fixture def cols(storage): return storage.service_columns() @pytest.fixture def expired_timestamps(): deltas = (2, 1.5) return generate_timestamps(deltas) @pytest.fixture def valid_timestamps(): deltas = (0.75, 0.5, 0) return generate_timestamps(deltas) @pytest.fixture def service_status(): class ServiceStatus(object): @property def green(self): return ("green", 200) @property def yellow(self): return ("yellow", 50) @property def red(self): return ("red", 5) @property def black(self): return ("black", 0) return ServiceStatus() @pytest.fixture def retry_after_value(): return 60000 @pytest.fixture def throttle_snapshot(service_status): return make_throttle_snapshot( "idle", { "images": service_status.green, "inpadoc": service_status.yellow, "other": service_status.red, "retrieval": service_status.black, "search": service_status.green, }, ) @pytest.fixture def header(throttle_snapshot, retry_after_value): return make_header(throttle_snapshot.as_header(), retry_after_value) @pytest.fixture def expired_throttle_history(storage, expired_timestamps): def _services_dict(limit): sd = {} for s in SQLite.SERVICES: sd[s] = ("green", limit) return sd limits = (1000, 2000) for limit in limits: snapshot = make_throttle_snapshot("idle", _services_dict(limit)) storage.update(make_header(snapshot.as_header())) sql = "UPDATE throttle_history SET timestamp=? WHERE images_limit=?" for param in zip(expired_timestamps, limits): storage.db.execute(sql, param) return storage @pytest.fixture def throttle_history(expired_throttle_history, retry_after_value): storage = expired_throttle_history system_stats = ("idle", "busy", "overloaded") lights = ("green", "yellow", "red") sample_count = 4 expected = {} service_limits = {} def _range(end, step=-10): start = end + -step * (sample_count - 1) return range(start, end - 1, step) def _services_dicts(limits): snapshots = [] for i in range(sample_count): sd = {} for k, v in limits.items(): sd[k] = (choice(lights), v[i]) snapshots.append(sd) return snapshots for service, limit in zip(SQLite.SERVICES, (200, 100, 60, 10, 5)): expected[service] = 60.0 / limit service_limits[service] = list(_range(limit)) shuffle(service_limits[service]) for d in _services_dicts(service_limits): storage.update( make_header(make_throttle_snapshot(choice(system_stats), d).as_header()) ) services = list(SQLite.SERVICES) services.remove("search") services = ", ".join(["{0}=green:1000".format(s) for s in services]) storage.update( make_header( "{0} (search=black:0, {1})".format(choice(system_stats), services), retry_after_value, ) ) expected["search"] = retry_after_value / 1000.0 th = {"expected": expected, "storage": storage} return th @pytest.fixture
Apache License 2.0
fuyukai/asyncqlio
asyncqlio/backends/base.py
BaseDialect.has_cascade
python
def has_cascade(self) -> bool: return False
Returns True if this dialect has DROP TABLE ... CASCADE.
https://github.com/fuyukai/asyncqlio/blob/9bdb49076dea14730ec39e6d033061d6bccc016c/asyncqlio/backends/base.py#L67-L71
import collections import typing from abc import abstractmethod from collections import OrderedDict from urllib.parse import ParseResult, parse_qs from asyncqlio.meta import AsyncABC class BaseDialect: @property def has_checkpoints(self) -> bool: return False @property def has_serial(self) -> bool: return False @property def has_returns(self) -> bool: return False @property def has_ilike(self) -> bool: return False @property def has_default(self) -> bool: return False @property def has_truncate(self) -> bool: return False @property
MIT License
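A sketch of how a concrete dialect might advertise CASCADE support; this PostgresDialect class is illustrative, not asyncqlio's actual postgres backend.

class PostgresDialect(BaseDialect):
    @property
    def has_cascade(self) -> bool:
        return True

def drop_table_sql(dialect: BaseDialect, table: str) -> str:
    suffix = " CASCADE" if dialect.has_cascade else ""
    return "DROP TABLE {}{};".format(table, suffix)

print(drop_table_sql(PostgresDialect(), "users"))   # DROP TABLE users CASCADE;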
pythonhacker/rotating-proxy-daemon
base.py
ProxyRotator.alive
python
def alive(self): return os.path.isfile(self.hbf)
Return whether I should be alive
https://github.com/pythonhacker/rotating-proxy-daemon/blob/46c8a9f4a72562c0606467d8623a97d3b08fcda1/base.py#L261-L264
import os import threading import signal import time import collections import email_report from config import * from utils import daemonize class ProxyRotator(object): def __init__(self, cfg='proxy.conf', test_mode=False, rotate=False, region=None): self.config = ProxyConfig(cfg=cfg) print 'Frequency set to',self.config.frequency,'seconds.' self.test_mode = test_mode self.alarm = threading.Event() self.alarm.clear() self.hbf = '.heartbeat' self.vps_command = None if rotate: print 'Rotating a node' self.rotate(region=region) signal.signal(signal.SIGTERM, self.sighandler) signal.signal(signal.SIGUSR1, self.sighandler) def pick_region(self): regions = self.config.get_active_regions() random.shuffle(self.config.region_ids) for reg in self.config.region_ids: if reg not in regions: return reg return random.choice(self.config.region_ids) def rotate(self, region=None): proxy_out_label = None if region == None: print 'Picking a region ...' region = self.pick_region() else: print 'Using supplied region',region,'...' new_proxy, proxy_id = self.make_new_instance(region) if self.config.policy == Policy.ROTATION_RANDOM: proxy_out = self.config.get_proxy_for_rotation(use_random=True, input_region=region) elif self.config.policy == Policy.ROTATION_NEW_REGION: proxy_out = self.config.get_proxy_for_rotation(region_switch=True, input_region=region) elif self.config.policy == Policy.ROTATION_LRU: proxy_out = self.config.get_proxy_for_rotation(least_used=True, input_region=region) elif self.config.policy == Policy.ROTATION_LRU_NEW_REGION: proxy_out = self.config.get_proxy_for_rotation(least_used=True, region_switch=True, input_region=region) self.config.switch_in_proxy(new_proxy, proxy_id, region) print 'Switched in new proxy',new_proxy self.config.write() print 'Wrote new configuration.' ret1 = self.config.write_lb_config() ret2 = self.config.reload_lb() if ret1 and ret2: if proxy_out != None: print 'Switched out proxy',proxy_out proxy_out_id = int(self.config.get_proxy_id(proxy_out)) if proxy_out_id != 0: proxy_out_label = self.get_instance_label(proxy_out_id) print 'Removing switched out instance',proxy_out_id self.delete_instance(proxy_out_id) else: ,proxy_out else: print 'Error - Did not switch out proxy as there was a problem in writing/restarting LB' if proxy_out_label != None: print 'Assigning label',proxy_out_label,'to new instance',proxy_id time.sleep(5) self.update_instance(proxy_id, proxy_out_label, self.config.group) print 'Post-processing',new_proxy,'...' self.post_process(new_proxy) self.send_email(proxy_out, proxy_out_label, new_proxy, region) def post_process(self, ip): time.sleep(5) cmd = post_process_cmd_template % (self.config.user, ip, iptables_restore_cmd) print 'SSH command 1=>',cmd os.system(cmd) cmd = post_process_cmd_template % (self.config.user, ip, squid_restart_cmd) print 'SSH command 2=>',cmd os.system(cmd) def provision(self, count=8, add=False): if not add: self.drop() num, idx = 0, 0 if add: start = len(self.config.get_active_proxies()) else: start = 0 for i in range(start, start + count): region = self.config.region_ids[idx % len(self.config.region_ids) ] try: ip, lid = self.make_new_instance(region) new_label = self.config.proxy_prefix + str(i+1) self.update_instance(int(lid), new_label, self.config.group) num += 1 except Exception, e: print 'Error creating instance',e idx += 1 print 'Provisioned',num,' proxies.' 
self.write_proxies() def write_proxies(self): proxies_list = self.vps_command.get_proxies() for i in range(5): random.shuffle(proxies_list) filename = self.config.proxylist print >> open(filename, 'w'), '\n'.join(proxies_list) print 'Saved current proxy configuration to {}'.format(filename) def test(self): proxy_out_label = '' region = self.pick_region() print 'Rotating proxy to new region',region,'...' new_proxy, proxy_id = self.make_new_instance(region, test=True) proxy_out = self.config.get_proxy_for_rotation(least_used=True, region_switch=True, input_region=region) if proxy_out != None: print 'Switched out proxy',proxy_out proxy_out_id = int(self.config.get_proxy_id(proxy_out)) proxy_out_label = self.get_instance_label(proxy_out_id) self.config.switch_in_proxy(new_proxy, proxy_id, region) print 'Switched in new proxy',new_proxy self.config.write_lb_config(test=True) self.send_email(proxy_out, proxy_out_label, new_proxy, region) def stop(self): try: os.remove(self.hbf) self.alarm.set() return True except (IOError, OSError), e: pass return False def sighandler(self, signum, stack): self.stop() def run(self): open(self.hbf,'w').write('') print 'Daemonizing...' daemonize('rotator.pid',logfile='rotator.log', drop=True) print 'Proxy rotate daemon started.' count = 1 while True: self.alarm.wait(self.config.frequency) status = self.alive() if not status: print 'Daemon signalled to exit. Quitting ...' break print 'Rotating proxy node, round #%d ...' % count if self.test_mode: self.test() else: self.rotate() count += 1 sys.exit(0) def create(self, region=3): print 'Creating new instance in region',region,'...' new_proxy = self.make_new_instance(region, verbose=True) return new_proxy def send_email(self, proxy_out, label, proxy_in, region): print 'Sending email...' region = region_dict[region] content = email_template % locals() email_config = self.config.get_email_config() email_report.email_report(email_config, "%s", content)
MIT License
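The heartbeat-file pattern behind alive(), shown in isolation; the file name and polling interval are illustrative.

import os
import time

HEARTBEAT = '.heartbeat'

def start():
    open(HEARTBEAT, 'w').close()       # run() writes the file on startup

def stop():
    try:
        os.remove(HEARTBEAT)           # stop()/SIGTERM removes it
    except OSError:
        pass

def alive():
    return os.path.isfile(HEARTBEAT)   # the daemon loop polls this between rotations

start()
while alive():
    time.sleep(1)                      # the daemon uses alarm.wait(frequency) instead
    stop()                             # stop immediately so the demo terminates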
nsls-ii/pyxrf
pyxrf/gui_support/gpc_class.py
GlobalProcessingClasses.get_metadata_scan_id
python
def get_metadata_scan_id(self): if self.is_scan_metadata_available(): scan_id = self.io_model.scan_metadata["scan_id"] else: scan_id = 0 return scan_id
Reads Run ID from metadata. Check if metadata exists using `is_scan_metadata_available()` before calling this function.
https://github.com/nsls-ii/pyxrf/blob/0aa4e175f541edfaa8f71daf54b54a07e4ab2b04/pyxrf/gui_support/gpc_class.py#L327-L336
from __future__ import absolute_import import os import copy import math from ..model.fileio import FileIOModel from ..model.lineplot import LinePlotModel from ..model.parameters import ParamModel, save_as, fit_strategy_list, bound_options from ..model.draw_image import DrawImageAdvanced from ..model.draw_image_rgb import DrawImageRGB from ..model.fit_spectrum import Fit1D, get_cs from ..model.roi_model import ROIModel from ..model.param_data import param_data from ..core.xrf_utils import get_eline_energy import logging logger = logging.getLogger(__name__) class GlobalProcessingClasses: def __init__(self): self.defaults = None self.io_model = None self.param_model = None self.plot_model = None self.fit_model = None self.roi_model = None self.img_model_adv = None self.img_model_rgb = None def _get_defaults(self): working_directory = os.getcwd() logger.info(f"Starting PyXRF in the current working directory '{working_directory}'") default_parameters = param_data return working_directory, default_parameters def initialize(self): working_directory, default_parameters = self._get_defaults() self.io_model = FileIOModel(working_directory=working_directory) self.param_model = ParamModel(default_parameters=default_parameters, io_model=self.io_model) self.plot_model = LinePlotModel(param_model=self.param_model, io_model=self.io_model) self.fit_model = Fit1D( param_model=self.param_model, io_model=self.io_model, working_directory=working_directory ) self.roi_model = ROIModel(param_model=self.param_model, io_model=self.io_model) self.img_model_adv = DrawImageAdvanced(io_model=self.io_model) self.img_model_rgb = DrawImageRGB(io_model=self.io_model, img_model_adv=self.img_model_adv) self.plot_model.roi_dict = self.roi_model.roi_dict self.io_model.observe("working_directory", self.fit_model.result_folder_changed) self.io_model.observe("working_directory", self.roi_model.result_folder_changed) self.io_model.observe("selected_file_name", self.fit_model.data_title_update) self.io_model.observe("selected_file_name", self.plot_model.exp_label_update) self.io_model.observe("selected_file_name", self.roi_model.data_title_update) self.io_model.observe("file_name", self.fit_model.filename_update) self.io_model.observe("file_name", self.plot_model.plot_exp_data_update) self.io_model.observe("file_name", self.roi_model.filename_update) self.io_model.observe("runid", self.fit_model.runid_update) self.io_model.observe("data", self.plot_model.exp_data_update) self.io_model.observe("img_dict_is_updated", self.fit_model.img_dict_updated) self.io_model.observe("img_dict_is_updated", self.img_model_adv.img_dict_updated) self.io_model.observe("img_dict_is_updated", self.img_model_rgb.img_dict_updated) self.io_model.observe("incident_energy_set", self.plot_model.set_incident_energy) self.io_model.observe("incident_energy_set", self.img_model_adv.set_incident_energy) self.img_model_adv.observe("scaler_name_index", self.fit_model.scaler_index_update) self.img_model_adv.observe("dict_to_plot", self.fit_model.dict_to_plot_update) self.img_model_adv.observe("img_title", self.fit_model.img_title_update) self.param_model.observe("energy_bound_high_buf", self.plot_model.energy_bound_high_update) self.param_model.observe("energy_bound_low_buf", self.plot_model.energy_bound_low_update) logger.info("pyxrf started.") def add_parameters_changed_cb(self, cb): self.param_model.add_parameters_changed_cb(cb) def remove_parameters_changed_cb(self, cb): self.param_model.remove_parameters_changed_cb(cb) def fitting_parameters_changed(self): 
self.param_model.parameters_changed() def get_window_title(self): return self.io_model.window_title def is_databroker_available(self): return self.io_model.is_databroker_available() def open_data_file(self, file_path): self.io_model.data_ready = False self.io_model.file_name = "temp" f_dir, f_name = os.path.split(file_path) self.io_model.working_directory = f_dir def _update_data(): self.fit_model.fit_img = {} self.plot_model.update_preview_spectrum_plot() self.plot_model.update_total_count_map_preview(new_plot=True) try: self.io_model.file_name = f_name except Exception: _update_data() self.fitting_parameters_changed() self.plot_model.plot_exp_opt = False self.plot_model.show_fit_opt = False logger.info(f"Failed to load the file '{f_name}'.") self.io_model.window_title_clear() raise else: _update_data() self.fitting_parameters_changed() try: self.plot_model.plot_exp_opt = False except Exception as ex: logger.error(f"Exception was raised while removing experimental data plot: {str(ex)}") try: self.plot_model.plot_exp_opt = True except Exception as ex: logger.error(f"Exception was raised while plotting experimental data: {str(ex)}") try: self.plot_model.show_fit_opt = False except Exception as ex: logger.error(f"Exception was raised while removing fitted data plot: {str(ex)}") try: self.plot_model.show_fit_opt = True except Exception as ex: logger.error(f"Exception was raised while plotting fitted data: {str(ex)}") self.io_model.window_title_set_file_name(f_name) print("======== Set window title") if not self.io_model.incident_energy_available: msg = ( "Incident energy is not available in scan metadata and must be set manually. " "Incident energy may be set by changing 'Incident energy, keV' parameter " "in the dialog boxes opened using 'Find Automatically...' ('Find Elements " "in sample' or 'General...' ('General Settings for Fitting Alogirthm') " "buttons in 'Model' tab." 
) else: msg = "" logger.info(f"Loading of the file '{file_path}' is completed.") return msg def load_run_from_db(self, run_id_uid): self.io_model.data_ready = False def _update_data(): self.fit_model.fit_img = {} self.plot_model.update_preview_spectrum_plot() self.plot_model.update_total_count_map_preview(new_plot=True) try: self.io_model.load_data_runid(run_id_uid) except Exception: _update_data() self.fitting_parameters_changed() self.plot_model.plot_exp_opt = False self.plot_model.show_fit_opt = False logger.info(f"Failed to load the run #{run_id_uid}.") self.io_model.window_title_clear() raise else: _update_data() self.fitting_parameters_changed() self.plot_model.plot_exp_opt = False self.plot_model.plot_exp_opt = True self.plot_model.show_fit_opt = False self.plot_model.show_fit_opt = True self.io_model.window_title_set_run_id(self.io_model.runid) file_name = self.io_model.file_name msg = "" logger.info("Loading of the run is completed") return msg, file_name def select_preview_dataset(self, *, dset_name, is_visible): self.io_model.data_sets[dset_name].selected_for_preview = True if is_visible else False self.io_model.update_data_set_buffers() self.plot_model.update_preview_spectrum_plot() self.plot_model.update_total_count_map_preview() def get_current_working_directory(self): return self.io_model.working_directory def set_current_working_directory(self, working_directory): self.io_model.working_directory = working_directory def get_load_each_channel(self): return self.io_model.load_each_channel def set_load_each_channel(self, load_each_channel): self.io_model.load_each_channel = load_each_channel def get_file_channel_list(self): return self.io_model.file_channel_list def is_dset_item_selected_for_preview(self, item): return bool(self.io_model.data_sets[item].selected_for_preview) def get_loaded_file_name(self): return self.io_model.file_name def is_scan_metadata_available(self): return bool(self.io_model.scan_metadata_available) def get_formatted_metadata(self): return self.io_model.scan_metadata.get_formatted_output()
BSD 3-Clause New or Revised License
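A hedged usage sketch built from the methods shown above; the HDF5 file path is a placeholder and PyXRF's data models must be importable.

gpc = GlobalProcessingClasses()
gpc.initialize()
gpc.open_data_file('/tmp/pyxrf_work/scan_12345.h5')
if gpc.is_scan_metadata_available():
    print('Scan ID:', gpc.get_metadata_scan_id())
else:
    print('No scan metadata available; get_metadata_scan_id() would return 0')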
brython-dev/brython
www/src/Lib/csv.py
Sniffer._guess_delimiter
python
def _guess_delimiter(self, data, delimiters): data = list(filter(None, data.split('\n'))) ascii = [chr(c) for c in range(127)] chunkLength = min(10, len(data)) iteration = 0 charFrequency = {} modes = {} delims = {} start, end = 0, chunkLength while start < len(data): iteration += 1 for line in data[start:end]: for char in ascii: metaFrequency = charFrequency.get(char, {}) freq = line.count(char) metaFrequency[freq] = metaFrequency.get(freq, 0) + 1 charFrequency[char] = metaFrequency for char in charFrequency.keys(): items = list(charFrequency[char].items()) if len(items) == 1 and items[0][0] == 0: continue if len(items) > 1: modes[char] = max(items, key=lambda x: x[1]) items.remove(modes[char]) modes[char] = (modes[char][0], modes[char][1] - sum(item[1] for item in items)) else: modes[char] = items[0] modeList = modes.items() total = float(min(chunkLength * iteration, len(data))) consistency = 1.0 threshold = 0.9 while len(delims) == 0 and consistency >= threshold: for k, v in modeList: if v[0] > 0 and v[1] > 0: if ((v[1]/total) >= consistency and (delimiters is None or k in delimiters)): delims[k] = v consistency -= 0.01 if len(delims) == 1: delim = list(delims.keys())[0] skipinitialspace = (data[0].count(delim) == data[0].count("%c " % delim)) return (delim, skipinitialspace) start = end end += chunkLength if not delims: return ('', 0) if len(delims) > 1: for d in self.preferred: if d in delims.keys(): skipinitialspace = (data[0].count(d) == data[0].count("%c " % d)) return (d, skipinitialspace) items = [(v,k) for (k,v) in delims.items()] items.sort() delim = items[-1][1] skipinitialspace = (data[0].count(delim) == data[0].count("%c " % delim)) return (delim, skipinitialspace)
The delimiter /should/ occur the same number of times on each row. However, due to malformed data, it may not. We don't want an all or nothing approach, so we allow for small variations in this number. 1) build a table of the frequency of each character on every line. 2) build a table of frequencies of this frequency (meta-frequency?), e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows, 7 times in 2 rows' 3) use the mode of the meta-frequency to determine the /expected/ frequency for that character 4) find out how often the character actually meets that goal 5) the character that best meets its goal is the delimiter For performance reasons, the data is evaluated in chunks, so it can try and evaluate the smallest portion of the data possible, evaluating additional chunks as necessary.
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/csv.py#L280-L380
import re from _csv import Error, __version__, writer, reader, register_dialect, unregister_dialect, get_dialect, list_dialects, field_size_limit, QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, __doc__ from _csv import Dialect as _Dialect from io import StringIO __all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE", "Error", "Dialect", "__doc__", "excel", "excel_tab", "field_size_limit", "reader", "writer", "register_dialect", "get_dialect", "list_dialects", "Sniffer", "unregister_dialect", "__version__", "DictReader", "DictWriter", "unix_dialect"] class Dialect: _name = "" _valid = False delimiter = None quotechar = None escapechar = None doublequote = None skipinitialspace = None lineterminator = None quoting = None def __init__(self): if self.__class__ != Dialect: self._valid = True self._validate() def _validate(self): try: _Dialect(self) except TypeError as e: raise Error(str(e)) class excel(Dialect): delimiter = ',' quotechar = '"' doublequote = True skipinitialspace = False lineterminator = '\r\n' quoting = QUOTE_MINIMAL register_dialect("excel", excel) class excel_tab(excel): delimiter = '\t' register_dialect("excel-tab", excel_tab) class unix_dialect(Dialect): delimiter = ',' quotechar = '"' doublequote = True skipinitialspace = False lineterminator = '\n' quoting = QUOTE_ALL register_dialect("unix", unix_dialect) class DictReader: def __init__(self, f, fieldnames=None, restkey=None, restval=None, dialect="excel", *args, **kwds): self._fieldnames = fieldnames self.restkey = restkey self.restval = restval self.reader = reader(f, dialect, *args, **kwds) self.dialect = dialect self.line_num = 0 def __iter__(self): return self @property def fieldnames(self): if self._fieldnames is None: try: self._fieldnames = next(self.reader) except StopIteration: pass self.line_num = self.reader.line_num return self._fieldnames @fieldnames.setter def fieldnames(self, value): self._fieldnames = value def __next__(self): if self.line_num == 0: self.fieldnames row = next(self.reader) self.line_num = self.reader.line_num while row == []: row = next(self.reader) d = dict(zip(self.fieldnames, row)) lf = len(self.fieldnames) lr = len(row) if lf < lr: d[self.restkey] = row[lf:] elif lf > lr: for key in self.fieldnames[lr:]: d[key] = self.restval return d class DictWriter: def __init__(self, f, fieldnames, restval="", extrasaction="raise", dialect="excel", *args, **kwds): self.fieldnames = fieldnames self.restval = restval if extrasaction.lower() not in ("raise", "ignore"): raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'" % extrasaction) self.extrasaction = extrasaction self.writer = writer(f, dialect, *args, **kwds) def writeheader(self): header = dict(zip(self.fieldnames, self.fieldnames)) return self.writerow(header) def _dict_to_list(self, rowdict): if self.extrasaction == "raise": wrong_fields = rowdict.keys() - self.fieldnames if wrong_fields: raise ValueError("dict contains fields not in fieldnames: " + ", ".join([repr(x) for x in wrong_fields])) return (rowdict.get(key, self.restval) for key in self.fieldnames) def writerow(self, rowdict): return self.writer.writerow(self._dict_to_list(rowdict)) def writerows(self, rowdicts): return self.writer.writerows(map(self._dict_to_list, rowdicts)) try: complex except NameError: complex = float class Sniffer: def __init__(self): self.preferred = [',', '\t', ';', ' ', ':'] def sniff(self, sample, delimiters=None): quotechar, doublequote, delimiter, skipinitialspace = self._guess_quote_and_delimiter(sample, 
delimiters) if not delimiter: delimiter, skipinitialspace = self._guess_delimiter(sample, delimiters) if not delimiter: raise Error("Could not determine delimiter") class dialect(Dialect): _name = "sniffed" lineterminator = '\r\n' quoting = QUOTE_MINIMAL dialect.doublequote = doublequote dialect.delimiter = delimiter dialect.quotechar = quotechar or '"' dialect.skipinitialspace = skipinitialspace return dialect def _guess_quote_and_delimiter(self, data, delimiters): matches = [] for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): regexp = re.compile(restr, re.DOTALL | re.MULTILINE) matches = regexp.findall(data) if matches: break if not matches: return ('', False, None, 0) quotes = {} delims = {} spaces = 0 groupindex = regexp.groupindex for m in matches: n = groupindex['quote'] - 1 key = m[n] if key: quotes[key] = quotes.get(key, 0) + 1 try: n = groupindex['delim'] - 1 key = m[n] except KeyError: continue if key and (delimiters is None or key in delimiters): delims[key] = delims.get(key, 0) + 1 try: n = groupindex['space'] - 1 except KeyError: continue if m[n]: spaces += 1 quotechar = max(quotes, key=quotes.get) if delims: delim = max(delims, key=delims.get) skipinitialspace = delims[delim] == spaces if delim == '\n': delim = '' else: delim = '' skipinitialspace = 0 dq_regexp = re.compile( r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE) if dq_regexp.search(data): doublequote = True else: doublequote = False return (quotechar, doublequote, delim, skipinitialspace)
BSD 3-Clause New or Revised License
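The delimiter heuristic above is normally reached through the public sniff() API; this is the standard-library usage pattern.

import csv
import io

sample = "name;age;city\nalice;30;paris\nbob;25;lyon\n"
dialect = csv.Sniffer().sniff(sample, delimiters=";,\t")
print(dialect.delimiter)                      # ';'

rows = list(csv.reader(io.StringIO(sample), dialect))
print(rows[0])                                # ['name', 'age', 'city']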
tenable/pytenable
tenable/io/session.py
SessionAPI.change_password
python
def change_password(self, old_password, new_password): self._api.put('session/chpasswd', json={ 'password': self._check('new_password', new_password, str), 'current_password': self._check('old_password', old_password, str) })
Change the password of the current user. :devportal:`session: password <session-password>` Args: old_password (str): The current password. new_password (str): The new password. Returns: :obj:`None`: The password has been successfully changed. Examples: >>> tio.session.change_password('old_pass', 'new_pass')
https://github.com/tenable/pytenable/blob/32b925f0cebd4d3032f85e65571dd9593778b9f1/tenable/io/session.py#L68-L88
from .base import TIOEndpoint from tenable.errors import ImpersonationError, UnknownError class SessionAPI(TIOEndpoint): def edit(self, name, email): return self._api.put('session', json={ 'name': self._check('name', name, str), 'email': self._check('email', email, str) }).json() def details(self): return self._api.get('session').json()
MIT License
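A hedged example; the keys are placeholders, and the call changes the password of the currently authenticated user.

from tenable.io import TenableIO

tio = TenableIO('ACCESS_KEY', 'SECRET_KEY')
tio.session.change_password('old_pass', 'new_pass')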
deepsphere/deepsphere-weather
modules/utils_config.py
write_config_file
python
def write_config_file(cfg, fpath): with open(fpath, "w") as output_file: json.dump(cfg, output_file, indent=4)
Write the Python dictionary configuration to a JSON config file.
https://github.com/deepsphere/deepsphere-weather/blob/a9c75de9c9852a2832883cd998efd16d6542b083/modules/utils_config.py#L131-L134
import os import sys import json import torch import pickle import shutil import inspect import types import numpy as np import deepdiff from modules.utils_torch import set_pytorch_deterministic from modules.utils_torch import set_pytorch_numeric_precision def get_default_model_settings(): model_settings = {"pretrained_model_name": None, "model_name_prefix": None, "model_name": None, "model_name_suffix": None, "kernel_size_conv": 3, "bias": True, "batch_norm": False, "batch_norm_before_activation": False, "activation": True, "activation_fun": 'relu', "pool_method": "Max", "kernel_size_pooling": 4, "conv_type": 'graph', "graph_type": "knn", "knn": 20, "periodic_padding": 'True', } return model_settings def get_default_training_settings(): training_settings = {"epochs": 15, "ar_training_strategy": "RNN", "learning_rate": 0.001, "training_batch_size": 16, "validation_batch_size": 16, "scoring_interval": 20, "save_model_each_epoch": False, "numeric_precision": "float32", "deterministic_training": False, "seed_model_weights": 100, "seed_random_shuffling": 120, "benchmark_cudnn": True, "gpu_training": True, "gpu_devices_ids": [0], "dataparallel_training": False, } return training_settings def get_default_ar_settings(): ar_settings = {"input_k": [-3,-2,-1], "output_k": [0], "forecast_cycle": 1, "ar_iterations": 6, "stack_most_recent_prediction": True, } return ar_settings def get_default_dataloader_settings(): dataloader_settings = {"random_shuffling": True, "drop_last_batch": True, "prefetch_in_gpu": False, "prefetch_factor": 2, "pin_memory": False, "asyncronous_gpu_transfer": True, "num_workers": 8, "autotune_num_workers": False, } return dataloader_settings def get_default_SWAG_settings(): dataloader_settings = {"SWAG": False, "target_learning_rate": 0.007, "no_cov_mat": False, "max_num_models": 40, "swag_freq": 10, "swa_start": 0, "sampling_scale": 0.1, "nb_samples": 10 } return dataloader_settings def get_default_settings(): ar_settings = get_default_ar_settings() training_settings = get_default_training_settings() model_settings = get_default_model_settings() dataloader_settings = get_default_dataloader_settings() default_settings = {"model_settings": model_settings, "dataloader_settings": dataloader_settings, "training_settings": training_settings, "ar_settings": ar_settings, } return default_settings def read_config_file(fpath): with open(fpath) as input_file: cfg = json.load(input_file) return cfg
MIT License
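A round-trip sketch pairing write_config_file with the reader and default-settings helpers shown in the same module; the output path is a placeholder.

cfg = get_default_settings()
cfg['training_settings']['epochs'] = 30          # tweak before saving
write_config_file(cfg, '/tmp/dsw_config.json')

reloaded = read_config_file('/tmp/dsw_config.json')
assert reloaded['training_settings']['epochs'] == 30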
cineuse/cncgtoolkit
apps/pw_multiScriptEditor/managers/nuke/callbacks.py
addAfterBackgroundFrameRender
python
def addAfterBackgroundFrameRender(call, args=(), kwargs={}):
Add code to execute after each frame of a background render. The call must be in the form of: def foo(context): pass The context object that will be passed in is a dictionary containing the following elements: id => The identifier for the task that's making progress frame => the current frame number being rendered numFrames => the total number of frames that are being rendered frameProgress => the number of frames rendered so far. Please be aware that the current Nuke context will not make sense in the callback (e.g. nuke.thisNode will return a random node).
https://github.com/cineuse/cncgtoolkit/blob/7a21f358e34aa276cf209a6d5887a7964190cf0a/apps/pw_multiScriptEditor/managers/nuke/callbacks.py#L170-L183
onUserCreates={} onCreates={} onScriptLoads={} onScriptSaves={} onScriptCloses={} onDestroys={} knobChangeds={} updateUIs={} autolabels={} beforeRenders={} beforeFrameRenders={} afterFrameRenders={} afterRenders={} renderProgresses={} beforeBackgroundRenders=[] afterBackgroundFrameRenders=[] afterBackgroundRenders=[] filenameFilters={} validateFilenames={} autoSaveFilters={} autoSaveRestoreFilters={} autoSaveDeleteFilters={} def _addCallback(dict, call, args, kwargs, nodeClass, node=None): pass def _removeCallback(dict, call, args, kwargs, nodeClass, node=None): pass def _doCallbacks(dict, node=None): pass def addOnUserCreate(call, args=(), kwargs={}, nodeClass='*'): pass def removeOnUserCreate(call, args=(), kwargs={}, nodeClass='*'): pass def onUserCreate(): pass def addOnCreate(call, args=(), kwargs={}, nodeClass='*'): pass def removeOnCreate(call, args=(), kwargs={}, nodeClass='*'): pass def onCreate(): pass def addOnScriptLoad(call, args=(), kwargs={}, nodeClass='Root'): pass def removeOnScriptLoad(call, args=(), kwargs={}, nodeClass='Root'): pass def onScriptLoad(): pass def addOnScriptSave(call, args=(), kwargs={}, nodeClass='Root'): pass def removeOnScriptSave(call, args=(), kwargs={}, nodeClass='Root'): pass def onScriptSave(): pass def addOnScriptClose(call, args=(), kwargs={}, nodeClass='Root'): pass def removeOnScriptClose(call, args=(), kwargs={}, nodeClass='Root'): pass def onScriptClose(): pass def addOnDestroy(call, args=(), kwargs={}, nodeClass='*'): pass def removeOnDestroy(call, args=(), kwargs={}, nodeClass='*'): pass def onDestroy(): pass def addKnobChanged(call, args=(), kwargs={}, nodeClass='*', node=None): pass def removeKnobChanged(call, args=(), kwargs={}, nodeClass='*', node=None): pass def knobChanged(): pass def addUpdateUI(call, args=(), kwargs={}, nodeClass='*'): def removeUpdateUI(call, args=(), kwargs={}, nodeClass='*'): def updateUI(): pass def addAutolabel(call, args=(), kwargs={}, nodeClass='*'): def removeAutolabel(call, args=(), kwargs={}, nodeClass='*'): def autolabel(): pass def addBeforeRender(call, args=(), kwargs={}, nodeClass='Write'): def removeBeforeRender(call, args=(), kwargs={}, nodeClass='Write'): def beforeRender(): pass def addBeforeFrameRender(call, args=(), kwargs={}, nodeClass='Write'): def removeBeforeFrameRender(call, args=(), kwargs={}, nodeClass='Write'): def beforeFrameRender(): pass def addAfterFrameRender(call, args=(), kwargs={}, nodeClass='Write'): def removeAfterFrameRender(call, args=(), kwargs={}, nodeClass='Write'): def afterFrameRender(): pass def addAfterRender(call, args=(), kwargs={}, nodeClass='Write'): def removeAfterRender(call, args=(), kwargs={}, nodeClass='Write'): def afterRender(): pass def addRenderProgress(call, args=(), kwargs={}, nodeClass='Write'): def removeRenderProgress(call, args=(), kwargs={}, nodeClass='Write'): def renderProgress(): pass def _addBackgroundCallback(list, call, args, kwargs): pass def _removeBackgroundCallback(list, call, args, kwargs): pass def _doBackgroundCallbacks(list, context): pass def addBeforeBackgroundRender(call, args=(), kwargs={}): def removeBeforeBackgroundRender(call, args=(), kwargs={}): def beforeBackgroundRender(context): pass
MIT License
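A callback of the shape the docstring describes; inside Nuke the registration would go through the real nuke.callbacks module rather than this editor stub, whose functions are no-ops.

def report_progress(context):
    print('task %s: frame %s (%d/%d frames rendered)' % (
        context['id'], context['frame'],
        context['frameProgress'], context['numFrames']))

addAfterBackgroundFrameRender(report_progress)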
osmr/imgclsmob
pytorch/datasets/librispeech_asr_dataset.py
LibriSpeechMetaInfo.add_dataset_parser_arguments
python
def add_dataset_parser_arguments(self, parser, work_dir_path): super(LibriSpeechMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path) parser.add_argument( "--subset", type=str, default="dev-clean", help="data subset")
Create Python script parameters (for dataset-specific metainfo). Parameters: ---------- parser : ArgumentParser ArgumentParser instance. work_dir_path : str Path to working directory.
https://github.com/osmr/imgclsmob/blob/ea5f784eea865ce830f3f97c5c1d1f6491d9cbb2/pytorch/datasets/librispeech_asr_dataset.py#L91-L109
__all__ = ['LibriSpeech', 'LibriSpeechMetaInfo'] import os import numpy as np from .dataset_metainfo import DatasetMetaInfo from .asr_dataset import AsrDataset, asr_test_transform class LibriSpeech(AsrDataset): def __init__(self, root=os.path.join("~", ".torch", "datasets", "LibriSpeech"), mode="test", subset="dev-clean", transform=None): super(LibriSpeech, self).__init__( root=root, mode=mode, transform=transform) self.vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)} import soundfile root_dir_path = os.path.expanduser(root) assert os.path.exists(root_dir_path) data_dir_path = os.path.join(root_dir_path, subset) assert os.path.exists(data_dir_path) for speaker_id in os.listdir(data_dir_path): speaker_dir_path = os.path.join(data_dir_path, speaker_id) for chapter_id in os.listdir(speaker_dir_path): chapter_dir_path = os.path.join(speaker_dir_path, chapter_id) transcript_file_path = os.path.join(chapter_dir_path, "{}-{}.trans.txt".format(speaker_id, chapter_id)) with open(transcript_file_path, "r") as f: transcripts = dict(x.split(" ", maxsplit=1) for x in f.readlines()) for flac_file_name in os.listdir(chapter_dir_path): if flac_file_name.endswith(".flac"): wav_file_name = flac_file_name.replace(".flac", ".wav") wav_file_path = os.path.join(chapter_dir_path, wav_file_name) if not os.path.exists(wav_file_path): flac_file_path = os.path.join(chapter_dir_path, flac_file_name) pcm, sample_rate = soundfile.read(flac_file_path) soundfile.write(wav_file_path, pcm, sample_rate) text = transcripts[wav_file_name.replace(".wav", "")] text = text.strip("\n ").lower() text = np.array([vocabulary_dict[c] for c in text], dtype=np.long) self.data.append((wav_file_path, text)) class LibriSpeechMetaInfo(DatasetMetaInfo): def __init__(self): super(LibriSpeechMetaInfo, self).__init__() self.label = "LibriSpeech" self.short_label = "ls" self.root_dir_name = "LibriSpeech" self.dataset_class = LibriSpeech self.dataset_class_extra_kwargs = {"subset": "dev-clean"} self.ml_type = "asr" self.num_classes = 29 self.val_metric_extra_kwargs = [{"vocabulary": None}] self.val_metric_capts = ["Val.WER"] self.val_metric_names = ["WER"] self.test_metric_extra_kwargs = [{"vocabulary": None}] self.test_metric_capts = ["Test.WER"] self.test_metric_names = ["WER"] self.val_transform = asr_test_transform self.test_transform = asr_test_transform self.test_net_extra_kwargs = {"from_audio": True} self.saver_acc_ind = 0
MIT License
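As a quick, self-contained illustration of what the method in this record contributes to a command line, the sketch below builds a bare argparse parser and adds only the --subset option shown here; the arguments added by the DatasetMetaInfo base class are deliberately omitted.

import argparse

parser = argparse.ArgumentParser()
# The single option added by add_dataset_parser_arguments in this record.
parser.add_argument("--subset", type=str, default="dev-clean", help="data subset")

print(parser.parse_args(["--subset", "test-clean"]).subset)  # -> test-clean
print(parser.parse_args([]).subset)                          # -> dev-clean (default)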
openhumans/open-humans
data_import/utils.py
get_upload_dir
python
def get_upload_dir(instance): return "member-files/{0}/{1}/".format(get_source(instance), str(uuid1()))
Construct a unique S3 key for a source.
https://github.com/openhumans/open-humans/blob/5209d0096d9811e679890f7bf4e2440c2a026b0d/data_import/utils.py#L24-L28
from uuid import uuid1 def get_upload_path(instance, filename): return "{0}{1}".format(get_upload_dir(instance=instance), filename) def get_source(instance): if hasattr(instance, "source"): return instance.source if hasattr(instance, "_meta"): return instance._meta.app_label return instance
MIT License
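The helpers in this record compose an S3 key from a source label plus a fresh UUID. A minimal usage sketch, with the functions copied from the record and a hypothetical stand-in object in place of the project's Django model instances:

from uuid import uuid1

def get_source(instance):
    # Fallback chain from the record: explicit source, then app label, then the value itself.
    if hasattr(instance, "source"):
        return instance.source
    if hasattr(instance, "_meta"):
        return instance._meta.app_label
    return instance

def get_upload_dir(instance):
    return "member-files/{0}/{1}/".format(get_source(instance), str(uuid1()))

def get_upload_path(instance, filename):
    return "{0}{1}".format(get_upload_dir(instance=instance), filename)

class FakeDataFile(object):
    # Hypothetical stand-in; the real project passes Django model instances.
    source = "fitbit"

print(get_upload_path(FakeDataFile(), "data.json"))
# e.g. member-files/fitbit/<uuid>/data.json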
googlecloudplatform/gsutil
gslib/utils/stet_util.py
_get_stet_binary_from_path
python
def _get_stet_binary_from_path(): for path_directory in os.getenv('PATH').split(os.path.pathsep): binary_path = os.path.join(path_directory, 'stet') if os.path.exists(binary_path): return binary_path
Retrieves the STET binary from the PATH environment variable, if available. Python 2 compatible.
https://github.com/googlecloudplatform/gsutil/blob/b1361dd5e9c2a246b328e871603f3a2b0d5fd5fa/gslib/utils/stet_util.py#L38-L43
from __future__ import absolute_import from __future__ import print_function from __future__ import division from __future__ import unicode_literals import os import shutil from gslib import storage_url from gslib.utils import execution_util from gslib.utils import temporary_file_util from boto import config class StetSubcommandName(object): ENCRYPT = 'encrypt' DECRYPT = 'decrypt'
Apache License 2.0
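The PATH-scanning loop in this record is a generally useful pattern. A hedged, generalized sketch (the name find_on_path is mine, not gsutil's):

import os

def find_on_path(executable_name):
    # Walk each PATH entry and return the first existing match, as
    # _get_stet_binary_from_path does above for the 'stet' binary.
    for path_directory in os.getenv('PATH', '').split(os.path.pathsep):
        candidate = os.path.join(path_directory, executable_name)
        if os.path.exists(candidate):
            return candidate
    return None

print(find_on_path('python'))  # a full path, or None if not found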
yougov/mongo-connector
mongo_connector/namespace_config.py
NamespaceConfig.unmap_namespace
python
def unmap_namespace(self, plain_target_ns): if not self._regex_map and not self._plain: return plain_target_ns src_name_set = self._reverse_plain.get(plain_target_ns) if src_name_set: for src_name in src_name_set: return src_name for _, namespace in self._regex_map: original_name = match_replace_regex( namespace_to_regex(namespace.dest_name), plain_target_ns, namespace.source_name, ) if original_name: return original_name return None
Given a plain target namespace, return the corresponding source namespace.
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L261-L283
import logging import re from collections import namedtuple, MutableSet from itertools import combinations from mongo_connector import errors LOG = logging.getLogger(__name__) _Namespace = namedtuple( "Namespace", ["dest_name", "source_name", "gridfs", "include_fields", "exclude_fields"], ) class Namespace(_Namespace): def __new__( cls, dest_name=None, source_name=None, gridfs=False, include_fields=None, exclude_fields=None, ): include_fields = set(include_fields or []) exclude_fields = set(exclude_fields or []) return super(Namespace, cls).__new__( cls, dest_name, source_name, gridfs, include_fields, exclude_fields ) def with_options(self, **kwargs): new_options = dict( dest_name=self.dest_name, source_name=self.source_name, gridfs=self.gridfs, include_fields=self.include_fields, exclude_fields=self.exclude_fields, ) new_options.update(kwargs) return Namespace(**new_options) class RegexSet(MutableSet): def __init__(self, regexes, strings): self._regexes = set(regexes) self._plain = set(strings) self._not_found_cache = set() def __contains__(self, item): if item in self._not_found_cache: return False if item in self._plain: return True if item in self._regexes: return True for regex in self._regexes: if regex.match(item): self._plain.add(item) return True self._not_found_cache.add(item) return False def __iter__(self): for regex in self._regexes: yield regex for string in self._plain: yield string def __len__(self): return len(self._regexes) + len(self._plain) def add(self, string): self._plain.add(string) self._not_found_cache.discard(string) def discard(self, string): self._plain.discard(string) @staticmethod def from_namespaces(namespaces): regexes = set() strings = set() for ns in namespaces: if "*" in ns: regexes.add(namespace_to_regex(ns)) else: strings.add(ns) return RegexSet(regexes, strings) class NamespaceConfig(object): def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): self._plain = {} self._reverse_plain = {} self._plain_db = {} self._regex_map = [] self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): self._add_namespace(namespace) cmd_name = namespace.source_name.split(".", 1)[0] + ".$cmd" dest_cmd_name = namespace.dest_name.split(".", 1)[0] + ".$cmd" self._add_namespace(Namespace(dest_name=dest_cmd_name, source_name=cmd_name)) def _add_namespace(self, namespace): src_name = namespace.source_name if "*" in src_name: self._regex_map.append((namespace_to_regex(src_name), namespace)) else: self._add_plain_namespace(namespace) def _add_plain_namespace(self, namespace): src_name = namespace.source_name target_name = namespace.dest_name src_names = self._reverse_plain.setdefault(target_name, set()) src_names.add(src_name) if len(src_names) > 1: existing_src = (src_names - set([src_name])).pop() raise errors.InvalidConfiguration( "Multiple namespaces cannot be combined into one target " "namespace. 
Trying to map '%s' to '%s' but there already " "exists a mapping from '%s' to '%s'" % (src_name, target_name, existing_src, target_name) ) self._plain[src_name] = namespace src_db, _ = src_name.split(".", 1) target_db, _ = target_name.split(".", 1) self._plain_db.setdefault(src_db, set()).add(target_db) def lookup(self, plain_src_ns): if plain_src_ns in self._ex_namespace_set: return None if not self._regex_map and not self._plain: return Namespace( dest_name=plain_src_ns, source_name=plain_src_ns, include_fields=self._include_fields, exclude_fields=self._exclude_fields, ) try: return self._plain[plain_src_ns] except KeyError: for regex, namespace in self._regex_map: new_name = match_replace_regex(regex, plain_src_ns, namespace.dest_name) if not new_name: continue new_namespace = namespace.with_options( dest_name=new_name, source_name=plain_src_ns ) self._add_plain_namespace(new_namespace) return new_namespace self._ex_namespace_set.add(plain_src_ns) return None def map_namespace(self, plain_src_ns): namespace = self.lookup(plain_src_ns) if namespace: return namespace.dest_name return None def gridfs_namespace(self, plain_src_ns): namespace = self.lookup(plain_src_ns) if namespace and namespace.gridfs: return namespace.dest_name return None
Apache License 2.0
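unmap_namespace relies on two helpers, namespace_to_regex and match_replace_regex, that live elsewhere in the project and are not shown in this record. The sketch below is a rough, self-contained approximation of the wildcard round-trip they enable; it is not the project's actual implementation and assumes a single '*' wildcard per namespace.

import re

def namespace_to_regex(namespace):
    # Approximation: treat '*' as a capturing wildcard inside the namespace.
    return re.compile('^' + re.escape(namespace).replace(r'\*', '(.*)') + '$')

def match_replace_regex(regex, target, replacement):
    # If the target matches, substitute the captured text back into the replacement.
    match = regex.match(target)
    if not match:
        return None
    return replacement.replace('*', match.group(1), 1)

# Reverse a renamed target namespace back to its source form.
dest_regex = namespace_to_regex('analytics.*_copy')
print(match_replace_regex(dest_regex, 'analytics.users_copy', 'app.*'))  # -> app.users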
kieranjol/ifiscripts
copyit.py
check_overwrite
python
def check_overwrite(file2check): if os.path.isfile(file2check): print('A manifest already exists at your destination. Overwrite? Y/N?') overwrite_destination_manifest = '' while overwrite_destination_manifest not in ('Y', 'y', 'N', 'n'): overwrite_destination_manifest = input() if overwrite_destination_manifest not in ('Y', 'y', 'N', 'n'): print('Incorrect input. Please enter Y or N') return overwrite_destination_manifest
Asks user if they want to overwrite pre-existing manifests.
https://github.com/kieranjol/ifiscripts/blob/4a94789d6884774d3a0cee5e6a5032e59b401727/copyit.py#L315-L326
import sys import subprocess import os import filecmp import tempfile import time import argparse import hashlib import shutil import unicodedata from builtins import input import ififuncs from ififuncs import make_desktop_logs_dir, make_desktop_manifest_dir, generate_log def hashlib_md5(filename): read_size = 0 last_percent_done = 0 md5_object = hashlib.md5() total_size = os.path.getsize(filename) with open(str(filename), 'rb') as file_object: while True: buf = file_object.read(2**20) if not buf: break read_size += len(buf) md5_object.update(buf) percent_done = 100 * read_size / total_size if percent_done > last_percent_done: sys.stdout.write('[%d%%]\r' % percent_done) sys.stdout.flush() last_percent_done = percent_done md5_output = md5_object.hexdigest() return md5_output + ' ' + os.path.abspath(filename) + '\n' def test_write_capabilities(directory, log_name_source): if os.path.isdir(directory): temp = tempfile.mkstemp(dir=directory, suffix='.tmp') os.close(temp[0]) os.remove(temp[1]) elif os.path.isfile(directory): print('\nFile transfer is not currently supported, only directories.\n') generate_log( log_name_source, 'Error: Attempted file transfer. Source and Destination must be a directory' ) generate_log(log_name_source, 'move.py exit') sys.exit() else: print((' %s is either not a directory or it does not exist' % directory)) generate_log( log_name_source, ' %s is either not a directory or it does not exist' % directory ) generate_log(log_name_source, 'move.py exit') sys.exit() def remove_bad_files(root_dir, log_name_source): rm_these = ['.DS_Store', 'Thumbs.db', 'desktop.ini', 'Desktop.ini'] for root, _, files in os.walk(root_dir): for name in files: path = os.path.join(root, name) for i in rm_these: if name == i: print(('***********************' + 'removing: ' + path)) if not log_name_source == None: generate_log( log_name_source, 'EVENT = Unwanted file removal - %s was removed' % path ) try: os.remove(path) except OSError: print('can\'t delete as source is read-only') def make_manifest( manifest_dir, manifest_textfile, path_to_remove ): checksum_list = [] manifest_generator = '' source_counter = 0 print('Counting the amount of files to be processed.') for root, directories, filenames in os.walk(manifest_dir): directories[:] = [ d for d in directories if d[0] != '.' ] directories[:] = [ d for d in directories if d != 'System Volume Information' ] directories[:] = [ d for d in directories if d != '$RECYCLE.BIN' ] directories[:] = [ d for d in directories if d != 'Seagate' ] filenames = [ f for f in filenames if os.path.basename(root) != 'System Volume Information' ] filenames = [ f for f in filenames if f[0] != '.' ] for files in filenames: source_counter += 1 counter2 = 1 if os.path.isdir(manifest_dir): os.chdir(manifest_dir) for root, directories, filenames in os.walk(manifest_dir): directories[:] = [ d for d in directories if d[0] != '.' ] directories[:] = [ d for d in directories if d != 'System Volume Information' ] directories[:] = [ d for d in directories if d != '$RECYCLE.BIN' ] directories[:] = [ d for d in directories if d != 'Seagate' ] filenames = [ f for f in filenames if os.path.basename(root) != 'System Volume Information' ] filenames = [ f for f in filenames if f[0] != '.' 
] for files in filenames: checksum_list.append([root, files]) elif os.path.isfile(manifest_dir): checksum_list = [[os.path.dirname(manifest_dir), os.path.basename(manifest_dir)]] if len(checksum_list) == 1: source_counter = 1 for files in checksum_list: print(('Generating MD5 for %s - %d of %d' % ( os.path.join(files[0], files[1]), counter2, source_counter) )) md5 = hashlib_md5(os.path.join(files[0], files[1])) root2 = files[0].replace(path_to_remove, '') try: if root2[0] == '/': root2 = root2[1:] if root2[0] == '\\': root2 = root2[1:] except: IndexError manifest_generator += md5[:32] + ' ' + os.path.join( root2, files[1] ).replace("\\", "/") + '\n' counter2 += 1 manifest_list = manifest_generator.splitlines() files_in_manifest = len(manifest_list) manifest_list = sorted(manifest_list, key=lambda x: (x[34:])) with open(manifest_textfile, "w", encoding='utf-8') as text: for i in manifest_list: text.write(i + '\n') return files_in_manifest def copy_dir( source, destination_final_path, log_name_source, rootpos, destination, dirname, args ): if sys.platform == "win32": if os.path.isfile(source): generate_log( log_name_source, 'EVENT = File Transfer, status=started, agentName=Windows, module=shutil.copy2' ) print('copying file with python/shutil') shutil.copy2(source, destination_final_path) else: subprocess.call([ 'robocopy', source, destination_final_path, '/E', '/XA:SH', '/XD', '.*', '/XD', '*System Volume Information*', '/XD', 'Seagate', '/XD', '$Recycle.bin', '/a-:SH', '/a+:R' ]) generate_log( log_name_source, 'EVENT = File Transfer, status=started, agentName=Windows O.S, agentName=Robocopy' ) elif sys.platform == "darwin": if args.l: cmd = [ 'gcp', '--preserve=mode,timestamps', '-nRv', source, destination_final_path ] generate_log( log_name_source, 'EVENT = File Transfer, status=started, agentName=OSX - agentName=gcp' ) subprocess.call(cmd) else: if rootpos == 'y': if not os.path.isdir(destination + '/' + dirname): os.makedirs(destination + '/' + dirname) cmd = [ 'rsync', '-rtv', '--exclude=.*', '--exclude=.*/', '--stats', '--progress', source, destination + '/' + dirname ] else: cmd = [ 'rsync', '-rtv', '--exclude=.*', '--exclude=.*/', '--stats', '--progress', source, destination ] generate_log( log_name_source, 'EVENT = File Transfer, status=started, agentName=OSX, agentName=rsync' ) print(cmd) subprocess.call(cmd) elif 'linux' in sys.platform: cmd = [ 'cp', '--preserve=mode,timestamps', '-nRv', source, destination_final_path ] generate_log( log_name_source, 'EVENT = File Transfer, status=started, agentName=Linux, agentName=cp' ) subprocess.call(cmd) generate_log( log_name_source, 'EVENT = File Transfer, status=completed' ) def diff_report(file1, file2, log_name_source): print('Comparing manifests to verify file transfer') try: with open(file1, 'r') as file1_manifest: sourcelist = file1_manifest.readlines() except UnicodeDecodeError: with open(file1, 'r', encoding='cp1252') as file1_manifest: sourcelist = file1_manifest.readlines() try: with open(file2, 'r') as file2_manifest: destlist = file2_manifest.readlines() except UnicodeDecodeError: with open(file2, 'r') as file2_manifest: destlist = file2_manifest.readlines() for i in sourcelist: if i not in destlist: print(('%s was expected, but a different value was found in destination manifest' % i.rstrip())) generate_log( log_name_source, 'ERROR = %s was expected, but a different value was found in destination manifest' % i.rstrip()) print(' - End of Diff report\n') def check_extra_files(file1, file2, log_name_source): try: with 
open(file1, 'r') as file1_manifest: sourcelist = file1_manifest.readlines() except UnicodeDecodeError: with open(file1, 'r', encoding='cp1251') as file1_manifest: sourcelist = file1_manifest.readlines() try: with open(file2, 'r') as file2_manifest: destlist = file2_manifest.readlines() except UnicodeDecodeError: with open(file2, 'r') as file2_manifest: destlist = file2_manifest.readlines() destlist_files = [] sourcelist_files = [] for dest_files in destlist: destlist_files.append(dest_files[32:]) for source_files in sourcelist: sourcelist_files.append(source_files[32:]) for i in destlist_files: if i not in sourcelist_files: print(('%s is in your destination manifest but is not in the source manifest' % i.rstrip())) generate_log( log_name_source, 'ERROR = %s is in your destination manifest but is not in the source manifest' % i.rstrip()) print(' - End of extra file report - if source and destination manifests appear visually identical, perhaps one manifest is utf-8 and the other is cp1252')
MIT License
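The context above includes hashlib_md5, which hashes large files in 1 MiB chunks while printing progress. A pared-down sketch of the same chunked-hashing pattern, minus the progress readout:

import hashlib

def md5_of_file(path, chunk_size=2 ** 20):
    # Read in fixed-size chunks so memory use stays flat for very large files.
    digest = hashlib.md5()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# Usage: md5_of_file('/path/to/large_video.mov')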
chaffelson/whoville
whoville/cloudbreak/models/stack_repo_details_json.py
StackRepoDetailsJson.stack
python
def stack(self): return self._stack
Gets the stack of this StackRepoDetailsJson. :return: The stack of this StackRepoDetailsJson. :rtype: dict(str, str)
https://github.com/chaffelson/whoville/blob/f71fda629c9fd50d0a482120165ea5abcc754522/whoville/cloudbreak/models/stack_repo_details_json.py#L57-L64
from pprint import pformat from six import iteritems import re class StackRepoDetailsJson(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'stack': 'dict(str, str)', 'util': 'dict(str, str)' } attribute_map = { 'stack': 'stack', 'util': 'util' } def __init__(self, stack=None, util=None): self._stack = None self._util = None if stack is not None: self.stack = stack if util is not None: self.util = util @property
Apache License 2.0
driftasimov/animec
animec/waifu.py
Waifu.neko
python
def neko(cls) -> str: return cls.__image__(cls.base + "neko")
Returns a waifu image URL from the neko category.
https://github.com/driftasimov/animec/blob/1f1940e69e13e68e6e4891f786694ee2dfd8994d/animec/waifu.py#L70-L73
import json from .errors import NoResultFound from urllib.request import urlopen, Request from urllib.error import HTTPError from random import choice __all__ = ['Waifu'] class Waifu: base = "https://waifu.pics/api/sfw/" @classmethod def __image__(cls, url: str) -> str: headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'} req = Request(url = url, headers = headers) try: page = urlopen(req) except HTTPError as e: raise NoResultFound(f"HTTP Error: {e.code}") res = json.loads(page.read()) return res["url"] @classmethod def random(cls) -> str: select = choice( ['waifu', 'shinobu', 'megumin', 'neko'] ) return cls.__image__(cls.base + select) @classmethod def random_gif(cls) -> str: select = choice( ['bully', 'cuddle', 'cry', 'hug', 'awoo', 'kiss', 'lick', 'pat', 'smug', 'bonk', 'yeet', 'blush', 'smile', 'wave', 'highfive', 'handhold', 'nom', 'bite', 'glomp', 'slap', 'kill', 'kick', 'happy', 'wink', 'poke', 'dance', 'cringe'] ) return cls.__image__(cls.base + select) @classmethod def waifu(cls) -> str: return cls.__image__(cls.base + "waifu") @classmethod def shinobu(cls) -> str: return cls.__image__(cls.base + "shinobu") @classmethod def megumin(cls) -> str: return cls.__image__(cls.base + "megumin") @classmethod
MIT License
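A hedged usage sketch for the class above. It assumes the animec package from this record is installed and that network access to waifu.pics is available; the import path simply follows the file location animec/waifu.py.

from animec.waifu import Waifu

# Each classmethod returns a URL string pointing at a hosted image or gif.
print(Waifu.neko())        # image URL from the 'neko' category
print(Waifu.random_gif())  # gif URL from a random reaction category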
xflr6/graphviz
graphviz/tools.py
attach
python
def attach(object: typing.Any, name: str) -> typing.Callable: def decorator(func): setattr(object, name, func) return func return decorator
Return a decorator doing ``setattr(object, name)`` with its argument. >>> spam = type('Spam', (object,), {})() >>> @attach(spam, 'eggs') ... def func(): ... pass >>> spam.eggs # doctest: +ELLIPSIS <function func at 0x...>
https://github.com/xflr6/graphviz/blob/b737195649e0d73df33f68c4709b32406868015c/graphviz/tools.py#L11-L26
import os import typing __all__ = ['attach', 'mkdirs', 'mapping_items']
MIT License
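Beyond the doctest already in the docstring, attach() is just a decorator factory around setattr. A self-contained sketch (the decorator is copied from the record; the registry object is a made-up example):

import types

def attach(obj, name):
    # Same behaviour as the record: return a decorator doing setattr(obj, name, func).
    def decorator(func):
        setattr(obj, name, func)
        return func
    return decorator

registry = types.SimpleNamespace()

@attach(registry, 'greet')
def _greet(who):
    return 'hello ' + who

print(registry.greet('graphviz'))  # -> hello graphviz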
pyccel/pyccel
pyccel/stdlib/internal/openmp.py
omp_get_team_size
python
def omp_get_team_size(level : int): return 1
The omp_get_team_size routine returns, for a given nested level of the current thread, the size of the thread team to which the ancestor or the current thread belongs. Parameters ---------- level : int
https://github.com/pyccel/pyccel/blob/2a5bd75c33d270cdd675ad46b8ce718113b70498/pyccel/stdlib/internal/openmp.py#L175-L185
def omp_set_num_threads(num_threads : int): def omp_get_num_threads(): return 1 def omp_get_max_threads(): return 1 def omp_get_thread_num(): return 0 def omp_get_num_procs(): return 1 def omp_in_parallel(): return False def omp_set_dynamic(dynamic_threads : bool): def omp_get_dynamic(): return False def omp_get_cancellation(): return False def omp_set_nested(nested : bool): def omp_get_nested(): return False def omp_set_schedule(kind : int, chunk_size : int): def omp_get_schedule(): return 1,0 def omp_get_thread_limit(): return 1 def omp_set_max_active_levels(max_levels : int): def omp_get_max_active_levels(): return 1 def omp_get_level(): return 0 def omp_get_ancestor_thread_num(level : int): return -1
MIT License
thomas55555/husqvarna_automower
custom_components/husqvarna_automower/vacuum.py
HusqvarnaAutomowerEntity.name
python
def name(self): return self.mower_name
Return the name of the mower.
https://github.com/thomas55555/husqvarna_automower/blob/a160f29ea7350ea20b1f9f13ddea159cfc0be120/custom_components/husqvarna_automower/vacuum.py#L116-L118
import json import logging import time import voluptuous as vol from homeassistant.components.vacuum import ( ATTR_STATUS, STATE_CLEANING, STATE_DOCKED, STATE_ERROR, STATE_IDLE, STATE_PAUSED, STATE_RETURNING, SUPPORT_BATTERY, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_SEND_COMMAND, SUPPORT_START, SUPPORT_STATE, SUPPORT_STOP, StateVacuumEntity, ) from homeassistant.helpers import config_validation as cv from homeassistant.helpers import entity_platform from homeassistant.helpers.update_coordinator import UpdateFailed from .const import DOMAIN, ERRORCODES, ICON SUPPORT_STATE_SERVICES = ( SUPPORT_STATE | SUPPORT_PAUSE | SUPPORT_STOP | SUPPORT_RETURN_HOME | SUPPORT_BATTERY | SUPPORT_START ) _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, entry, async_add_devices): session = hass.data[DOMAIN][entry.entry_id] async_add_devices( HusqvarnaAutomowerEntity(session, idx) for idx, ent in enumerate(session.data["data"]) ) platform = entity_platform.current_platform.get() platform.async_register_entity_service( "park_and_start", { vol.Required("command"): cv.string, vol.Required("duration"): vol.Coerce(int), }, "async_custom_command", ) class HusqvarnaAutomowerEntity(StateVacuumEntity): def __init__(self, session, idx): self.session = session self.idx = idx mower = self.session.data["data"][self.idx] mower_attributes = self.__get_mower_attributes() self.mower_id = mower["id"] self.mower_name = mower_attributes["system"]["name"] self.model = mower_attributes["system"]["model"] self._available = None self.session.register_cb( lambda _: self.async_write_ha_state(), schedule_immediately=True ) def __get_mower_attributes(self): return self.session.data["data"][self.idx]["attributes"] @property def device_info(self): return { "identifiers": {(DOMAIN, self.mower_id)}, "name": self.mower_name, "manufacturer": "Husqvarna", "model": self.model, } @property def available(self): available = False try: available = ( self.__get_mower_attributes()["metadata"]["connected"] and self.session.data["data"][self.idx]["id"] == self.mower_id ) except (IndexError, KeyError): pass if self._available != available: if self._available is not None: if available: _LOGGER.info("Connected to %s again", self.mower_name) else: _LOGGER.warning("Connection to %s lost", self.mower_name) self._available = available return available @property
MIT License
bradmontgomery/django-staticflatpages
staticflatpages/util.py
_format_as_url
python
def _format_as_url(path): path = sub(r"\.html$", '', path) if not path.startswith("/"): path = "/{0}".format(path) if not path.endswith("/"): path = "{0}/".format(path) return path
Make sure ``path`` takes the form of ``/some/url/``.
https://github.com/bradmontgomery/django-staticflatpages/blob/76dbed40fa1af0434bf5d8012cb86b8139bb3256/staticflatpages/util.py#L24-L34
from os import walk from os.path import join from re import sub from django.conf import settings def get_template_directories(): templates = set() for t in settings.TEMPLATES: templates = templates.union(set(t.get('DIRS', []))) return templates
MIT License
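A few worked calls of the normalisation helper above; the function is copied from the record so the snippet runs on its own.

from re import sub

def _format_as_url(path):
    # Strip a trailing .html, then force a leading and a trailing slash.
    path = sub(r"\.html$", '', path)
    if not path.startswith("/"):
        path = "/{0}".format(path)
    if not path.endswith("/"):
        path = "{0}/".format(path)
    return path

print(_format_as_url("about/team.html"))  # -> /about/team/
print(_format_as_url("/contact"))         # -> /contact/
print(_format_as_url("index.html"))       # -> /index/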
machine-learning-exchange/mlx
api/client/swagger_client/api/dataset_service_api.py
DatasetServiceApi.download_dataset_files_with_http_info
python
def download_dataset_files_with_http_info(self, id, **kwargs): all_params = ['id', 'include_generated_code'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method download_dataset_files" % key ) params[key] = val del params['kwargs'] if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `download_dataset_files`") collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = [] if 'include_generated_code' in params: query_params.append(('include_generated_code', params['include_generated_code'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/gzip']) auth_settings = [] return self.api_client.call_api( '/datasets/{id}/download', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='file', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', False), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
Returns the dataset artifacts compressed into a .tgz (.tar.gz) file. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.download_dataset_files_with_http_info(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param bool include_generated_code: Include generated run script in download :return: urllib3.response.HTTPResponse (assuming _preload_content=False) If the method is called asynchronously, returns the request thread.
https://github.com/machine-learning-exchange/mlx/blob/be1503c45538dac1a8188560fbec4a07b2a367bf/api/client/swagger_client/api/dataset_service_api.py#L328-L401
from __future__ import absolute_import import re import six from swagger_client.api_client import ApiClient class DatasetServiceApi(object): def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def approve_datasets_for_publishing(self, dataset_ids, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.approve_datasets_for_publishing_with_http_info(dataset_ids, **kwargs) else: (data) = self.approve_datasets_for_publishing_with_http_info(dataset_ids, **kwargs) return data def approve_datasets_for_publishing_with_http_info(self, dataset_ids, **kwargs): all_params = ['dataset_ids'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method approve_datasets_for_publishing" % key ) params[key] = val del params['kwargs'] if ('dataset_ids' not in params or params['dataset_ids'] is None): raise ValueError("Missing the required parameter `dataset_ids` when calling `approve_datasets_for_publishing`") collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'dataset_ids' in params: body_params = params['dataset_ids'] auth_settings = [] return self.api_client.call_api( '/datasets/publish_approved', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_dataset(self, body, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_dataset_with_http_info(body, **kwargs) else: (data) = self.create_dataset_with_http_info(body, **kwargs) return data def create_dataset_with_http_info(self, body, **kwargs): all_params = ['body'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_dataset" % key ) params[key] = val del params['kwargs'] if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_dataset`") collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] auth_settings = [] return self.api_client.call_api( '/datasets', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ApiDataset', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_dataset(self, id, **kwargs): kwargs['_return_http_data_only'] = True if 
kwargs.get('async_req'): return self.delete_dataset_with_http_info(id, **kwargs) else: (data) = self.delete_dataset_with_http_info(id, **kwargs) return data def delete_dataset_with_http_info(self, id, **kwargs): all_params = ['id'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_dataset" % key ) params[key] = val del params['kwargs'] if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `delete_dataset`") collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None auth_settings = [] return self.api_client.call_api( '/datasets/{id}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def download_dataset_files(self, id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.download_dataset_files_with_http_info(id, **kwargs) else: (data) = self.download_dataset_files_with_http_info(id, **kwargs) return data
Apache License 2.0
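A hedged call sketch for the generated client above. It assumes the swagger_client package from this record is importable and that an MLX API server is reachable at the client's default or configured host; "my-dataset-id" is a placeholder, and none of these assumptions come from the record itself.

from swagger_client.api.dataset_service_api import DatasetServiceApi

api = DatasetServiceApi()  # builds a default ApiClient internally, per __init__ above

# Thin wrapper around download_dataset_files_with_http_info; fetches the .tgz payload.
result = api.download_dataset_files("my-dataset-id", include_generated_code=True)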
christophercrouzet/revl
revl.py
unparent
python
def unparent(context): oNode = pickTransform(context) if oNode is NULL_OBJ: return context.dag.reparentNode(oNode, NULL_OBJ)
Unparent a random transform node. Parameters ---------- context : revl.Context Command context.
https://github.com/christophercrouzet/revl/blob/ed6be7c5ffc454b94a2c984fba3adfc204d2d741/revl.py#L561-L573
__all__ = ['NULL_OBJ', 'Context', 'Command', 'Primitive', 'PrimitiveType', 'validate', 'run', 'pickTransform', 'createDagNode', 'createDgNode', 'createPrimitive', 'createTransform', 'unparent'] __title__ = 'revl' __version__ = '0.2.0' __summary__ = "Helps to benchmark code for Autodesk Maya" __url__ = 'https://github.com/christophercrouzet/revl' __author__ = "Christopher Crouzet" __contact__ = 'christopher.crouzet@gmail.com' __license__ = "MIT" import collections import numbers import random import sys from maya import OpenMaya if sys.version_info[0] == 2: _BUILTIN_MODULE = '__builtin__' def _iteritems(d, **kwargs): return d.iteritems(**kwargs) _range = xrange else: _BUILTIN_MODULE = 'builtins' def _iteritems(d, **kwargs): return iter(d.items(**kwargs)) _range = range _SEQUENCE_TYPES = (list, tuple) NULL_OBJ = OpenMaya.MObject().kNullObj class Context(object): def __init__(self, **kwargs): self.dg = OpenMaya.MDGModifier() self.dag = OpenMaya.MDagModifier() self.transforms = [] self.__dict__.update(kwargs) def __repr__(self): values = ', '.join(['%s=%r' % (k, v) for k, v in sorted(_iteritems(self.__dict__))]) return "%s(%s)" % (type(self).__name__, values) _Command = collections.namedtuple( 'Command', ( 'weight', 'function', 'args', 'kwargs', )) _Command.__new__.__defaults__ = (None, None) class Command(_Command): __slots__ = () _COMMAND_ATTR_COUNT = len(Command._fields) _COMMAND_REQUIRED_ARG_RANGE = _range( _COMMAND_ATTR_COUNT - len(Command.__new__.__defaults__), _COMMAND_ATTR_COUNT + 1) _Primitive = collections.namedtuple( 'Primitive', ( 'generator', 'transform', 'shapes', )) class Primitive(_Primitive): __slots__ = () class PrimitiveType(object): NURBS_CIRCLE = 0 NURBS_CONE = 1 NURBS_CUBE = 2 NURBS_CYLINDER = 3 NURBS_PLANE = 4 NURBS_SPHERE = 5 NURBS_SQUARE = 6 NURBS_TORUS = 7 POLY_CONE = 8 POLY_CUBE = 9 POLY_CYLINDER = 10 POLY_HELIX = 11 POLY_MISC = 12 POLY_PIPE = 13 POLY_PLANE = 14 POLY_PLATONIC_SOLID = 15 POLY_PRISM = 16 POLY_PYRAMID = 17 POLY_SPHERE = 18 POLY_TORUS = 19 _FIRST = NURBS_CIRCLE _LAST = POLY_TORUS _PrimitiveTraits = collections.namedtuple( '_PrimitiveTraits', ( 'type', 'shapeType', 'outPlugs', 'inPlug', )) def _defineCurveTraits(type, outPlugs=None): outPlugs = ['outputCurve'] if outPlugs is None else outPlugs return _PrimitiveTraits(type=type, shapeType='nurbsCurve', outPlugs=outPlugs, inPlug='create') def _defineMeshTraits(type): return _PrimitiveTraits(type=type, shapeType='mesh', outPlugs=['output'], inPlug='inMesh') def _defineSurfaceTraits(type, outPlugs=None): outPlugs = ['outputSurface'] if outPlugs is None else outPlugs return _PrimitiveTraits(type=type, shapeType='nurbsSurface', outPlugs=outPlugs, inPlug='create') _PRIMITIVE_TRAITS = { PrimitiveType.NURBS_CIRCLE: _defineCurveTraits('makeNurbCircle'), PrimitiveType.NURBS_CONE: _defineSurfaceTraits('makeNurbCone'), PrimitiveType.NURBS_CUBE: _defineSurfaceTraits( 'makeNurbCube', outPlugs=['outputSurface%s' % (_i if _i > 0 else '',) for _i in _range(6)]), PrimitiveType.NURBS_CYLINDER: _defineSurfaceTraits('makeNurbCylinder'), PrimitiveType.NURBS_PLANE: _defineSurfaceTraits('makeNurbPlane'), PrimitiveType.NURBS_SPHERE: _defineSurfaceTraits('makeNurbSphere'), PrimitiveType.NURBS_SQUARE: _defineCurveTraits( 'makeNurbsSquare', outPlugs=['outputCurve%s' % (_i,) for _i in _range(1, 5)]), PrimitiveType.NURBS_TORUS: _defineSurfaceTraits('makeNurbTorus'), PrimitiveType.POLY_CONE: _defineMeshTraits('polyCone'), PrimitiveType.POLY_CUBE: _defineMeshTraits('polyCube'), PrimitiveType.POLY_HELIX: _defineMeshTraits('polyHelix'), 
PrimitiveType.POLY_CYLINDER: _defineMeshTraits('polyCylinder'), PrimitiveType.POLY_MISC: _defineMeshTraits('polyPrimitiveMisc'), PrimitiveType.POLY_PIPE: _defineMeshTraits('polyPipe'), PrimitiveType.POLY_PLANE: _defineMeshTraits('polyPlane'), PrimitiveType.POLY_PLATONIC_SOLID: _defineMeshTraits('polyPlatonicSolid'), PrimitiveType.POLY_PRISM: _defineMeshTraits('polyPrism'), PrimitiveType.POLY_PYRAMID: _defineMeshTraits('polyPyramid'), PrimitiveType.POLY_SPHERE: _defineMeshTraits('polySphere'), PrimitiveType.POLY_TORUS: _defineMeshTraits('polyTorus'), } def validate(commands): if not isinstance(commands, _SEQUENCE_TYPES): raise TypeError( "The command set is expected to be an instance object of type %s, " "not '%s'." % (_joinTypes(_SEQUENCE_TYPES, "or "), _formatType(type(commands)),)) for command in commands: if not isinstance(command, _SEQUENCE_TYPES): raise TypeError( "Each command is expected to be an instance object of type " "%s, not '%s'." % (_joinTypes(_SEQUENCE_TYPES + (Command,), "or "), _formatType(type(command)),)) if len(command) not in _COMMAND_REQUIRED_ARG_RANGE: raise TypeError( "Each command is expected to be an instance object of type " "%s, and compatible with the '%s' structure, but got '%s' " "instead." % (_joinTypes(_SEQUENCE_TYPES + (Command,), "or "), _formatType(Command), command)) for command in commands: command = Command(*command) if not isinstance(command.weight, numbers.Real): raise TypeError( "The first element of a command, that is the 'weight' " "attribute, is expected to be a real number, not '%s'." % (_formatType(type(command.weight)))) if not callable(command.function): raise TypeError( "The second element of a command, that is the 'function' " "attribute, is expected to be a callable object, not '%s'." % (_formatType(type(command.function)))) if (command.args is not None and not isinstance(command.args, _SEQUENCE_TYPES)): raise TypeError( "The third element of a command, that is the 'args' " "attribute, is expected to be an instance object of type %s, " "not '%s'." % (_joinTypes(_SEQUENCE_TYPES + (type(None),), "or "), _formatType(type(command.args)))) if (command.kwargs is not None and not isinstance(command.kwargs, dict)): raise TypeError( "The fourth element of a command, that is the 'kwargs' " "attribute, is expected to be an instance object of type " "'dict', or 'NoneType', not '%s'." 
% (_formatType(type(command.kwargs)))) return True def run(commands, count, seed=None, context=None): random.seed(seed) commands = _consolidate(commands) if context is None: context = Context() commands = [c for c in commands if c.weight > 0] if commands: for command in _pick(commands, count): args = () if command.args is None else command.args kwargs = {} if command.kwargs is None else command.kwargs command.function(context, *args, **kwargs) context.dag.doIt() context.dg.doIt() return context def pickTransform(context): if not context.transforms: return NULL_OBJ return context.transforms[random.randint(0, len(context.transforms) - 1)] def createDagNode(context, type, parent=False): if parent: oParent = pickTransform(context) if oParent is NULL_OBJ: return NULL_OBJ else: oParent = context.dag.createNode('transform') context.transforms.append(oParent) return context.dag.createNode(type, oParent) def createDgNode(context, type): return context.dg.createNode(type) def createPrimitive(context, type=None, name=None, parent=False, forceTransformCreation=True): if type is None: type = random.randint(PrimitiveType._FIRST, PrimitiveType._LAST) oParent = pickTransform(context) if parent else NULL_OBJ if forceTransformCreation or oParent is NULL_OBJ: oTransform = context.dag.createNode('transform', oParent) context.transforms.append(oTransform) else: oTransform = oParent traits = _PRIMITIVE_TRAITS[type] oGenerator = context.dg.createNode(traits.type) generator = OpenMaya.MFnDependencyNode(oGenerator) shapes = [] for outPlug in traits.outPlugs: oShape = context.dag.createNode(traits.shapeType, oTransform) shape = OpenMaya.MFnDagNode(oShape) context.dg.connect(generator.findPlug(outPlug), shape.findPlug(traits.inPlug)) shapes.append(oShape) if name is not None: OpenMaya.MFnDagNode(oTransform).setName(name) return Primitive(generator=oGenerator, transform=oTransform, shapes=shapes) def createTransform(context, name=None, parent=False): oParent = pickTransform(context) if parent else NULL_OBJ oTransform = context.dag.createNode('transform', oParent) if name is not None: OpenMaya.MFnDagNode(oTransform).setName(name) context.transforms.append(oTransform) return oTransform
MIT License
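unparent is meant to be used as one entry in a revl command set. A hedged sketch of how such a set is assembled and run, following the Command/run structure visible in the context; it only works inside a Maya Python interpreter (for example mayapy), since revl drives maya.OpenMaya.

# Run inside mayapy or Maya's script editor only.
import revl

commands = [
    (4.0, revl.createTransform),   # weight 4: create transform nodes
    (2.0, revl.createPrimitive),   # weight 2: create random primitives
    (1.0, revl.unparent),          # weight 1: unparent a random transform
]

revl.validate(commands)            # optional sanity check of the command set
context = revl.run(commands, 100, seed=1.23)
print(len(context.transforms), "transforms created")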
melloddy/melloddy-tuner
melloddy_tuner/utils/df_transformer.py
DfTransformer.__init__
python
def __init__( self, calculator_object, input_columns, output_columns, output_types=None, success_column=None, meta=None, nproc=1, verbosity=0, ): self.calculator = calculator_object self.input_columns = input_columns self.output_columns = output_columns if output_types is not None: self.meta = {i: type for i, type in enumerate(output_types)} else: self.meta = None if success_column is not None: if success_column not in output_columns: raise ValueError( "success_column {0} is not in the output columns {1}".format( success_column, output_columns ) ) self.success_column = success_column self.nproc = nproc self.verbosity = verbosity
Constructor Args: calculator_object(): an instantiated calculator object input_columns(dict): dictionary mapping function keyword arguments to dataframe input columns output_columns(list or dict): list of column names to map the function output to output_types success_column(str): Name of a boolean column indicating successful computation, must be a member of output_columns nproc(int): number of processors to use verbosity(int): verbosity level Returns: DfTransformer object
https://github.com/melloddy/melloddy-tuner/blob/6dacc23f2269abe3ba1b4eb73141a1a52025adb3/melloddy_tuner/utils/df_transformer.py#L7-L47
import pandas as pd import dask.dataframe as daskdf import os class DfTransformer(object):
MIT License
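A hedged construction sketch for the class above. The calculator object is a made-up stand-in (the real project supplies its own calculator classes) and the column names are placeholders; only the constructor arguments shown in this record are exercised.

from melloddy_tuner.utils.df_transformer import DfTransformer

class DummyCalculator(object):
    # Hypothetical stand-in; __init__ only stores the object, so anything works here.
    pass

transformer = DfTransformer(
    calculator_object=DummyCalculator(),
    input_columns={"smiles": "smiles"},            # function keyword arg -> dataframe column
    output_columns=["canonical_smiles", "success"],
    output_types=["object", "bool"],
    success_column="success",                      # must be one of output_columns
    nproc=2,
    verbosity=0,
)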
common-workflow-language/workflow-service
wes_service/util.py
visit
python
def visit(d, op): op(d) if isinstance(d, list): for i in d: visit(i, op) elif isinstance(d, dict): for i in d.values(): visit(i, op)
Recursively call op on d itself and on every nested list element and dictionary value it contains.
https://github.com/common-workflow-language/workflow-service/blob/964dbd450846add74c01d2a34b3dbc8e5a265f94/wes_service/util.py#L10-L18
import tempfile import json import os import logging import connexion from werkzeug.utils import secure_filename
Apache License 2.0
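visit applies op to the node itself and then walks nested lists and dictionary values. A self-contained example, with the function copied from the record:

def visit(d, op):
    op(d)
    if isinstance(d, list):
        for i in d:
            visit(i, op)
    elif isinstance(d, dict):
        for i in d.values():
            visit(i, op)

found = []
# Collect every integer found anywhere inside a nested structure.
visit({"a": [1, {"b": 2}], "c": 3},
      lambda x: found.append(x) if isinstance(x, int) else None)
print(found)  # -> [1, 2, 3]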
mediawiki-utilities/python-mediawiki-utilities
mw/xml_dump/iteration/page.py
Page.__init__
python
def __init__(self, id, title, namespace, redirect, restrictions, revisions=None): self.id = none_or(id, int) self.title = none_or(title, str) self.namespace = none_or(namespace, int) self.redirect = none_or(redirect, Redirect) self.restrictions = serializable.List.deserialize(restrictions) self.__revisions = revisions or []
Page ID : `int`
https://github.com/mediawiki-utilities/python-mediawiki-utilities/blob/aa066d3d955daa3d20cf09bf5b0d46778dd67a7c/mw/xml_dump/iteration/page.py#L29-L56
from ...types import serializable from ...util import none_or from ..errors import MalformedXML from .redirect import Redirect from .revision import Revision class Page(serializable.Type): __slots__ = ( 'id', 'title', 'namespace', 'redirect', 'restrictions' )
MIT License
lithops-cloud/lithops
lithops/standalone/backends/ibm_vpc/ibm_vpc.py
IBMVPCBackend.clean
python
def clean(self): logger.debug('Cleaning IBM VPC resources') self._delete_vm_instances()
Clean all the backend resources. The gateway public IP and the floating IP are never deleted.
https://github.com/lithops-cloud/lithops/blob/a274a0bc423e22b9a68834cac5d63130666a4ee8/lithops/standalone/backends/ibm_vpc/ibm_vpc.py#L403-L415
import re import os import time import logging from ibm_vpc import VpcV1 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator from ibm_cloud_sdk_core import ApiException from concurrent.futures import ThreadPoolExecutor from lithops.util.ssh_client import SSHClient from lithops.constants import COMPUTE_CLI_MSG, CACHE_DIR from lithops.config import load_yaml_config, dump_yaml_config from . import config as vpc_config logger = logging.getLogger(__name__) class IBMVPCBackend: def __init__(self, ibm_vpc_config, mode): logger.debug("Creating IBM VPC client") self.name = 'ibm_vpc' self.config = ibm_vpc_config self.mode = mode self.endpoint = self.config['endpoint'] self.region = self.endpoint.split('//')[1].split('.')[0] self.vpc_name = self.config.get('vpc_name') logger.debug('Setting VPC endpoint to: {}'.format(self.endpoint)) self.master = None self.workers = [] iam_api_key = self.config.get('iam_api_key') self.custom_image = self.config.get('custom_lithops_image') authenticator = IAMAuthenticator(iam_api_key) self.ibm_vpc_client = VpcV1('2021-08-31', authenticator=authenticator) self.ibm_vpc_client.set_service_url(self.config['endpoint'] + '/v1') user_agent_string = 'ibm_vpc_{}'.format(self.config['user_agent']) self.ibm_vpc_client._set_user_agent_header(user_agent_string) msg = COMPUTE_CLI_MSG.format('IBM VPC') logger.info("{} - Region: {}".format(msg, self.region)) def _create_vpc(self, vpc_data): if 'vpc_id' in self.config: return if 'vpc_id' in vpc_data: self.config['vpc_id'] = vpc_data['vpc_id'] self.config['security_group_id'] = vpc_data['security_group_id'] return vpc_info = None assert re.match("^[a-z0-9-:-]*$", self.vpc_name), 'VPC name "{}" not valid'.format(self.vpc_name) vpcs_info = self.ibm_vpc_client.list_vpcs().get_result() for vpc in vpcs_info['vpcs']: if vpc['name'] == self.vpc_name: vpc_info = vpc if not vpc_info: logger.debug('Creating VPC {}'.format(self.vpc_name)) vpc_prototype = {} vpc_prototype['address_prefix_management'] = 'auto' vpc_prototype['classic_access'] = False vpc_prototype['name'] = self.vpc_name vpc_prototype['resource_group'] = {'id': self.config['resource_group_id']} response = self.ibm_vpc_client.create_vpc(**vpc_prototype) vpc_info = response.result self.config['vpc_id'] = vpc_info['id'] self.config['security_group_id'] = vpc_info['default_security_group']['id'] deloy_ssh_rule = True deploy_icmp_rule = True sg_rule_prototype_ssh = {} sg_rule_prototype_ssh['direction'] = 'inbound' sg_rule_prototype_ssh['ip_version'] = 'ipv4' sg_rule_prototype_ssh['protocol'] = 'tcp' sg_rule_prototype_ssh['port_min'] = 22 sg_rule_prototype_ssh['port_max'] = 22 sg_rule_prototype_icmp = {} sg_rule_prototype_icmp['direction'] = 'inbound' sg_rule_prototype_icmp['ip_version'] = 'ipv4' sg_rule_prototype_icmp['protocol'] = 'icmp' sg_rule_prototype_icmp['type'] = 8 sg_rules = self.ibm_vpc_client.get_security_group(self.config['security_group_id']) for rule in sg_rules.get_result()['rules']: if all(item in rule.items() for item in sg_rule_prototype_ssh.items()): deloy_ssh_rule = False if all(item in rule.items() for item in sg_rule_prototype_icmp.items()): deploy_icmp_rule = False if deloy_ssh_rule: self.ibm_vpc_client.create_security_group_rule(self.config['security_group_id'], sg_rule_prototype_ssh) if deploy_icmp_rule: self.ibm_vpc_client.create_security_group_rule(self.config['security_group_id'], sg_rule_prototype_icmp) def _create_gateway(self, vpc_data): if 'gateway_id' in self.config: return if 'gateway_id' in vpc_data: self.config['gateway_id'] = 
vpc_data['gateway_id'] return gateway_name = 'lithops-gateway-{}'.format(self.vpc_key) gateway_data = None gateways_info = self.ibm_vpc_client.list_public_gateways().get_result() for gw in gateways_info['public_gateways']: if gw['vpc']['id'] == self.config['vpc_id']: gateway_data = gw if not gateway_data: logger.debug('Creating Gateway {}'.format(gateway_name)) gateway_prototype = {} gateway_prototype['vpc'] = {'id': self.config['vpc_id']} gateway_prototype['zone'] = {'name': self.config['zone_name']} gateway_prototype['name'] = gateway_name response = self.ibm_vpc_client.create_public_gateway(**gateway_prototype) gateway_data = response.result self.config['gateway_id'] = gateway_data['id'] def _create_subnet(self, vpc_data): if 'subnet_id' in self.config: return if 'subnet_id' in vpc_data: self.config['subnet_id'] = vpc_data['subnet_id'] return subnet_name = 'lithops-subnet-{}'.format(self.vpc_key) subnet_data = None subnets_info = self.ibm_vpc_client.list_subnets(resource_group_id=self.config['resource_group_id']).get_result() for sn in subnets_info['subnets']: if sn['name'] == subnet_name: subnet_data = sn if not subnet_data: logger.debug('Creating Subnet {}'.format(subnet_name)) subnet_prototype = {} subnet_prototype['zone'] = {'name': self.config['zone_name']} subnet_prototype['ip_version'] = 'ipv4' subnet_prototype['name'] = subnet_name subnet_prototype['resource_group'] = {'id': self.config['resource_group_id']} subnet_prototype['vpc'] = {'id': self.config['vpc_id']} subnet_prototype['ipv4_cidr_block'] = '10.241.64.0/22' response = self.ibm_vpc_client.create_subnet(subnet_prototype) subnet_data = response.result self.config['subnet_id'] = subnet_data['id'] self.ibm_vpc_client.set_subnet_public_gateway(self.config['subnet_id'], {'id': self.config['gateway_id']}) def _create_floating_ip(self, vpc_data): if 'floating_ip_id' in self.config: return if 'floating_ip_id' in vpc_data: self.config['floating_ip'] = vpc_data['floating_ip'] self.config['floating_ip_id'] = vpc_data['floating_ip_id'] return floating_ip_name = 'lithops-floatingip-{}'.format(self.vpc_key) floating_ip_data = None floating_ips_info = self.ibm_vpc_client.list_floating_ips().get_result() for fip in floating_ips_info['floating_ips']: if fip['name'] == floating_ip_name: floating_ip_data = fip if not floating_ip_data: logger.debug('Creating floating IP {}'.format(floating_ip_name)) floating_ip_prototype = {} floating_ip_prototype['name'] = floating_ip_name floating_ip_prototype['zone'] = {'name': self.config['zone_name']} floating_ip_prototype['resource_group'] = {'id': self.config['resource_group_id']} response = self.ibm_vpc_client.create_floating_ip(floating_ip_prototype) floating_ip_data = response.result self.config['floating_ip'] = floating_ip_data['address'] self.config['floating_ip_id'] = floating_ip_data['id'] def init(self): vpc_data_filename = os.path.join(CACHE_DIR, self.name, 'data') self.vpc_data = load_yaml_config(vpc_data_filename) cahced_mode = self.vpc_data.get('mode') cahced_instance_id = self.vpc_data.get('instance_id') if self.mode == 'consume': logger.debug('Initializing IBM VPC backend (Consume mode)') if self.mode != cahced_mode or self.config['instance_id'] != cahced_instance_id: ins_id = self.config['instance_id'] instance_data = self.ibm_vpc_client.get_instance(ins_id) name = instance_data.get_result()['name'] self.vpc_data = {'mode': 'consume', 'instance_id': self.config['instance_id'], 'instance_name': name, 'floating_ip': self.config['ip_address']} dump_yaml_config(vpc_data_filename, 
self.vpc_data) self.master = IBMVPCInstance(self.vpc_data['instance_name'], self.config, self.ibm_vpc_client, public=True) self.master.instance_id = self.config['instance_id'] self.master.public_ip = self.config['ip_address'] self.master.delete_on_dismantle = False elif self.mode in ['create', 'reuse']: logger.debug(f'Initializing IBM VPC backend ({self.mode} mode)') if self.mode != cahced_mode: self.vpc_data = {} self._create_vpc(self.vpc_data) self.vpc_key = self.config['vpc_id'].split('-')[2] self._create_gateway(self.vpc_data) self._create_subnet(self.vpc_data) self._create_floating_ip(self.vpc_data) name = 'lithops-master-{}'.format(self.vpc_key) self.master = IBMVPCInstance(name, self.config, self.ibm_vpc_client, public=True) self.master.public_ip = self.config['floating_ip'] self.master.profile_name = self.config['master_profile_name'] self.master.delete_on_dismantle = False self.vpc_data = { 'mode': 'consume', 'instance_id': '0af1', 'instance_name': self.master.name, 'vpc_id': self.config['vpc_id'], 'subnet_id': self.config['subnet_id'], 'security_group_id': self.config['security_group_id'], 'floating_ip': self.config['floating_ip'], 'floating_ip_id': self.config['floating_ip_id'], 'gateway_id': self.config['gateway_id'] } dump_yaml_config(vpc_data_filename, self.vpc_data) def _delete_vm_instances(self): msg = ('Deleting all Lithops worker VMs in {}'.format(self.vpc_name) if self.vpc_name else 'Deleting all Lithops worker VMs') logger.info(msg) def delete_instance(instance_info): ins_name, ins_id = instance_info try: logger.info('Deleting instance {}'.format(ins_name)) self.ibm_vpc_client.delete_instance(ins_id) except ApiException as e: if e.code == 404: pass else: raise e deleted_instances = set() while True: instances_to_delete = set() instances_info = self.ibm_vpc_client.list_instances().get_result() for ins in instances_info['instances']: if ins['name'].startswith('lithops-worker'): ins_to_dlete = (ins['name'], ins['id']) if ins_to_dlete not in deleted_instances: instances_to_delete.add(ins_to_dlete) if instances_to_delete: with ThreadPoolExecutor(len(instances_to_delete)) as executor: executor.map(delete_instance, instances_to_delete) deleted_instances.update(instances_to_delete) else: break def _delete_subnet(self, vpc_data): subnet_name = 'lithops-subnet-{}'.format(self.vpc_key) if 'subnet_id' not in vpc_data: subnets_info = self.ibm_vpc_client.list_subnets().get_result() for subn in subnets_info['subnets']: if subn['name'] == subnet_name: vpc_data['subnet_id'] = subn['id'] if 'subnet_id' in vpc_data: logger.info('Deleting subnet {}'.format(subnet_name)) try: self.ibm_vpc_client.delete_subnet(vpc_data['subnet_id']) except ApiException as e: if e.code == 404: pass else: raise e time.sleep(5) def _delete_gateway(self, vpc_data): gateway_name = 'lithops-gateway-{}'.format(self.vpc_key) if 'gateway_id' not in vpc_data: gateways_info = self.ibm_vpc_client.list_public_gateways().get_result() for gw in gateways_info['public_gateways']: if ['name'] == gateway_name: vpc_data['gateway_id'] = gw['id'] if 'gateway_id' in vpc_data: logger.info('Deleting gateway {}'.format(gateway_name)) try: self.ibm_vpc_client.delete_public_gateway(vpc_data['gateway_id']) except ApiException as e: if e.code == 404: pass elif e.code == 400: pass else: raise e time.sleep(5) def _delete_vpc(self, vpc_data): if 'vpc_id' not in vpc_data: vpcs_info = self.ibm_vpc_client.list_vpcs().get_result() for vpc in vpcs_info['vpcs']: if vpc['name'] == self.vpc_name: vpc_data['vpc_id'] = vpc['id'] if 'vpc_id' in 
vpc_data: logger.info('Deleting VPC {}'.format(self.vpc_name)) try: self.ibm_vpc_client.delete_vpc(vpc_data['vpc_id']) except ApiException as e: if e.code == 404: pass else: raise e
Apache License 2.0
romainsabathe/dagmm
model.py
relative_euclidean_distance
python
def relative_euclidean_distance(x1, x2, eps=eps): num = torch.norm(x1 - x2, p=2, dim=1) denom = torch.norm(x1, p=2, dim=1) return num / torch.max(denom, eps)
x1 and x2 are assumed to be Variables or Tensors. They have shape [batch_size, dimension_embedding]
https://github.com/romainsabathe/dagmm/blob/a3bc1c0a5a95935420c47678aa8931fadddacb5a/model.py#L69-L74
import torch import numpy as np from torch import nn from gmm import GMM from compression_networks import CompressionNetworkArrhythmia from estimation_networks import EstimationNetworkArrhythmia eps = torch.autograd.Variable(torch.FloatTensor([1.e-8]), requires_grad=False) class DAGMM(nn.Module): def __init__(self, compression_module, estimation_module, gmm_module): super().__init__() self.compressor = compression_module self.estimator = estimation_module self.gmm = gmm_module def forward(self, input): encoded = self.compressor.encode(input) decoded = self.compressor.decode(encoded) relative_ed = relative_euclidean_distance(input, decoded) cosine_sim = cosine_similarity(input, decoded) relative_ed = relative_ed.view(-1, 1) cosine_sim = relative_ed.view(-1, 1) latent_vectors = torch.cat([encoded, relative_ed, cosine_sim], dim=1) if self.training: mixtures_affiliations = self.estimator(latent_vectors) self.gmm._update_mixtures_parameters(latent_vectors, mixtures_affiliations) return self.gmm(latent_vectors) class DAGMMArrhythmia(DAGMM): def __init__(self): compressor = CompressionNetworkArrhythmia() estimator = EstimationNetworkArrhythmia() gmm = GMM(num_mixtures=2, dimension_embedding=4) super().__init__(compression_module=compressor, estimation_module=estimator, gmm_module=gmm)
MIT License
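A small numeric check of the distance above. It assumes PyTorch is installed and uses a plain tensor for eps instead of the deprecated Variable wrapper used in the record.

import torch

eps = torch.FloatTensor([1.e-8])

def relative_euclidean_distance(x1, x2, eps=eps):
    # ||x1 - x2|| / max(||x1||, eps), computed row-wise over the batch dimension.
    num = torch.norm(x1 - x2, p=2, dim=1)
    denom = torch.norm(x1, p=2, dim=1)
    return num / torch.max(denom, eps)

x1 = torch.tensor([[3.0, 4.0]])   # row norm 5
x2 = torch.tensor([[0.0, 0.0]])
print(relative_euclidean_distance(x1, x2))  # -> tensor([1.])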
pidgeot/python-lnp
core/keybinds.py
delete_keybinds
python
def delete_keybinds(filename): log.i('Deleting ' + filename + ' keybinds') os.remove(_keybind_fname(filename))
Deletes a keybindings file. Args: filename: the filename to delete.
https://github.com/pidgeot/python-lnp/blob/e738c0003e119e92478c831b94a5fd44a52031c6/core/keybinds.py#L125-L133
from __future__ import print_function, unicode_literals, absolute_import import collections from io import open import os import shutil from . import baselines, helpers, paths, log from .lnp import lnp def _keybind_fname(filename): filename = os.path.basename(filename) if not filename.endswith('.txt'): filename = filename + '.txt' return paths.get('keybinds', filename) def read_keybinds(): files = [] for fname in helpers.get_text_files(paths.get('keybinds')): with open(fname, encoding='cp437') as f: if ('[DISPLAY_STRING:' in f.read()) == ('legacy' in lnp.df_info.variations): files.append(fname) return tuple(sorted(os.path.basename(o) for o in files)) def _sdl_get_binds(filename, compressed=True): with open(filename, encoding='cp437') as f: lines = f.readlines() od, lastkey = collections.OrderedDict(), None for line in (l.strip() for l in lines if l.strip()): if line.startswith('[BIND:'): od[line], lastkey = [], line elif lastkey is not None: od[lastkey].append(line) if not compressed: return od van = _get_vanilla_binds() if van is not None: return collections.OrderedDict( (k, v) for k, v in od.items() if van.get(k) and set(van.get(k)) != set(v)) def _sdl_write_binds(filename, binds_od, expanded=False): if expanded: van = _get_vanilla_binds() if van is not None: binds_od = collections.OrderedDict( (k, binds_od.get(k) or v) for k, v in van.items()) lines = [''] for bind, vals in binds_od.items(): lines.append(bind) lines.extend(vals if expanded else [' ' + v for v in vals]) text = '\n'.join(lines) + '\n' if filename is None: return text with open(filename, 'w', encoding='cp437') as f: f.write(text) def _get_vanilla_binds(): try: vanfile = os.path.join( baselines.find_vanilla(False), 'data', 'init', 'interface.txt') return _sdl_get_binds(vanfile, compressed=False) except TypeError: log.w("Can't load or change keybinds with missing baseline!") def load_keybinds(filename): target = paths.get('init', 'interface.txt') filename = _keybind_fname(filename) log.i('Loading keybinds: ' + filename) if 'legacy' in lnp.df_info.variations: shutil.copyfile(filename, target) else: _sdl_write_binds(target, _sdl_get_binds(filename), expanded=True) def keybind_exists(filename): return os.access(_keybind_fname(filename), os.F_OK) def save_keybinds(filename): installed = paths.get('init', 'interface.txt') filename = _keybind_fname(filename) log.i('Saving current keybinds as ' + filename) if 'legacy' in lnp.df_info.variations: shutil.copyfile(installed, filename) else: _sdl_write_binds(filename, _sdl_get_binds(installed))
ISC License
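A hedged usage sketch of delete_keybinds from the record above; the module path assumes the repository root is on sys.path, the filename is invented, and the call only works inside a configured python-lnp install where paths.get can resolve the keybinds directory.

from core import keybinds

# Removes <keybinds dir>/custom.txt; _keybind_fname appends '.txt' when missing.
keybinds.delete_keybinds('custom')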
clusterhq/flocker
flocker/node/agents/ebs.py
_wait_for_volume_state_change
python
def _wait_for_volume_state_change(operation, volume, update=_get_ebs_volume_state, timeout=VOLUME_STATE_CHANGE_TIMEOUT): time.sleep(5.0) start_time = time.time() poll_until( lambda: _reached_end_state( operation, volume, update, time.time() - start_time, timeout ), itertools.repeat(1) )
Helper function to wait for a given volume to change state from ``start_status`` via ``transient_status`` to ``end_status``. :param NamedConstant operation: Operation triggering volume state change. A value from ``VolumeOperations``. :param boto3.resources.factory.ec2.Volume: Volume to check status for. :param update: Method to use to fetch EBS volume's latest state. :param int timeout: Seconds to wait for volume operation to succeed. :raises Exception: When input volume fails to reach expected backend state for given operation within timeout seconds.
https://github.com/clusterhq/flocker/blob/eaa586248986d7cd681c99c948546c2b507e44de/flocker/node/agents/ebs.py#L699-L730
from types import NoneType from subprocess import check_output import threading import time import logging import itertools import boto3 from botocore.exceptions import ClientError, EndpointConnectionError from boto.utils import get_instance_metadata from uuid import UUID from bitmath import Byte, GiB from characteristic import with_cmp from pyrsistent import PClass, field, pmap from zope.interface import implementer from twisted.python.constants import ( Names, NamedConstant, Values, ValueConstant ) from twisted.python.filepath import FilePath from eliot import Message, register_exception_extractor from .blockdevice import ( IBlockDeviceAPI, IProfiledBlockDeviceAPI, BlockDeviceVolume, UnknownVolume, AlreadyAttachedVolume, UnattachedVolume, UnknownInstanceID, MandatoryProfiles, ICloudAPI, ) from flocker.common import poll_until from ..exceptions import StorageInitializationError from ...control import pmap_field from ._logging import ( AWS_ACTION, NO_AVAILABLE_DEVICE, NO_NEW_DEVICE_IN_OS, WAITING_FOR_VOLUME_STATUS_CHANGE, BOTO_LOG_HEADER, IN_USE_DEVICES, CREATE_VOLUME_FAILURE, BOTO_LOG_RESULT, VOLUME_BUSY_MESSAGE, INVALID_FLOCKER_CLUSTER_ID, ) DATASET_ID_LABEL = u'flocker-dataset-id' METADATA_VERSION_LABEL = u'flocker-metadata-version' CLUSTER_ID_LABEL = u'flocker-cluster-id' BOTO_NUM_RETRIES = 20 VOLUME_STATE_CHANGE_TIMEOUT = 300 MAX_ATTACH_RETRIES = 3 IOPS_MIN_IOPS = 100 IOPS_MIN_SIZE = 4 NOT_FOUND = u'InvalidVolume.NotFound' INVALID_PARAMETER_VALUE = u'InvalidParameterValue' VOLUME_ATTACHMENT_BUSY = u"busy" register_exception_extractor( ClientError, lambda e: { "aws_code": e.response['Error']['Code'], "aws_message": unicode(e.response['Error']['Message']), "aws_request_id": e.response['ResponseMetadata']['RequestId'], } ) class NoAvailableDevice(Exception): class EBSVolumeTypes(Values): STANDARD = ValueConstant(u"standard") IO1 = ValueConstant(u"io1") GP2 = ValueConstant(u"gp2") class EBSProfileAttributes(PClass): volume_type = field(mandatory=False, type=ValueConstant, initial=EBSVolumeTypes.STANDARD) iops_per_size_gib = field(mandatory=False, type=(int, type(None)), initial=None) max_iops = field(mandatory=False, type=(int, type(None)), initial=None) def requested_iops(self, size_gib): if self.iops_per_size_gib is not None: if self.max_iops is not None: return min(size_gib * self.iops_per_size_gib, self.max_iops) return size_gib * self.iops_per_size_gib return None class EBSMandatoryProfileAttributes(Values): GOLD = ValueConstant(EBSProfileAttributes(volume_type=EBSVolumeTypes.IO1, iops_per_size_gib=30, max_iops=20000)) SILVER = ValueConstant(EBSProfileAttributes( volume_type=EBSVolumeTypes.GP2)) BRONZE = ValueConstant(EBSProfileAttributes( volume_type=EBSVolumeTypes.STANDARD)) def _volume_type_and_iops_for_profile_name(profile_name, size): volume_type = None iops = None try: A = EBSMandatoryProfileAttributes.lookupByName( MandatoryProfiles.lookupByValue(profile_name).name).value except ValueError: pass else: volume_type = A.volume_type.value iops = A.requested_iops(size) return volume_type, iops class VolumeOperations(Names): CREATE = NamedConstant() ATTACH = NamedConstant() DETACH = NamedConstant() DESTROY = NamedConstant() class VolumeStates(Values): EMPTY = ValueConstant('') CREATING = ValueConstant(u'creating') AVAILABLE = ValueConstant(u'available') ATTACHING = ValueConstant(u'attaching') IN_USE = ValueConstant(u'in-use') DETACHING = ValueConstant(u'detaching') DELETING = ValueConstant(u'deleting') class VolumeStateFlow(PClass): start_state = field(mandatory=True, 
type=ValueConstant) transient_state = field(mandatory=True, type=ValueConstant) end_state = field(mandatory=True, type=ValueConstant) sets_attach = field(mandatory=True, type=bool) unsets_attach = field(mandatory=True, type=bool) class VolumeStateTable(PClass): def _populate_volume_state_table(): O = VolumeOperations S = VolumeStates table = pmap() def add_flow(operation, start, transient, end, sets_attach, unsets_attach): return table.set(operation, VolumeStateFlow(start_state=start, transient_state=transient, end_state=end, sets_attach=sets_attach, unsets_attach=unsets_attach)) table = add_flow(O.CREATE, S.EMPTY, S.CREATING, S.AVAILABLE, False, False) table = add_flow(O.ATTACH, S.AVAILABLE, S.ATTACHING, S.IN_USE, True, False) table = add_flow(O.DETACH, S.IN_USE, S.DETACHING, S.AVAILABLE, False, True) table = add_flow(O.DESTROY, S.AVAILABLE, S.DELETING, S.EMPTY, False, False) return table table = pmap_field(NamedConstant, VolumeStateFlow, initial=_populate_volume_state_table()) VOLUME_STATE_TABLE = VolumeStateTable() class AttachFailed(Exception): class VolumeBusy(Exception): def __init__(self, volume): Exception.__init__(self, volume.id, volume.attachments) class InvalidRegionError(Exception): def __init__(self, region): message = u"The specified AWS region is not valid." Exception.__init__(self, message, region) self.region = region class InvalidZoneError(Exception): def __init__(self, zone, available_zones): message = u"The specified AWS zone is not valid." Exception.__init__( self, message, zone, u"Available zones:", available_zones) self.zone = zone self.available_zones = available_zones class InvalidStateException(Exception): def __init__(self, volume, state, valid_states): Exception.__init__(self, volume, state, valid_states) self.volume = volume self.state = state self.valid_states = valid_states class TagNotFound(Exception): def __init__(self, volume_id, tag, existing_tags): Exception.__init__(self, volume_id, tag, existing_tags) self.volume_id = volume_id self.tag = tag self.existing_tags = existing_tags class TimeoutException(Exception): def __init__(self, blockdevice_id, operation, start_state, transient_state, end_state, current_state): Exception.__init__(self, blockdevice_id, operation, current_state) self.blockdevice_id = blockdevice_id self.operation = operation self.start_state = start_state self.transient_state = transient_state self.end_state = end_state self.current_state = current_state class UnexpectedStateException(Exception): def __init__(self, blockdevice_id, operation, start_state, transient_state, end_state, current_state): Exception.__init__(self, blockdevice_id, operation, current_state) self.blockdevice_id = blockdevice_id self.operation = operation self.start_state = start_state self.transient_state = transient_state self.end_state = end_state self.current_state = current_state class EliotLogHandler(logging.Handler): def emit(self, record): Message.new( message_type=BOTO_LOG_HEADER, message=record.getMessage() ).write() def _enable_boto_logging(): logger = logging.getLogger("boto3") logger.setLevel(logging.INFO) logger.addHandler(EliotLogHandler()) _enable_boto_logging() class AttachUnexpectedInstance(Exception): def __init__(self, blockdevice_id, instance_id, local_instance_id): Exception.__init__( self, blockdevice_id, instance_id, local_instance_id) self.blockdevice_id = blockdevice_id self.instance_id = instance_id self.local_instance_id = local_instance_id @with_cmp(["requested", "discovered"]) class AttachedUnexpectedDevice(Exception): _template = 
"AttachedUnexpectedDevice(requested={!r}, discovered={!r})" def __init__(self, requested, discovered): if not isinstance(requested, FilePath): raise TypeError( "requested must be FilePath, not {}".format(type(requested)) ) if not isinstance(discovered, (FilePath, NoneType)): raise TypeError( "discovered must be None or FilePath, not {}".format( type(discovered) ) ) self.requested = requested self.discovered = discovered def __str__(self): discovered = self.discovered if discovered is not None: discovered = discovered.path return self._template.format( self.requested.path, discovered, ) __repr__ = __str__ def _expected_device(requested_device): prefix = b"/dev/sd" if requested_device.startswith(prefix): return FilePath(b"/dev").child(b"xvd" + requested_device[len(prefix):]) raise ValueError( "Unsupported requested device {!r}".format(requested_device) ) def ec2_client(region, zone, access_key_id, secret_access_key, session_token=None, validate_region=True): connection = boto3.session.Session( aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, aws_session_token=session_token, ) connection._session.set_config_variable( 'metadata_service_num_attempts', BOTO_NUM_RETRIES) ec2_resource = connection.resource("ec2", region_name=region) if validate_region: try: zones = ec2_resource.meta.client.describe_availability_zones() except EndpointConnectionError: raise InvalidRegionError(region) available_zones = [ available_zone['ZoneName'] for available_zone in zones['AvailabilityZones'] ] if zone not in available_zones: raise InvalidZoneError(zone, available_zones) return _EC2(zone=zone, connection=ec2_resource) def boto3_log(method): counter = itertools.count(1) def _run_with_logging(*args, **kwargs): with AWS_ACTION( operation=[method.__name__, args[1:], kwargs], count=next(counter) ): return method(*args, **kwargs) return _run_with_logging def _get_volume_tag(volume, name): if volume.tags: for tag in volume.tags: if tag['Key'] == name: return tag['Value'] raise TagNotFound(volume.id, name, volume.tags) class _EC2(PClass): zone = field(mandatory=True) connection = field(mandatory=True) def _blockdevicevolume_from_ebs_volume(ebs_volume): if ebs_volume.attachments: attached_to = unicode(ebs_volume.attachments[0]['InstanceId']) else: attached_to = None volume_dataset_id = _get_volume_tag(ebs_volume, DATASET_ID_LABEL) return BlockDeviceVolume( blockdevice_id=unicode(ebs_volume.id), size=int(GiB(ebs_volume.size).to_Byte().value), attached_to=attached_to, dataset_id=UUID(volume_dataset_id) ) @boto3_log def _get_ebs_volume_state(volume): volume.reload() return volume def _reached_end_state( operation, volume, update, elapsed_time, timeout=VOLUME_STATE_CHANGE_TIMEOUT ): state_flow = VOLUME_STATE_TABLE.table[operation] start_state = state_flow.start_state.value transient_state = state_flow.transient_state.value end_state = state_flow.end_state.value sets_attach = state_flow.sets_attach unsets_attach = state_flow.unsets_attach if elapsed_time > timeout: raise TimeoutException(unicode(volume.id), operation, start_state, transient_state, end_state, volume.state) try: update(volume) except ClientError as e: if e.response['Error']['Code'] == NOT_FOUND: raise UnknownVolume(volume.id) WAITING_FOR_VOLUME_STATUS_CHANGE( volume_id=volume.id, status=volume.state, target_status=end_state, needs_attach_data=sets_attach, wait_time=elapsed_time ).write() if volume.state not in [start_state, transient_state, end_state]: raise UnexpectedStateException(unicode(volume.id), operation, start_state, 
transient_state, end_state, volume.state) if volume.state != end_state: return False if volume.attachments: volume_attach_data = volume.attachments[0] else: volume_attach_data = None if sets_attach: return (volume_attach_data is not None and (volume_attach_data['Device'] != '' and volume_attach_data['InstanceId'] != '')) elif unsets_attach: return (volume_attach_data is None or (volume_attach_data['Device'] == '' and volume_attach_data['InstanceId'] == '')) else: return True
Apache License 2.0
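The flocker helper above delegates to poll_until with one-second retry intervals; below is a simplified, standalone sketch of the same timeout-guarded polling pattern in plain Python. It is not flocker's actual poll_until, and _reached_end_state is replaced by a trivial time-based predicate.

import itertools
import time

def poll_until(predicate, intervals):
    # Keep calling predicate, sleeping between attempts, until it returns truthy.
    for interval in intervals:
        result = predicate()
        if result:
            return result
        time.sleep(interval)

start = time.time()
# In the record, _reached_end_state(...) plays the role of this predicate.
poll_until(lambda: time.time() - start > 0.5, itertools.repeat(0.1))
print('condition reached after %.2fs' % (time.time() - start))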
biocommons/eutils
src/eutils/_internal/queryservice.py
QueryService.einfo
python
def einfo(self, args=None): if args is None: args = {} return self._query("/einfo.fcgi", args, skip_cache=True)
execute a NON-cached, throttled einfo query einfo.fcgi?db=<database> Input: Entrez database (&db) or None (returns info on all Entrez databases) Output: XML containing database statistics Example: Find database statistics for Entrez Protein. QueryService.einfo({"db": "protein"}) Equivalent HTTP request: https://eutils.ncbi.nlm.nih.gov/entrez/eutils/einfo.fcgi?db=protein :param dict args: dict of query items (optional) :returns: content of reply :rtype: str :raises EutilsRequestError: when NCBI replies, but the request failed (e.g., bogus database name)
https://github.com/biocommons/eutils/blob/57cd8a5f1a54ea5e3823d0e34366a5490ad834aa/src/eutils/_internal/queryservice.py#L145-L171
from __future__ import absolute_import, division, print_function, unicode_literals import hashlib import logging import os import pickle import time import lxml.etree import requests from .sqlitecache import SQLiteCache from .exceptions import EutilsRequestError, EutilsNCBIError _logger = logging.getLogger(__name__) url_base = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils" default_default_args = {"retmode": "xml", "usehistory": "y", "retmax": 250} default_tool = __package__ default_email = "biocommons-dev@googlegroups.com" default_cache_path = os.path.join(os.path.expanduser("~"), ".cache", "eutils-cache.db") class QueryService(object): def __init__(self, email=default_email, cache=False, default_args=default_default_args, request_interval=None, tool=default_tool, api_key=None ): self.default_args = default_args self.email = email self.tool = tool self.api_key = api_key if request_interval is not None: _logger.warning("eutils QueryService: request_interval no longer supported; ignoring passed parameter") if self.api_key is None: requests_per_second = 3 _logger.warning("No NCBI API key provided; throttling to {} requests/second; see " "https://ncbiinsights.ncbi.nlm.nih.gov/2017/11/02/new-api-keys-for-the-e-utilities/".format( requests_per_second)) else: requests_per_second = 10 _logger.info("Using NCBI API key; throttling to {} requests/second".format(requests_per_second)) self.request_interval = 1.0 / requests_per_second self._last_request_clock = 0 self._ident_args = {"tool": tool, "email": email} self._request_count = 0 if cache is True: cache_path = default_cache_path elif cache: cache_path = cache else: cache_path = False self._cache = SQLiteCache(cache_path) if cache_path else None def efetch(self, args): return self._query("/efetch.fcgi", args)
Apache License 2.0
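The einfo docstring above already spells out the equivalent HTTP request; here is a short usage sketch, assuming the eutils package is installed and NCBI is reachable. The import path follows the record's file path.

from eutils._internal.queryservice import QueryService

qs = QueryService()                # no API key: the client throttles itself to 3 requests/second
xml = qs.einfo({"db": "protein"})  # same as .../einfo.fcgi?db=protein
print(xml[:80])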
microsoft/presidio
presidio-analyzer/presidio_analyzer/nlp_engine/nlp_engine.py
NlpEngine.is_stopword
python
def is_stopword(self, word: str, language: str) -> bool:
Return true if the given word is a stop word. (within the given language)
https://github.com/microsoft/presidio/blob/9d03112be79195937446daee4b583e07fa081667/presidio-analyzer/presidio_analyzer/nlp_engine/nlp_engine.py#L19-L24
from abc import ABC, abstractmethod from presidio_analyzer.nlp_engine import NlpArtifacts class NlpEngine(ABC): @abstractmethod def process_text(self, text: str, language: str) -> NlpArtifacts: @abstractmethod
MIT License
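is_stopword is declared abstract in the record above; the sketch below shows the shape a concrete override might take. TinyNlpEngine and its word list are made up for illustration, and a real implementation would subclass NlpEngine and satisfy all of its abstract methods, not just this one.

class TinyNlpEngine:
    # Illustrative stand-in only; not part of presidio.
    _STOPWORDS = {"en": {"the", "a", "an", "and"}}

    def is_stopword(self, word: str, language: str) -> bool:
        return word.lower() in self._STOPWORDS.get(language, set())

print(TinyNlpEngine().is_stopword("The", "en"))  # True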
romanovmikev/setka
setka/pipes/optimization/LossHandler.py
LossHandler.after_epoch
python
def after_epoch(self): if self.trainer._mode in ["train", "valid"]: if hasattr(self.trainer, '_loss'): del self.trainer._loss if hasattr(self.trainer, '_loss_values'): del self.trainer._loss_values
Releases loss value in case it is present.
https://github.com/romanovmikev/setka/blob/cad6f17429a4bb3479c5557ad58c15fee568f410/setka/pipes/optimization/LossHandler.py#L54-L62
import torch from setka.pipes.Pipe import Pipe from copy import deepcopy class LossHandler(Pipe): def __init__(self, criterion, coefs=None, retain_graph=None): super(LossHandler, self).__init__() self.retain_graph = retain_graph self.criterion = criterion if not isinstance(self.criterion, (tuple, list)): self.criterion = [self.criterion] self.coefs = coefs if coefs is not None else [1.0] * len(self.criterion) if len(self.coefs) != len(self.criterion): raise RuntimeError('Number of criterion and coefficients are not equal') self.set_priority({'on_batch': 9, 'after_batch': -9}) def formula(self): return 'Loss = ' + ' + '.join([f'{coef} * {str(loss)}' for loss, coef in zip(self.criterion, self.coefs)]) def on_batch(self): if self.trainer._mode in ["train", "valid"]: self.trainer._loss = 0 self.trainer._loss_values = {} with torch.set_grad_enabled(self.trainer._mode == 'train'): for cur_coef, cur_criterion in zip(self.coefs, self.criterion): cur_loss = cur_criterion(self.trainer._output, self.trainer._input) self.trainer._loss = self.trainer._loss + cur_coef * cur_loss self.trainer._loss_values[cur_criterion.__name__] = cur_loss.item() if self.trainer._mode == "train": self.trainer._loss.backward(retain_graph=self.retain_graph) self.trainer.status['Loss'] = self.trainer._loss.detach().cpu().item() self.trainer.status['Formula'] = self.formula()
MIT License
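A hedged sketch of constructing the LossHandler above: the two criterion functions and their coefficients are invented, the import path follows the record's file path, and driving on_batch/after_epoch still requires a real setka Trainer.

import torch
from setka.pipes.optimization.LossHandler import LossHandler

def mse(output, target):
    return torch.nn.functional.mse_loss(output, target)

def l1(output, target):
    return torch.nn.functional.l1_loss(output, target)

# Combined objective: Loss = 1.0 * mse + 0.1 * l1
handler = LossHandler(criterion=[mse, l1], coefs=[1.0, 0.1])
print(handler.formula())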
aliok/trnltk
trnltk/morphology/model/formatter.py
format_morpheme_container_for_simple_parseset_without_suffixes
python
def format_morpheme_container_for_simple_parseset_without_suffixes(result): return u"{}+{}[{}({}+{})+{}]".format(result.get_surface(), result.get_surface_syntactic_category(), result.get_stem(), result.get_stem_syntactic_category(), result.get_lemma_root(), result.get_lemma_root_syntactic_category())
@type result MorphemeContainer @return "kitaplasti+Verb[kitaplas(kitap+Noun)+Verb]" for word 'kitaplasti'
https://github.com/aliok/trnltk/blob/c3faf04a2fc92275524ef4aca1441a583ac631ce/trnltk/morphology/model/formatter.py#L143-L149
from trnltk.morphology.model.lexeme import SyntacticCategory, SecondarySyntacticCategory from trnltk.morphology.lexicon.rootgenerator import CircumflexConvertingRootGenerator from trnltk.morphology.model.morpheme import FreeTransitionSuffix def format_morpheme_container_for_parseset(result, add_space=False): returnValue = u'{}+{}'.format(result.get_root().lexeme.root, result.get_root_state().pretty_name) if result.get_root().lexeme.secondary_syntactic_category: returnValue += u'+{}'.format(result.get_root().lexeme.secondary_syntactic_category) if result.has_transitions(): non_free_transitions = filter(lambda t: not isinstance(t.suffix_form_application.suffix_form.suffix, FreeTransitionSuffix), result.get_transitions()) if non_free_transitions: if add_space: returnValue = returnValue + u' + ' + u' + '.join([format_transition(t, False) for t in non_free_transitions]) else: returnValue = returnValue + u'+' + u'+'.join([format_transition(t, False) for t in non_free_transitions]) return returnValue def format_morpheme_container_for_tests(result): returnValue = u'{}({})+{}'.format(result.get_root().str, result.get_root().lexeme.lemma, result.get_root_state().pretty_name) if result.get_root().lexeme.secondary_syntactic_category: returnValue += u'+{}'.format(result.get_root().lexeme.secondary_syntactic_category) if result.has_transitions(): non_free_transitions = filter(lambda t: not isinstance(t.suffix_form_application.suffix_form.suffix, FreeTransitionSuffix), result.get_transitions()) if non_free_transitions: returnValue = returnValue + u'+' + u'+'.join([format_transition(t, True) for t in non_free_transitions]) return returnValue def format_transition(transition, includeForm=True): returnVal = u'' if transition.is_derivational(): returnVal = transition.to_state.pretty_name + '+' if includeForm and transition.suffix_form_application.actual_suffix_form and transition.suffix_form_application.actual_suffix_form.isalnum(): returnVal += u'{}({}[{}])'.format(transition.suffix_form_application.suffix_form.suffix.pretty_name, transition.suffix_form_application.suffix_form.form, transition.suffix_form_application.actual_suffix_form) else: returnVal += u'{}'.format(transition.suffix_form_application.suffix_form.suffix.pretty_name) return returnVal def format_morpheme_container_for_simple_parseset(result): root = result.get_root().lexeme.root secondary_syntactic_category_str = result.get_root().lexeme.secondary_syntactic_category if result.get_root().lexeme.syntactic_category == SyntacticCategory.ADVERB: if result.get_root().lexeme.secondary_syntactic_category == SecondarySyntacticCategory.QUESTION: secondary_syntactic_category_str = None elif result.get_root().lexeme.secondary_syntactic_category == SecondarySyntacticCategory.TIME: secondary_syntactic_category_str = None elif result.get_root().lexeme.syntactic_category == SyntacticCategory.ADJECTIVE: if result.get_root().lexeme.secondary_syntactic_category == SecondarySyntacticCategory.QUESTION: secondary_syntactic_category_str = None groups = [] current_group = [] for transition in result.get_transitions(): if transition.is_derivational(): groups.append(current_group) current_group = [transition.to_state.pretty_name] else: pass if isinstance(transition.suffix_form_application.suffix_form.suffix, FreeTransitionSuffix): continue else: current_group.append(transition.suffix_form_application.suffix_form.suffix.pretty_name) groups.append(current_group) if not groups: if not secondary_syntactic_category_str: return u'({},"{}+{}")'.format(1, root, 
result.get_root_state().pretty_name) else: return u'({},"{}+{}+{}")'.format(1, root, result.get_root_state().pretty_name, secondary_syntactic_category_str) return_value = None if not secondary_syntactic_category_str: return_value = u'({},"{}+{}")'.format(1, root, result.get_root_state().pretty_name) else: return_value = u'({},"{}+{}+{}")'.format(1, root, result.get_root_state().pretty_name, secondary_syntactic_category_str) if not groups[0]: if not secondary_syntactic_category_str: return_value = u'({},"{}+{}")'.format(1, root, result.get_root_state().pretty_name) else: return_value = u'({},"{}+{}+{}")'.format(1, root, result.get_root_state().pretty_name, secondary_syntactic_category_str) else: if not secondary_syntactic_category_str: return_value = u'({},"{}+{}+{}")'.format(1, root, result.get_root_state().pretty_name, u'+'.join(groups[0])) else: return_value = u'({},"{}+{}+{}+{}")'.format(1, root, result.get_root_state().pretty_name, secondary_syntactic_category_str, u'+'.join(groups[0])) for i in range(1, len(groups)): group = groups[i] return_value += u'({},"{}")'.format(i + 1, u'+'.join(group)) if any(c in CircumflexConvertingRootGenerator.Circumflex_Chars for c in return_value): for (cir, pla) in CircumflexConvertingRootGenerator.Circumflex_Letters_Map.iteritems(): return_value = return_value.replace(cir, pla) if u'+Apos' in return_value: return_value = return_value.replace(u'+Apos', u'') return return_value
Apache License 2.0
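The docstring above gives the target output "kitaplasti+Verb[kitaplas(kitap+Noun)+Verb]"; the snippet below just hand-assembles that layout from literal values to make the template readable. It does not construct a real MorphemeContainer, and the variable names are mine, not trnltk's.

surface, surface_cat = u"kitaplasti", u"Verb"   # word form and its category
stem, stem_cat = u"kitaplas", u"Verb"           # stem and its category
root, root_cat = u"kitap", u"Noun"              # lemma root and its category

print(u"{}+{}[{}({}+{})+{}]".format(surface, surface_cat, stem,
                                    root, root_cat, stem_cat))
# kitaplasti+Verb[kitaplas(kitap+Noun)+Verb]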
realitix/vulk
vulk/graphic/d2/batch.py
SpriteBatch.init_descriptorlayout
python
def init_descriptorlayout(self, context): ubo_descriptor = vo.DescriptorSetLayoutBinding( 0, vc.DescriptorType.UNIFORM_BUFFER, 1, vc.ShaderStage.VERTEX, None) texture_descriptor = vo.DescriptorSetLayoutBinding( 1, vc.DescriptorType.COMBINED_IMAGE_SAMPLER, 1, vc.ShaderStage.FRAGMENT, None) layout_bindings = [ubo_descriptor, texture_descriptor] return vo.DescriptorSetLayout(context, layout_bindings)
Initialize descriptor layout for one uniform and one texture *Parameters:* - `context`: `VulkContext`
https://github.com/realitix/vulk/blob/7530b22ebf1cd2c6bf3aa07b15ce6575b85ee184/vulk/graphic/d2/batch.py#L643-L657
from abc import ABC, abstractmethod from os import path import math from vulk import PATH_VULK_SHADER from vulk import vulkanconstant as vc from vulk import vulkanobject as vo from vulk import vulkanutil as vu from vulk.graphic import mesh as me from vulk.graphic import uniform from vulk.math.matrix import ProjectionMatrix, TransformationMatrix, Matrix4 class BaseBatch(ABC): def __init__(self, context, size=1000, shaderprogram=None, out_view=None): if not shaderprogram: shaderprogram = self.get_default_shaderprogram(context) self.shaderprogram = shaderprogram self.custom_out_view = out_view is not None self.out_view = out_view if out_view else context.final_image_view self.mesh = self.init_mesh(context, size) self.init_indices(size) self.uniformblock = self.init_uniform(context) self.cbpool = self.init_commandpool(context) self.descriptorpool = self.init_descriptorpool(context) self.descriptorlayout = self.init_descriptorlayout(context) self.pipelinelayout = self.init_pipelinelayout(context) self.renderpass = self.init_renderpass(context) self.pipeline = self.init_pipeline(context) self.framebuffer = self.init_framebuffer(context) self.drawing = False self.context = None self.projection_matrix = ProjectionMatrix() self.projection_matrix.to_orthographic_2d( 0, 0, context.width, context.height) self.transform_matrix = TransformationMatrix() self.combined_matrix = Matrix4() self.idx = 0 self.matrices_dirty = True self.reload_count = context.reload_count @abstractmethod def init_descriptorlayout(self, context): pass def reload(self, context): self.projection_matrix.to_orthographic_2d( 0, 0, context.width, context.height) self.matrices_dirty = True if not self.custom_out_view: self.out_view = context.final_image_view self.renderpass.destroy(context) self.renderpass = self.init_renderpass(context) self.pipeline.destroy(context) self.pipeline = self.init_pipeline(context) self.framebuffer.destroy(context) self.framebuffer = self.init_framebuffer(context) self.reload_count = context.reload_count def init_indices(self, size): j = 0 indices = self.mesh.indices_array for i in range(0, size * 6, 6): indices[i] = j indices[i + 1] = j + 1 indices[i + 2] = j + 2 indices[i + 3] = j + 2 indices[i + 4] = j + 3 indices[i + 5] = j j += 4 def init_uniform(self, context): matrix_attribute = uniform.UniformAttribute( uniform.UniformShapeType.MATRIX4, vc.DataType.SFLOAT32) uniform_attributes = uniform.UniformAttributes([matrix_attribute]) return uniform.UniformBlock(context, uniform_attributes) def init_commandpool(self, context): return vu.CommandBufferSynchronizedPool(context) def init_renderpass(self, context): attachment = vo.AttachmentDescription( self.out_view.image.format, vc.SampleCount.COUNT_1, vc.AttachmentLoadOp.LOAD, vc.AttachmentStoreOp.STORE, vc.AttachmentLoadOp.DONT_CARE, vc.AttachmentStoreOp.DONT_CARE, vc.ImageLayout.COLOR_ATTACHMENT_OPTIMAL, vc.ImageLayout.COLOR_ATTACHMENT_OPTIMAL) subpass = vo.SubpassDescription([vo.AttachmentReference( 0, vc.ImageLayout.COLOR_ATTACHMENT_OPTIMAL)], [], [], [], []) dependency = vo.SubpassDependency( vc.SUBPASS_EXTERNAL, vc.PipelineStage.COLOR_ATTACHMENT_OUTPUT, vc.Access.NONE, 0, vc.PipelineStage.COLOR_ATTACHMENT_OUTPUT, vc.Access.COLOR_ATTACHMENT_READ | vc.Access.COLOR_ATTACHMENT_WRITE ) return vo.Renderpass(context, [attachment], [subpass], [dependency]) def init_pipelinelayout(self, context): return vo.PipelineLayout(context, [self.descriptorlayout]) def init_pipeline(self, context): vertex_description = vo.VertexInputBindingDescription( 0, 
self.mesh.attributes.size, vc.VertexInputRate.VERTEX) vk_attrs = [] for attr in self.mesh.attributes: vk_attrs.append(vo.VertexInputAttributeDescription( attr.location, 0, attr.format, attr.offset)) vertex_input = vo.PipelineVertexInputState( [vertex_description], vk_attrs) input_assembly = vo.PipelineInputAssemblyState( vc.PrimitiveTopology.TRIANGLE_LIST) viewport = vo.Viewport(0, 0, context.width, context.height, 0, 1) scissor = vo.Rect2D(vo.Offset2D(0, 0), vo.Extent2D(context.width, context.height)) viewport_state = vo.PipelineViewportState([viewport], [scissor]) rasterization = vo.PipelineRasterizationState( False, vc.PolygonMode.FILL, 1, vc.CullMode.BACK, vc.FrontFace.COUNTER_CLOCKWISE, 0, 0, 0) multisample = vo.PipelineMultisampleState( False, vc.SampleCount.COUNT_1, 0) depth = None blend_attachment = vo.PipelineColorBlendAttachmentState( True, vc.BlendFactor.SRC_ALPHA, vc.BlendFactor.ONE_MINUS_SRC_ALPHA, vc.BlendOp.ADD, vc.BlendFactor.SRC_ALPHA, vc.BlendFactor.ONE_MINUS_SRC_ALPHA, vc.BlendOp.ADD, vc.ColorComponent.R | vc.ColorComponent.G | vc.ColorComponent.B | vc.ColorComponent.A ) blend = vo.PipelineColorBlendState( False, vc.LogicOp.COPY, [blend_attachment], [0, 0, 0, 0]) dynamic = None return vo.Pipeline( context, self.shaderprogram.stages, vertex_input, input_assembly, viewport_state, rasterization, multisample, depth, blend, dynamic, self.pipelinelayout, self.renderpass) def init_framebuffer(self, context): return vo.Framebuffer( context, self.renderpass, [self.out_view], context.width, context.height, 1) def begin(self, context, semaphores=None): if self.drawing: raise Exception("Currently drawing") if self.reload_count != context.reload_count: raise Exception("Batch not reloaded, can't draw") if self.matrices_dirty: self.upload_matrices(context) self.drawing = True self.context = context self.cbpool.begin(context, semaphores) def end(self): if not self.drawing: raise Exception("Not currently drawing") self.flush() self.drawing = False self.context = None return self.cbpool.end() def upload_matrices(self, context): self.combined_matrix.set(self.projection_matrix) self.combined_matrix.mul(self.transform_matrix) self.uniformblock.set_uniform(0, self.combined_matrix.values) self.uniformblock.upload(context) self.matrices_dirty = False def update_transform(self, matrix): self.transform_matrix.set(matrix) self.matrices_dirty = True def update_projection(self, matrix): self.projection_matrix.set(matrix) self.matrices_dirty = True class BlockProperty(): def __init__(self): self.x = 0 self.y = 0 self.width = 0 self.height = 0 self.colors = [[1] * 4] * 4 self.scale = [1] * 2 self.rotation = 0 self.border_widths = [0] * 4 self.border_radius = [0] * 4 self.border_colors = [[1] * 4] * 4 class BlockBatch(BaseBatch): def __init__(self, context, size=1000, shaderprogram=None, out_view=None): super().__init__(context, size, shaderprogram, out_view) self.descriptorsets = self.init_descriptorsets(context) def init_mesh(self, context, size): vertex_attributes = me.VertexAttributes([ me.VertexAttribute(0, vc.Format.R32G32_SFLOAT), me.VertexAttribute(1, vc.Format.R32G32_SFLOAT), me.VertexAttribute(2, vc.Format.R32G32B32A32_SFLOAT), me.VertexAttribute(3, vc.Format.R32G32B32A32_SFLOAT), me.VertexAttribute(4, vc.Format.R32G32B32A32_SFLOAT), me.VertexAttribute(5, vc.Format.R32G32B32A32_SFLOAT), me.VertexAttribute(6, vc.Format.R32G32B32A32_SFLOAT), me.VertexAttribute(7, vc.Format.R32G32B32A32_SFLOAT), me.VertexAttribute(8, vc.Format.R32G32B32A32_SFLOAT) ]) return me.Mesh(context, size * 4, size * 6, 
vertex_attributes) def init_descriptorpool(self, context): size = 1 pool_sizes = [vo.DescriptorPoolSize( vc.DescriptorType.UNIFORM_BUFFER, size)] return vo.DescriptorPool(context, pool_sizes, size) def init_descriptorlayout(self, context): ubo_descriptor = vo.DescriptorSetLayoutBinding( 0, vc.DescriptorType.UNIFORM_BUFFER, 1, vc.ShaderStage.VERTEX, None) bindings = [ubo_descriptor] return vo.DescriptorSetLayout(context, bindings) def init_descriptorsets(self, context): descriptorsets = self.descriptorpool.allocate_descriptorsets( context, 1, [self.descriptorlayout]) descriptorub_info = vo.DescriptorBufferInfo( self.uniformblock.uniform_buffer.final_buffer, 0, self.uniformblock.size) descriptorub_write = vo.WriteDescriptorSet( descriptorsets[0], 0, 0, vc.DescriptorType.UNIFORM_BUFFER, [descriptorub_info]) vo.update_descriptorsets(context, [descriptorub_write], []) return descriptorsets def get_default_shaderprogram(self, context): vs = path.join(PATH_VULK_SHADER, "block.vs.glsl") fs = path.join(PATH_VULK_SHADER, "block.fs.glsl") shaders_mapping = { vc.ShaderStage.VERTEX: vs, vc.ShaderStage.FRAGMENT: fs } return vo.ShaderProgramGlslFile(context, shaders_mapping) def flush(self): if not self.idx: return if not self.drawing: raise Exception("Not currently drawing") self.mesh.upload(self.context) blocks_in_batch = self.idx / 4 indices_count = int(blocks_in_batch) * 6 with self.cbpool.pull() as cmd: width = self.context.width height = self.context.height cmd.begin_renderpass( self.renderpass, self.framebuffer, vo.Rect2D(vo.Offset2D(0, 0), vo.Extent2D(width, height)), [] ) cmd.bind_pipeline(self.pipeline) self.mesh.bind(cmd) cmd.bind_descriptor_sets(self.pipelinelayout, 0, self.descriptorsets, []) self.mesh.draw(cmd, 0, indices_count) cmd.end_renderpass() self.idx = 0 def draw(self, properties): if not self.drawing: raise Exception("Not currently drawing") width = properties.width * properties.scale[0] height = properties.height * properties.scale[1] x = properties.x y = properties.y x2 = x + width y2 = y + height p1x, p2x, p3x, p4x = x, x, x2, x2 p1y, p2y, p3y, p4y = y, y2, y2, y rotation = properties.rotation if rotation: cos = math.cos(rotation) sin = math.sin(rotation) w1 = -width / 2 w2 = width / 2 h1 = -height / 2 h2 = height / 2 x1 = cos * w1 - sin * h1 y1 = sin * w1 + cos * h1 x2 = cos * w1 - sin * h2 y2 = sin * w1 + cos * h2 x3 = cos * w2 - sin * h2 y3 = sin * w2 + cos * h2 x4 = x1 + (x3 - x2) y4 = y3 - (y2 - y1) x1 += p1x x2 += p1x x3 += p1x x4 += p1x y1 += p1y y2 += p1y y3 += p1y y4 += p1y else: x1, x2, x3, x4 = p1x, p2x, p3x, p4x y1, y2, y3, y4 = p1y, p2y, p3y, p4y c = properties.colors bw = properties.border_widths bct = properties.border_colors[0] bcr = properties.border_colors[1] bcb = properties.border_colors[2] bcl = properties.border_colors[3] br = properties.border_radius for val in [([x1, y1], [0, 0], c[0], bw, bct, bcr, bcb, bcl, br), ([x2, y2], [0, 1], c[3], bw, bct, bcr, bcb, bcl, br), ([x3, y3], [1, 1], c[2], bw, bct, bcr, bcb, bcl, br), ([x4, y4], [1, 0], c[1], bw, bct, bcr, bcb, bcl, br)]: self.mesh.set_vertex(self.idx, val) self.idx += 1 class SpriteBatchDescriptorPool(): def __init__(self, descriptorpool, descriptorlayout): self.descriptorsets = [] self.descriptorset_id = -1 self.descriptorpool = descriptorpool self.descriptorlayout = descriptorlayout def pull(self, context): self.descriptorset_id += 1 try: descriptorset = self.descriptorsets[self.descriptorset_id] except IndexError: descriptorset = self.descriptorpool.allocate_descriptorsets( context, 1, 
[self.descriptorlayout])[0] self.descriptorsets.append(descriptorset) return descriptorset def reset(self): self.descriptorset_id = -1 class SpriteBatch(BaseBatch): def __init__(self, context, size=1000, shaderprogram=None, out_view=None): super().__init__(context, size, shaderprogram, out_view) self.dspool = self.init_dspool() self.last_texture = None def init_mesh(self, context, size): vertex_attributes = me.VertexAttributes([ me.VertexAttribute(0, vc.Format.R32G32_SFLOAT), me.VertexAttribute(1, vc.Format.R32G32_SFLOAT), me.VertexAttribute(2, vc.Format.R32G32B32A32_SFLOAT) ]) return me.Mesh(context, size * 4, size * 6, vertex_attributes) def init_descriptorpool(self, context): size = 8 type_uniform = vc.DescriptorType.UNIFORM_BUFFER type_sampler = vc.DescriptorType.COMBINED_IMAGE_SAMPLER pool_sizes = [ vo.DescriptorPoolSize(type_uniform, size), vo.DescriptorPoolSize(type_sampler, size) ] return vo.DescriptorPool(context, pool_sizes, size)
Apache License 2.0
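The two bindings built by init_descriptorlayout above pair a vertex-stage uniform buffer at binding 0 with a fragment-stage combined image sampler at binding 1; the sketch below repeats that pairing as a free function using the same vulk calls shown in the record. It still needs a live VulkContext to actually execute.

from vulk import vulkanconstant as vc
from vulk import vulkanobject as vo

def make_sprite_descriptorlayout(context):
    # binding 0: combined projection/transform matrix read by the vertex shader
    ubo = vo.DescriptorSetLayoutBinding(
        0, vc.DescriptorType.UNIFORM_BUFFER, 1, vc.ShaderStage.VERTEX, None)
    # binding 1: sprite texture sampled by the fragment shader
    tex = vo.DescriptorSetLayoutBinding(
        1, vc.DescriptorType.COMBINED_IMAGE_SAMPLER, 1, vc.ShaderStage.FRAGMENT, None)
    return vo.DescriptorSetLayout(context, [ubo, tex])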
cslydia/biflag
model/wordsequence.py
WordSequence.forward
python
def forward(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover): word_represent = self.wordrep(word_inputs,feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover) if self.word_feature_extractor == "CNN": batch_size = word_inputs.size(0) word_in = torch.tanh(self.word2cnn(word_represent)).transpose(2,1).contiguous() for idx in range(self.cnn_layer): if idx == 0: cnn_feature = F.relu(self.cnn_list[idx](word_in)) else: cnn_feature = F.relu(self.cnn_list[idx](cnn_feature)) cnn_feature = self.cnn_drop_list[idx](cnn_feature) if batch_size > 1: cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature) feature_out = cnn_feature.transpose(2,1).contiguous() else: packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True) hidden = None lstm_out, hidden = self.lstm(packed_words, hidden) lstm_out, _ = pad_packed_sequence(lstm_out) feature_out = lstm_out.transpose(1,0) feat_1p = self.droplstm(feature_out) outermost = self.extract_outermost(feat_1p, word_seq_lengths) return outermost, feat_1p
input: word_inputs: (batch_size, sent_len) feature_inputs: [(batch_size, sent_len), ...] list of variables word_seq_lengths: list of batch_size, (batch_size,1) char_inputs: (batch_size*sent_len, word_length) char_seq_lengths: list of whole batch_size for char, (batch_size*sent_len, 1) char_seq_recover: variable which records the char order information, used to recover char order output: Variable(batch_size, sent_len, hidden_dim)
https://github.com/cslydia/biflag/blob/e697f779d7e84ca74aa0f669eda8c62b9720cce3/model/wordsequence.py#L73-L110
from __future__ import print_function from __future__ import absolute_import import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from .wordrep import WordRep from .gcn import GCN import math class WordSequence(nn.Module): def __init__(self, data): super(WordSequence, self).__init__() print("build word sequence feature extractor: %s..."%(data.word_feature_extractor)) self.gpu = data.HP_gpu self.use_char = data.use_char self.droplstm = nn.Dropout(data.HP_dropout) self.bilstm_flag = data.HP_bilstm self.lstm_layer = data.HP_lstm_layer self.wordrep = WordRep(data) self.input_size = data.word_emb_dim self.feature_num = data.feature_num self.HP_hidden_dim = data.HP_hidden_dim if self.use_char: self.input_size += data.HP_char_hidden_dim if data.char_feature_extractor == "ALL": self.input_size += data.HP_char_hidden_dim for idx in range(self.feature_num): self.input_size += data.feature_emb_dims[idx] if data.char_feature_extractor == "IntNet": kernel_type = data.HP_intNet_kernel_type self.input_size = data.word_emb_dim + int( (data.HP_intNet_layer - 1) // 2 * (data.char_emb_dim // 2) * kernel_type + data.char_emb_dim * kernel_type) if self.bilstm_flag: lstm_hidden = data.HP_hidden_dim // 2 else: lstm_hidden = data.HP_hidden_dim self.word_feature_extractor = data.word_feature_extractor if self.word_feature_extractor == "GRU": self.lstm = nn.GRU(self.input_size, lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag) elif self.word_feature_extractor == "LSTM": self.lstm = nn.LSTM(self.input_size, lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag) elif self.word_feature_extractor == "CNN": self.word2cnn = nn.Linear(self.input_size, data.HP_hidden_dim) self.cnn_layer = data.HP_cnn_layer print("CNN layer: ", self.cnn_layer) self.cnn_list = nn.ModuleList() self.cnn_drop_list = nn.ModuleList() self.cnn_batchnorm_list = nn.ModuleList() kernel = 3 pad_size = int((kernel-1)/2) for idx in range(self.cnn_layer): self.cnn_list.append(nn.Conv1d(data.HP_hidden_dim, data.HP_hidden_dim, kernel_size=kernel, padding=pad_size)) self.cnn_drop_list.append(nn.Dropout(data.HP_dropout)) self.cnn_batchnorm_list.append(nn.BatchNorm1d(data.HP_hidden_dim)) self.lstm_ne = nn.LSTM(lstm_hidden * 2, lstm_hidden, batch_first=True) self.hidden2tag = nn.Linear(lstm_hidden, data.label_alphabet_size) self.num_ne = data.num_ne self.gcn_layer = data.gcn_layer self.exp0_ne = nn.Linear(lstm_hidden*2, lstm_hidden) self.exp1_ne = nn.Linear(lstm_hidden*2, lstm_hidden) self.fc2rel = nn.Linear(lstm_hidden*2, self.num_ne) self.gcn_fw = nn.ModuleList([GCN(lstm_hidden*2) for _ in range(self.gcn_layer)]) self.gcn_bw = nn.ModuleList([GCN(lstm_hidden*2) for _ in range(self.gcn_layer)]) self.gcn_fw_2 = nn.ModuleList([GCN(lstm_hidden*2) for _ in range(self.gcn_layer)]) self.gcn_bw_2 = nn.ModuleList([GCN(lstm_hidden*2) for _ in range(self.gcn_layer)])
Apache License 2.0
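WordSequence.forward above packs the word representations by their true lengths before the LSTM and unpads afterwards; here is a self-contained PyTorch sketch of just that pack/LSTM/unpack step, with invented tensor sizes standing in for the real embeddings.

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

batch, max_len, emb, hidden = 3, 5, 8, 6
word_represent = torch.randn(batch, max_len, emb)
word_seq_lengths = torch.tensor([5, 4, 2])     # sorted longest-first, as the model expects

lstm = torch.nn.LSTM(emb, hidden // 2, num_layers=1,
                     batch_first=True, bidirectional=True)

packed = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
lstm_out, _ = lstm(packed, None)
lstm_out, _ = pad_packed_sequence(lstm_out)
feature_out = lstm_out.transpose(1, 0)         # back to (batch, sent_len, hidden)
print(feature_out.shape)                       # torch.Size([3, 5, 6])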
wolfinabox/steelseries-oled-display-mirror
oled_display_mirror/gamesense.py
GameSense.unregister_game
python
def unregister_game(self, game_name: str = None) -> requests.Response: try: return self._post(self.ep.REMOVE_GAME, { "game": game_name or self.game }) except Exception as e: raise
Unregisters the game with SteelSeries engine
https://github.com/wolfinabox/steelseries-oled-display-mirror/blob/bf87cfc1aa5b8f6ef9fa27887124b631124a280d/oled_display_mirror/gamesense.py#L64-L73
import time import os import itertools import json import requests class Endpoints: REGISTER_GAME = '/game_metadata' REMOVE_GAME = '/remove_game' REGISTER_EVENT = '/register_game_event' BIND_EVENT = '/bind_game_event' REMOVE_EVENT = '/remove_game_event' SEND_EVENT = '/game_event' HEARTBEAT = '/game_heartbeat' class GameSense(): def __init__(self, game: str = None, game_display_name: str = None, developer=None, deinitialize_timer_length_ms=None): self.game = game self.game_display_name = game_display_name or self.game self.developer = developer self.deinitialize_timer_length_ms = deinitialize_timer_length_ms self.req_url = self.get_req_url() self.ep = Endpoints self.value_cycler = itertools.cycle(range(1, 100)) self.timeout=1 def _post(self, endpoint: str, data: dict = None, url: str = None) -> requests.Response: if not url: url = self.req_url if not isinstance(data, str): data = json.dumps(data) try: r= requests.post(url+endpoint, data=data or '{}', headers={'content-type': 'application/json'}) except requests.ConnectionError as e: print(e) raise else: return r def get_req_url(self) -> str: path = os.path.expandvars( r'%programdata%\SteelSeries\SteelSeries Engine 3\coreProps.json') return 'http://'+json.load(open(path))['address'] def register_game(self, game_name: str = None, reset=False) -> requests.Response: if reset: self.unregister_game() return self._post(self.ep.REGISTER_GAME, { "game": game_name or self.game, "game_display_name": self.game_display_name, "developer": self.developer })
MIT License
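A hedged usage sketch of unregister_game: the game identifiers are made up, the import path follows the record's file path, and the calls only succeed when SteelSeries Engine is running locally (the constructor reads coreProps.json to locate it).

from oled_display_mirror.gamesense import GameSense

gs = GameSense(game='MY_DEMO_GAME', game_display_name='My Demo Game',
               developer='example')
gs.register_game()    # create the app entry in SteelSeries Engine
gs.unregister_game()  # remove it again via /remove_game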
openstack/cinder
cinder/volume/drivers/zadara/common.py
ZadaraVPSAConnection._detach_vpsa_volume
python
def _detach_vpsa_volume(self, vpsa_vol, vpsa_srv=None): if vpsa_srv: list_servers_ids = [vpsa_srv] else: list_servers_ids = self._get_servers_attached_to_volume(vpsa_vol) for server_id in list_servers_ids: self.send_cmd('detach_volume', vpsa_srv=server_id, vpsa_vol=vpsa_vol['name'])
Detach volume from all attached servers.
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/drivers/zadara/common.py#L476-L486
import json import re from oslo_config import cfg from oslo_log import log as logging import requests LOG = logging.getLogger(__name__) vpsa_timeout = 300 class CommonException(Exception): def __init__(self): pass class UnknownCmd(Exception): def __init__(self, cmd): self.cmd = cmd class BadHTTPResponseStatus(Exception): def __init__(self, status): self.status = status class FailedCmdWithDump(Exception): def __init__(self, status, data): self.status = status self.data = data class SessionRequestException(Exception): def __init__(self, msg): self.msg = msg class ZadaraInvalidAccessKey(Exception): pass exception = CommonException() zadara_opts = [ cfg.HostAddressOpt('zadara_vpsa_host', default=None, help='VPSA - Management Host name or IP address'), cfg.PortOpt('zadara_vpsa_port', default=None, help='VPSA - Port number'), cfg.BoolOpt('zadara_vpsa_use_ssl', default=False, help='VPSA - Use SSL connection'), cfg.BoolOpt('zadara_ssl_cert_verify', default=True, help='If set to True the http client will validate the SSL ' 'certificate of the VPSA endpoint.'), cfg.StrOpt('zadara_access_key', default=None, help='VPSA access key', secret=True), cfg.StrOpt('zadara_vpsa_poolname', default=None, help='VPSA - Storage Pool assigned for volumes'), cfg.BoolOpt('zadara_vol_encrypt', default=False, help='VPSA - Default encryption policy for volumes. ' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_gen3_vol_dedupe', default=False, help='VPSA - Enable deduplication for volumes. ' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_gen3_vol_compress', default=False, help='VPSA - Enable compression for volumes. ' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_default_snap_policy', default=False, help="VPSA - Attach snapshot policy for volumes. 
" "If the option is neither configured nor provided " "as metadata, the VPSA will inherit the default value.")] class ZadaraVPSAConnection(object): def __init__(self, conf, driver_ssl_cert_path, block): self.conf = conf self.access_key = conf.zadara_access_key if not self.access_key: raise exception.ZadaraInvalidAccessKey() self.driver_ssl_cert_path = driver_ssl_cert_path self.vol_type_str = 'showonlyblock' if block else 'showonlyfile' self.vpsa_commands = { 'create_volume': lambda kwargs: ( 'POST', '/api/volumes.json', {'name': kwargs.get('name'), 'capacity': kwargs.get('size'), 'pool': self.conf.zadara_vpsa_poolname, 'block': 'YES' if self.vol_type_str == 'showonlyblock' else 'NO', 'thin': 'YES', 'crypt': 'YES' if self.conf.zadara_vol_encrypt else 'NO', 'compress': 'YES' if self.conf.zadara_gen3_vol_compress else 'NO', 'dedupe': 'YES' if self.conf.zadara_gen3_vol_dedupe else 'NO', 'attachpolicies': 'NO' if not self.conf.zadara_default_snap_policy else 'YES'}), 'delete_volume': lambda kwargs: ( 'DELETE', '/api/volumes/%s.json' % kwargs.get('vpsa_vol'), {'force': 'YES'}), 'expand_volume': lambda kwargs: ( 'POST', '/api/volumes/%s/expand.json' % kwargs.get('vpsa_vol'), {'capacity': kwargs.get('size')}), 'rename_volume': lambda kwargs: ( 'POST', '/api/volumes/%s/rename.json' % kwargs.get('vpsa_vol'), {'new_name': kwargs.get('new_name')}), 'create_snapshot': lambda kwargs: ( 'POST', '/api/consistency_groups/%s/snapshots.json' % kwargs.get('cg_name'), {'display_name': kwargs.get('snap_name')}), 'delete_snapshot': lambda kwargs: ( 'DELETE', '/api/snapshots/%s.json' % kwargs.get('snap_id'), {}), 'rename_snapshot': lambda kwargs: ( 'POST', '/api/snapshots/%s/rename.json' % kwargs.get('snap_id'), {'newname': kwargs.get('new_name')}), 'create_clone_from_snap': lambda kwargs: ( 'POST', '/api/consistency_groups/%s/clone.json' % kwargs.get('cg_name'), {'name': kwargs.get('name'), 'snapshot': kwargs.get('snap_id')}), 'create_clone': lambda kwargs: ( 'POST', '/api/consistency_groups/%s/clone.json' % kwargs.get('cg_name'), {'name': kwargs.get('name')}), 'create_server': lambda kwargs: ( 'POST', '/api/servers.json', {'iqn': kwargs.get('iqn'), 'iscsi': kwargs.get('iscsi_ip'), 'display_name': kwargs.get('iqn') if kwargs.get('iqn') else kwargs.get('iscsi_ip')}), 'attach_volume': lambda kwargs: ( 'POST', '/api/servers/%s/volumes.json' % kwargs.get('vpsa_srv'), {'volume_name[]': kwargs.get('vpsa_vol'), 'access_type': kwargs.get('share_proto'), 'readonly': kwargs.get('read_only'), 'force': 'YES'}), 'detach_volume': lambda kwargs: ( 'POST', '/api/volumes/%s/detach.json' % kwargs.get('vpsa_vol'), {'server_name[]': kwargs.get('vpsa_srv'), 'force': 'YES'}), 'update_volume': lambda kwargs: ( 'POST', '/api/volumes/%s/update_comment.json' % kwargs.get('vpsa_vol'), {'new_comment': kwargs.get('new_comment')}), 'list_volumes': lambda kwargs: ( 'GET', '/api/volumes.json?%s=YES' % self.vol_type_str, {}), 'get_volume': lambda kwargs: ( 'GET', '/api/volumes/%s.json' % kwargs.get('vpsa_vol'), {}), 'get_volume_by_name': lambda kwargs: ( 'GET', '/api/volumes.json?display_name=%s' % kwargs.get('display_name'), {}), 'get_pool': lambda kwargs: ( 'GET', '/api/pools/%s.json' % kwargs.get('pool_name'), {}), 'list_controllers': lambda kwargs: ( 'GET', '/api/vcontrollers.json', {}), 'list_servers': lambda kwargs: ( 'GET', '/api/servers.json', {}), 'list_vol_snapshots': lambda kwargs: ( 'GET', '/api/consistency_groups/%s/snapshots.json' % kwargs.get('cg_name'), {}), 'list_vol_attachments': lambda kwargs: ( 'GET', 
'/api/volumes/%s/servers.json' % kwargs.get('vpsa_vol'), {}), 'list_snapshots': lambda kwargs: ( 'GET', '/api/snapshots.json', {}), 'change_export_name': lambda kwargs: ( 'PUT', '/api/volumes/%s/export_name.json' % kwargs.get('vpsa_vol'), {'exportname': kwargs.get('exportname')})} def _generate_vpsa_cmd(self, cmd, **kwargs): try: method, url, params = self.vpsa_commands[cmd](kwargs) metadata = kwargs.get('metadata') if metadata: for key, value in metadata.items(): params[key] = value except KeyError: raise exception.UnknownCmd(cmd=cmd) if method == 'GET': params = dict(page=1, start=0, limit=0) body = None elif method in ['DELETE', 'POST', 'PUT']: body = params params = None else: msg = ('Method %(method)s is not defined' % {'method': method}) LOG.error(msg) raise AssertionError(msg) headers = {'X-Access-Key': self.access_key} return method, url, params, body, headers def send_cmd(self, cmd, **kwargs): if not self.access_key: raise exception.ZadaraInvalidAccessKey() method, url, params, body, headers = self._generate_vpsa_cmd(cmd, **kwargs) LOG.debug('Invoking %(cmd)s using %(method)s request.', {'cmd': cmd, 'method': method}) host = self._get_target_host(self.conf.zadara_vpsa_host) port = int(self.conf.zadara_vpsa_port) protocol = "https" if self.conf.zadara_vpsa_use_ssl else "http" if protocol == "https": if not self.conf.zadara_ssl_cert_verify: verify = False else: verify = (self.driver_ssl_cert_path if self.driver_ssl_cert_path else True) else: verify = False if port: api_url = "%s://%s:%d%s" % (protocol, host, port, url) else: api_url = "%s://%s%s" % (protocol, host, url) try: with requests.Session() as session: session.headers.update(headers) response = session.request(method, api_url, params=params, data=body, headers=headers, verify=verify, timeout=vpsa_timeout) except requests.exceptions.RequestException as e: msg = ('Exception: %s') % e raise exception.SessionRequestException(msg=msg) if response.status_code != 200: raise exception.BadHTTPResponseStatus( status=response.status_code) data = response.content json_data = json.loads(data) response = json_data['response'] status = int(response['status']) if status == 5: raise exception.ZadaraInvalidAccessKey() if status != 0: raise exception.FailedCmdWithDump(status=status, data=data) LOG.debug('Operation completed with status code %(status)s', {'status': status}) return response def _get_target_host(self, vpsa_host): ipv6_without_brackets = ':' in vpsa_host and vpsa_host[-1] != ']' if ipv6_without_brackets: return ('[%s]' % vpsa_host) return ('%s' % vpsa_host) def _get_active_controller_details(self): data = self.send_cmd('list_controllers') ctrl = None vcontrollers = data.get('vcontrollers', []) for controller in vcontrollers: if controller['state'] == 'active': ctrl = controller break if ctrl is not None: target_ip = (ctrl['iscsi_ipv6'] if ctrl['iscsi_ipv6'] else ctrl['iscsi_ip']) return dict(target=ctrl['target'], ip=target_ip, chap_user=ctrl['vpsa_chap_user'], chap_passwd=ctrl['vpsa_chap_secret']) return None def _check_access_key_validity(self): if not self.access_key: raise exception.ZadaraInvalidAccessKey() active_ctrl = self._get_active_controller_details() if active_ctrl is None: raise exception.ZadaraInvalidAccessKey() def _get_vpsa_volume(self, name): volume = None display_name = name if re.search(r"\s", name): display_name = re.split(r"\s", name)[0] data = self.send_cmd('get_volume_by_name', display_name=display_name) if data['status'] != 0: return None volumes = data['volumes'] for vol in volumes: if vol['display_name'] 
== name: volume = vol break return volume def _get_vpsa_volume_by_id(self, vpsa_vol): data = self.send_cmd('get_volume', vpsa_vol=vpsa_vol) return data['volume'] def _get_volume_cg_name(self, name): volume = self._get_vpsa_volume(name) if volume is not None: return volume['cg_name'] return None def _get_all_vpsa_snapshots(self): data = self.send_cmd('list_snapshots') return data['snapshots'] def _get_all_vpsa_volumes(self): data = self.send_cmd('list_volumes') volumes = [] for volume in data['volumes']: if volume['pool_name'] == self.conf.zadara_vpsa_poolname: volumes.append(volume) return volumes def _get_server_name(self, initiator, share): data = self.send_cmd('list_servers') servers = data.get('servers', []) for server in servers: if share: if server['iscsi_ip'] == initiator: return server['name'] else: if server['iqn'] == initiator: return server['name'] return None def _create_vpsa_server(self, iqn=None, iscsi_ip=None): initiator = iscsi_ip if iscsi_ip else iqn share = True if iscsi_ip else False vpsa_srv = self._get_server_name(initiator, share) if not vpsa_srv: data = self.send_cmd('create_server', iqn=iqn, iscsi_ip=iscsi_ip) if data['status'] != 0: return None vpsa_srv = data['server_name'] return vpsa_srv def _get_servers_attached_to_volume(self, vpsa_vol): servers = vpsa_vol.get('server_ext_names') list_servers = [] if servers: list_servers = servers.split(',') return list_servers
Apache License 2.0
pygfx/wgpu-py
wgpu/base.py
GPUCanvasContext.get_preferred_format
python
def get_preferred_format(self, adapter): return "bgra8unorm-srgb"
Get the preferred swap chain format.
https://github.com/pygfx/wgpu-py/blob/3cc6f36684b63b683b9d24cc14b7dde5d16f11b2/wgpu/base.py#L177-L179
import weakref import logging from typing import List, Dict from ._coreutils import ApiDiff from . import flags, enums, structs __all__ = [ "GPUObjectBase", "GPU", "GPUAdapter", "GPUDevice", "GPUBuffer", "GPUTexture", "GPUTextureView", "GPUExternalTexture", "GPUSampler", "GPUBindGroupLayout", "GPUBindGroup", "GPUPipelineLayout", "GPUShaderModule", "GPUCompilationMessage", "GPUCompilationInfo", "GPUPipelineBase", "GPUComputePipeline", "GPURenderPipeline", "GPUCommandBuffer", "GPUCommandEncoder", "GPUProgrammablePassEncoder", "GPUComputePassEncoder", "GPURenderEncoderBase", "GPURenderPassEncoder", "GPURenderBundle", "GPURenderBundleEncoder", "GPUQueue", "GPUQuerySet", "GPUCanvasContext", "GPUDeviceLostInfo", "GPUOutOfMemoryError", "GPUValidationError", "GPUUncapturedErrorEvent", ] logger = logging.getLogger("wgpu") apidiff = ApiDiff() DEFAULT_ADAPTER_LIMITS = { "max_texture_dimension1d": 8192, "max_texture_dimension2d": 8192, "max_texture_dimension3d": 2048, "max_texture_array_layers": 2048, "max_bind_groups": 4, "max_dynamic_uniform_buffers_per_pipeline_layout": 8, "max_dynamic_storage_buffers_per_pipeline_layout": 4, "max_sampled_textures_per_shader_stage": 16, "max_samplers_per_shader_stage": 16, "max_storage_buffers_per_shader_stage": 4, "max_storage_textures_per_shader_stage": 4, "max_uniform_buffers_per_shader_stage": 12, "max_uniform_buffer_binding_size": 16384, "max_storage_buffer_binding_size": 134217728, "max_vertex_buffers": 8, "max_vertex_attributes": 16, "max_vertex_buffer_array_stride": 2048, } class GPU: @apidiff.change("arguments include a canvas object") def request_adapter(self, *, canvas, power_preference=None): raise RuntimeError( "Select a backend (by importing wgpu.rs) before requesting an adapter!" ) @apidiff.change("arguments include a canvas object") async def request_adapter_async(self, *, canvas, power_preference=None): raise RuntimeError( "Select a backend (by importing wgpu.rs) before requesting an adapter!" ) class GPUCanvasContext: def __init__(self, canvas): self._canvas_ref = weakref.ref(canvas) def _get_canvas(self): return self._canvas_ref() @property def canvas(self): return self._canvas_ref() def configure( self, *, device: "GPUDevice", format: "enums.TextureFormat", usage: "flags.TextureUsage" = 0x10, color_space: "enums.PredefinedColorSpace" = "srgb", compositing_alpha_mode: "enums.CanvasCompositingAlphaMode" = "opaque", size: "structs.Extent3D" = None, ): self.unconfigure() self._device = device self._format = format or self.get_preferred_format(device.adapter) self._usage = usage or flags.TextureUsage.RENDER_ATTACHMENT self._color_space = color_space self._compositing_alpha_mode = compositing_alpha_mode self._size = size def unconfigure(self): self._device = None self._format = None self._usage = None self._color_space = None self._compositing_alpha_mode = None self._size = None
BSD 2-Clause Simplified License
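In the record above, get_preferred_format feeds the format argument of configure; backend subclasses override it per adapter, while the base class simply returns the bgra8unorm-srgb default. The sketch below exercises only the base class, so DummyCanvas and the None device are stand-ins, not how a real canvas or device is obtained.

from wgpu.base import GPUCanvasContext

class DummyCanvas:
    # Placeholder; a real canvas comes from wgpu.gui and provides a surface.
    pass

ctx = GPUCanvasContext(DummyCanvas())
fmt = ctx.get_preferred_format(adapter=None)   # base class ignores the adapter
ctx.configure(device=None, format=fmt)         # stores the format and the default usage
print(fmt, hex(ctx._usage))                    # bgra8unorm-srgb 0x10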
tongchangd/text_data_enhancement_with_lasertagger
official_transformer/model_utils.py
get_padding
python
def get_padding(x, padding_value=0, dtype=tf.float32): with tf.name_scope("padding"): return tf.cast(tf.equal(x, padding_value), dtype)
Return float tensor representing the padding values in x. Args: x: int tensor with any shape padding_value: int value that marks padded positions in x dtype: The dtype of the return value. Returns: float tensor with same shape as x containing values 0 or 1. 0 -> non-padding, 1 -> padding
https://github.com/tongchangd/text_data_enhancement_with_lasertagger/blob/b8286196e2f0e1decf73da79c665f25bf8a0ff45/official_transformer/model_utils.py#L88-L101
from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np import tensorflow as tf _NEG_INF_FP32 = -1e9 _NEG_INF_FP16 = np.finfo(np.float16).min def get_position_encoding( length, hidden_size, min_timescale=1.0, max_timescale=1.0e2): position = tf.cast(tf.range(length), tf.float32) num_timescales = hidden_size // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.cast(num_timescales, tf.float32) - 1)) inv_timescales = min_timescale * tf.exp( tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) return signal def get_decoder_self_attention_bias(length, dtype=tf.float32): neg_inf = _NEG_INF_FP16 if dtype == tf.float16 else _NEG_INF_FP32 with tf.name_scope("decoder_self_attention_bias"): valid_locs = tf.linalg.band_part(input=tf.ones([length, length], dtype=dtype), num_lower=-1, num_upper=0) valid_locs = tf.reshape(valid_locs, [1, 1, length, length]) decoder_bias = neg_inf * (1.0 - valid_locs) return decoder_bias
Apache License 2.0
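A minimal usage sketch for get_padding above. The call is illustrative, not from the repository; it assumes TensorFlow is installed and that the function is importable from official_transformer.model_utils as in the path listed for this record.

import tensorflow as tf
from official_transformer.model_utils import get_padding  # path as listed in this record

# Two sequences padded with zeros; get_padding marks padded positions with 1.0.
x = tf.constant([[7, 5, 0, 0],
                 [3, 0, 0, 0]])
mask = get_padding(x)
# mask == [[0., 0., 1., 1.],
#          [0., 1., 1., 1.]]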
slackha/pyjac
pyjac/functional_tester/test.py
run_pasr
python
def run_pasr(pasr_input_file, mech_filename, pasr_output_file=None):
    pasr_input = pasr.parse_input_file(pasr_input_file)
    state_data = pasr.run_simulation(
        mech_filename, pasr_input['case'], pasr_input['temperature'],
        pasr_input['pressure'], pasr_input['equivalence ratio'],
        pasr_input['fuel'], pasr_input['oxidizer'],
        pasr_input['complete products'], pasr_input['number of particles'],
        pasr_input['residence time'], pasr_input['mixing time'],
        pasr_input['pairing time'], pasr_input['number of residence times']
        )
    if pasr_output_file:
        np.save(pasr_output_file, state_data)
    return state_data
Run PaSR simulation to get thermochemical data for testing.

Parameters
----------
pasr_input_file : str
    Name of PaSR input file in YAML format
mech_filename : str
    Name of Cantera-format mechanism file
pasr_output_file : str
    Optional; filename for saving PaSR output data

Returns
-------
state_data : ``numpy.array``
    Array with state data (time, temperature, pressure, mass fractions)
https://github.com/slackha/pyjac/blob/5f54b2d6c04e1ee62d9db8bbf7553ad785f58c33/pyjac/functional_tester/test.py#L238-L276
from __future__ import division from __future__ import print_function import os import re import sys import subprocess import pickle from argparse import ArgumentParser import multiprocessing import glob import numpy as np try: import cantera as ct from cantera import ck2cti except ImportError: print('Error: Cantera must be installed.') raise from .. import utils from ..core.create_jacobian import create_jacobian from . import partially_stirred_reactor as pasr from ..pywrap import generate_wrapper cmd_compile = dict(c='gcc', cuda='nvcc', fortran='gfortran' ) flags = dict(c=['-std=c99'], cuda=['-arch=sm_20', '-I/usr/local/cuda/include/', '-I/usr/local/cuda/samples/common/inc/', '-dc'], fortran='') libs = dict(c=['-lm', '-std=c99'], cuda='-arch=sm_20', fortran='') class ReactorConstPres(object): def __init__(self, gas): self.gas = gas self.P = gas.P def __call__(self): rho = self.gas.density wdot = self.gas.net_production_rates dTdt = - (np.dot(self.gas.partial_molar_enthalpies, wdot) / (rho * self.gas.cp) ) dYdt = wdot * self.gas.molecular_weights / rho return np.hstack((dTdt, dYdt)) class ReactorConstVol(object): def __init__(self, gas): self.gas = gas self.density = gas.density def __call__(self): wdot = self.gas.net_production_rates dTdt = - (np.dot(self.gas.partial_molar_int_energies, wdot) / (self.density * self.gas.cv) ) dYdt = wdot * self.gas.molecular_weights / self.density return np.hstack((dTdt, dYdt)) def convert_mech(mech_filename, therm_filename=None): arg = ['--input=' + mech_filename] if therm_filename is not None: arg.append('--thermo=' + therm_filename) arg.append('--permissive') ck2cti.main(arg) mech_filename = mech_filename[:-4] + '.cti' print('Mechanism conversion successful, written to ' '{}'.format(mech_filename) ) return mech_filename class AutodiffJacob(object): def __init__(self, pressure, fwd_spec_map): self.pres = pressure self.fwd_spec_map = fwd_spec_map import adjacob self.jac = adjacob def eval_jacobian(self, gas): y = np.hstack((gas.T, gas.Y[self.fwd_spec_map][:-1])) jacob = np.zeros((gas.n_species * gas.n_species)) self.jac.ad_eval_jacobian(0, gas.P, y, jacob) return jacob def is_pdep(rxn): return (isinstance(rxn, ct.ThreeBodyReaction) or isinstance(rxn, ct.FalloffReaction) or isinstance(rxn, ct.ChemicallyActivatedReaction) )
MIT License
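An illustrative call to run_pasr above. The file names are hypothetical placeholders; the YAML file must define the keys the function reads ('case', 'temperature', 'pressure', 'equivalence ratio', 'fuel', 'oxidizer', 'complete products', 'number of particles', 'residence time', 'mixing time', 'pairing time', 'number of residence times'), and Cantera plus the pyjac package are assumed to be installed.

from pyjac.functional_tester.test import run_pasr  # module path as listed in this record

state_data = run_pasr('pasr_input.yaml',            # hypothetical PaSR input file
                      'mech.cti',                   # hypothetical Cantera mechanism
                      pasr_output_file='pasr_out.npy')
print(state_data.shape)  # thermochemical states saved for later functional testing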
uoft-ecosystem/rlscope
rlscope/parser/one_off_plot.py
TrtexecExperiment._plot_batch_size_vs_metric
python
def _plot_batch_size_vs_metric(self, title, cupti_metric, streams, ylabel=None, suffix=None):
    if self.trtexec_gpu_hw_df is None:
        return
    """
    WANT:
    x_field: batch_size
    y_field: metric_value
    group_field: num_threads
    """
    plot_df = pd.DataFrame(columns=['batch_size', 'metric_value', 'config'])
    if self.trtexec_gpu_hw_df is not None:
        df = copy.copy(self.trtexec_gpu_hw_df)
        df = df[df['streams'] == streams]
        df = keep_cupti_metric(df, cupti_metric)
        add_gpu_hw_fields(df)
        df = self._add_config(df, df_type='trtexec')
        plot_df = plot_df.append(df[plot_df.columns])
    if self.tf_inference_gpu_hw_df is not None:
        df = copy.copy(self.tf_inference_gpu_hw_df)
        df = df[df['range_name'] == 'inference_loop/inference']
        df = keep_cupti_metric(df, cupti_metric)
        add_gpu_hw_fields(df)
        df = self._add_config(df, df_type='tf_inference')
        plot_df = plot_df.append(df[plot_df.columns])
    plot_df.sort_values(by=['config', 'batch_size'], inplace=True)
    sns.set(style="whitegrid")
    g = sns.catplot(x="batch_size", y="metric_value", hue="config",
                    data=plot_df, kind="bar", palette="muted")
    g.despine(left=True)
    if ylabel is None:
        ylabel = CUPTI_METRIC_Y_LABEL[cupti_metric]
    g.set_ylabels(ylabel)
    g.set_xlabels(BATCH_SIZE_X_LABEL)
    g.fig.suptitle(title)
    g.fig.subplots_adjust(top=0.90)
    if suffix is None:
        suffix = ""
    save_plot(plot_df, _j(self.args['trtexec_dir'], f'batch_size_vs_{cupti_metric}.streams_{streams}{suffix}.svg'))
WANT:
x_field: batch_size
y_field: metric_value
group_field: num_threads
https://github.com/uoft-ecosystem/rlscope/blob/cdd9bbdc2a3a832be24f20105b8c9fe28149cb63/rlscope/parser/one_off_plot.py#L620-L679
from rlscope.profiler.rlscope_logging import logger import argparse import traceback import bdb import copy import re import sys import itertools import os import csv import textwrap import pprint import math from io import StringIO import json import codecs import pandas as pd from rlscope.parser.plot_utils import setup_matplotlib setup_matplotlib() import matplotlib import matplotlib.ticker import matplotlib.pyplot as plt import seaborn as sns from os.path import join as _j, abspath as _a, dirname as _d, exists as _e, basename as _b from rlscope.profiler.util import pprint_msg from rlscope.parser.stacked_bar_plots import get_x_env, get_x_algo, xfields_from_xtick_expression, get_capsize, OverlapStackedBarPlot, add_repetition, group_numeric_cols from rlscope.parser.dataframe import UtilDataframeReader, RLScopeConfig from rlscope import py_config from rlscope.parser.common import * from rlscope.parser import constants from rlscope.parser.plot_utils import is_pdf, pdf2png from rlscope.py_config import yes_as_bool from typing import * class IMLInvaidArgument(Exception): pass def maybe_number(x): if type(x) != str: return x try: num = int(x) return num except ValueError: pass try: num = float(x) return num except ValueError: pass return x def parse_filename_attrs( path : str, file_prefix : str, file_suffix : str, attrs : Iterable[str], dflt_attrs : Optional[Dict[str, Any]] = None): attr_name_regex = r'(?:{regex})'.format( regex='|'.join(sorted(attrs, key=lambda attr: (-1*len(attr), attr))) ) attr_string_regex = r'(?P<attr_name>{attr_name})_(?P<attr_value>[^\.]*)'.format( attr_name=attr_name_regex ) components = re.split(r'\.', _b(path)) assert components[0] == file_prefix assert components[-1] == file_suffix attr_strings = components[1:len(components)-1] attr_vals = dict() if dflt_attrs is not None: attr_vals.update(dflt_attrs) for attr_string in attr_strings: m = re.fullmatch(attr_string_regex, attr_string) if not m: raise RuntimeError(f""" Not sure how to parse attribute name/value from \"{attr_string}\" found in {_b(path)}. Attributes we recognize = {attrs} """) attr_vals[m.group('attr_name')] = m.group('attr_value') return attr_vals def parse_path_attrs( path : str, attrs : Iterable[str], dflt_attrs : Optional[Dict[str, Any]] = None, attr_types : Optional[Dict[str, Any]] = None, debug : bool = False, ): attr_name_regex = r'(?:{regex})'.format( regex='|'.join(sorted(attrs, key=lambda attr: (-1*len(attr), attr))) ) attr_string_regex = r'(?P<attr_name>{attr_name})_(?P<attr_value>[^\.]*)\b'.format( attr_name=attr_name_regex ) if debug: logger.info(f"attr_name_regex = {attr_name_regex}") attr_vals = dict() if dflt_attrs is not None: attr_vals.update(dflt_attrs) path_components = os.path.split(path) for path_component in path_components: attr_strings = re.split(r'\.', path_component) for attr_string in attr_strings: m = re.search(attr_string_regex, attr_string) if m: value = m.group('attr_value') attr_name = m.group('attr_name') if attr_types is not None and attr_name in attr_types: value = attr_types[attr_name](value) attr_vals[attr_name] = value missing_attrs = set(attrs).difference(attr_vals.keys()) if len(missing_attrs) > 0: raise RuntimeError(f""" Couldn't find all required attributes in {path}. 
Attributes we are missing = {missing_attrs} """) return attr_vals METRIC_NAME_CUPTI_TO_PROF = { 'achieved_occupancy': "sm__warps_active.avg.pct_of_peak_sustained_active", 'sm_efficiency': "smsp__cycles_active.avg.pct_of_peak_sustained_elapsed", 'inst_executed': "smsp__inst_executed.sum", 'active_cycles': "sm__cycles_active.sum", 'active_warps': "sm__warps_active.sum", 'elapsed_cycles_sm': "sm__cycles_elapsed.sum", } PROF_TO_METRIC_NAME_CUPTI = dict((v, k) for k, v in METRIC_NAME_CUPTI_TO_PROF.items()) NUM_SMS = 68 SM_OCCUPANCY_TITLE = "SM occupancy: average percent of warps\nthat are in use within an SM" SM_EFFICIENCY_TITLE = "SM efficiency: percent of SMs\nthat are in use across the entire GPU" SM_EFFICIENCY_Y_LABEL = f"SM efficiency (%)\n# SMs = {NUM_SMS}" SM_OCCUPANCY_Y_LABEL = "SM occupancy (%)\nmax threads per block = 1024" SAMPLE_THROUGHPUT_Y_LABEL = "Throughput (samples/second)" SAMPLE_LATENCY_Y_LABEL = "Minibatch latency (ms)" CUPTI_METRIC_Y_LABEL = { 'sm_efficiency': SM_EFFICIENCY_Y_LABEL, 'achieved_occupancy': SM_OCCUPANCY_Y_LABEL, } CUPTI_METRIC_Y_LABEL_SHORT = { 'sm_efficiency': "SM efficiency (%)", 'achieved_occupancy': "SM occupancy (%)", } TRT_METRIC_YLABELS = { 'host_latency_throughput_qps': SAMPLE_THROUGHPUT_Y_LABEL, 'gpu_compute_mean_ms': "Mean GPU compute time (ms)", 'gpu_compute_percentile_99_ms': "99%-tile GPU compute time (ms)", } BATCH_SIZE_X_LABEL = "Batch size" STREAMS_X_LABEL = "# of CUDA streams" SIMULATOR_X_LABEL = "Simulator" STEP_THROUGHPUT_Y_LABEL = "Simulation throughput (samples/sec)" STEP_LATENCY_Y_LABEL = "Simulation latency (ms)" RLSCOPE_X_LABEL = "(RL algorithm, Simulator)" SM_ID_X_LABEL = f"SM ID\n# SMs = {NUM_SMS}" GPU_UTIL_EXPERIMENT_ATTRS = { 'thread_blocks', 'thread_block_size', 'n_launches', 'iterations', 'num_threads', 'processes', 'hw_counters', } GPU_UTIL_EXPERIMENT_ATTR_TYPES = { 'thread_blocks': maybe_number, 'thread_block_size': maybe_number, 'n_launches': maybe_number, 'iterations': maybe_number, 'num_threads': maybe_number, 'processes': yes_as_bool, 'hw_counters': yes_as_bool, } MULTI_TASK_ATTRS = set(GPU_UTIL_EXPERIMENT_ATTRS) MULTI_TASK_ATTRS.update({ 'iterations_per_sched_sample', 'thread_id', 'stream_id', 'trace_id', }) MULTI_TASK_JSON_ATTRS = { "globaltimer_ns", "kernel_id", "lane_id", "sm_id", "stream_id", "warp_id", } MULTI_TASK_ATTR_TYPES = dict(GPU_UTIL_EXPERIMENT_ATTR_TYPES) MULTI_TASK_ATTR_TYPES.update({ 'iterations_per_sched_sample': maybe_number, 'thread_id': maybe_number, 'stream_id': maybe_number, 'trace_id': maybe_number, }) MULTI_TASK_RAW_ATTR_TYPES = dict(MULTI_TASK_ATTR_TYPES) MULTI_TASK_RAW_ATTR_TYPES.update({ 'num_sms': maybe_number, 'sms_allocated': maybe_number, 'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': maybe_number, }) MULTI_TASK_RAW_ATTR_DFLTS = { 'num_sms': None, 'sms_allocated': None, 'CUDA_MPS_ACTIVE_THREAD_PERCENTAGE': None, } MULTI_TASK_RAW_ATTRS = MULTI_TASK_ATTRS.union(MULTI_TASK_RAW_ATTR_TYPES.keys()).difference({ 'stream_id', 'thread_id', 'trace_id', }) MEASUREMENT_PERIOD_ACTIVE_CYCLES = 'active_cycles' MEASUREMENT_PERIOD_ALL_CYCLES = 'all_cycles' CUPTI_METRIC_MEASUREMENT_PERIOD = { 'achieved_occupancy': MEASUREMENT_PERIOD_ACTIVE_CYCLES, 'sm_efficiency': MEASUREMENT_PERIOD_ALL_CYCLES, 'inst_executed': MEASUREMENT_PERIOD_ACTIVE_CYCLES, 'active_cycles': MEASUREMENT_PERIOD_ACTIVE_CYCLES, 'active_warps': MEASUREMENT_PERIOD_ACTIVE_CYCLES, 'elapsed_cycles_sm': MEASUREMENT_PERIOD_ALL_CYCLES, } FLOAT_RE = r'(?:[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)' UNIT_RE = r'(?:\b(?:ms|s|qps)\b)' class TrtexecExperiment: def 
__init__(self, args): self.args = args def run(self): self.read_df() self.plot_df() def read_df(self): self._read_trtexec_df() self._read_tf_inference_df() self._read_simulator_df() self._read_mps_df() def plot_df(self): """ batch_size = 1, 8, 16, 32, 64 streams = 1 plot: throughput sm_efficiency sm_occupancy """ def _plot_batch_size_vs(streams, suffix=None): self._plot_batch_size_vs_throughput( title="Throughput with increasing batch size", streams=streams, suffix=suffix) def filter_tensorflow(plot_df): plot_df = plot_df[plot_df['config'] == 'TF'] return plot_df self._plot_batch_size_vs_throughput( title="Throughput with increasing batch size", streams=streams, filter_df=filter_tensorflow, suffix=f"{or_empty(suffix)}.just_tensorflow") self._plot_batch_size_vs_metric( title=SM_EFFICIENCY_TITLE, cupti_metric='sm_efficiency', streams=streams, suffix=suffix) self._plot_batch_size_vs_metric( title=SM_OCCUPANCY_TITLE, cupti_metric='achieved_occupancy', streams=streams, suffix=suffix) _plot_batch_size_vs(streams=1) def _plot_streams_vs(batch_size, suffix=None): def _title(title): return f"{title}:\n(batch size = {batch_size})" trt_metric_title = { 'host_latency_throughput_qps': _title("Throughput with increasing streams"), 'gpu_compute_mean_ms': _title("Mean GPU compute time with increasing streams"), 'gpu_compute_percentile_99_ms': _title("99-%tile GPU compute time with increasing streams"), } cuda_graph_dict = { 'host_latency_throughput_qps': None, 'gpu_compute_mean_ms': None, 'gpu_compute_percentile_99_ms': None, } for trt_metric in trt_metric_title.keys(): self._plot_streams_vs_trt_metric( trt_metric, batch_size, title=trt_metric_title[trt_metric], cuda_graph=cuda_graph_dict.get(trt_metric, None)) self._plot_streams_vs_metric( title=SM_EFFICIENCY_TITLE, cupti_metric='sm_efficiency', batch_size=batch_size, suffix=suffix) self._plot_streams_vs_metric( title=SM_OCCUPANCY_TITLE, cupti_metric='achieved_occupancy', batch_size=batch_size, suffix=suffix) """ batch_size = 1 streams = 1, 2, 3, ..., 8 plot: throughput sm_efficiency sm_occupancy """ _plot_streams_vs(batch_size=1) if self.trtexec_df is not None: best_batch_size = self._compute_best_batch_size() _plot_streams_vs(batch_size=best_batch_size, suffix='.best_batch_size') self._plot_simulator_vs_steptime() self._plot_simulator_vs_throughput() def _plot_multiprocess_inference(df, throughput_title=None, inference_title=None, filter_df=None, suffix=None): self._plot_mps_batch_size_vs_metric_by_num_tasks( df=self.mps_df, metric='throughput_qps', title=throughput_title, xlabel=BATCH_SIZE_X_LABEL, ylabel=SAMPLE_THROUGHPUT_Y_LABEL, filter_df=filter_df, suffix=suffix, global_ymax=True, ) self._plot_mps_batch_size_vs_metric_by_num_tasks( df=self.mps_raw_df, metric='inference_time_ms', title=inference_title, xlabel=BATCH_SIZE_X_LABEL, ylabel=SAMPLE_LATENCY_Y_LABEL, filter_df=filter_df, suffix=suffix, global_ymax=False, ) """ 3 different graphs for multi-process experiment: - Multi-process (CPU) / config_cpu row['cpu'] assert not row['mps'] - Multi-process MPS (GPU) / config_mps_gpu_evenly row['mps'] and row['sm_alloc_strategy'] == 'evenly' assert not row['cpu'] - Multi-process MPS (GPU) / config_mps_gpu_evenly_x2 row['mps'] and row['sm_alloc_strategy'] == 'evenly_x2' assert not row['cpu'] - Multi-process (GPU, no MPS) / config_gpu not row['mps'] and not row['cpu'] """ def is_config_cpu(row): is_cpu = row['cpu'] if is_cpu: assert not row['mps'] return is_cpu def is_config_mps_gpu(row): is_mps = row['mps'] if is_mps: assert not row['cpu'] return is_mps def 
is_config_gpu(row): return not row['mps'] and not row['cpu'] def as_row_filter_func(is_config): def row_filter_func(df): df = df[df.apply(is_config, axis=1)] return df return row_filter_func sm_alloc_strategies = self.mps_df[self.mps_df['mps']]['sm_alloc_strategy'].unique().tolist() for sm_alloc_strategy in sm_alloc_strategies: def _is_config(row): return is_config_mps_gpu(row) and row['sm_alloc_strategy'] == sm_alloc_strategy _plot_multiprocess_inference( self.mps_df, throughput_title='Inference throughput:\nmulti-process TF scripts (GPU) + CUDA MPS', inference_title='Inference latency:\nmulti-process TF scripts (GPU) + CUDA MPS', filter_df=as_row_filter_func(_is_config), suffix=f".config_mps_gpu_{sm_alloc_strategy}") _plot_multiprocess_inference( self.mps_df, throughput_title='Inference throughput:\nmulti-process TF scripts (CPU)', inference_title='Inference latency:\nmulti-process TF scripts (CPU)', filter_df=as_row_filter_func(is_config_cpu), suffix='.config_cpu') _plot_multiprocess_inference( self.mps_df, throughput_title='Inference throughput:\nmulti-process TF scripts (GPU)', inference_title='Inference latency:\nmulti-process TF scripts (GPU)', filter_df=as_row_filter_func(is_config_gpu), suffix='.config_gpu') def _compute_best_batch_size(self): df = self.trtexec_df[self.trtexec_df['streams'] == 1] max_throughput = df['host_latency_throughput_qps'].max() batch_sizes = df[df['host_latency_throughput_qps'] == max_throughput]['batch_size'].unique() assert len(batch_sizes) == 1 best_batch_size = batch_sizes[0] return best_batch_size def _plot_streams_vs_metric(self, title, cupti_metric, batch_size, ylabel=None, suffix=None): if self.trtexec_gpu_hw_df is None: return df = copy.copy(self.trtexec_gpu_hw_df) df = df[df['batch_size'] == batch_size] df = keep_cupti_metric(df, cupti_metric) add_gpu_hw_fields(df) df = self._add_config(df, df_type='trtexec') sns.set(style="whitegrid") g = sns.catplot(x="streams", y="metric_value", hue="config", data=df, kind="bar", palette="muted" ) g.despine(left=True) if ylabel is None: ylabel = CUPTI_METRIC_Y_LABEL[cupti_metric] g.set_ylabels(ylabel) g.set_xlabels(STREAMS_X_LABEL) g.fig.suptitle(title) g.fig.subplots_adjust(top=0.90) if suffix is None: suffix = "" save_plot(df, _j(self.args['trtexec_dir'], f'streams_vs_{cupti_metric}.batch_size_{batch_size}{suffix}.svg'))
Apache License 2.0
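The method above boils down to a grouped seaborn bar catplot of a CUPTI metric against batch size. The self-contained toy sketch below shows that plotting pattern only; the numbers, config labels, and output file name are made up and are not rlscope data.

import pandas as pd
import seaborn as sns

# Toy stand-in for plot_df: one metric value per (batch_size, config) pair.
plot_df = pd.DataFrame({
    'batch_size':   [1, 8, 32, 1, 8, 32],
    'metric_value': [22.0, 41.5, 63.0, 18.0, 30.5, 44.0],
    'config':       ['TF', 'TF', 'TF', 'TRT', 'TRT', 'TRT'],
})
sns.set(style="whitegrid")
g = sns.catplot(x="batch_size", y="metric_value", hue="config",
                data=plot_df, kind="bar", palette="muted")
g.despine(left=True)
g.set_ylabels("SM efficiency (%)")
g.set_xlabels("Batch size")
g.fig.suptitle("Toy batch-size vs. metric bar plot")
g.fig.subplots_adjust(top=0.90)
g.savefig("toy_batch_size_vs_metric.svg")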
docusign/docusign-python-client
docusign_esign/models/power_form_recipient.py
PowerFormRecipient.recipient_type
python
def recipient_type(self): return self._recipient_type
Gets the recipient_type of this PowerFormRecipient.

:return: The recipient_type of this PowerFormRecipient.
:rtype: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/power_form_recipient.py#L287-L295
import pprint import re import six from docusign_esign.client.configuration import Configuration class PowerFormRecipient(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'access_code': 'str', 'access_code_locked': 'str', 'access_code_required': 'str', 'email': 'str', 'email_locked': 'str', 'id_check_configuration_name': 'str', 'id_check_required': 'str', 'name': 'str', 'recipient_type': 'str', 'role_name': 'str', 'routing_order': 'str', 'template_requires_id_lookup': 'str', 'user_name_locked': 'str' } attribute_map = { 'access_code': 'accessCode', 'access_code_locked': 'accessCodeLocked', 'access_code_required': 'accessCodeRequired', 'email': 'email', 'email_locked': 'emailLocked', 'id_check_configuration_name': 'idCheckConfigurationName', 'id_check_required': 'idCheckRequired', 'name': 'name', 'recipient_type': 'recipientType', 'role_name': 'roleName', 'routing_order': 'routingOrder', 'template_requires_id_lookup': 'templateRequiresIdLookup', 'user_name_locked': 'userNameLocked' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._access_code = None self._access_code_locked = None self._access_code_required = None self._email = None self._email_locked = None self._id_check_configuration_name = None self._id_check_required = None self._name = None self._recipient_type = None self._role_name = None self._routing_order = None self._template_requires_id_lookup = None self._user_name_locked = None self.discriminator = None setattr(self, "_{}".format('access_code'), kwargs.get('access_code', None)) setattr(self, "_{}".format('access_code_locked'), kwargs.get('access_code_locked', None)) setattr(self, "_{}".format('access_code_required'), kwargs.get('access_code_required', None)) setattr(self, "_{}".format('email'), kwargs.get('email', None)) setattr(self, "_{}".format('email_locked'), kwargs.get('email_locked', None)) setattr(self, "_{}".format('id_check_configuration_name'), kwargs.get('id_check_configuration_name', None)) setattr(self, "_{}".format('id_check_required'), kwargs.get('id_check_required', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) setattr(self, "_{}".format('recipient_type'), kwargs.get('recipient_type', None)) setattr(self, "_{}".format('role_name'), kwargs.get('role_name', None)) setattr(self, "_{}".format('routing_order'), kwargs.get('routing_order', None)) setattr(self, "_{}".format('template_requires_id_lookup'), kwargs.get('template_requires_id_lookup', None)) setattr(self, "_{}".format('user_name_locked'), kwargs.get('user_name_locked', None)) @property def access_code(self): return self._access_code @access_code.setter def access_code(self, access_code): self._access_code = access_code @property def access_code_locked(self): return self._access_code_locked @access_code_locked.setter def access_code_locked(self, access_code_locked): self._access_code_locked = access_code_locked @property def access_code_required(self): return self._access_code_required @access_code_required.setter def access_code_required(self, access_code_required): self._access_code_required = access_code_required @property def email(self): return self._email @email.setter def email(self, email): self._email = email @property def email_locked(self): return self._email_locked @email_locked.setter def 
email_locked(self, email_locked): self._email_locked = email_locked @property def id_check_configuration_name(self): return self._id_check_configuration_name @id_check_configuration_name.setter def id_check_configuration_name(self, id_check_configuration_name): self._id_check_configuration_name = id_check_configuration_name @property def id_check_required(self): return self._id_check_required @id_check_required.setter def id_check_required(self, id_check_required): self._id_check_required = id_check_required @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property
MIT License
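An illustrative construction of the model above, assuming the docusign_esign package is installed; the field values are hypothetical. The generated model accepts its attributes as keyword arguments and exposes them through properties such as recipient_type.

from docusign_esign.models.power_form_recipient import PowerFormRecipient

recipient = PowerFormRecipient(recipient_type="signer",        # hypothetical values
                               role_name="Customer",
                               email="customer@example.com",
                               routing_order="1")
print(recipient.recipient_type)  # -> "signer"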
wavii/listy-django-cache
listy/utils.py
filter_into_two
python
def filter_into_two(elements, f):
    a = []
    b = []
    for e in elements:
        if f(e):
            a.append(e)
        else:
            b.append(e)
    return (a, b)
Returns a tuple of two lists: the first contains the elements for which the provided function f returns truthy, the second those for which it returns falsy.
https://github.com/wavii/listy-django-cache/blob/080152fdf18b89387d9f6fb9ce5e9fc1c7ed8a21/listy/utils.py#L6-L18
import time import copy import string import itertools
MIT License
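A quick illustration of filter_into_two above. Importing from listy.utils matches the path listed for this record, though any copy of the function behaves the same way.

from listy.utils import filter_into_two

evens, odds = filter_into_two(range(10), lambda n: n % 2 == 0)
# evens == [0, 2, 4, 6, 8]
# odds  == [1, 3, 5, 7, 9]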
mrknow/filmkodi
tests/lib/xbmc.py
Player.getAvailableAudioStreams
python
def getAvailableAudioStreams(self): return list()
Get audio stream names.
https://github.com/mrknow/filmkodi/blob/0162cde9ae25ddbf4a69330948714833ff2f78c9/tests/lib/xbmc.py#L361-L363
import xbmcgui as _xbmcgui CAPTURE_FLAG_CONTINUOUS = 1 CAPTURE_FLAG_IMMEDIATELY = 2 CAPTURE_STATE_DONE = 3 CAPTURE_STATE_FAILED = 4 CAPTURE_STATE_WORKING = 0 DRIVE_NOT_READY = 1 ENGLISH_NAME = 2 ISO_639_1 = 0 ISO_639_2 = 1 LOGDEBUG = 0 LOGERROR = 4 LOGFATAL = 6 LOGINFO = 1 LOGNONE = 7 LOGNOTICE = 2 LOGSEVERE = 5 LOGWARNING = 3 PLAYER_CORE_AUTO = 0 PLAYER_CORE_DVDPLAYER = 1 PLAYER_CORE_MPLAYER = 2 PLAYER_CORE_PAPLAYER = 3 PLAYLIST_MUSIC = 0 PLAYLIST_VIDEO = 1 SERVER_AIRPLAYSERVER = 2 SERVER_EVENTSERVER = 6 SERVER_JSONRPCSERVER = 3 SERVER_UPNPRENDERER = 4 SERVER_UPNPSERVER = 5 SERVER_WEBSERVER = 1 SERVER_ZEROCONF = 7 TRAY_CLOSED_MEDIA_PRESENT = 96 TRAY_CLOSED_NO_MEDIA = 64 TRAY_OPEN = 16 __author__ = 'Team Kodi <http://kodi.tv>' __credits__ = 'Team Kodi' __date__ = 'Fri May 01 16:22:03 BST 2015' __platform__ = 'ALL' __version__ = '2.20.0' abortRequested = False class Keyboard(object): def __init__(self, line='', heading='', hidden=False): pass def doModal(self, autoclose=0): pass def setDefault(self, line=''): pass def setHiddenInput(self, hidden=False): pass def setHeading(self, heading): pass def getText(self): return str() def isConfirmed(self): return bool(1) class Player(object): def __init__(self): pass def play(self, item=None, listitem=None, windowed=False, statrpos=-1): pass def stop(self): pass def pause(self): pass def playnext(self): pass def playprevious(self): pass def playselected(self, selected): pass def onPlayBackStarted(self): pass def onPlayBackEnded(self): pass def onPlayBackStopped(self): def onPlayBackPaused(self): pass def onPlayBackResumed(self): pass def onPlayBackSeek(self, time, seekOffset): pass def onPlayBackSeekChapter(self, chapter): pass def onPlayBackSpeedChanged(self, speed): pass def onQueueNextItem(self): pass def isPlaying(self): return bool(1) def isPlayingAudio(self): return bool(1) def isPlayingVideo(self): return bool(1) def getPlayingFile(self): return str() def getVideoInfoTag(self): return InfoTagVideo() def getMusicInfoTag(self): return InfoTagMusic() def getTotalTime(self): return float() def getTime(self): return float() def seekTime(self, pTime): pass def setSubtitles(self, subtitleFile): pass def getSubtitles(self): return str() def disableSubtitles(self): pass
Apache License 2.0
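The Player method above belongs to a stub xbmc module used to run add-on tests outside Kodi. A hedged sketch of how such a stub might be pulled in by a test, assuming tests/lib is placed on sys.path ahead of the real Kodi bindings.

import sys
sys.path.insert(0, "tests/lib")   # hypothetical: expose the stub modules to the test run

import xbmc

player = xbmc.Player()
assert player.getAvailableAudioStreams() == []   # the stub returns an empty list
assert player.isPlayingAudio() is True           # most predicates are stubbed to bool(1)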
jhorey/ferry
ferry/fabric/openstack/singlelauncher.py
SingleLauncher._create_instance
python
def _create_instance(self, name, image, size, manage_network, sec_group):
    plan = { name : { "Type" : "OS::Nova::Server",
                      "Properties" : { "name" : name,
                                       "image" : image,
                                       "key_name" : self.ssh_key,
                                       "flavor" : size,
                                       "availability_zone" : self.default_zone,
                                       "networks" : []}}}
    desc = { name : { "type" : "OS::Nova::Server",
                      "name" : name,
                      "ports" : [],
                      "volumes" : [] }}

    port_descs = []
    port_name = "ferry-port-%s" % name
    port_descs.append(self._create_port(port_name, manage_network, sec_group, ref=False))
    plan[name]["Properties"]["networks"].append({ "port" : { "Ref" : port_name },
                                                  "network" : manage_network})
    desc[name]["ports"].append(port_name)
    desc[port_name] = { "type" : "OS::Neutron::Port",
                        "role" : "manage" }

    for d in port_descs:
        plan = dict(plan.items() + d.items())

    user_data = self._create_server_init()
    plan[name]["Properties"]["user_data"] = user_data
    return plan, desc
Create a new instance
https://github.com/jhorey/ferry/blob/bbaa047df08386e17130a939e20fde5e840d1ffa/ferry/fabric/openstack/singlelauncher.py#L376-L410
import copy import ferry.install from ferry.install import Installer from ferry.config.system.info import System from heatclient import client as heat_client from heatclient.exc import HTTPUnauthorized, HTTPNotFound, HTTPBadRequest import json import logging import math from neutronclient.neutron import client as neutron_client from novaclient import client as nova_client import os from pymongo import MongoClient import sys import time import uuid import yaml class SingleLauncher(object): def __init__(self, controller): self.name = "OpenStack launcher" self.docker_registry = None self.docker_user = None self.heat_server = None self.openstack_key = None self.system = System() self.installer = Installer() self.controller = controller self._init_open_stack() self._init_app_db() def support_proxy(self): return True def _init_app_db(self): self.mongo = MongoClient(os.environ['MONGODB'], 27017, connectTimeoutMS=6000) self.apps = self.mongo['cloud']['openstack'] def _init_open_stack(self): conf = ferry.install.read_ferry_config() self.data_device = conf['system']['network'] provider = conf['system']['provider'] params = conf[provider]['params'] self.default_dc = params['dc'] self.default_zone = params['zone'] if self._check_openstack_credentials(): self.openstack_user = os.environ['OS_USERNAME'] self.openstack_pass = os.environ['OS_PASSWORD'] self.tenant_id = os.environ['OS_TENANT_ID'] self.tenant_name = os.environ['OS_TENANT_NAME'] else: logging.error("Missing OpenStack credentials") raise ValueError("Missing OpenStack credentials") servers = conf[provider][self.default_dc] self.manage_network = servers['network'] self.external_network = servers['extnet'] self.region = servers['region'] self.keystone_server = servers['keystone'] self.nova_server = servers['nova'] self.neutron_server = servers['neutron'] self.heatuuid = None if 'HEAT_URL' in os.environ: self.heat_server = os.environ['HEAT_URL'] elif 'heat' in servers: self.heat_server = servers['heat'] else: self.heat_server = self._check_and_start_heat(self.tenant_id) logging.warning("using heat server " + str(self.heat_server)) deploy = conf[provider]['deploy'] self.default_image = deploy['image'] self.default_personality = deploy['personality'] self.default_user = deploy['default-user'] self.ssh_key = deploy['ssh'] self.ssh_user = deploy['ssh-user'] keypath = self._get_host_key() if not os.path.exists(keypath): logging.error("could not find ssh key (%s)" % self.ssh_key) raise ValueError("Missing ssh keys") self._init_openstack_clients() self._collect_subnet_info() def _get_host_key(self): p = self.ssh_key.split("/") if len(p) == 1: return "/ferry/keys/" + self.ssh_key + ".pem" else: return self.ssh_key + ".pem" def _check_and_start_heat(self, tenant_id): logging.info("Check for Heat image") self.installer._check_and_pull_image("ferry/heatserver") heatlogs = ferry.install.DOCKER_DIR + "/heatlog" try: if not os.path.isdir(heatlogs): os.makedirs(heatlogs) self.installer._change_permission(heatlogs) except OSError as e: logging.error(e.strerror) sys.exit(1) volumes = { heatlogs : "/var/log/heat" } heatplan = {'image':'ferry/heatserver', 'type':'ferry/heatserver', 'keydir': {}, 'keyname': None, 'privatekey': None, 'volumes':volumes, 'volume_user':ferry.install.DEFAULT_FERRY_OWNER, 'ports':[], 'exposed':["8004","8000"], 'internal':[], 'hostname':'heatserver', 'netenable':True, 'default_cmd' : '', 'args': 'trust' } self.heatuuid = 'fht-' + str(uuid.uuid4()).split('-')[0] self.heatbox = self.installer.fabric.alloc(self.heatuuid, self.heatuuid, 
[heatplan], "HEAT")[0] if not self.heatbox: logging.error("Could not start Heat server") sys.exit(1) else: return "http://%s:8004/v1/%s" % (str(self.heatbox.internal_ip), tenant_id) def _check_openstack_credentials(self): envs = ['OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_ID', 'OS_TENANT_NAME'] for e in envs: if not e in os.environ: return False return True def _init_openstack_clients(self): if 'HEAT_API_VERSION' in os.environ: heat_api_version = os.environ['HEAT_API_VERSION'] else: heat_api_version = '1' kwargs = { 'username' : self.openstack_user, 'password' : self.openstack_pass, 'include_pass' : True, 'tenant_id': self.tenant_id, 'tenant_name': self.tenant_name, 'auth_url' : self.keystone_server } self.heat = heat_client.Client(heat_api_version, self.heat_server, **kwargs) for i in range(0, 10): try: stacks = self.heat.stacks.list() for s in stacks: logging.warning("found Heat stack: " + str(s)) connected = True break except: time.sleep(12) connected = False if not connected: raise ValueError("Could not connect to Heat") neutron_api_version = "2.0" kwargs['endpoint_url'] = self.neutron_server self.neutron = neutron_client.Client(neutron_api_version, **kwargs) nova_api_version = "1.1" kwargs = { 'username' : self.openstack_user, 'api_key' : self.openstack_pass, 'tenant_id': self.tenant_id, 'auth_url' : self.keystone_server, 'service_type' : 'compute', 'region_name' : self.region } self.nova = nova_client.Client(nova_api_version, **kwargs) def _create_floating_ip(self, name, port): plan = { name : { "Type": "OS::Neutron::FloatingIP", "Properties": { "floating_network_id": self.external_network }}, name + "_assoc" : { "Type": "OS::Neutron::FloatingIPAssociation", "Properties": { "floatingip_id": { "Ref" : name }, "port_id": { "Ref" : port }}}} desc = { "type" : "OS::Neutron::FloatingIP" } return plan, desc def _create_security_group(self, group_name, ports, internal): desc = { group_name : { "Type" : "OS::Neutron::SecurityGroup", "Properties" : { "name" : group_name, "description" : "Ferry firewall rules", "rules" : [ { "protocol" : "icmp", "remote_ip_prefix": "0.0.0.0/0" }, { "protocol" : "tcp", "remote_ip_prefix": "0.0.0.0/0", "port_range_min" : 22, "port_range_max" : 22 }]}}} for p in ports: min_port = p[0] max_port = p[1] desc[group_name]["Properties"]["rules"].append({ "protocol" : "tcp", "remote_ip_prefix": "0.0.0.0/0", "port_range_min" : min_port, "port_range_max" : max_port }) for p in internal: min_port = p[0] max_port = p[1] desc[group_name]["Properties"]["rules"].append({ "protocol" : "tcp", "remote_ip_prefix": self.subnet["cidr"], "port_range_min" : min_port, "port_range_max" : max_port }) return desc def _create_storage_volume(self, volume_name, server_name, size_gb): desc = { volume_name : { "Type" : "OS::Cinder::Volume", "Properties": { "size" : size_db, "availability_zone": self.default_zone }}, volume_name + "_attachment" : { "Type" : "OS::Cinder::VolumeAttachment", "Properties": { "volume_id" : { "Ref" : volume_name }, "instance_uuid": { "Ref" : server_name }, "mount_point": "/dev/vdc" }}} return desc def _create_port(self, name, network, sec_group, ref=True): desc = { name : { "Type" : "OS::Neutron::Port", "Properties" : { "name" : name, "security_groups" : [{ "Ref" : sec_group }]}}} if ref: desc[name]["Properties"]["network"] = { "Ref" : network } else: desc[name]["Properties"]["network"] = network return desc def _create_server_init(self): user_data = { "Fn::Base64": { "Fn::Join": [ "", [ "#!/bin/bash -v\n", "umount /mnt\n", "parted --script /dev/vdb mklabel gpt\n", 
"parted --script /dev/vdb mkpart primary xfs 0% 100%\n", "mkfs.xfs /dev/vdb1\n", "mkdir /ferry/data\n", "mkdir /ferry/keys\n", "mkdir /ferry/containers\n", "mount -o noatime /dev/vdb1 /ferry/data\n", "export FERRY_SCRATCH=/ferry/data\n", "export FERRY_DIR=/ferry/master\n", "echo export FERRY_SCRATCH=/ferry/data >> /etc/profile\n", "echo export FERRY_DIR=/ferry/master >> /etc/profile\n", "export HOME=/root\n", "export USER=root\n", "mkdir /home/ferry/.ssh\n", "cp /home/%s/.ssh/authorized_keys /home/ferry/.ssh/\n" % self.default_user, "cp /home/%s/.ssh/authorized_keys /root/.ssh/\n" % self.default_user, "chown -R ferry:ferry /home/ferry/.ssh\n", "chown -R ferry:ferry /ferry/data\n", "chown -R ferry:ferry /ferry/keys\n", "chown -R ferry:ferry /ferry/containers\n", "ferry server -n\n", "sleep 3\n" ] ] }} return user_data def _create_volume_attachment(self, iface, instance, volume_id): plan = { iface: { "Type": "OS::Cinder::VolumeAttachment", "Properties": { "instance_uuid": { "Ref" : instance }, "mountpoint": "/dev/vdc", "volume_id": volume_id}}} desc = { "type" : "OS::Cinder::VolumeAttachment" } return plan, desc
Apache License 2.0
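The method above assembles a Heat template fragment for an OS::Nova::Server plus its port. The standalone sketch below only shows the shape of that plan dictionary with hypothetical values; it is not tied to a running launcher or to real OpenStack credentials.

import json

# Hypothetical values standing in for the launcher's configuration.
name, image, size = "ferry-node-0", "ubuntu-14.04", "m1.medium"
plan = {name: {"Type": "OS::Nova::Server",
               "Properties": {"name": name,
                              "image": image,
                              "key_name": "ferry-keypair",
                              "flavor": size,
                              "availability_zone": "nova",
                              "networks": [{"port": {"Ref": "ferry-port-%s" % name},
                                            "network": "private-net-id"}]}}}
print(json.dumps(plan, indent=2))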
twisted/imaginary
ExampleGame/examplegame/glass.py
_CantReachThroughGlassBox.tellMeWhyNot
python
def tellMeWhyNot(self): return "You can't reach through the glass box."
Return a simple message explaining that the user can't reach through the glass box.
https://github.com/twisted/imaginary/blob/a162488a3166baf19f9ed3ae3b98afa18b8f294d/ExampleGame/examplegame/glass.py#L24-L29
from zope.interface import implements from axiom.item import Item from axiom.attributes import reference from imaginary.iimaginary import ( ILinkContributor, IWhyNot, IObstruction, IContainer) from imaginary.enhancement import Enhancement from imaginary.objects import ContainmentRelationship from imaginary.idea import Link class _CantReachThroughGlassBox(object): implements(IWhyNot)
MIT License
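tellMeWhyNot above is the IWhyNot half of Imaginary's obstruction protocol: the glass box contributes a link annotated with an object that can explain why an action failed. A stripped-down stand-in for that pattern, without zope.interface or axiom, is sketched below.

class CantReachThroughGlassBox(object):          # stand-in for the Item/Enhancement above
    def tellMeWhyNot(self):
        return "You can't reach through the glass box."

def report_obstruction(why_not):
    # Imaginary's action machinery would surface this message to the player.
    print(why_not.tellMeWhyNot())

report_obstruction(CantReachThroughGlassBox())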
danbradham/mvp
mvp/integration.py
Integration.on_enable
python
def on_enable(self): return True
Return True to enable the integration and False to disable it.
https://github.com/danbradham/mvp/blob/bb9d0da6d0bdb2a42d91d5a850739ebc2355e54f/mvp/integration.py#L49-L52
class Integration(object): name = None description = None icon = None banner = None requires_confirmation = False enabled_by_default = False columns = 1 def __init__(self): self.set_enabled(self.enabled_by_default) def fields(self): return NotImplemented def on_filename_changed(self, form, value): return NotImplemented def set_enabled(self, value): if value: return self._on_enable() else: return self._on_disable() def _on_enable(self): self.enabled = self.on_enable() return self.enabled
MIT License
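A hypothetical subclass illustrating how on_enable above is meant to be overridden. The class name, description, and field list are invented for this sketch, and it relies only on the hooks shown in this record.

from mvp.integration import Integration   # module path as listed in this record

class SlackNotify(Integration):            # hypothetical integration
    name = "Slack Notify"
    description = "Post a message when a playblast finishes."
    enabled_by_default = True              # __init__ -> set_enabled(True) -> on_enable()

    def on_enable(self):
        return True                        # keep the integration active

    def fields(self):
        return []                          # no extra form fields in this sketch

integration = SlackNotify()
print(integration.enabled)                 # -> True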
gorgitko/molminer
molminer/OPSIN.py
OPSIN.set_options
python
def set_options(self, options: dict): _, self.options, self.options_internal = self.build_commands(options, self._OPTIONS_REAL, self.path_to_binary)
Sets the options passed in a dict. Keys are the same as the optional parameters
of the OPSIN constructor (__init__()).

Parameters
----------
options
    Dict of new options.
https://github.com/gorgitko/molminer/blob/b43b05d947571c1273d0da10bf215ae920ab45f6/molminer/OPSIN.py#L120-L130
from .AbstractLinker import AbstractLinker from .utils import common_subprocess, dict_to_csv, write_empty_file, eprint from rdkit.Chem import MolFromSmiles, MolToSmiles, MolFromInchi, MolToInchi, InchiToInchiKey, SDWriter, MolToMolBlock from molvs import Standardizer from collections import OrderedDict import logging from typing import Union import re import os logging.basicConfig(format="[%(levelname)s - %(filename)s:%(funcName)s:%(lineno)s] %(message)s") verbosity_levels = { 0: 100, 1: logging.WARNING, 2: logging.INFO } class OPSIN(AbstractLinker): _OPTIONS_REAL = { "allow_acids_without_acid": ("--allowAcidsWithoutAcid", ""), "detailed_failure_analysis": ("--detailedFailureAnalysis", ""), "output_format": ("--output", "{}"), "allow_radicals": ("--allowRadicals", ""), "allow_uninterpretable_stereo": ("--allowUninterpretableStereo", ""), "opsin_verbose": ("--verbose", ""), "wildcard_radicals": ("--wildcardRadicals", ""), } PLURAL_PATTERN = re.compile(r"(nitrate|bromide|chloride|iodide|amine|ketoxime|ketone|oxime)s", flags=re.IGNORECASE) logger = logging.getLogger("opsin") def __init__(self, path_to_binary: str = "opsin", allow_acids_without_acid: bool = True, detailed_failure_analysis: bool = True, output_format: str = "smi", allow_radicals: bool = True, allow_uninterpretable_stereo: bool = True, opsin_verbose: bool = False, wildcard_radicals: bool = False, plural_pattern: str = None, verbosity: int = 1): if verbosity > 2: verbosity = 2 elif verbosity not in verbosity_levels: verbosity = 1 self.logger.setLevel(verbosity_levels[verbosity]) self.path_to_binary = path_to_binary _, self.options, self.options_internal = self.build_commands(locals(), self._OPTIONS_REAL, path_to_binary) if plural_pattern: self.plural_pattern = re.compile(plural_pattern, flags=re.IGNORECASE) else: self.plural_pattern = self.PLURAL_PATTERN
MIT License
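An illustrative round of option changes on the linker above. It assumes the molminer package and the OPSIN binary are available; the specific option values are arbitrary examples, and per the code shown set_options only rebuilds the stored command-line options rather than running a conversion itself.

from molminer.OPSIN import OPSIN   # assumes molminer and the OPSIN binary are installed

opsin = OPSIN(output_format="smi", allow_radicals=True)
# Switch later conversions to InChI output without constructing a new linker:
opsin.set_options({"output_format": "inchi",
                   "allow_uninterpretable_stereo": False})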
sebcagnon/pygoogleform
PyGoogleForm/FormQuestion.py
GFQuestion.answerQuestion
python
def answerQuestion(self, answer):
    answers = self._getChoices()
    if self.type == "ss-checkbox":
        if not isinstance(answer, collections.Sequence):
            raise ValueError("answer should be string/unicode or list of strings/unicode")
        error = None
        if isinstance(answer, basestring) and answer in answers:
            self._answer = [answer]
        elif isinstance(answer, collections.Sequence):
            self._answer = []
            for ans in answer:
                if ans in answers:
                    self._answer.append(ans)
                else:
                    error = ans
                    break
        else:
            error = answer
        if error is not None:
            errorMessage = 'Answer "{}" is not a possible answer. Possible answers are:\n\t'.format(error)
            errorMessage += '\n\t'.join(answers)
            raise ValueError(errorMessage)
    else:
        if not isinstance(answer, basestring):
            raise ValueError("answer should be string or unicode")
        if answers == "" or answer in answers:
            self._answer = answer
        else:
            errorMessage = 'Answer "{}" is not a possible answer. Possible answers are:\n\t'.format(answer)
            errorMessage += '\n\t'.join(answers)
            raise ValueError(errorMessage)
Save the answer to the question
https://github.com/sebcagnon/pygoogleform/blob/62054749b65613aa6e74d67e0512f5a7699701be/PyGoogleForm/FormQuestion.py#L59-L90
import collections class GFQuestion(object): SUPPORTED_TYPES = ["ss-radio", "ss-select", "ss-checkbox", "ss-text", "ss-paragraph-text"] def __init__(self, questionSoup): self.soup = questionSoup qClasses = self.soup.div["class"] for qClass in qClasses: if qClass in self.SUPPORTED_TYPES: self.type = qClass break self.id = None self.label = self.soup.label.div.text.strip() self._answer = None def getType(self): return self.type def getID(self): if self.id is None: if self.type in ["ss-radio", "ss-text", "ss-checkbox"]: self.id = self.soup.input["name"] elif self.type == "ss-select": self.id = self.soup.find("select")["name"] elif self.type == "ss-paragraph-text": self.id = self.soup.textarea["name"] return self.id def _getChoices(self): choices = None if self.type in ["ss-radio", "ss-checkbox"]: choices = [item.input["value"] for item in self.soup.findChildren("li")] elif self.type in ["ss-text", "ss-paragraph-text"]: choices = "" elif self.type == "ss-select": choices = [item.text for item in self.soup.findChildren("option") if item.text!=""] return choices def getQuestionAsList(self): retValue = [self.id, self.type, self.label, self._getChoices()] return retValue
MIT License
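A hedged usage sketch for answerQuestion above. The markup is hypothetical and merely mirrors the structure GFQuestion expects (an inner div carrying the ss-* type class, a label containing a div with the question text, and an input with a name attribute); the library targets Python 2, since the validation uses basestring, and BeautifulSoup4 is assumed.

from bs4 import BeautifulSoup
from PyGoogleForm.FormQuestion import GFQuestion   # module path as listed in this record

html = """
<div>
  <div class="ss-form-question ss-text">
    <label><div>Your name</div></label>
    <input type="text" name="entry.123456"/>
  </div>
</div>
"""
question = GFQuestion(BeautifulSoup(html, "html.parser").div)
question.answerQuestion("Ada Lovelace")   # free-text questions accept any string
print(question.getID())                   # -> entry.123456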