repository_name      stringlengths  7 - 107
function_path        stringlengths  4 - 190
function_identifier  stringlengths  1 - 236
language             stringclasses  1 value
function             stringlengths  9 - 647k
docstring            stringlengths  5 - 488k
function_url         stringlengths  71 - 285
context              stringlengths  0 - 2.51M
license              stringclasses  5 values
sametmax/django--an-app-at-a-time
ignore_this_directory/django/contrib/postgres/utils.py
prefix_validation_error
python
def prefix_validation_error(error, prefix, code, params):
    if error.error_list == [error]:
        error_params = error.params or {}
        return ValidationError(
            message=format_lazy(
                '{} {}',
                SimpleLazyObject(lambda: prefix % params),
                SimpleLazyObject(lambda: error.message % error_params),
            ),
            code=code,
            params={**error_params, **params},
        )
    return ValidationError([
        prefix_validation_error(e, prefix, code, params)
        for e in error.error_list
    ])
Prefix a validation error message while maintaining the existing validation data structure.
https://github.com/sametmax/django--an-app-at-a-time/blob/99eddf12ead76e6dfbeb09ce0bae61e282e22f8a/ignore_this_directory/django/contrib/postgres/utils.py#L6-L29
from django.core.exceptions import ValidationError from django.utils.functional import SimpleLazyObject from django.utils.text import format_lazy
MIT License
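A short usage sketch for the prefix_validation_error entry above. It assumes Django is installed (the public import path django.contrib.postgres.utils is used instead of the vendored copy shown here), and the messages, codes, and params are invented for illustration:

# Minimal sketch, assuming Django is available; strings and params are made up.
from django.core.exceptions import ValidationError
from django.contrib.postgres.utils import prefix_validation_error

# An error raised for a single element of a composite field (e.g. ArrayField).
item_error = ValidationError(
    'Value %(value)s is not allowed.', code='not_allowed', params={'value': 3},
)

# Prefix it with the item position while keeping code and params mergeable.
prefixed = prefix_validation_error(
    item_error,
    prefix='Item %(nth)s in the array did not validate:',
    code='item_invalid',
    params={'nth': 1},
)

print(prefixed.code)    # 'item_invalid'
print(prefixed.params)  # {'value': 3, 'nth': 1}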
frostming/atoml
atoml/api.py
document
python
def document() -> TOMLDocument:
    return TOMLDocument()
Returns a new TOMLDocument instance.
https://github.com/frostming/atoml/blob/6d976f7f9c62f36b1f5934b800daf0702d786173/atoml/api.py#L71-L75
import datetime as _datetime from collections.abc import Mapping from typing import IO, Tuple from ._utils import parse_rfc3339 from .container import Container from .items import ( AoT, Array, Bool, Comment, Date, DateTime, Float, InlineTable, Integer, ) from .items import Item as _Item from .items import Key, String, Table, Time, Trivia, Whitespace, item from .parser import Parser from .toml_document import TOMLDocument def loads(string: str) -> TOMLDocument: return parse(string) def dumps(data: Mapping, sort_keys: bool = False) -> str: if not isinstance(data, Container) and isinstance(data, Mapping): data = item(dict(data), _sort_keys=sort_keys) try: return data.as_string() except AttributeError as ex: msg = f"Expecting Mapping or TOML Container, {type(data)} given" raise TypeError(msg) from ex def load(fp: IO) -> TOMLDocument: return parse(fp.read()) def dump(data: Mapping, fp: IO[str], *, sort_keys: bool = False) -> None: fp.write(dumps(data, sort_keys=sort_keys)) def parse(string: str) -> TOMLDocument: return Parser(string).parse()
MIT License
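A brief usage sketch for document(), assuming the atoml package is installed; importing from atoml.api matches the context shown above, and the key names are arbitrary examples:

# Minimal sketch; keys and values are made up.
from atoml.api import document, dumps

doc = document()                  # empty TOMLDocument
doc["title"] = "example"          # plain top-level key
doc["owner"] = {"name": "Ada"}    # a dict is converted into an [owner] table

print(dumps(doc))                 # serializes the document back to TOML text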
xiaobai1217/mbmd
lib/object_detection/evaluator.py
_extract_prediction_tensors
python
def _extract_prediction_tensors(model, create_input_dict_fn, ignore_groundtruth=False):
    input_dict = create_input_dict_fn()
    prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
    input_dict = prefetch_queue.dequeue()
    original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)
    preprocessed_image = model.preprocess(tf.to_float(original_image))
    prediction_dict = model.predict(preprocessed_image)
    detections = model.postprocess(prediction_dict)
    original_image_shape = tf.shape(original_image)
    absolute_detection_boxlist = box_list_ops.to_absolute_coordinates(
        box_list.BoxList(tf.squeeze(detections['detection_boxes'], axis=0)),
        original_image_shape[1], original_image_shape[2])
    label_id_offset = 1
    tensor_dict = {
        'original_image': original_image,
        'image_id': input_dict[fields.InputDataFields.source_id],
        'detection_boxes': absolute_detection_boxlist.get(),
        'detection_scores': tf.squeeze(detections['detection_scores'], axis=0),
        'detection_classes': (
            tf.squeeze(detections['detection_classes'], axis=0) + label_id_offset),
    }
    if 'detection_masks' in detections:
        detection_masks = tf.squeeze(detections['detection_masks'], axis=0)
        detection_boxes = tf.squeeze(detections['detection_boxes'], axis=0)
        detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes,
            original_image_shape[1], original_image_shape[2])
        detection_masks_reframed = tf.to_float(tf.greater(detection_masks_reframed, 0.5))
        tensor_dict['detection_masks'] = detection_masks_reframed
    if not ignore_groundtruth:
        normalized_gt_boxlist = box_list.BoxList(
            input_dict[fields.InputDataFields.groundtruth_boxes])
        gt_boxlist = box_list_ops.scale(normalized_gt_boxlist,
                                        tf.shape(original_image)[1],
                                        tf.shape(original_image)[2])
        groundtruth_boxes = gt_boxlist.get()
        groundtruth_classes = input_dict[fields.InputDataFields.groundtruth_classes]
        tensor_dict['groundtruth_boxes'] = groundtruth_boxes
        tensor_dict['groundtruth_classes'] = groundtruth_classes
        tensor_dict['area'] = input_dict[fields.InputDataFields.groundtruth_area]
        tensor_dict['is_crowd'] = input_dict[
            fields.InputDataFields.groundtruth_is_crowd]
        tensor_dict['difficult'] = input_dict[
            fields.InputDataFields.groundtruth_difficult]
        if 'detection_masks' in tensor_dict:
            tensor_dict['groundtruth_instance_masks'] = input_dict[
                fields.InputDataFields.groundtruth_instance_masks]
    return tensor_dict
Restores the model in a tensorflow session.

Args:
    model: model to perform predictions with.
    create_input_dict_fn: function to create input tensor dictionaries.
    ignore_groundtruth: whether groundtruth should be ignored.

Returns:
    tensor_dict: A tensor dictionary with evaluations.
https://github.com/xiaobai1217/mbmd/blob/246f3434bccb9c8357e0f698995b659578bf1afb/lib/object_detection/evaluator.py#L38-L106
import logging import tensorflow as tf from object_detection import eval_util from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import prefetcher from object_detection.core import standard_fields as fields from object_detection.utils import ops slim = tf.contrib.slim EVAL_METRICS_FN_DICT = { 'pascal_voc_metrics': eval_util.evaluate_detection_results_pascal_voc }
MIT License
ucfopen/canvasapi
canvasapi/account.py
Account.add_authentication_providers
python
def add_authentication_providers(self, **kwargs):
    from canvasapi.authentication_provider import AuthenticationProvider

    response = self._requester.request(
        "POST",
        "accounts/{}/authentication_providers".format(self.id),
        _kwargs=combine_kwargs(**kwargs),
    )
    authentication_providers_json = response.json()
    authentication_providers_json.update({"account_id": self.id})

    return AuthenticationProvider(self._requester, authentication_providers_json)
Add external authentication providers for the account

:calls: `POST /api/v1/accounts/:account_id/authentication_providers \
    <https://canvas.instructure.com/doc/api/authentication_providers.html#method.account_authorization_configs.create>`_

:rtype: :class:`canvasapi.authentication_provider.AuthenticationProvider`
https://github.com/ucfopen/canvasapi/blob/2ac9979d17979932a3f43eb8737b7648566c1c68/canvasapi/account.py#L56-L75
from canvasapi.canvas_object import CanvasObject from canvasapi.exceptions import CanvasException, RequiredFieldMissing from canvasapi.feature import Feature, FeatureFlag from canvasapi.grading_period import GradingPeriod from canvasapi.grading_standard import GradingStandard from canvasapi.outcome_import import OutcomeImport from canvasapi.paginated_list import PaginatedList from canvasapi.rubric import Rubric from canvasapi.sis_import import SisImport from canvasapi.util import combine_kwargs, file_or_path, obj_or_id, obj_or_str class Account(CanvasObject): def __str__(self): return "{} ({})".format(self.name, self.id) def abort_sis_imports_pending(self, **kwargs): response = self._requester.request( "PUT", "accounts/{}/sis_imports/abort_all_pending".format(self.id), _kwargs=combine_kwargs(**kwargs), ) return response.json().get("aborted", False) def activate_role(self, role, **kwargs): role_id = obj_or_id(role, "role", (Role,)) response = self._requester.request( "POST", "accounts/{}/roles/{}/activate".format(self.id, role_id), _kwargs=combine_kwargs(**kwargs), ) return Role(self._requester, response.json())
MIT License
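A hedged usage sketch for add_authentication_providers; the URL, token, account id, and provider settings below are placeholders, and the keyword arguments are forwarded as POST parameters by combine_kwargs:

# Sketch only - API_URL and API_KEY are hypothetical placeholders.
from canvasapi import Canvas

API_URL = "https://canvas.example.edu"
API_KEY = "my-access-token"

canvas = Canvas(API_URL, API_KEY)
account = canvas.get_account(1)

# Each kwarg becomes a parameter of
# POST /api/v1/accounts/:account_id/authentication_providers
provider = account.add_authentication_providers(auth_type="saml")
print(provider.account_id)   # the id injected into the response JSON above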
openai/mujoco-worldgen
mujoco_worldgen/parser/parser.py
preprocess
python
def preprocess(xml_dict, root_xml_path, enforce_validation=True):
    normalize(xml_dict)
    set_absolute_paths(xml_dict, root_xml_path)
    extract_includes(xml_dict, root_xml_path, enforce_validation=enforce_validation)
    if enforce_validation:
        validate(xml_dict)
All the steps to turn XML into Worldgen readable form:
- normalize: changes strings to floats / vectors / bools, and consistently turns nodes into OrderedDict and List
- name_meshes: some meshes are missing names. Here we give default names.
- rename_defaults: some defaults are global; we give them names so they won't be anymore.
- extract_includes: recursively, we extract includes and merge them.
- validate: we apply a few final checks on the structure.
https://github.com/openai/mujoco-worldgen/blob/39f52b1b47aed499925a6a214b58bdbdb4e2f75e/mujoco_worldgen/parser/parser.py#L62-L77
from collections import OrderedDict from decimal import getcontext from os.path import abspath, dirname, join, exists from mujoco_worldgen.transforms import closure_transform import numpy as np import xmltodict import os from mujoco_worldgen.util.types import accepts, returns from mujoco_worldgen.util.path import worldgen_path from mujoco_worldgen.parser.normalize import normalize, stringify getcontext().prec = 4 @accepts(str, bool) @returns(OrderedDict) def parse_file(xml_path, enforce_validation=True): with open(xml_path) as f: xml_string = f.read() xml_doc_dict = xmltodict.parse(xml_string.strip()) assert 'mujoco' in xml_doc_dict, "XML must contain <mujoco> node" xml_dict = xml_doc_dict['mujoco'] assert isinstance(xml_dict, OrderedDict), "Invalid node type {}".format(type(xml_dict)) preprocess(xml_dict, xml_path, enforce_validation=enforce_validation) return xml_dict @accepts(OrderedDict) @returns(str) def unparse_dict(xml_dict): stringify(xml_dict) xml_doc_dict = OrderedDict(mujoco=xml_dict) return xmltodict.unparse(xml_doc_dict, pretty=True) @accepts(OrderedDict, str, bool)
MIT License
openstate-sdn/ryu
ryu/lib/bfdlib.py
BFDSession.__init__
python
def __init__(self, app, my_discr, dpid, ofport,
             src_mac, src_ip, src_port,
             dst_mac="FF:FF:FF:FF:FF:FF", dst_ip="255.255.255.255",
             detect_mult=3,
             desired_min_tx_interval=1000000,
             required_min_rx_interval=1000000,
             auth_type=0, auth_keys={}):
    assert not (auth_type and len(auth_keys) == 0)

    self.app = app
    self._session_state = bfd.BFD_STATE_DOWN
    self._remote_session_state = bfd.BFD_STATE_DOWN
    self._local_discr = my_discr
    self._remote_discr = 0
    self._local_diag = 0
    self._desired_min_tx_interval = 1000000
    self._required_min_rx_interval = required_min_rx_interval
    self._remote_min_rx_interval = -1
    self._demand_mode = 0
    self._remote_demand_mode = 0
    self._detect_mult = detect_mult
    self._auth_type = auth_type
    self._auth_keys = auth_keys

    if self._auth_type in [bfd.BFD_AUTH_KEYED_MD5,
                           bfd.BFD_AUTH_METICULOUS_KEYED_MD5,
                           bfd.BFD_AUTH_KEYED_SHA1,
                           bfd.BFD_AUTH_METICULOUS_KEYED_SHA1]:
        self._rcv_auth_seq = 0
        self._xmit_auth_seq = random.randint(0, UINT32_MAX)
        self._auth_seq_known = 0

    self._cfg_desired_min_tx_interval = desired_min_tx_interval
    self._cfg_required_min_echo_rx_interval = 0
    self._active_role = True
    self._detect_time = 0
    self._xmit_period = None
    self._update_xmit_period()
    self._is_polling = True
    self._pending_final = False
    self._enable_send = True
    self._lock = None

    self.src_mac = src_mac
    self.dst_mac = dst_mac
    self.src_ip = src_ip
    self.dst_ip = dst_ip
    self.ipv4_id = random.randint(0, UINT16_MAX)
    self.src_port = src_port
    self.dst_port = BFD_CONTROL_UDP_PORT
    if dst_mac == "FF:FF:FF:FF:FF:FF" or dst_ip == "255.255.255.255":
        self._remote_addr_config = False
    else:
        self._remote_addr_config = True

    self.dpid = dpid
    self.datapath = None
    self.ofport = ofport

    hub.spawn(self._send_loop)
    LOG.info("[BFD][%s][INIT] BFD Session initialized.",
             hex(self._local_discr))
Initialize a BFD session.

__init__ takes the corresponding args in this order.

.. tabularcolumns:: |l|L|

========================= ============================================
Argument                  Description
========================= ============================================
app                       The instance of BFDLib.
my_discr                  My Discriminator.
dpid                      Datapath ID of the BFD interface.
ofport                    Openflow port number of the BFD interface.
src_mac                   Source MAC address of the BFD interface.
src_ip                    Source IPv4 address of the BFD interface.
dst_mac                   (Optional) Destination MAC address of the BFD interface.
dst_ip                    (Optional) Destination IPv4 address of the BFD interface.
detect_mult               (Optional) Detection time multiplier.
desired_min_tx_interval   (Optional) Desired Min TX Interval. (in microseconds)
required_min_rx_interval  (Optional) Required Min RX Interval. (in microseconds)
auth_type                 (Optional) Authentication type.
auth_keys                 (Optional) A dictionary of authentication key chain which key is an integer of *Auth Key ID* and value is a string of *Password* or *Auth Key*.
========================= ============================================

Example::

    sess = BFDSession(app=self.bfdlib,
                      my_discr=1,
                      dpid=1,
                      ofport=1,
                      src_mac="01:23:45:67:89:AB",
                      src_ip="192.168.1.1",
                      dst_mac="12:34:56:78:9A:BC",
                      dst_ip="192.168.1.2",
                      detect_mult=3,
                      desired_min_tx_interval=1000000,
                      required_min_rx_interval=1000000,
                      auth_type=bfd.BFD_AUTH_KEYED_SHA1,
                      auth_keys={1: "secret key 1",
                                 2: "secret key 2"})
https://github.com/openstate-sdn/ryu/blob/b4a7f6c3615a934eaf42894bcb1cc809fce96e93/ryu/lib/bfdlib.py#L75-L198
import logging import six import time import random from ryu.base import app_manager from ryu.controller import event from ryu.controller import ofp_event from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER from ryu.controller.handler import set_ev_cls from ryu.exception import RyuException from ryu.ofproto.ether import ETH_TYPE_IP, ETH_TYPE_ARP from ryu.ofproto import ofproto_v1_3 from ryu.ofproto import inet from ryu.lib import ofctl_v1_3 from ryu.lib import hub from ryu.lib.packet import packet from ryu.lib.packet import ethernet from ryu.lib.packet import ipv4 from ryu.lib.packet import udp from ryu.lib.packet import bfd from ryu.lib.packet import arp from ryu.lib.packet.arp import ARP_REQUEST, ARP_REPLY LOG = logging.getLogger(__name__) UINT16_MAX = (1 << 16) - 1 UINT32_MAX = (1 << 32) - 1 BFD_CONTROL_UDP_PORT = 3784 BFD_ECHO_UDP_PORT = 3785 class BFDSession(object):
Apache License 2.0
autonomousvision/neat
neat/utils.py
flow_uv_to_colors
python
def flow_uv_to_colors(u, v, convert_to_bgr=False):
    flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
    colorwheel = make_colorwheel()
    ncols = colorwheel.shape[0]
    rad = np.sqrt(np.square(u) + np.square(v))
    a = np.arctan2(-v, -u)/np.pi
    fk = (a+1) / 2*(ncols-1)
    k0 = np.floor(fk).astype(np.int32)
    k1 = k0 + 1
    k1[k1 == ncols] = 0
    f = fk - k0
    for i in range(colorwheel.shape[1]):
        tmp = colorwheel[:,i]
        col0 = tmp[k0] / 255.0
        col1 = tmp[k1] / 255.0
        col = (1-f)*col0 + f*col1
        idx = (rad <= 1)
        col[idx] = 1 - rad[idx] * (1-col[idx])
        col[~idx] = col[~idx] * 0.75
        ch_idx = 2-i if convert_to_bgr else i
        flow_image[:,:,ch_idx] = np.floor(255 * col)
    return flow_image
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun

Args:
    u (np.ndarray): Input horizontal flow of shape [H,W]
    v (np.ndarray): Input vertical flow of shape [H,W]
    convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.

Returns:
    np.ndarray: Flow visualization image of shape [H,W,3]
https://github.com/autonomousvision/neat/blob/686f4a0b5b5bf20c99f323e9542f5b68808df2de/neat/utils.py#L54-L87
import math import numpy as np import torch def make_colorwheel(): RY = 15 YG = 6 GC = 4 CB = 11 BM = 13 MR = 6 ncols = RY + YG + GC + CB + BM + MR colorwheel = np.zeros((ncols, 3)) col = 0 colorwheel[0:RY, 0] = 255 colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY) col = col+RY colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG) colorwheel[col:col+YG, 1] = 255 col = col+YG colorwheel[col:col+GC, 1] = 255 colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC) col = col+GC colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB) colorwheel[col:col+CB, 2] = 255 col = col+CB colorwheel[col:col+BM, 2] = 255 colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM) col = col+BM colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR) colorwheel[col:col+MR, 0] = 255 return colorwheel
MIT License
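A quick smoke test for flow_uv_to_colors with a synthetic flow field; it assumes the neat repository (and its dependencies) is importable as neat.utils:

# Synthetic radial flow roughly in [-1, 1]; shapes follow the docstring above.
import numpy as np
from neat.utils import flow_uv_to_colors

h, w = 64, 64
ys, xs = np.meshgrid(np.linspace(-1, 1, h), np.linspace(-1, 1, w), indexing='ij')
u, v = xs, ys

rgb = flow_uv_to_colors(u, v)        # uint8 visualization, shape (64, 64, 3)
print(rgb.shape, rgb.dtype)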
jfilak/sapcli
sap/adt/annotations.py
xml_attribute
python
def xml_attribute(name, deserialize=True, version=None):

    def decorator(meth):
        return XmlAttributeProperty(name, meth, deserialize=deserialize, version=version)

    return decorator
Mark the given property as a XML element attribute of the given name
https://github.com/jfilak/sapcli/blob/072ab1b8d58ea58e4f4bd67fc4f349a6b0b52fac/sap/adt/annotations.py#L255-L263
from enum import Enum import collections def _make_attr_name_for_version(element_name, version): def format_name(version, suffix): return f'{version}_{suffix}' name = f'_{element_name}'.replace(':', '_') if version is None: return name if isinstance(version, str): return format_name(name, version) if isinstance(version, (list, set)): return format_name(name, '_'.join(version)) raise TypeError(f'Version cannot be of the type {type(version).__name__}') class OrderedClassMembers(type): @classmethod def __prepare__(mcs, name, bases): return collections.OrderedDict() def __new__(mcs, name, bases, classdict): members = [] if bases: parent = bases[-1] if hasattr(parent, '__ordered__'): members.extend(parent.__ordered__) members.extend([key for key in classdict.keys() if key not in ('__module__', '__qualname__')]) classdict['__ordered__'] = members return type.__new__(mcs, name, bases, classdict) class XmlElementKind(Enum): OBJECT = 1 TEXT = 2 class XmlAttributeProperty(property): def __init__(self, name, fget, fset=None, deserialize=True, version=None): super().__init__(fget, fset) self.name = name self.deserialize = deserialize self.version = version def setter(self, fset): return type(self)(self.name, self.fget, fset, deserialize=self.deserialize) class XmlElementProperty(property): NAME_FROM_OBJECT = None def __init__(self, name, fget, fset=None, deserialize=True, factory=None, kind=XmlElementKind.OBJECT, version=None): super().__init__(fget, fset) self.name = name self.deserialize = deserialize self.factory = factory self.kind = kind self.version = version def setter(self, fset): return type(self)(self.name, self.fget, fset, deserialize=self.deserialize, factory=self.factory, kind=self.kind) class XmlPropertyImpl: def __init__(self, name, default_value=None, version=None): self.attr = _make_attr_name_for_version(name, version) self.default_value = default_value def get(self, obj): try: return getattr(obj, self.attr) except AttributeError: return self.default_value def set(self, obj, value): obj.__dict__[self.attr] = value class XmlNodeProperty(XmlElementProperty, XmlPropertyImpl): def __init__(self, name, value=None, deserialize=True, factory=None, kind=XmlElementKind.OBJECT, version=None): super().__init__(name, self.get, fset=self.set, deserialize=deserialize, factory=factory, kind=kind, version=version) XmlPropertyImpl.__init__(self, name, default_value=value, version=version) def setter(self, fset): raise NotImplementedError() class XmlNodeAttributeProperty(XmlAttributeProperty, XmlPropertyImpl): def __init__(self, name, value=None, deserialize=True, version=None): super().__init__(name, self.get, fset=self.set, deserialize=deserialize, version=version) XmlPropertyImpl.__init__(self, name, default_value=value, version=version) def setter(self, fset): raise NotImplementedError() class XmlListNodeProperty(XmlElementProperty): def __init__(self, name, value=None, deserialize=True, factory=None, kind=XmlElementKind.OBJECT, version=None): super().__init__(name, self.get, fset=self.append, deserialize=deserialize, factory=factory, kind=kind, version=version) if value is not None and not isinstance(value, list): raise RuntimeError() self.attr = _make_attr_name_for_version(name, version) self.default_value = value def _get_list(self, obj): items = obj.__dict__.get(self.attr, None) if items is None: if self.default_value is not None: items = list(self.default_value) obj.__dict__[self.attr] = items return items def get(self, obj): try: return getattr(obj, self.attr) except AttributeError: return 
self._get_list(obj) def append(self, obj, value): items = self._get_list(obj) if items is None: items = [] obj.__dict__[self.attr] = items items.append(value) class XmlContainerMeta(OrderedClassMembers): def define(cls, item_element_name, item_factory, version=None): items_property = XmlListNodeProperty(item_element_name, deserialize=True, factory=item_factory, value=[], kind=XmlElementKind.OBJECT, version=version) return type(f'XMLContainer_{item_factory.__name__}', (cls,), dict(items=items_property)) class XmlContainer(metaclass=XmlContainerMeta): def append(self, value): self.items.append(value) def __iter__(self): return self.items.__iter__() def __getitem__(self, index): return self.items.__getitem__(index) def __len__(self): return self.items.__len__() def xml_text_node_property(name, value=None, deserialize=True, version=None): return XmlNodeProperty(name, value=value, deserialize=deserialize, factory=None, kind=XmlElementKind.TEXT, version=version)
Apache License 2.0
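A small, hypothetical illustration of the xml_attribute decorator; the class and the 'adtcore:name' attribute are invented, and real sapcli model classes additionally use the OrderedClassMembers metaclass from the same module:

# Hypothetical class; only sketches how the decorator builds the property.
from sap.adt.annotations import xml_attribute

class DummyRequest:

    def __init__(self, name):
        self._name = name

    @xml_attribute('adtcore:name')
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

req = DummyRequest('ZREPORT')
print(req.name)              # 'ZREPORT' - read goes through XmlAttributeProperty
print(type(req).name.name)   # 'adtcore:name' - XML attribute name kept on the property
req.name = 'ZPROGRAM'        # write goes through the setter variant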
dmlc/gluon-cv
gluoncv/torch/data/detection/detection_dataset.py
build_detection_train_loader
python
def build_detection_train_loader(cfg, mapper=None):
    dataset_dicts = get_detection_dataset_dicts(
        cfg.CONFIG.DATA.DATASET.TRAIN,
        filter_empty=cfg.CONFIG.DATA.DETECTION.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.CONFIG.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.CONFIG.DATA.KEYPOINT_ON else 0,
        proposal_files=cfg.CONFIG.DATA.DATASET.PROPOSAL_FILES_TRAIN
        if cfg.CONFIG.DATA.LOAD_PROPOSALS else None,
    )
    dataset = DatasetFromList(dataset_dicts, copy=False)

    if mapper is None:
        mapper = DatasetMapper.from_config(cfg, True)
    dataset = MapDataset(dataset, mapper)

    sampler_name = cfg.CONFIG.DATA.DETECTION.SAMPLER_TRAIN
    logger = logging.getLogger(__name__)
    logger.info("Using training sampler {}".format(sampler_name))
    if sampler_name == "TrainingSampler":
        sampler = TrainingSampler(len(dataset))
    elif sampler_name == "RepeatFactorTrainingSampler":
        repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
            dataset_dicts, cfg.CONFIG.DATA.DETECTION.REPEAT_THRESHOLD
        )
        sampler = RepeatFactorTrainingSampler(repeat_factors)
    else:
        raise ValueError("Unknown training sampler: {}".format(sampler_name))
    return build_batch_data_loader(
        dataset,
        sampler,
        cfg.CONFIG.TRAIN.BATCH_SIZE,
        aspect_ratio_grouping=cfg.CONFIG.DATA.DETECTION.ASPECT_RATIO_GROUPING,
        num_workers=cfg.CONFIG.DATA.NUM_WORKERS,
    )
A data loader is created by the following steps:

1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
2. Coordinate a random shuffle order shared among all processes (all GPUs)
3. Each process spawn another few workers to process the dicts. Each worker will:
   * Map each metadata dict into another format to be consumed by the model.
   * Batch them by simply putting dicts into a list.

The batched ``list[mapped_dict]`` is what this dataloader will yield.

Args:
    cfg (CfgNode): the config
    mapper (callable): a callable which takes a sample (dict) from dataset and
        returns the format to be consumed by the model.
        By default it will be `DatasetMapper.from_config(cfg, True)`.

Returns:
    an infinite iterator of training data
https://github.com/dmlc/gluon-cv/blob/f22650a5d31c31956d9392530a0e619689cdb3c5/gluoncv/torch/data/detection/detection_dataset.py#L291-L345
import itertools import random import logging import operator import dill import pickle from pprint import pformat from typing import List, Optional, Union import numpy as np import torch import torch.utils.data as data from ..structures import BoxMode from ...utils.comm import get_world_size from ...utils.random import seed_all_rng from ..transforms import instance_transforms as T from ..registry.catalog import DatasetCatalog, MetadataCatalog from .detection_utils import check_metadata_consistency from . import detection_utils as utils from .samplers import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", "trivial_batch_collator", ] def filter_images_with_only_crowd_annotations(dataset_dicts): num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=np.int) for entry in dataset_dicts: annos = entry["annotations"] classes = [x["category_id"] for x in annos if not x.get("iscrowd", 0)] histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): if len(x) > 13: return x[:11] + ".." 
return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) logging.info("Distribution of instances among all {} categories:\n".format(num_classes)) logging.info(pformat(data)) def get_detection_dataset_dicts( dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None ): assert len(dataset_names) dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names] for dataset_name, dicts in zip(dataset_names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(dataset_names) == len(proposal_files) dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if has_instances: try: class_names = MetadataCatalog.get(dataset_names[0]).thing_classes check_metadata_consistency("thing_classes", dataset_names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: pass return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0 ): world_size = get_world_size() assert ( total_batch_size > 0 and total_batch_size % world_size == 0 ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( total_batch_size, world_size ) batch_size = total_batch_size // world_size if aspect_ratio_grouping: data_loader = torch.utils.data.DataLoader( dataset, sampler=sampler, num_workers=num_workers, batch_sampler=None, collate_fn=operator.itemgetter(0), worker_init_fn=worker_init_reset_seed, ) return AspectRatioGroupedDataset(data_loader, batch_size) else: batch_sampler = torch.utils.data.sampler.BatchSampler( sampler, batch_size, drop_last=True ) return torch.utils.data.DataLoader( dataset, num_workers=num_workers, batch_sampler=batch_sampler, collate_fn=trivial_batch_collator, worker_init_fn=worker_init_reset_seed, )
Apache License 2.0
hosford42/xcs
xcs/scenarios.py
Scenario.more
python
def more(self):
    raise NotImplementedError()
Return a Boolean indicating whether additional actions may be executed, per the reward program.

Usage:
    while scenario.more():
        situation = scenario.sense()
        selected_action = choice(possible_actions)
        reward = scenario.execute(selected_action)

Arguments: None
Return:
    A bool indicating whether additional situations remain in the current run.
https://github.com/hosford42/xcs/blob/183bdd0dd339e19ded3be202f86e1b38bdb9f1e5/xcs/scenarios.py#L183-L198
__author__ = 'Aaron Hosford' __all__ = [ 'HaystackProblem', 'MUXProblem', 'Scenario', 'ScenarioObserver', 'PreClassifiedData', 'UnclassifiedData', ] import logging import random from abc import ABCMeta, abstractmethod from . import numpy from . import bitstrings class Scenario(metaclass=ABCMeta): @property @abstractmethod def is_dynamic(self): raise NotImplementedError() @abstractmethod def get_possible_actions(self): raise NotImplementedError() @abstractmethod def reset(self): raise NotImplementedError() @abstractmethod def sense(self): raise NotImplementedError() @abstractmethod def execute(self, action): raise NotImplementedError() @abstractmethod
BSD 3-Clause New or Revised License
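A toy subclass sketch showing where more() fits into the loop from the docstring; the scenario itself (guessing a hidden bit) is invented, and it assumes the xcs package is installed:

# Hypothetical scenario: the agent guesses a hidden bit for a fixed number of steps.
import random
from xcs.scenarios import Scenario

class GuessBitScenario(Scenario):

    def __init__(self, steps=10):
        self.initial_steps = steps
        self.steps_remaining = steps
        self.hidden = random.randrange(2)

    @property
    def is_dynamic(self):
        return False

    def get_possible_actions(self):
        return [0, 1]

    def reset(self):
        self.steps_remaining = self.initial_steps

    def sense(self):
        self.hidden = random.randrange(2)
        return ()   # a real scenario would return a bitstring situation here

    def execute(self, action):
        self.steps_remaining -= 1
        return float(action == self.hidden)

    def more(self):
        # Contract from the docstring: True while situations remain in this run.
        return self.steps_remaining > 0

scenario = GuessBitScenario(steps=10)
while scenario.more():
    situation = scenario.sense()
    reward = scenario.execute(random.choice(scenario.get_possible_actions()))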
geier/pycarddav
pycarddav/__init__.py
Section._parse_write_support
python
def _parse_write_support(self, value):
    value = value.strip()
    if value == 'YesPleaseIDoHaveABackupOfMyData':
        return True
    else:
        return False
returns True if value is YesPlease..., this is a rather dirty solution, but it works fine (TM)
https://github.com/geier/pycarddav/blob/edc9150f4b21cf027f42e77c42a6030d091e0624/pycarddav/__init__.py#L151-L158
import argparse import ConfigParser import getpass import re import logging import os import signal import subprocess import sys import xdg.BaseDirectory import version from netrc import netrc from urlparse import urlsplit __productname__ = 'pyCardDAV' __version__ = version.__version__ __author__ = 'Christian Geier' __copyright__ = 'Copyright 2011-2013 Christian Geier & contributors' __author_email__ = 'pycarddav@lostpackets.de' __description__ = 'A CardDAV based address book tool' __license__ = 'Expat/MIT, see COPYING' __homepage__ = 'http://lostpackets.de/pycarddav/' def capture_user_interruption(): signal.signal(signal.SIGINT, lambda x, y: sys.exit(0)) class Namespace(dict): def __init__(self, obj=None): dict.__init__(self, obj if obj else {}) def __dir__(self): return list(self) def __repr__(self): return "%s(%s)" % (type(self).__name__, dict.__repr__(self)) def __getattribute__(self, name): try: return self[name] except KeyError: msg = "'%s' object has no attribute '%s'" raise AttributeError(msg % (type(self).__name__, name)) def __setattr__(self, name, value): self[name] = value def __delattr__(self, name): del self[name] class Section(object): READERS = {bool: ConfigParser.SafeConfigParser.getboolean, float: ConfigParser.SafeConfigParser.getfloat, int: ConfigParser.SafeConfigParser.getint, str: ConfigParser.SafeConfigParser.get} def __init__(self, parser, group): self._parser = parser self._group = group self._schema = None self._parsed = {} def matches(self, name): return self._group == name.lower() def is_collection(self): return False def parse(self, section): if self._schema is None: return None for option, default, filter_ in self._schema: try: if filter_ is None: reader = ConfigParser.SafeConfigParser.get filter_ = lambda x: x else: reader = Section.READERS[type(default)] self._parsed[option] = filter_(reader(self._parser, section, option)) self._parser.remove_option(section, option) except ConfigParser.Error: if filter_ is None: self._parsed[option] = default else: self._parsed[option] = filter_(default) return Namespace(self._parsed) @property def group(self): return self._group def _parse_verify(self, value): boolvalue = value.strip().lower() if boolvalue == 'true': return True elif boolvalue == 'false': return False else: return os.path.expanduser(value)
MIT License
styxit/htpc-manager
libs/cherrypy/test/modwsgi.py
ModWSGISupervisor.stop
python
def stop(self):
    read_process(APACHE_PATH, "-k stop")
Gracefully shutdown a server that is serving forever.
https://github.com/styxit/htpc-manager/blob/490697460b4fa1797106aece27d873bc256b2ff1/libs/cherrypy/test/modwsgi.py#L125-L127
import os curdir = os.path.abspath(os.path.dirname(__file__)) import re import sys import time import cherrypy from cherrypy.test import helper, webtest def read_process(cmd, args=""): pipein, pipeout = os.popen4("%s %s" % (cmd, args)) try: firstline = pipeout.readline() if (re.search(r"(not recognized|No such file|not found)", firstline, re.IGNORECASE)): raise IOError('%s must be on your system path.' % cmd) output = firstline + pipeout.read() finally: pipeout.close() return output if sys.platform == 'win32': APACHE_PATH = "httpd" else: APACHE_PATH = "apache" CONF_PATH = "test_mw.conf" conf_modwsgi = r""" # Apache2 server conf file for testing CherryPy with modpython_gateway. ServerName 127.0.0.1 DocumentRoot "/" Listen %(port)s AllowEncodedSlashes On LoadModule rewrite_module modules/mod_rewrite.so RewriteEngine on RewriteMap escaping int:escape LoadModule log_config_module modules/mod_log_config.so LogFormat "%%h %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-agent}i\"" combined CustomLog "%(curdir)s/apache.access.log" combined ErrorLog "%(curdir)s/apache.error.log" LogLevel debug LoadModule wsgi_module modules/mod_wsgi.so LoadModule env_module modules/mod_env.so WSGIScriptAlias / "%(curdir)s/modwsgi.py" SetEnv testmod %(testmod)s """ class ModWSGISupervisor(helper.Supervisor): using_apache = True using_wsgi = True template=conf_modwsgi def __str__(self): return "ModWSGI Server on %s:%s" % (self.host, self.port) def start(self, modulename): mpconf = CONF_PATH if not os.path.isabs(mpconf): mpconf = os.path.join(curdir, mpconf) f = open(mpconf, 'wb') try: output = (self.template % {'port': self.port, 'testmod': modulename, 'curdir': curdir}) f.write(output) finally: f.close() result = read_process(APACHE_PATH, "-k start -f %s" % mpconf) if result: print(result) cherrypy._cpserver.wait_for_occupied_port("127.0.0.1", self.port) webtest.openURL('/ihopetheresnodefault', port=self.port) time.sleep(1)
MIT License
ucasir/nprf
preprocess/matrix.py
kernal_mus
python
def kernal_mus(n_kernels, use_exact):
    if use_exact:
        l_mu = [1]
    else:
        l_mu = [2]
    if n_kernels == 1:
        return l_mu
    bin_size = 2.0 / (n_kernels - 1)
    l_mu.append(1 - bin_size / 2)
    for i in xrange(1, n_kernels - 1):
        l_mu.append(l_mu[i] - bin_size)
    return l_mu
get the mu for each gaussian kernel. Mu is the middle of each bin
:param n_kernels: number of kernels (including exact match). first one is exact match
:return: l_mu, a list of mu.
https://github.com/ucasir/nprf/blob/d385929b3249a003e017cba03b8669dc6a05037e/preprocess/matrix.py#L97-L114
import numpy as np from bs4 import BeautifulSoup from gensim.models.keyedvectors import KeyedVectors from gensim.matutils import unitvec, cossim def get_word_vec(word, embeddings, OOV_dict=None): vector_size = embeddings.vector_size try: vec = embeddings.word_vec(word) except KeyError: vec = OOV_dict.get(word) if vec is None: vec = np.random.uniform(low=-1., high=1., size=vector_size) OOV_dict.update({word: vec}) return vec def similarity_matrix(query, doc, embeddings, OOV_dict): vector_size = embeddings.vector_size q_mat = np.zeros((len(query), vector_size)) d_mat = np.zeros((vector_size, len(doc))) for i, word in enumerate(query): q_mat[i, :] = unitvec(get_word_vec(word, embeddings, OOV_dict)) for j, word in enumerate(doc): d_mat[:, j] = unitvec(get_word_vec(word, embeddings, OOV_dict)) similarity_matrix = np.dot(q_mat, d_mat) return similarity_matrix def hist_from_matrix(text_maxlen, hist_size, sim_mat): hist = np.zeros((text_maxlen, hist_size), dtype=np.float32) for (i, j), v in np.ndenumerate(sim_mat): if i >= text_maxlen: break vid = int((v + 1.) / 2. * (hist_size - 1.)) hist[i][vid] += 1 hist += 1 hist = np.log10(hist) return hist def kernel_from_matrix(sim_mat, mu_list, sigma_list, d2d=False): assert len(mu_list) == len(sigma_list) text1_len = sim_mat.shape[0] if d2d: kernel_feature = np.zeros((text1_len, len(mu_list)), dtype=np.float32) else: kernel_feature = np.zeros((len(mu_list),), dtype=np.float32) for i in range(len(mu_list)): mu = mu_list[i] sigma = sigma_list[i] tmp = np.exp(-np.square(sim_mat - mu) / (2 * np.square(sigma))) kde = np.sum(tmp, axis=1) kde = np.log(np.maximum(kde, 1e-10)) * 0.01 if d2d: kernel_feature[:, i] = kde else: kernel_feature[i] = (np.sum(kde)) return kernel_feature
Apache License 2.0
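For reference, the same computation restated in Python 3 (the original above uses Python 2's xrange); the printed bin centres are approximate because of float arithmetic:

# Python 3 restatement of kernal_mus; behaviour is intended to match the original.
def kernel_mus(n_kernels, use_exact=True):
    l_mu = [1] if use_exact else [2]
    if n_kernels == 1:
        return l_mu
    bin_size = 2.0 / (n_kernels - 1)     # kernels tile the similarity range [-1, 1]
    l_mu.append(1 - bin_size / 2)        # centre of the first soft bin
    for i in range(1, n_kernels - 1):
        l_mu.append(l_mu[i] - bin_size)  # step down one bin at a time
    return l_mu

print(kernel_mus(11))
# approximately [1, 0.9, 0.7, 0.5, 0.3, 0.1, -0.1, -0.3, -0.5, -0.7, -0.9], up to rounding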
borda/pyimsegm
imsegm/utilities/data_samples.py
sample_segment_vertical_3d
python
def sample_segment_vertical_3d(seg_size=SAMPLE_SEG_SIZE_3D_SMALL, nb_labels=SAMPLE_SEG_NB_CLASSES, levels=2):
    seg = []
    for lb in range(int(levels)):
        seg_2d = sample_segment_vertical_2d(seg_size[:2], nb_labels)
        for _ in range(int(seg_size[2] / levels)):
            seg.append(seg_2d.copy() + lb * nb_labels)
    seg = np.array(seg, dtype=np.int)
    return seg
create sample regular 3D segmentation

:param tuple(int,int)|tuple(int,int,int) seg_size:
:param int nb_labels:
:param int levels:
:return ndarray:

>>> im = sample_segment_vertical_3d((10, 5, 6), 3)
>>> im[:, :, 3]
array([[1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1],
       [4, 4, 4, 4, 4],
       [4, 4, 4, 4, 4],
       [4, 4, 4, 4, 4]])
https://github.com/borda/pyimsegm/blob/7463cfc7aad8781564dc84c8780f291cc3c17fe3/imsegm/utilities/data_samples.py#L96-L119
import logging import os import numpy as np from imsegm.utilities.data_io import io_imread, update_path SAMPLE_SEG_SIZE_2D_SMALL = (20, 10) SAMPLE_SEG_SIZE_2D_NORM = (150, 100) SAMPLE_SEG_NB_CLASSES = 3 SAMPLE_SEG_SIZE_3D_SMALL = (10, 5, 6) PATH_IMAGES = update_path('data-images') IMAGE_LENNA = os.path.join('others', 'lena.png') IMAGE_OBJECTS = os.path.join('synthetic', 'reference.jpg') IMAGE_3CLS = os.path.join('synthetic', 'texture_rgb_3cls.jpg') IMAGE_STAR = os.path.join('others', 'sea_starfish-2.jpg') IMAGE_HISTOL_CIMA = os.path.join('histology_CIMA', '29-041-Izd2-w35-CD31-3-les1.jpg') IMAGE_HISTOL_FLAGSHIP = os.path.join('histology_Flagship', 'Case001_Cytokeratin.jpg') IMAGE_DROSOPHILA_DISC = os.path.join('drosophila_disc', 'image', 'img_6.jpg') ANNOT_DROSOPHILA_DISC = os.path.join('drosophila_disc', 'annot', 'img_6.png') IMAGE_DROSOPHILA_OVARY_2D = os.path.join('drosophila_ovary_slice', 'image', 'insitu7545.jpg') ANNOT_DROSOPHILA_OVARY_2D = os.path.join('drosophila_ovary_slice', 'annot_struct', 'insitu7545.png') IMAGE_DROSOPHILA_OVARY_3D = os.path.join('drosophila_ovary_3D', 'AU10-13_f0011.tif') IMAGE_LANGER_ISLET = os.path.join('langerhans_islets', 'image', 'gtExoIsl_21.jpg') LIST_ALL_IMAGES = [ IMAGE_LENNA, IMAGE_3CLS, IMAGE_OBJECTS, IMAGE_STAR, IMAGE_HISTOL_CIMA, IMAGE_HISTOL_FLAGSHIP, IMAGE_LANGER_ISLET, IMAGE_DROSOPHILA_DISC, ANNOT_DROSOPHILA_DISC, IMAGE_DROSOPHILA_OVARY_2D, ANNOT_DROSOPHILA_OVARY_2D, IMAGE_DROSOPHILA_OVARY_3D, ] def sample_segment_vertical_2d(seg_size=SAMPLE_SEG_SIZE_2D_SMALL, nb_labels=SAMPLE_SEG_NB_CLASSES): cls_vals = [] cls_size = (seg_size[1], int(seg_size[0] / nb_labels)) for lb in range(nb_labels): cls_vals.append(lb * np.ones(cls_size)) seg = np.hstack(tuple(cls_vals)) seg = np.array(seg, dtype=np.int) return seg
BSD 3-Clause New or Revised License
kippt/django-api-boilerplate
api_boilerplate/middleware.py
ApiHttpBasicAuthMiddleware.process_view
python
def process_view(self, request, view_func, view_args, view_kwargs):
    if request.META.get('HTTP_AUTHORIZATION', None):
        try:
            (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()
            if auth_type != 'Basic':
                return JSONResponseUnauthorized(request, 'Error with HTTP Basic Auth')
            user_pass = base64.b64decode(data)
        except:
            return JSONResponseUnauthorized(request, 'Error with HTTP Basic Auth')
        bits = user_pass.split(':', 1)
        if len(bits) != 2:
            return JSONResponseUnauthorized(request, 'Invalid Authorization header. Value should be "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" where base64 encoded part is encrypted from "username:password"')
        username = bits[0]
        user = _get_user(username)
        if user == None:
            return JSONResponseUnauthorized(request, 'User and password don\'t match')
        if user.check_password(bits[1]):
            request.user = user
        else:
            return JSONResponseUnauthorized(request, 'Username and password don\'t match')
        request.user = user
    return
Middleware for API authentication. Partly forked from django-tastypie
https://github.com/kippt/django-api-boilerplate/blob/ae73629f6b45f44cd0adff07382dd4c638f2c1ba/api_boilerplate/middleware.py#L66-L100
import base64 import json from django.contrib.auth.models import User from django.conf import settings from django.db.models.loading import get_model from django.middleware.csrf import get_token from api_boilerplate.http import JSONResponseUnauthorized, JSONResponseBadRequest AUTH_CASE_INSENSITIVE = getattr(settings, 'API_AUTH_CASE_INSENSITIVE', False) AUTH_EMAIL_AS_USERNAME = getattr(settings, 'API_AUTH_EMAIL_AS_USERNAME', False) API_KEY_MODEL = getattr(settings, 'API_KEY_MODEL', 'api_boilerplate.models.ApiKey') ApiKey = get_model(*API_KEY_MODEL.split('.',1)) REQUEST_JSON = getattr(settings, 'API_REQUEST_JSON', True) def _get_user(username): try: if AUTH_CASE_INSENSITIVE: user = User.objects.get(username__iexact=username) else: user = User.objects.get(username=username) except User.DoesNotExist: user = None if AUTH_EMAIL_AS_USERNAME: if not user: try: if AUTH_CASE_INSENSITIVE: user = User.objects.get(email__iexact=username) else: user = User.objects.get(email=username) except User.DoesNotExist: user = None return user class ApiDjangoAuthMiddleware: def process_view(self, request, view_func, view_args, view_kwargs): if request.user.is_authenticated(): get_token(request) return None class ApiHttpBasicAuthMiddleware:
MIT License
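For context, this is how a client would build the Authorization header that the middleware above parses; the credentials are made up and only the standard library is used:

import base64

credentials = "alice:s3cret"   # hypothetical "username:password"
token = base64.b64encode(credentials.encode()).decode()
headers = {"Authorization": "Basic " + token}

print(headers)   # {'Authorization': 'Basic YWxpY2U6czNjcmV0'}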
jonathanfeng/new_horizons
venv/lib/python3.7/site-packages/jinja2/runtime.py
Context.get_all
python
def get_all(self):
    if not self.vars:
        return self.parent
    if not self.parent:
        return self.vars
    return dict(self.parent, **self.vars)
Return the complete context as dict including the exported variables. For optimizations reasons this might not return an actual copy so be careful with using it.
https://github.com/jonathanfeng/new_horizons/blob/0ec21c8f8423932611e1e0bf24548dcef912bc54/venv/lib/python3.7/site-packages/jinja2/runtime.py#L249-L258
import sys from itertools import chain from types import MethodType from markupsafe import escape from markupsafe import Markup from markupsafe import soft_unicode from ._compat import abc from ._compat import imap from ._compat import implements_iterator from ._compat import implements_to_string from ._compat import iteritems from ._compat import PY2 from ._compat import string_types from ._compat import text_type from ._compat import with_metaclass from .exceptions import TemplateNotFound from .exceptions import TemplateRuntimeError from .exceptions import UndefinedError from .nodes import EvalContext from .utils import concat from .utils import evalcontextfunction from .utils import internalcode from .utils import missing from .utils import Namespace from .utils import object_type_repr exported = [ "LoopContext", "TemplateReference", "Macro", "Markup", "TemplateRuntimeError", "missing", "concat", "escape", "markup_join", "unicode_join", "to_string", "identity", "TemplateNotFound", "Namespace", "Undefined", ] to_string = text_type def identity(x): return x def markup_join(seq): buf = [] iterator = imap(soft_unicode, seq) for arg in iterator: buf.append(arg) if hasattr(arg, "__html__"): return Markup(u"").join(chain(buf, iterator)) return concat(buf) def unicode_join(seq): return concat(imap(text_type, seq)) def new_context( environment, template_name, blocks, vars=None, shared=None, globals=None, locals=None, ): if vars is None: vars = {} if shared: parent = vars else: parent = dict(globals or (), **vars) if locals: if shared: parent = dict(parent) for key, value in iteritems(locals): if value is not missing: parent[key] = value return environment.context_class(environment, parent, template_name, blocks) class TemplateReference(object): def __init__(self, context): self.__context = context def __getitem__(self, name): blocks = self.__context.blocks[name] return BlockReference(name, self.__context, blocks, 0) def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.__context.name) def _get_func(x): return getattr(x, "__func__", x) class ContextMeta(type): def __new__(mcs, name, bases, d): rv = type.__new__(mcs, name, bases, d) if bases == (): return rv resolve = _get_func(rv.resolve) default_resolve = _get_func(Context.resolve) resolve_or_missing = _get_func(rv.resolve_or_missing) default_resolve_or_missing = _get_func(Context.resolve_or_missing) if ( resolve is not default_resolve and resolve_or_missing is default_resolve_or_missing ): rv._legacy_resolve_mode = True elif ( resolve is default_resolve and resolve_or_missing is default_resolve_or_missing ): rv._fast_resolve_mode = True return rv def resolve_or_missing(context, key, missing=missing): if key in context.vars: return context.vars[key] if key in context.parent: return context.parent[key] return missing class Context(with_metaclass(ContextMeta)): _legacy_resolve_mode = False _fast_resolve_mode = False def __init__(self, environment, parent, name, blocks): self.parent = parent self.vars = {} self.environment = environment self.eval_ctx = EvalContext(self.environment, name) self.exported_vars = set() self.name = name self.blocks = dict((k, [v]) for k, v in iteritems(blocks)) if self._fast_resolve_mode: self.resolve_or_missing = MethodType(resolve_or_missing, self) def super(self, name, current): try: blocks = self.blocks[name] index = blocks.index(current) + 1 blocks[index] except LookupError: return self.environment.undefined( "there is no parent block called %r." 
% name, name="super" ) return BlockReference(name, self, blocks, index) def get(self, key, default=None): try: return self[key] except KeyError: return default def resolve(self, key): if self._legacy_resolve_mode: rv = resolve_or_missing(self, key) else: rv = self.resolve_or_missing(key) if rv is missing: return self.environment.undefined(name=key) return rv def resolve_or_missing(self, key): if self._legacy_resolve_mode: rv = self.resolve(key) if isinstance(rv, Undefined): rv = missing return rv return resolve_or_missing(self, key) def get_exported(self): return dict((k, self.vars[k]) for k in self.exported_vars)
MIT License
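A standalone sketch (plain dicts, not the Jinja2 class) of the caveat in the docstring: when one side is empty, get_all() hands back the other mapping itself rather than a copy:

# Pure-python illustration; mirrors the three branches of get_all().
def get_all(parent, vars_):
    if not vars_:
        return parent             # same object as parent, not a copy
    if not parent:
        return vars_
    return dict(parent, **vars_)  # only this branch builds a fresh dict

parent, vars_ = {"a": 1}, {}
merged = get_all(parent, vars_)
merged["b"] = 2                   # silently mutates parent as well
print(parent)                     # {'a': 1, 'b': 2}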
lcharlick/python-metallum
metallum.py
Album.date
python
def date(self) -> Optional[datetime.datetime]:
    s = self._dd_text_for_label('Release date:')
    if len(s) > 4 and ',' not in s:
        date = datetime.datetime.strptime(s, '%B %Y')
    else:
        date = date_parser.parse(s)
    return date
>>> album.date
datetime.datetime(1986, 3, 3, 0, 0)
https://github.com/lcharlick/python-metallum/blob/e50bf2ae83ac0bc3928b379714ebf53771ae6ac4/metallum.py#L693-L705
import datetime import json import re import time import os.path import tempfile from typing import List, Optional from urllib.parse import urlencode import requests_cache from dateutil import parser as date_parser from pyquery import PyQuery from requests_cache.core import remove_expired_responses CACHE_FILE = os.path.join(tempfile.gettempdir(), 'metallum_cache') requests_cache.install_cache(cache_name=CACHE_FILE, expire_after=300) remove_expired_responses() BASE_URL = 'https://www.metal-archives.com' USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36' BR = '<br/>' CR = '&#13;' REQUEST_TIMEOUT = 1.0 UTC_OFFSET = 4 def map_params(params, m): res = {} for k, v in params.items(): if v is not None: res[m.get(k, k)] = v return res def band_for_id(id: str) -> 'Band': return Band('bands/_/{0}'.format(id)) def band_search(name, strict=True, genre=None, countries=[], year_created_from=None, year_created_to=None, status=[], themes=None, location=None, label=None, page_start=0) -> 'Search': params = locals() params['strict'] = str(int(params['strict'])) params = map_params(params, { 'name': 'bandName', 'strict': 'exactBandMatch', 'countries': 'country[]', 'year_created_from': 'yearCreationFrom', 'year_created_to': 'yearCreationTo', 'status': 'status[]', 'label': 'bandLabelName', 'page_start': 'iDisplayStart' }) url = 'search/ajax-advanced/searching/bands/?' + urlencode(params, True) return Search(url, BandResult) def album_for_id(id: str) -> 'AlbumWrapper': return AlbumWrapper(url='albums/_/_/{0}'.format(id)) def album_search(title, strict=True, band=None, band_strict=True, year_from=None, year_to=None, month_from=None, month_to=None, countries=[], location=None, label=None, indie_label=False, genre=None, types=[], page_start=0) -> 'Search': params = locals() params['strict'] = str(int(params['strict'])) params['band_strict'] = str(int(params['band_strict'])) params['indie_label'] = str(int(params['indie_label'])) if year_from and not month_from: params['month_from'] = '1' if year_to and not month_to: params['month_to'] = '12' params = map_params(params, { 'title': 'releaseTitle', 'strict': 'exactReleaseMatch', 'band': 'bandName', 'band_strict': 'exactBandMatch', 'year_from': 'releaseYearFrom', 'year_to': 'releaseYearTo', 'month_from': 'releaseMonthFrom', 'month_to': 'releaseMonthTo', 'countries': 'country[]', 'label': 'releaseLabelName', 'indie_label': 'indieLabel', 'types': 'releaseType[]', 'page_start': 'iDisplayStart' }) url = 'search/ajax-advanced/searching/albums/?' 
+ urlencode(params, True) return Search(url, AlbumResult) def lyrics_for_id(id: int) -> 'Lyrics': return Lyrics(id) def split_genres(s: str) -> List[str]: return re.split(r'(?:,|;)\s*(?![^()]*\))', s) class AlbumTypes(object): FULL_LENGTH = 'Full-length' EP = 'EP' SINGLE = 'Single' DEMO = 'Demo' VIDEO = 'Video/VHS' COMPILATION = 'Compilation' DVD = 'DVD' LIVE = 'Live album' SPLIT = 'Split' def make_absolute(endpoint: str) -> str: return '{0}/{1}'.format(BASE_URL, endpoint) def offset_time(t: datetime.datetime) -> datetime.datetime: td = datetime.timedelta(hours=UTC_OFFSET) return t + td def parse_duration(s: str) -> int: parts = s.split(':') seconds = int(parts[-1]) if len(parts) > 1: seconds += int(parts[-2]) * 60 if len(parts) == 3: seconds += int(parts[0]) * 3600 return seconds class Metallum(object): def __init__(self, url): self._session = requests_cache.CachedSession(cache_name=CACHE_FILE) self._session.hooks = {'response': self._make_throttle_hook()} self._session.headers = { 'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip' } self._content = self._fetch_page_content(url) self._page = PyQuery(self._content) def _make_throttle_hook(self): def hook(response, *args, **kwargs): is_cached = getattr(response, 'from_cache', False) if not is_cached: time.sleep(REQUEST_TIMEOUT) return response return hook def _fetch_page_content(self, url) -> str: res = self._session.get(make_absolute(url)) return res.text class MetallumEntity(Metallum): def _dd_element_for_label(self, label: str) -> Optional[PyQuery]: labels = list(self._page('dt').contents()) try: index = labels.index(label) except ValueError: return None return self._page('dd').eq(index) def _dd_text_for_label(self, label: str) -> str: element = self._dd_element_for_label(label) return element.text() if element else "" class MetallumCollection(Metallum, list): def __init__(self, url): super().__init__(url) def search(self, **kwargs) -> 'MetallumCollection': collection = self[:] for arg in kwargs: for item in collection[:]: if kwargs[arg].lower() != getattr(item, arg).lower(): try: collection.remove(item) except ValueError: continue return collection class Search(Metallum, list): def __init__(self, url, result_handler): super().__init__(url) data = json.loads(self._content) results = data['aaData'] for result in results: self.append(result_handler(result)) self.result_count = int(data['iTotalRecords']) class SearchResult(list): _resultType = None def __init__(self, details): super().__init__() for detail in details: if re.match('^<a href.*', detail): d = PyQuery(detail) self.append(d('a').text()) else: self.append(detail) def __repr__(self): s = ' | '.join(self) return '<SearchResult: {0}>'.format(s) def get(self) -> 'Metallum': return self._resultType(self.url) class BandResult(SearchResult): def __init__(self, details): super().__init__(details) self._details = details self._resultType = Band @property def id(self) -> str: url = PyQuery(self._details[0])('a').attr('href') return re.search(r'\d+$', url).group(0) @property def url(self) -> str: return 'bands/_/{0}'.format(self.id) @property def name(self) -> str: return self[0] @property def genres(self) -> List[str]: return split_genres(self[1]) @property def country(self) -> str: return self[2] class AlbumResult(SearchResult): def __init__(self, details): super().__init__(details) self._details = details self._resultType = AlbumWrapper @property def id(self) -> str: url = PyQuery(self._details[1])('a').attr('href') return re.search(r'\d+$', url).group(0) @property def url(self) -> str: 
return 'albums/_/_/{0}'.format(self.id) @property def title(self) -> str: return self[1] @property def type(self) -> str: return self[2] @property def bands(self) -> List['Band']: bands = [] el = PyQuery(self._details[0]).wrap('<div></div>') for a in el.find('a'): url = PyQuery(a).attr('href') id = re.search(r'\d+$', url).group(0) bands.append(Band('bands/_/{0}'.format(id))) return bands @property def band_name(self) -> str: return self[0] class Band(MetallumEntity): def __init__(self, url): super().__init__(url) def __repr__(self): return '<Band: {0}>'.format(self.name) @property def id(self) -> str: url = self._page('.band_name a').attr('href') return re.search(r'\d+$', url).group(0) @property def url(self) -> str: return 'bands/_/{0}'.format(self.id) @property def added(self) -> Optional[datetime.datetime]: s = self._page('#auditTrail').find('tr').eq(1).find('td').eq(0).text()[10:] try: return offset_time(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')) except ValueError: return None @property def modified(self) -> Optional[datetime.datetime]: s = self._page('#auditTrail').find('tr').eq(1).find('td').eq(1).text()[18:] try: return offset_time(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')) except ValueError: return None @property def name(self) -> str: return self._page('h1.band_name').text().strip() @property def country(self) -> str: return self._dd_text_for_label('Country of origin:') @property def location(self) -> str: return self._dd_text_for_label('Location:') @property def status(self) -> str: return self._dd_text_for_label('Status:') @property def formed_in(self) -> str: return self._dd_text_for_label('Formed in:') @property def genres(self) -> List[str]: return split_genres(self._dd_text_for_label('Genre:')) @property def themes(self) -> List[str]: return self._dd_text_for_label('Lyrical themes:').split(', ') @property def label(self) -> str: return self._dd_text_for_label('Current label:') @property def logo(self) -> Optional[str]: url = self._page('#logo').attr('href') if not url: return None return url.split("?")[0] @property def photo(self) -> Optional[str]: url = self._page('#photo').attr('href') if not url: return None return url.split("?")[0] @property def albums(self) -> List['AlbumCollection']: url = 'band/discography/id/{0}/tab/all'.format(self.id) return AlbumCollection(url) class AlbumCollection(MetallumCollection): def __init__(self, url): super().__init__(url) rows = self._page('tr:gt(0)') for index in range(len(rows)): self.append(AlbumWrapper(elem=rows.eq(index))) class AlbumWrapper(Metallum): def __init__(self, url=None, elem=None): if url: super().__init__(url) self._album = Album(url) elif elem: self._album = LazyAlbum(elem) def __repr__(self): return '<Album: {0} ({1})>'.format(self.title, self.type) def __getattr__(self, name): if not hasattr(self._album, name) and hasattr(Album, name): self._album = Album(self._album.url) return getattr(self._album, name) @property def tracks(self): return TrackCollection(self._album.url, self) @property def disc_count(self): discs = 0 for track in self.tracks: if track.disc_number > discs: discs = track.disc_number return discs class Album(MetallumEntity): def __init__(self, url): super().__init__(url) @property def id(self) -> str: url = self._page('.album_name a').attr('href') return re.search(r'\d+$', url).group(0) @property def url(self) -> str: return 'albums/_/_/{0}'.format(self.id) @property def bands(self) -> List[Band]: bands = [] for a in self._page('.band_name').find('a'): url = PyQuery(a).attr('href') 
id = re.search(r'\d+$', url).group(0) bands.append(Band('bands/_/{0}'.format(id))) return bands @property def added(self) -> Optional[datetime.datetime]: s = self._page('#auditTrail').find('tr').eq(1).find('td').eq(0).text()[10:] try: return offset_time(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')) except ValueError: return None @property def modified(self) -> Optional[datetime.datetime]: s = self._page('#auditTrail').find('tr').eq(1).find('td').eq(1).text()[18:] try: return offset_time(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')) except ValueError: return None @property def title(self) -> str: return self._page('h1.album_name a').text() @property def type(self) -> str: element = self._dd_element_for_label('Type:') return element.text() if element else "" @property def duration(self) -> int: s = self._page('table.table_lyrics td strong').text() if s: return parse_duration(s) else: return 0 @property
MIT License
pyviz-dev/nbsite
examples/sites/holoviews/holoviews/ipython/preprocessors.py
filter_magic
python
def filter_magic(source, magic, strip=True): filtered, magic_lines=[],[] for line in source.splitlines(): if line.strip().startswith(magic): magic_lines.append(line) else: filtered.append(line) if strip: magic_lines = [el.replace(magic,'') for el in magic_lines] return '\n'.join(filtered), magic_lines
Given the source of a cell, filter out the given magic and collect the lines that use the magic into a list. If strip is True, the IPython syntax part of the magic (e.g. %magic or %%magic) is stripped from the returned lines.
https://github.com/pyviz-dev/nbsite/blob/7a4752e6ed6a3b0c3698473a6dd3a71ff9ba2acb/examples/sites/holoviews/holoviews/ipython/preprocessors.py#L58-L74
import ast from nbconvert.preprocessors import Preprocessor def comment_out_magics(source): filtered = [] for line in source.splitlines(): if line.strip().startswith('%'): filtered.append('# ' + line) else: filtered.append(line) return '\n'.join(filtered) def wrap_cell_expression(source, template='{expr}'): cell_output_types = (ast.IfExp, ast.BoolOp, ast.BinOp, ast.Call, ast.Name, ast.Attribute) try: node = ast.parse(comment_out_magics(source)) except SyntaxError: return source filtered = source.splitlines() if node.body != []: last_expr = node.body[-1] if not isinstance(last_expr, ast.Expr): pass elif isinstance(last_expr.value, cell_output_types): expr_end_slice = filtered[last_expr.lineno-1][:last_expr.col_offset] expr_start_slice = filtered[last_expr.lineno-1][last_expr.col_offset:] start = '\n'.join(filtered[:last_expr.lineno-1] + ([expr_end_slice] if expr_end_slice else [])) ending = '\n'.join(([expr_start_slice] if expr_start_slice else []) + filtered[last_expr.lineno:]) if ending.strip().endswith(';'): return source return start + '\n' + template.format(expr=ending) return source
BSD 3-Clause New or Revised License
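A minimal, self-contained sketch of how the filter_magic helper in this record behaves; the function body is restated from the record so the snippet runs without the holoviews package, and the example cell source is made up.

def filter_magic(source, magic, strip=True):
    # Restated from the record above so the sketch runs standalone.
    filtered, magic_lines = [], []
    for line in source.splitlines():
        if line.strip().startswith(magic):
            magic_lines.append(line)
        else:
            filtered.append(line)
    if strip:
        magic_lines = [el.replace(magic, '') for el in magic_lines]
    return '\n'.join(filtered), magic_lines

# A made-up notebook cell that mixes a cell magic with ordinary code.
cell = "%%output backend='matplotlib'\ncurve = hv.Curve([1, 2, 3])\ncurve"
code, magics = filter_magic(cell, '%%output')
print(code)    # "curve = hv.Curve([1, 2, 3])\ncurve" -- magic line removed
print(magics)  # [" backend='matplotlib'"] -- %%output prefix stripped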
gdanezis/rscoin
rscoin/__init__.py
Key.id
python
def id(self): return sha256(self.pub.export()).digest()
The fingerprint of the public key
https://github.com/gdanezis/rscoin/blob/6c0983cf6f9dba3bbd77d245be5a4e02adf9560e/rscoin/__init__.py#L53-L56
from collections import namedtuple from struct import pack, unpack from hashlib import sha256 from petlib.ec import EcGroup, EcPt from petlib.bn import Bn from petlib.ecdsa import do_ecdsa_sign, do_ecdsa_verify, do_ecdsa_setup from os import urandom _globalECG = EcGroup(713) class Key: def __init__(self, key_bytes, public=True): self.G = _globalECG if public: self.sec = None self.pub = EcPt.from_binary(key_bytes, self.G) self.optim = None else: self.sec = Bn.from_binary(sha256(key_bytes).digest()) self.pub = self.sec * self.G.generator() self.optim = do_ecdsa_setup(self.G, self.sec) def sign(self, message): assert len(message) == 32 assert self.sec is not None r, s = do_ecdsa_sign(self.G, self.sec, message, self.optim) r0, s0 = r.binary(), s.binary() assert len(r0) <= 32 and len(s0) <= 32 sig = pack("H32sH32s", len(r0), r0, len(s0), s0) return sig def verify(self, message, sig): assert len(message) == 32 lr, r, ls, s = unpack("H32sH32s", sig) sig = Bn.from_binary(r[:lr]), Bn.from_binary(s[:ls]) return do_ecdsa_verify(self.G, self.pub, sig, message)
BSD 2-Clause Simplified License
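A short usage sketch for Key.id from this record; it assumes petlib is installed, that the rscoin package is importable, and that id is the plain method shown above (no decorator appears in the record).

import rscoin  # assumes the rscoin package from this record is importable

# Build a signing key from arbitrary seed bytes (hashed internally, see Key.__init__).
sk = rscoin.Key(b"\x01" * 32, public=False)

# id() hashes the exported public key: a 32-byte SHA-256 fingerprint.
fingerprint = sk.id()
assert len(fingerprint) == 32

# A public-only key built from the exported point yields the same fingerprint.
pk = rscoin.Key(sk.pub.export())
assert pk.id() == fingerprint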
zhongdao/unitrack
tracker/sot/lib/models/modules.py
FeatureBase.get_feature
python
def get_feature(self, im: torch.Tensor): is_color = im.shape[1] == 3 if is_color and not self.use_for_color or not is_color and not self.use_for_gray: return torch.Tensor([]) feat = self.extract(im) if self.output_size is not None: feat = F.adaptive_avg_pool2d(feat, self.output_size) elif self.pool_stride != 1: feat = F.avg_pool2d(feat, self.pool_stride, self.pool_stride) if self.normalize_power is not None: feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) / (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power) return feat
Get the feature. This is generally the method to call. args: im: image patch as a torch.Tensor.
https://github.com/zhongdao/unitrack/blob/e18aece2a9046225ec3b3dd595a35490d594c699/tracker/sot/lib/models/modules.py#L375-L400
import math import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict from functools import partial import collections import re eps = 1e-5 def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv3x3NP(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, bias=False) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) def down(in_planes, out_planes): return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=1), nn.BatchNorm2d(out_planes)) def down_spatial(in_planes, out_planes): return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=4), nn.BatchNorm2d(out_planes)) class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) padding = 2 - stride if downsample is not None and dilation > 1: dilation = dilation // 2 padding = dilation assert stride == 1 or dilation == 1, "stride and dilation must have one equals to zero at least" if dilation > 1: padding = dilation self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=padding, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck_BIG_CI(nn.Module): expansion = 4 def __init__(self, inplanes, planes, last_relu, stride=1, downsample=None, dilation=1): super(Bottleneck_BIG_CI, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) padding = 1 if abs(dilation - 2) < eps: padding = 2 if abs(dilation - 3) < eps: padding = 3 self.conv2 = nn.Conv2d(planes, planes*2, kernel_size=3, stride=stride, padding=padding, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes*2) self.conv3 = nn.Conv2d(planes*2, planes * self.expansion, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.last_relu = last_relu def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual if self.last_relu: out = self.relu(out) out = self.center_crop(out) return out def center_crop(self, x): return x[:, :, 1:-1, 1:-1].contiguous() class ResNet_plus2(nn.Module): def __init__(self, block, layers, used_layers, online=False): self.inplanes = 64 super(ResNet_plus2, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, 
stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.feature_size = 128 * block.expansion self.used_layers = used_layers self.layer3_use = True if 3 in used_layers else False self.layer4_use = True if 4 in used_layers else False if self.layer3_use: if online: self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, update=True) self.layeronline = self._make_layer(block, 256, layers[2], stride=2) else: self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) self.feature_size = (256 + 128) * block.expansion else: self.layer3 = lambda x: x if self.layer4_use: self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) self.feature_size = 512 * block.expansion else: self.layer4 = lambda x: x for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1, dilation=1, update=False): downsample = None dd = dilation if stride != 1 or self.inplanes != planes * block.expansion: if stride == 1 and dilation == 1: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) else: if dilation > 1: dd = dilation // 2 padding = dd else: dd = 1 padding = 0 downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=3, stride=stride, bias=False, padding=padding, dilation=dd), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride=stride, downsample=downsample, dilation=dilation)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, dilation=dilation)) if update: self.inplanes = int(self.inplanes / 2) return nn.Sequential(*layers) def forward(self, x, online=False): x = self.conv1(x) x = self.bn1(x) x_ = self.relu(x) x = self.maxpool(x_) p1 = self.layer1(x) p2 = self.layer2(p1) if online: return self.layeronline(p2) p3 = self.layer3(p2) return [x_, p1, p2], p3 class ResNet(nn.Module): def __init__(self, block, layers, last_relus, s2p_flags, firstchannels=64, channels=[64, 128], dilation=1): self.inplanes = firstchannels self.stage_len = len(layers) super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, firstchannels, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(firstchannels) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) if s2p_flags[0]: self.layer1 = self._make_layer(block, channels[0], layers[0], stride2pool=True, last_relu=last_relus[0]) else: self.layer1 = self._make_layer(block, channels[0], layers[0], last_relu=last_relus[0]) if s2p_flags[1]: self.layer2 = self._make_layer(block, channels[1], layers[1], stride2pool=True, last_relu=last_relus[1], dilation=dilation) else: self.layer2 = self._make_layer(block, channels[1], layers[1], last_relu=last_relus[1], dilation=dilation) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal(m.weight, mode='fan_out') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, last_relu, stride=1, stride2pool=False, dilation=1): downsample = None if stride != 1 or self.inplanes != planes * 
block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, last_relu=True, stride=stride, downsample=downsample, dilation=dilation)) if stride2pool: layers.append(self.maxpool) self.inplanes = planes * block.expansion for i in range(1, blocks): if i == blocks - 1: layers.append(block(self.inplanes, planes, last_relu=last_relu, dilation=dilation)) else: layers.append(block(self.inplanes, planes, last_relu=True, dilation=dilation)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.center_crop7(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) return x def center_crop7(self, x): return x[:, :, 2:-2, 2:-2].contiguous() class FeatureBase: def __init__(self, fparams = None, pool_stride = None, output_size = None, normalize_power = None, use_for_color = True, use_for_gray = True): self.fparams = fparams self.pool_stride = 1 if pool_stride is None else pool_stride self.output_size = output_size self.normalize_power = normalize_power self.use_for_color = use_for_color self.use_for_gray = use_for_gray def initialize(self): pass def free_memory(self): pass def dim(self): raise NotImplementedError def stride(self): raise NotImplementedError def size(self, im_sz): if self.output_size is None: return im_sz // self.stride() if isinstance(im_sz, torch.Tensor): return torch.Tensor([self.output_size[0], self.output_size[1]]) return self.output_size def extract(self, im): raise NotImplementedError
MIT License
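FeatureBase is meant to be subclassed with extract() (and dim()/stride()) filled in; the sketch below defines a hypothetical identity feature to show what get_feature adds on top (channel gating, pooling, normalisation). The import path is a guess at the repository layout.

import torch
from tracker.sot.lib.models.modules import FeatureBase  # path is an assumption

class IdentityFeature(FeatureBase):
    """Hypothetical feature that just returns the patch unchanged."""
    def dim(self):
        return 3
    def stride(self):
        return 1
    def extract(self, im):
        return im

feat = IdentityFeature(pool_stride=2, normalize_power=2)
im = torch.rand(1, 3, 64, 64)           # a colour patch
out = feat.get_feature(im)              # avg-pooled by 2, then power-normalised
print(out.shape)                        # torch.Size([1, 3, 32, 32])

gray = torch.rand(1, 1, 64, 64)
print(IdentityFeature(use_for_gray=False).get_feature(gray))  # tensor([]) -- gray input skipped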
scikit-multiflow/scikit-multiflow
src/skmultiflow/meta/dynamic_weighted_majority.py
DynamicWeightedMajorityClassifier.fit_single_sample
python
def fit_single_sample(self, X, y, classes=None, sample_weight=None): self.epochs += 1 self.num_classes = max( len(classes) if classes is not None else 0, (int(np.max(y)) + 1), self.num_classes) predictions = np.zeros((self.num_classes,)) max_weight = 0 weakest_expert_weight = 1 weakest_expert_index = None for i, exp in enumerate(self.experts): y_hat = exp.estimator.predict(X) if np.any(y_hat != y) and (self.epochs % self.period == 0): exp.weight *= self.beta predictions[y_hat] += exp.weight max_weight = max(max_weight, exp.weight) if exp.weight < weakest_expert_weight: weakest_expert_index = i weakest_expert_weight = exp.weight y_hat = np.array([np.argmax(predictions)]) if self.epochs % self.period == 0: self._scale_weights(max_weight) self._remove_experts() if np.any(y_hat != y): if len(self.experts) == self.n_estimators: self.experts.pop(weakest_expert_index) self.experts.append(self._construct_new_expert()) for exp in self.experts: exp.estimator.partial_fit(X, y, classes, sample_weight)
Fits a single sample of shape `X.shape=(1, n_attributes)` and `y.shape=(1)` Aggregates all experts' predictions, diminishes weight of experts whose predictions were wrong, and may create or remove experts every _period_ samples. Finally, trains each individual expert on the provided data. Train loop as described by Kolter and Maloof in the original paper. Parameters ---------- X: numpy.ndarray of shape (n_samples, n_features) Features matrix used for partially updating the model. y: Array-like An array-like of all the class labels for the samples in X. classes: list List of all existing classes. This is an optional parameter. sample_weight: numpy.ndarray of shape (n_samples), optional (default=None) Samples weight. If not provided, uniform weights are assumed. Applicability depends on the base estimator.
https://github.com/scikit-multiflow/scikit-multiflow/blob/d073a706b5006cba2584761286b7fa17e74e87be/src/skmultiflow/meta/dynamic_weighted_majority.py#L181-L242
import copy as cp import numpy as np from skmultiflow.core import BaseSKMObject, ClassifierMixin, MetaEstimatorMixin from skmultiflow.bayes import NaiveBayes import warnings def DynamicWeightedMajority(n_estimators=5, base_estimator=NaiveBayes(), period=50, beta=0.5, theta=0.01): warnings.warn("'DynamicWeightedMajority' has been renamed to " "'DynamicWeightedMajorityClassifier' in v0.5.0.\n" "The old name will be removed in v0.7.0", category=FutureWarning) return DynamicWeightedMajorityClassifier(n_estimators=n_estimators, base_estimator=base_estimator, period=period, beta=beta, theta=theta) class DynamicWeightedMajorityClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin): class WeightedExpert: def __init__(self, estimator, weight): self.estimator = estimator self.weight = weight def __init__(self, n_estimators=5, base_estimator=NaiveBayes(), period=50, beta=0.5, theta=0.01): super().__init__() self.n_estimators = n_estimators self.base_estimator = base_estimator self.beta = beta self.theta = theta self.period = period self.epochs = None self.num_classes = None self.experts = None self.reset() def partial_fit(self, X, y, classes=None, sample_weight=None): for i in range(len(X)): self.fit_single_sample( X[i:i + 1, :], y[i:i + 1], classes, sample_weight ) return self def predict(self, X): preds = np.array([np.array(exp.estimator.predict(X)) * exp.weight for exp in self.experts]) sum_weights = sum(exp.weight for exp in self.experts) aggregate = np.sum(preds / sum_weights, axis=0) return (aggregate + 0.5).astype(int) def predict_proba(self, X): raise NotImplementedError
BSD 3-Clause New or Revised License
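fit_single_sample is driven by partial_fit one sample at a time, so the usual way to exercise it is a prequential (test-then-train) loop; a sketch with a synthetic two-class stream, assuming scikit-multiflow is installed:

import numpy as np
from skmultiflow.meta import DynamicWeightedMajorityClassifier

# Synthetic two-class stream: two Gaussian blobs, shuffled.
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, (100, 2)), rng.normal(3, 1, (100, 2))])
y = np.array([0] * 100 + [1] * 100)
order = rng.permutation(len(y))
X, y = X[order], y[order]

dwm = DynamicWeightedMajorityClassifier(n_estimators=5, period=50, beta=0.5)

correct = 0
for i in range(len(y)):                        # test-then-train
    x_i, y_i = X[i:i + 1], y[i:i + 1]
    if i > 0:
        correct += int(dwm.predict(x_i)[0] == y_i[0])
    dwm.partial_fit(x_i, y_i, classes=[0, 1])  # calls fit_single_sample per sample
print('prequential accuracy:', correct / (len(y) - 1))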
pykale/pykale
kale/pipeline/multi_domain_adapter.py
_DINTrainer.__init__
python
def __init__( self, dataset, feature_extractor, task_classifier, n_classes: int, target_domain: str, kernel: str = "linear", kernel_mul: float = 2.0, kernel_num: int = 5, **base_params, ): super().__init__(dataset, feature_extractor, task_classifier, n_classes, target_domain, **base_params) self.kernel = kernel self.n_domains = len(self.domain_to_idx.values()) self.classifier = task_classifier(self.feature_dim, n_classes) self._kernel_mul = kernel_mul self._kernel_num = kernel_num
Domain independent network (DIN). It is under development and will be updated with references later.
https://github.com/pykale/pykale/blob/7450305c52041ce8700f1fc4dbb93a6069d060d6/kale/pipeline/multi_domain_adapter.py#L188-L208
import torch import torch.nn as nn from torch.nn.functional import one_hot import kale.predict.losses as losses from kale.embed.image_cnn import _Bottleneck from kale.pipeline.domain_adapter import BaseAdaptTrainer, get_aggregated_metrics def create_ms_adapt_trainer(method: str, dataset, feature_extractor, task_classifier, **train_params): method_dict = {"M3SDA": M3SDATrainer, "DIN": _DINTrainer, "MFSAN": MFSANTrainer} method = method.upper() if method not in method_dict.keys(): raise ValueError("Unsupported multi-source domain adaptation methods %s" % method) else: return method_dict[method](dataset, feature_extractor, task_classifier, **train_params) def _average_cls_output(x, classifiers: nn.ModuleDict): cls_output = [classifiers[key](x) for key in classifiers] return torch.stack(cls_output).mean(0) class BaseMultiSourceTrainer(BaseAdaptTrainer): def __init__( self, dataset, feature_extractor, task_classifier, n_classes: int, target_domain: str, **base_params, ): super().__init__(dataset, feature_extractor, task_classifier, **base_params) self.n_classes = n_classes self.feature_dim = feature_extractor.state_dict()[list(feature_extractor.state_dict().keys())[-2]].shape[0] self.domain_to_idx = dataset.domain_to_idx if target_domain not in self.domain_to_idx.keys(): raise ValueError( "The given target domain %s not in the dataset! The available domain names are %s" % (target_domain, self.domain_to_idx.keys()) ) self.target_domain = target_domain self.target_label = self.domain_to_idx[target_domain] self.base_params = base_params def forward(self, x): if self.feat is not None: x = self.feat(x) return x def compute_loss(self, batch, split_name="V"): raise NotImplementedError("Loss needs to be defined.") def validation_epoch_end(self, outputs): metrics_to_log = ( "val_loss", "V_source_acc", "V_target_acc", "V_domain_acc", ) return self._validation_epoch_end(outputs, metrics_to_log) def test_epoch_end(self, outputs): metrics_at_test = ( "test_loss", "Te_source_acc", "Te_target_acc", "Te_domain_acc", ) log_dict = get_aggregated_metrics(metrics_at_test, outputs) for key in log_dict: self.log(key, log_dict[key], prog_bar=True) class M3SDATrainer(BaseMultiSourceTrainer): def __init__( self, dataset, feature_extractor, task_classifier, n_classes: int, target_domain: str, k_moment: int = 3, **base_params, ): super().__init__(dataset, feature_extractor, task_classifier, n_classes, target_domain, **base_params) self.classifiers = dict() for domain_ in self.domain_to_idx.keys(): if domain_ != target_domain: self.classifiers[domain_] = task_classifier(self.feature_dim, n_classes) self.classifiers = nn.ModuleDict(self.classifiers) self.k_moment = k_moment def compute_loss(self, batch, split_name="V"): x, y, domain_labels = batch phi_x = self.forward(x) moment_loss = self._compute_domain_dist(phi_x, domain_labels) src_idx = torch.where(domain_labels != self.target_label)[0] tgt_idx = torch.where(domain_labels == self.target_label)[0] cls_loss, ok_src = self._compute_cls_loss(phi_x[src_idx], y[src_idx], domain_labels[src_idx]) if len(tgt_idx) > 0: y_tgt_hat = _average_cls_output(phi_x[tgt_idx], self.classifiers) _, ok_tgt = losses.cross_entropy_logits(y_tgt_hat, y[tgt_idx]) else: ok_tgt = 0.0 task_loss = cls_loss log_metrics = { f"{split_name}_source_acc": ok_src, f"{split_name}_target_acc": ok_tgt, f"{split_name}_domain_acc": moment_loss, } return task_loss, moment_loss, log_metrics def _compute_cls_loss(self, x, y, domain_labels: torch.Tensor): if len(y) == 0: return 0.0, 0.0 else: cls_loss = 0.0 ok_src 
= [] n_src = 0 for domain_ in self.domain_to_idx.keys(): if domain_ == self.target_domain: continue domain_idx = torch.where(domain_labels == self.domain_to_idx[domain_])[0] cls_output = self.classifiers[domain_](x[domain_idx]) loss_cls_, ok_src_ = losses.cross_entropy_logits(cls_output, y[domain_idx]) cls_loss += loss_cls_ ok_src.append(ok_src_) n_src += 1 cls_loss = cls_loss / n_src ok_src = torch.cat(ok_src) return cls_loss, ok_src def _compute_domain_dist(self, x, domain_labels): moment_loss = 0 for i in range(self.k_moment): moment_loss += losses._moment_k(x, domain_labels, i + 1) return moment_loss class _DINTrainer(BaseMultiSourceTrainer):
MIT License
migalkin/nodepiece
nc/models/gnn_layer.py
StarEConvLayer.message
python
def message(self, x_j, x_i, edge_type, rel_embed, edge_norm, mode, ent_embed=None, qualifier_ent=None, qualifier_rel=None, qual_index=None, source_index=None): weight = getattr(self, 'w_{}'.format(mode)) if self.p['STATEMENT_LEN'] != 3: if mode != 'loop': rel_emb = self.update_rel_emb_with_qualifier(ent_embed, rel_embed, qualifier_ent, qualifier_rel, edge_type, qual_index) else: rel_emb = torch.index_select(rel_embed, 0, edge_type) else: rel_emb = torch.index_select(rel_embed, 0, edge_type) xj_rel = self.rel_transform(x_j, rel_emb) out = torch.einsum('ij,jk->ik', xj_rel, weight) if self.p['STAREARGS']['ATTENTION'] and mode != 'loop': out = out.view(-1, self.heads, self.attn_dim) x_i = x_i.view(-1, self.heads, self.attn_dim) alpha = torch.einsum('bij,kij -> bi', [torch.cat([x_i, out], dim=-1), self.att]) alpha = F.leaky_relu(alpha, self.negative_slope) alpha = softmax(alpha, source_index, ent_embed.size(0)) alpha = F.dropout(alpha, p=self.attn_drop) return (out * alpha.view(-1, self.heads, 1)).view(-1, self.heads * self.attn_dim) else: return out if edge_norm is None else out * edge_norm.view(-1, 1)
The message method performs following functions Step1 : get updated relation representation (rel_embed) [edge_type] by aggregating qualifier information (self.update_rel_emb_with_qualifier). Step2 : Obtain edge message by transforming the node embedding with updated relation embedding (self.rel_transform). Step3 : Multiply edge embeddings (transform) by weight Step4 : Return the messages. They will be sent to subjects (1st line in the edge index COO) Over here the node embedding [the first list in COO matrix] is representing the message which will be sent on each edge More information about updating relation representation please refer to self.update_rel_emb_with_qualifier :param x_j: objects of the statements (2nd line in the COO) :param x_i: subjects of the statements (1st line in the COO) :param edge_type: relation types :param rel_embed: embedding matrix of all relations :param edge_norm: :param mode: in (direct) / out (inverse) / loop :param ent_embed: embedding matrix of all entities :param qualifier_ent: :param qualifier_rel: :param qual_index: :param source_index: :return:
https://github.com/migalkin/nodepiece/blob/d731c9990cdd7835f01f129f6134c3bff576821f/nc/models/gnn_layer.py#L367-L421
import torch import numpy as np import torch.nn.functional as F from utils.utils_gcn import get_param, ccorr, rotate, softmax from torch_scatter import scatter_add, scatter_mean from torch_geometric.nn import MessagePassing from torch.nn import Parameter class StarEConvLayer(MessagePassing): def __init__(self, in_channels, out_channels, num_rels, act=lambda x: x, config=None): super(self.__class__, self).__init__(flow='target_to_source', aggr='add') self.p = config self.in_channels = in_channels self.out_channels = out_channels self.num_rels = num_rels self.act = act self.device = None self.w_loop = get_param((in_channels, out_channels)) self.w_in = get_param((in_channels, out_channels)) self.w_out = get_param((in_channels, out_channels)) self.w_rel = get_param((in_channels, out_channels)) if self.p['STATEMENT_LEN'] != 3: if self.p['STAREARGS']['QUAL_AGGREGATE'] == 'sum' or self.p['STAREARGS']['QUAL_AGGREGATE'] == 'mul' or self.p['STAREARGS']['QUAL_AGGREGATE'] == 'attn': self.w_q = get_param((in_channels, in_channels)) elif self.p['STAREARGS']['QUAL_AGGREGATE'] == 'concat': self.w_q = get_param((2 * in_channels, in_channels)) self.loop_rel = get_param((1, in_channels)) self.loop_ent = get_param((1, in_channels)) self.drop = torch.nn.Dropout(self.p['STAREARGS']['GCN_DROP']) self.bn = torch.nn.BatchNorm1d(out_channels) if self.p['STAREARGS']['ATTENTION']: assert self.p['STAREARGS']['GCN_DIM'] == self.p['EMBEDDING_DIM'], "Current attn implementation requires those tto be identical" assert self.p['EMBEDDING_DIM'] % self.p['STAREARGS']['ATTENTION_HEADS'] == 0, "should be divisible" self.heads = self.p['STAREARGS']['ATTENTION_HEADS'] self.attn_dim = self.out_channels // self.heads self.negative_slope = self.p['STAREARGS']['ATTENTION_SLOPE'] self.attn_drop = self.p['STAREARGS']['ATTENTION_DROP'] self.att = get_param((1, self.heads, 2 * self.attn_dim)) if self.p['STAREARGS']['QUAL_AGGREGATE'] == 'attn': assert self.p['STAREARGS']['GCN_DIM'] == self.p[ 'EMBEDDING_DIM'], "Current attn implementation requires those tto be identical" assert self.p['EMBEDDING_DIM'] % self.p['STAREARGS']['ATTENTION_HEADS'] == 0, "should be divisible" if not self.p['STAREARGS']['ATTENTION']: self.heads = self.p['STAREARGS']['ATTENTION_HEADS'] self.attn_dim = self.out_channels // self.heads self.negative_slope = self.p['STAREARGS']['ATTENTION_SLOPE'] self.attn_drop = self.p['STAREARGS']['ATTENTION_DROP'] self.att_qual = get_param((1, self.heads, 2 * self.attn_dim)) if self.p['STAREARGS']['BIAS']: self.register_parameter('bias', Parameter( torch.zeros(out_channels))) def reset_parameters(self): torch.nn.init.xavier_normal_(self.w_loop.data) torch.nn.init.xavier_normal_(self.w_in.data) torch.nn.init.xavier_normal_(self.w_out.data) torch.nn.init.xavier_normal_(self.w_rel.data) if self.p['STATEMENT_LEN'] != 3: torch.nn.init.xavier_normal_(self.w_q.data) torch.nn.init.xavier_normal_(self.loop_rel.data) torch.nn.init.xavier_normal_(self.loop_ent.data) self.bn.reset_parameters() if self.p['STAREARGS']['ATTENTION']: torch.nn.init.xavier_normal_(self.att.data) if self.p['STAREARGS']['QUAL_AGGREGATE'] == 'attn': torch.nn.init.xavier_normal_(self.att_qual.data) def forward(self, x, edge_index, edge_type, rel_embed, qualifier_ent=None, qualifier_rel=None, quals=None): if self.device is None: self.device = edge_index.device rel_embed = torch.cat([rel_embed, self.loop_rel], dim=0) num_edges = edge_index.size(1) // 2 num_ent = x.size(0) self.in_index, self.out_index = edge_index[:, :num_edges], edge_index[:, num_edges:] self.in_type, 
self.out_type = edge_type[:num_edges], edge_type[num_edges:] if self.p['STATEMENT_LEN'] != 3: num_quals = quals.size(1) // 2 self.in_index_qual_ent, self.out_index_qual_ent = quals[1, :num_quals], quals[1, num_quals:] self.in_index_qual_rel, self.out_index_qual_rel = quals[0, :num_quals], quals[0, num_quals:] self.quals_index_in, self.quals_index_out = quals[2, :num_quals], quals[2, num_quals:] ''' Adding self loop by creating a COO matrix. Thus \ loop index [1,2,3,4,5] [1,2,3,4,5] loop type [10,10,10,10,10] --> assuming there are 9 relations ''' self.loop_index = torch.stack([torch.arange(num_ent), torch.arange(num_ent)]).to(self.device) self.loop_type = torch.full((num_ent,), rel_embed.size(0) - 1, dtype=torch.long).to(self.device) self.in_norm = self.compute_norm(self.in_index, num_ent) self.out_norm = self.compute_norm(self.out_index, num_ent) if self.p['STATEMENT_LEN'] != 3: in_res = self.propagate(self.in_index, x=x, edge_type=self.in_type, rel_embed=rel_embed, edge_norm=self.in_norm, mode='in', ent_embed=x, qualifier_ent=self.in_index_qual_ent, qualifier_rel=self.in_index_qual_rel, qual_index=self.quals_index_in, source_index=self.in_index[0]) loop_res = self.propagate(self.loop_index, x=x, edge_type=self.loop_type, rel_embed=rel_embed, edge_norm=None, mode='loop', ent_embed=None, qualifier_ent=None, qualifier_rel=None, qual_index=None, source_index=None) out_res = self.propagate(self.out_index, x=x, edge_type=self.out_type, rel_embed=rel_embed, edge_norm=self.out_norm, mode='out', ent_embed=x, qualifier_ent=self.out_index_qual_ent, qualifier_rel=self.out_index_qual_rel, qual_index=self.quals_index_out, source_index=self.out_index[0]) else: in_res = self.propagate(self.in_index, x=x, edge_type=self.in_type, rel_embed=rel_embed, edge_norm=self.in_norm, mode='in', ent_embed=x, qualifier_ent=None, qualifier_rel=None, qual_index=None, source_index=self.in_index[0]) loop_res = self.propagate(self.loop_index, x=x, edge_type=self.loop_type, rel_embed=rel_embed, edge_norm=None, mode='loop', ent_embed=x, qualifier_ent=None, qualifier_rel=None, qual_index=None, source_index=None) out_res = self.propagate(self.out_index, x=x, edge_type=self.out_type, rel_embed=rel_embed, edge_norm=self.out_norm, mode='out', ent_embed=x, qualifier_ent=None, qualifier_rel=None, qual_index=None, source_index=self.out_index[0]) out = self.drop(in_res) * (1 / 3) + self.drop(out_res) * (1 / 3) + loop_res * (1 / 3) if self.p['STAREARGS']['BIAS']: out = out + self.bias out = self.bn(out) return self.act(out), torch.matmul(rel_embed, self.w_rel)[:-1] def rel_transform(self, ent_embed, rel_embed): if self.p['STAREARGS']['OPN'] == 'corr': trans_embed = ccorr(ent_embed, rel_embed) elif self.p['STAREARGS']['OPN'] == 'sub': trans_embed = ent_embed - rel_embed elif self.p['STAREARGS']['OPN'] == 'mult': trans_embed = ent_embed * rel_embed elif self.p['STAREARGS']['OPN'] == 'rotate': trans_embed = rotate(ent_embed, rel_embed) else: raise NotImplementedError return trans_embed def qual_transform(self, qualifier_ent, qualifier_rel): if self.p['STAREARGS']['QUAL_OPN'] == 'corr': trans_embed = ccorr(qualifier_ent, qualifier_rel) elif self.p['STAREARGS']['QUAL_OPN'] == 'sub': trans_embed = qualifier_ent - qualifier_rel elif self.p['STAREARGS']['QUAL_OPN'] == 'mult': trans_embed = qualifier_ent * qualifier_rel elif self.p['STAREARGS']['QUAL_OPN'] == 'rotate': trans_embed = rotate(qualifier_ent, qualifier_rel) else: raise NotImplementedError return trans_embed def qualifier_aggregate(self, qualifier_emb, rel_part_emb, alpha=0.5, 
qual_index=None): if self.p['STAREARGS']['QUAL_AGGREGATE'] == 'sum': qualifier_emb = torch.einsum('ij,jk -> ik', self.coalesce_quals(qualifier_emb, qual_index, rel_part_emb.shape[0]), self.w_q) return alpha * rel_part_emb + (1 - alpha) * qualifier_emb elif self.p['STAREARGS']['QUAL_AGGREGATE'] == 'concat': qualifier_emb = self.coalesce_quals(qualifier_emb, qual_index, rel_part_emb.shape[0]) agg_rel = torch.cat((rel_part_emb, qualifier_emb), dim=1) return torch.mm(agg_rel, self.w_q) elif self.p['STAREARGS']['QUAL_AGGREGATE'] == 'mul': qualifier_emb = torch.mm(self.coalesce_quals(qualifier_emb, qual_index, rel_part_emb.shape[0], fill=1), self.w_q) return rel_part_emb * qualifier_emb elif self.p['STAREARGS']['QUAL_AGGREGATE'] == 'attn': expanded_rels = torch.index_select(rel_part_emb, 0, qual_index) expanded_rels = expanded_rels.view(-1, self.heads, self.attn_dim) qualifier_emb = torch.mm(qualifier_emb, self.w_q).view(-1, self.heads, self.attn_dim) alpha_r = torch.einsum('bij,kij -> bi', [torch.cat([expanded_rels, qualifier_emb], dim=-1), self.att_qual]) alpha_r = F.leaky_relu(alpha_r, self.negative_slope) alpha_r = softmax(alpha_r, qual_index, rel_part_emb.size(0)) alpha_r = F.dropout(alpha_r, p=self.attn_drop) expanded_rels = (expanded_rels * alpha_r.view(-1, self.heads, 1)).view(-1, self.heads * self.attn_dim) single_rels = scatter_add(expanded_rels, qual_index, dim=0, dim_size=rel_part_emb.size(0)) copy_mask = single_rels.sum(dim=1) != 0.0 rel_part_emb[copy_mask] = single_rels[copy_mask] return rel_part_emb else: raise NotImplementedError def update_rel_emb_with_qualifier(self, ent_embed, rel_embed, qualifier_ent, qualifier_rel, edge_type, qual_index=None): qualifier_emb_rel = rel_embed[qualifier_rel] qualifier_emb_ent = ent_embed[qualifier_ent] rel_part_emb = rel_embed[edge_type] qualifier_emb = self.qual_transform(qualifier_ent=qualifier_emb_ent, qualifier_rel=qualifier_emb_rel) return self.qualifier_aggregate(qualifier_emb, rel_part_emb, alpha=self.p['STAREARGS']['TRIPLE_QUAL_WEIGHT'], qual_index=qual_index)
MIT License
johnbrodie/pyjector
pyjector/pyjector.py
Pyjector._command_handler
python
def _command_handler(self, command, action): if action not in self.get_actions_for_command(command): raise InvalidCommandError( '{0} is not a valid action for comand {1}'.format( action, command) ) command_string = self._create_command_string(command, action) logging.info("send: " + repr(command_string)) self._do_handshake() self._send(command_string) sleep(self.config.get('wait_time', 1)) response = self.get_response() logging.info("recv: " + repr(response)) self._check_response(response) return response
Send the `command` and `action` to the device. :param command: The command to send, for example, "power". :param action: The action to send, for example, "on". :returns: str -- The response from the device. :raises: InvalidCommandError if `action` is not valid for `command`.
https://github.com/johnbrodie/pyjector/blob/ac982acba45e185fa32580519d7e93a3e6ab0367/pyjector/pyjector.py#L234-L258
from time import sleep import json import os import logging import serial PATH = os.path.abspath(os.path.dirname(__file__)) + '/projector_configs/' class CommandFailedError(Exception): class CommandExceptionError(Exception): class InvalidConfigError(Exception): class DeviceConfigMissingError(Exception): class InvalidCommandError(Exception): class Pyjector(object): possible_pyserial_settings = [ 'port', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout', 'xonxoff', 'rtscts', 'dsrdtr', 'writeTimeout', 'InterCharTimeout', ] pyserial_config_converter = { 'bytesize': { 5: serial.FIVEBITS, 6: serial.SIXBITS, 7: serial.SEVENBITS, 8: serial.EIGHTBITS, }, 'parity': { 'none': serial.PARITY_NONE, 'even': serial.PARITY_EVEN, 'odd': serial.PARITY_ODD, 'mark': serial.PARITY_MARK, 'space': serial.PARITY_SPACE, }, 'stopbits': { 1: serial.STOPBITS_ONE, 1.5: serial.STOPBITS_ONE_POINT_FIVE, 2: serial.STOPBITS_TWO, }, } def __init__( self, port=None, device_id='benq', **kwargs ): self.port = port self.device_id = device_id self.get_config(device_id, kwargs) self.serial = self._initialize_pyserial(port) self._create_commands() def get_config(self, device_id, overrides): self.available_configs = self._populate_configs() self.config = self.get_device_config_from_id(device_id) self._apply_overrides(overrides) self._validate_config() self.pyserial_config = self.get_pyserial_config() def _validate_config(self): if 'serial' not in self.config: raise InvalidConfigError( 'Configuration file for {0} does not contain needed serial' 'config values. Add a `serial` section to the config.'.format( self.device_id) ) if ('command_list' not in self.config or len(self.config['command_list']) == 0): raise InvalidConfigError( 'Configuration file for {0} does not define any commands. ' 'Add a `serial` section to the config.'.format( self.device_id) ) def _populate_configs(self): configs = {} for f in os.listdir(PATH): if f.endswith('.json'): data = open(PATH + f) json_data = json.loads(data.read()) name = os.path.splitext(f)[0] configs[name] = json_data return configs def _apply_overrides(self, overrides): self.config.update(overrides) def get_device_config_from_id(self, device_id): try: config = self.available_configs[device_id] except KeyError: raise DeviceConfigMissingError( 'Could not find device config with name {0}. ' 'Check that the file exists in ' ' `pyjector/projector_configs/`'.format(device_id) ) return config def get_pyserial_config(self): serial_config = self.config['serial'] for key, value in serial_config.items(): if key not in self.possible_pyserial_settings: raise InvalidConfigError( 'Configuration file for {0} specifies a serial ' 'setting "{1}" not recognized by pyserial. Check ' 'http://pyserial.sourceforge.net/pyserial_api.html' 'for valid settings'.format( self.device_id, key) ) if key in self.pyserial_config_converter: try: serial_config[key] = ( self.pyserial_config_converter[key][value]) except KeyError: raise InvalidConfigError( 'Configuration file for {0} specifies a serial ' 'setting for "{1}" for key "{2}" not recognized ' 'by pyserial. 
Check ' 'http://pyserial.sourceforge.net/pyserial_api.html' 'for valid settings'.format( self.device_id, value, key) ) return serial_config def _initialize_pyserial(self, port): return serial.Serial(port=port, **self.pyserial_config) def _send(self, data): logging.debug("_send: " + repr(data)) self.serial.write(data.encode()) def _recv(self, size=1): data = self.serial.read(size).decode() if data: logging.debug("_recv: " + repr(data)) return data def _do_handshake(self): h = self.config.get('handshake') if h == None: return self._send(h['send']) sleep(h['wait']) expected = h['expect'] resp = self._recv(len(expected)) if resp != expected: logging.error("unexpected response to handshake " + repr(resp))
MIT License
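_command_handler is what ultimately sends a (command, action) pair over the serial line; a usage sketch, assuming the pyjector package is importable under that name and a projector is attached (the port string is a placeholder):

from pyjector import Pyjector   # import path is an assumption

# Opening the handler needs a real serial device; '/dev/ttyUSB0' is a placeholder.
projector = Pyjector(port='/dev/ttyUSB0', device_id='benq')

# Send "power on"; InvalidCommandError is raised if 'on' is not a listed
# action for 'power' in the benq JSON config.
response = projector._command_handler('power', 'on')
print(response)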
tjefferies/pymetalog
pymetalog/support.py
MLprobs
python
def MLprobs(x_old, step_len): l = len(x_old) x = pd.DataFrame() x['x'] = x_old.copy() x.sort_values(by='x') x['probs'] = 0 for i in range(0,l): if i == 0: x.loc[i,'probs'] = .5/l else: x.loc[i, 'probs'] = x.loc[i-1, 'probs'] + 1/l if len(x.index) > 100: y2 = np.linspace(step_len, 1 - step_len, int((1 - step_len) / step_len)) tailstep = step_len / 10 y1 = np.linspace(tailstep, (min(y2) - tailstep), int((min(y2) - tailstep) / tailstep)) y3 = np.linspace((max(y2) + tailstep), (max(y2) + tailstep * 9), int((tailstep * 9) / tailstep)) y = np.hstack((y1, y2, y3)) x_new = np.quantile(x_old, y) df_x = {} df_x['x'] = x_new df_x['probs'] = y x = df_x return x
Returns the quantile values x['x'] and corresponding bins x['probs']. Called during metalog.__init__ method call. Args: x_old (:obj: `numpy.ndarray` of type numeric): Input data to fit the metalog distribution to. - must be an array of allowable types: int, float, numpy.int64, numpy.float64 step_len (:obj:`float`): Used to specify the bin width used to estimate the metalog. Returns: x: (:obj:`dict` with keys ['x','probs'] of type float): - x['x']: (:obj:`numpy.ndarray` of type float): * x['x'] is the quantile values found using the bin widths array x['probs'] - which is specified using the `step_len` parameter - x['probs']: (:obj:`numpy.ndarray` of type float): * x['probs'] is the array of bin widths specified for x['x']
https://github.com/tjefferies/pymetalog/blob/0cd332b132eb3c9117a827088f7082e346cb77d8/pymetalog/support.py#L5-L57
import pandas as pd import numpy as np
MIT License
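MLprobs is a plain helper, so it can be exercised directly; a sketch assuming pymetalog is installed and that the function is importable from the module path shown in the record:

import numpy as np
from pymetalog.support import MLprobs   # module path taken from the record

rng = np.random.RandomState(1)
x = rng.lognormal(mean=0.0, sigma=0.5, size=50)   # 50 points: takes the DataFrame branch

out = MLprobs(x, step_len=0.01)
print(out['x'].values[:3])      # the input data values
print(out['probs'].values[:3])  # [0.01 0.03 0.05] -- 0.5/l, then +1/l steps

With more than 100 input points the quantile branch is taken instead and a plain dict with keys 'x' and 'probs' is returned.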
terrainbento/terrainbento
terrainbento/base_class/stochastic_erosion_model.py
StochasticErosionModel._pre_water_erosion_steps
python
def _pre_water_erosion_steps(self): pass
Convenience function for pre-water erosion steps. If a model needs to do anything before each erosion step is run, e.g. recalculate a threshold value, that model should overwrite this function.
https://github.com/terrainbento/terrainbento/blob/3758d3526a3a134e2cee5263ccff5d51d3ea13d1/terrainbento/base_class/stochastic_erosion_model.py#L304-L311
import os import textwrap import numpy as np import scipy.stats as stats from landlab.components import PrecipitationDistribution from terrainbento.base_class import ErosionModel _STRING_LENGTH = 80 class StochasticErosionModel(ErosionModel): _required_fields = ["topographic__elevation"] def __init__( self, clock, grid, random_seed=0, record_rain=False, opt_stochastic_duration=False, mean_storm_duration=1, mean_interstorm_duration=1, mean_storm_depth=1, rainfall__shape_factor=1, number_of_sub_time_steps=1, rainfall_intermittency_factor=1, rainfall__mean_rate=1, storm_sequence_filename="storm_sequence.txt", frequency_filename="exceedance_summary.txt", **kwargs ): super().__init__(clock, grid, **kwargs) self._ensure_precip_runoff_are_vanilla() self.opt_stochastic_duration = opt_stochastic_duration if self.opt_stochastic_duration and ( "PrecipChanger" in self.boundary_handlers ): msg = ( "terrainbento StochasticErosionModel: setting " "opt_stochastic_duration=True and using the PrecipChanger " "boundary condition handler are not compatible." ) raise ValueError(msg) self.seed = int(random_seed) self.random_seed = random_seed self.frequency_filename = frequency_filename self.storm_sequence_filename = storm_sequence_filename self.mean_storm_duration = mean_storm_duration self.mean_interstorm_duration = mean_interstorm_duration self.mean_storm_depth = mean_storm_depth self.shape_factor = rainfall__shape_factor self.number_of_sub_time_steps = number_of_sub_time_steps self.rainfall_intermittency_factor = rainfall_intermittency_factor self.rainfall__mean_rate = rainfall__mean_rate if record_rain: self.record_rain = True self.rain_record = { "event_start_time": [], "event_duration": [], "rainfall_rate": [], "runoff_rate": [], } else: self.record_rain = False self.rain_record = None def calc_runoff_and_discharge(self): if self.rain_rate > 0.0 and self.infilt > 0.0: runoff = self.rain_rate - ( self.infilt * (1.0 - np.exp(-self.rain_rate / self.infilt)) ) if runoff <= 0: runoff = 0 else: runoff = self.rain_rate self.grid.at_node["surface_water__discharge"][:] = ( runoff * self.grid.at_node["drainage_area"] ) return runoff def run_for_stochastic(self, step, runtime): self.rain_generator._delta_t = step self.rain_generator._run_time = runtime for ( tr, p, ) in self.rain_generator.yield_storm_interstorm_duration_intensity(): self.rain_rate = p self.run_one_step(tr) def instantiate_rain_generator(self): if self.opt_stochastic_duration: self.rain_generator = PrecipitationDistribution( mean_storm_duration=self.mean_storm_duration, mean_interstorm_duration=self.mean_interstorm_duration, mean_storm_depth=self.mean_storm_depth, total_t=self.clock.stop, delta_t=self.clock.step, random_seed=self.seed, ) self.run_for = self.run_for_stochastic else: from scipy.special import gamma self.rain_generator = PrecipitationDistribution( mean_storm_duration=1.0, mean_interstorm_duration=1.0, mean_storm_depth=1.0, random_seed=self.seed, ) self.scale_factor = self.rainfall__mean_rate / gamma( 1.0 + (1.0 / self.shape_factor) ) if ( isinstance(self.number_of_sub_time_steps, (int, np.integer)) is False ): raise ValueError( ("number_of_sub_time_steps must be of type integer.") ) self.n_sub_steps = self.number_of_sub_time_steps def reset_random_seed(self): self.rain_generator.seed_generator(seedval=self.seed)
MIT License
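As the docstring says, models that need per-storm preparation override this hook; a minimal sketch of such a subclass (the soil__depth field and the scaling are illustrative, not part of the base class):

from terrainbento.base_class.stochastic_erosion_model import StochasticErosionModel

class ThresholdStochasticModel(StochasticErosionModel):
    """Hypothetical model that refreshes an erosion threshold before each storm."""

    def _pre_water_erosion_steps(self):
        # Recompute a depth-dependent threshold before the eroder runs this storm.
        depth = self.grid.at_node["soil__depth"]      # illustrative field name
        self.threshold = 0.5 + 0.1 * depth

A real model would also define run_one_step and pass a clock and grid to the constructor, as the base class __init__ above expects.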
bitwrap/bitwrap-io
bitwrap_io/storage/__init__.py
Database.schema_exists
python
def schema_exists(self): with self.cursor() as cur: cur.execute(sql.SQL(""" SELECT exists(select tablename from pg_tables where schemaname = %s and tablename = 'states'); """), [self.schema]) res = cur.fetchone()[0] return res
test that an event-machine schema exists
https://github.com/bitwrap/bitwrap-io/blob/cc84be5af4d37e5b711e59b12cf95b9b346c7e4f/bitwrap_io/storage/__init__.py#L76-L87
from string import Template from contextlib import contextmanager import psycopg2 from psycopg2 import sql from bitwrap_io.storage.postgres import connect class Storage(object): _pool = {} def __init__(self, schema, **kwargs): if schema in Storage._pool: self.db = Storage._pool[schema] else: self.db = Database(schema, kwargs) Storage._pool[schema] = self.db def commit(self, req): if 'payload' not in req or not req['payload']: req['payload'] = '{}' _sql = sql.SQL(""" INSERT INTO {schema}.events(oid, action, payload) VALUES(%s, %s, %s) RETURNING to_json((hash, oid, seq )::{schema}.event) as event; """).format(schema=sql.Identifier(self.db.schema)) with self.db.cursor() as cur: try: cur.execute(_sql, [req['oid'], req['action'], req['payload']]) res = cur.fetchone() return res[0] except psycopg2.IntegrityError: msg = 'INVALID_OUTPUT' except psycopg2.InternalError: msg = 'INVALID_INPUT' except psycopg2.ProgrammingError as ex: msg = str(ex).splitlines()[0] return {'oid': req['oid'], 'action': req['action'], '__err__': msg} class Database(object): def __init__(self, schema, rds_config): self.pool = connect(**rds_config) self.schema = schema self.states = States(self) self.events = Events(self) @contextmanager def cursor(self): conn = self.pool.getconn() cursor = conn.cursor() try: yield cursor conn.commit() finally: self.pool.putconn(conn)
MIT License
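A usage sketch for schema_exists; the Postgres keyword arguments below are placeholders that Storage forwards untouched to bitwrap's connect() helper, so the exact names depend on that helper:

from bitwrap_io.storage import Storage   # module path from the record

# Placeholder connection settings, passed straight through as **kwargs.
pg_settings = dict(host='localhost', user='bitwrap', password='secret', database='bitwrap')

store = Storage('my_machine', **pg_settings)     # one pooled Database per schema name
if not store.db.schema_exists():
    print('schema my_machine has no states table yet')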
michaelnowotny/cocos
cocos/multi_processing/multi_core_batch_processing.py
map_combine_multicore
python
def map_combine_multicore( f: tp.Callable[..., ResultType], combination: tp.Callable[[tp.Iterable[ResultType]], ResultType], args_list: tp.Optional[tp.Sequence[tp.Sequence]] = None, kwargs_list: tp.Optional[tp.Sequence[tp.Dict[str, tp.Any]]] = None, number_of_batches: tp.Optional[int] = None, multiprocessing_pool_type: MultiprocessingPoolType = MultiprocessingPoolType.default()) -> ResultType: args_list, kwargs_list, number_of_batches = _extract_arguments_and_number_of_batches( args_list=args_list, kwargs_list=kwargs_list, number_of_batches=number_of_batches) def wrapped_f(index, *args, **kwargs) -> ResultType: return index, f(*args, **kwargs) results = [] if multiprocessing_pool_type == MultiprocessingPoolType.LOKY: from concurrent.futures import as_completed from loky import get_reusable_executor executor = get_reusable_executor(timeout=None, context='loky') futures = [executor.submit(wrapped_f, i, *args, **kwargs) for i, (args, kwargs) in enumerate(zip(args_list, kwargs_list))] for future in as_completed(futures): results.append(future.result()) elif multiprocessing_pool_type == MultiprocessingPoolType.PATHOS: from pathos.pools import ProcessPool pool = ProcessPool() futures = [pool.apipe(wrapped_f, i, *args, **kwargs) for i, (args, kwargs) in enumerate(zip(args_list, kwargs_list))] for future in futures: results.append(future.get()) else: raise ValueError(f'Multiprocessing pool type {multiprocessing_pool_type} not supported') results = sorted(results, key=lambda x: x[0]) results = [result[1] for result in results] return combination(results)
This function evaluates the function `f` on elements of `args_list` and `kwargs_list` in parallel on multiple cpu cores and aggregates results in a single step by calling the function `combination` with a list of all results. Results provided to `combination` are in the same order as they appear in `args_list` and `kwargs_list`. If the arguments for each run of 'f' are identical and they have already been applied to the function that is passed then 'args_list' and 'kwargs_list' may both be None but the argument 'number_of_batches' must be specified so the method knows how many times to run the function 'f'. Args: f: The map function to be evaluated over elements of 'args_list' and 'kwargs_list'. combination: A function that aggregates a list of all results in a single step args_list: A sequence of sequences of positional arguments. kwargs_list: A sequence of dictionaries of keyword arguments. number_of_batches: The number of function evaluations is required if 'args_list' and 'kwargs_list' are both empty. multiprocessing_pool_type: the type of multi-processing pool (see class MultiprocessingPoolType)
https://github.com/michaelnowotny/cocos/blob/3c34940d7d9eb8592a97788a5df84b8d472f2928/cocos/multi_processing/multi_core_batch_processing.py#L97-L175
import typing as tp from cocos.multi_processing.utilities import ( ResultType, MultiprocessingPoolType, _extract_arguments_and_number_of_batches ) def map_reduce_multicore( f: tp.Callable[..., ResultType], reduction: tp.Callable[[ResultType, ResultType], ResultType], initial_value: ResultType, args_list: tp.Optional[tp.Sequence[tp.Sequence]] = None, kwargs_list: tp.Optional[tp.Sequence[tp.Dict[str, tp.Any]]] = None, number_of_batches: tp.Optional[int] = None, multiprocessing_pool_type: MultiprocessingPoolType = MultiprocessingPoolType.default()) -> ResultType: args_list, kwargs_list, number_of_batches = _extract_arguments_and_number_of_batches( args_list=args_list, kwargs_list=kwargs_list, number_of_batches=number_of_batches) def wrapped_f(index, *args, **kwargs) -> ResultType: return index, f(*args, **kwargs) if multiprocessing_pool_type == MultiprocessingPoolType.LOKY: from concurrent.futures import as_completed from loky import get_reusable_executor executor = get_reusable_executor(timeout=None, context='loky') futures = [executor.submit(wrapped_f, i, *args, **kwargs) for i, (args, kwargs) in enumerate(zip(args_list, kwargs_list))] result_from_future = lambda x: x.result() elif multiprocessing_pool_type == MultiprocessingPoolType.PATHOS: from pathos.pools import ProcessPool pool = ProcessPool() futures = [pool.apipe(wrapped_f, i, *args, **kwargs) for i, (args, kwargs) in enumerate(zip(args_list, kwargs_list))] result_from_future = lambda x: x.get() else: raise ValueError(f'Multiprocessing pool type {multiprocessing_pool_type} not supported') results = [result_from_future(future) for future in futures] results = sorted(results, key=lambda x: x[0]) results = [result[1] for result in results] result = initial_value for new_result in results: result = reduction(result, new_result) return result
MIT License
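map_combine_multicore only needs a picklable function, per-batch argument tuples and a combination step; a self-contained sketch that sums squares across four batches, assuming the loky or pathos backend used by cocos is installed:

from cocos.multi_processing.multi_core_batch_processing import map_combine_multicore

def partial_sum_of_squares(begin, end):
    return sum(i * i for i in range(begin, end))

if __name__ == '__main__':   # guard for platforms that spawn worker processes
    args_list = [(0, 250_000), (250_000, 500_000),
                 (500_000, 750_000), (750_000, 1_000_000)]
    total = map_combine_multicore(
        f=partial_sum_of_squares,
        combination=sum,      # results arrive in args_list order, then get summed
        args_list=args_list,
    )
    print(total == sum(i * i for i in range(1_000_000)))   # True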
wikimedia/pywikibot
pywikibot/site/_tokenwallet.py
TokenWallet.__contains__
python
def __contains__(self, key): return key in self._tokens.setdefault(self.site.user(), {})
Return True if the given token name is cached.
https://github.com/wikimedia/pywikibot/blob/5097f5b9a7ef9d39f35f17edd11faf3086a01d1d/pywikibot/site/_tokenwallet.py#L86-L88
from pywikibot import log from pywikibot.exceptions import Error class TokenWallet: def __init__(self, site): self.site = site self._tokens = {} self.failed_cache = set() def load_tokens(self, types, all=False): if self.site.user() is None: self.site.login() self._tokens.setdefault(self.site.user(), {}).update( self.site.get_tokens(types, all=all)) if all is not False: for key in types: if key not in self._tokens[self.site.user()]: self.failed_cache.add((self.site.user(), key)) def __getitem__(self, key): if self.site.user() is None: self.site.login() user_tokens = self._tokens.setdefault(self.site.user(), {}) failed_cache_key = (self.site.user(), key) if self.site.mw_version >= '1.24wmf19' and key in {'edit', 'delete', 'protect', 'move', 'block', 'unblock', 'email', 'import', 'options'}: log('Token {!r} was replaced by {!r}'.format(key, 'csrf')) key = 'csrf' try: key = self.site.validate_tokens([key])[0] except IndexError: raise Error( "Requested token '{}' is invalid on {} wiki." .format(key, self.site)) if (key not in user_tokens and failed_cache_key not in self.failed_cache): self.load_tokens([key], all=False if user_tokens else None) if key in user_tokens: return user_tokens[key] self.failed_cache.add(failed_cache_key) raise Error( "Action '{}' is not allowed for user {} on {} wiki." .format(key, self.site.user(), self.site))
MIT License
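The wallet is normally reached through a site object (site.tokens in pywikibot; that attribute name is not shown in this record), so __contains__ is the cheap "is it cached yet" check before __getitem__ fetches:

import pywikibot   # needs a configured user-config.py and account

site = pywikibot.Site('en', 'wikipedia')

if 'csrf' in site.tokens:          # __contains__: True only if already cached for this user
    print('csrf token already cached')
else:
    token = site.tokens['csrf']    # __getitem__ logs in, fetches and caches it
    print('fetched a csrf token')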
sloria/textblob
textblob/en/np_extractors.py
_normalize_tags
python
def _normalize_tags(chunk): ret = [] for word, tag in chunk: if tag == 'NP-TL' or tag == 'NP': ret.append((word, 'NNP')) continue if tag.endswith('-TL'): ret.append((word, tag[:-3])) continue if tag.endswith('S'): ret.append((word, tag[:-1])) continue ret.append((word, tag)) return ret
Normalize the corpus tags. ("NN", "NN-PL", "NNS") -> "NN"
https://github.com/sloria/textblob/blob/81791b149c358c3db2e445734e3d4ffbf3658597/textblob/en/np_extractors.py#L165-L181
from __future__ import unicode_literals, absolute_import import nltk from textblob.taggers import PatternTagger from textblob.decorators import requires_nltk_corpus from textblob.utils import tree2str, filter_insignificant from textblob.base import BaseNPExtractor class ChunkParser(nltk.ChunkParserI): def __init__(self): self._trained = False @requires_nltk_corpus def train(self): train_data = [[(t, c) for _, t, c in nltk.chunk.tree2conlltags(sent)] for sent in nltk.corpus.conll2000.chunked_sents('train.txt', chunk_types=['NP'])] unigram_tagger = nltk.UnigramTagger(train_data) self.tagger = nltk.BigramTagger(train_data, backoff=unigram_tagger) self._trained = True def parse(self, sentence): if not self._trained: self.train() pos_tags = [pos for (word, pos) in sentence] tagged_pos_tags = self.tagger.tag(pos_tags) chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags] conlltags = [(word, pos, chunktag) for ((word, pos), chunktag) in zip(sentence, chunktags)] return nltk.chunk.util.conlltags2tree(conlltags) class ConllExtractor(BaseNPExtractor): POS_TAGGER = PatternTagger() CFG = { ('NNP', 'NNP'): 'NNP', ('NN', 'NN'): 'NNI', ('NNI', 'NN'): 'NNI', ('JJ', 'JJ'): 'JJ', ('JJ', 'NN'): 'NNI', } INSIGNIFICANT_SUFFIXES = ['DT', 'CC', 'PRP$', 'PRP'] def __init__(self, parser=None): self.parser = ChunkParser() if not parser else parser def extract(self, text): sentences = nltk.tokenize.sent_tokenize(text) noun_phrases = [] for sentence in sentences: parsed = self._parse_sentence(sentence) phrases = [_normalize_tags(filter_insignificant(each, self.INSIGNIFICANT_SUFFIXES)) for each in parsed if isinstance(each, nltk.tree.Tree) and each.label() == 'NP' and len(filter_insignificant(each)) >= 1 and _is_match(each, cfg=self.CFG)] nps = [tree2str(phrase) for phrase in phrases] noun_phrases.extend(nps) return noun_phrases def _parse_sentence(self, sentence): tagged = self.POS_TAGGER.tag(sentence) return self.parser.parse(tagged) class FastNPExtractor(BaseNPExtractor): CFG = { ('NNP', 'NNP'): 'NNP', ('NN', 'NN'): 'NNI', ('NNI', 'NN'): 'NNI', ('JJ', 'JJ'): 'JJ', ('JJ', 'NN'): 'NNI', } def __init__(self): self._trained = False @requires_nltk_corpus def train(self): train_data = nltk.corpus.brown.tagged_sents(categories='news') regexp_tagger = nltk.RegexpTagger([ (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'(-|:|;)$', ':'), (r'\'*$', 'MD'), (r'(The|the|A|a|An|an)$', 'AT'), (r'.*able$', 'JJ'), (r'^[A-Z].*$', 'NNP'), (r'.*ness$', 'NN'), (r'.*ly$', 'RB'), (r'.*s$', 'NNS'), (r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*', 'NN'), ]) unigram_tagger = nltk.UnigramTagger(train_data, backoff=regexp_tagger) self.tagger = nltk.BigramTagger(train_data, backoff=unigram_tagger) self._trained = True return None def _tokenize_sentence(self, sentence): tokens = nltk.word_tokenize(sentence) return tokens def extract(self, sentence): if not self._trained: self.train() tokens = self._tokenize_sentence(sentence) tagged = self.tagger.tag(tokens) tags = _normalize_tags(tagged) merge = True while merge: merge = False for x in range(0, len(tags) - 1): t1 = tags[x] t2 = tags[x + 1] key = t1[1], t2[1] value = self.CFG.get(key, '') if value: merge = True tags.pop(x) tags.pop(x) match = '%s %s' % (t1[0], t2[0]) pos = value tags.insert(x, (match, pos)) break matches = [t[0] for t in tags if t[1] in ['NNP', 'NNI']] return matches
MIT License
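_normalize_tags is a pure helper over (word, tag) pairs, so it can be shown directly; the import assumes textblob is installed and the chunk below is made up:

from textblob.en.np_extractors import _normalize_tags   # private helper, path from the record

chunk = [('The', 'AT'), ('United', 'NP-TL'), ('Nations', 'NP-TL'),
         ('issues', 'NNS'), ('reports', 'NNS')]
print(_normalize_tags(chunk))
# [('The', 'AT'), ('United', 'NNP'), ('Nations', 'NNP'), ('issues', 'NN'), ('reports', 'NN')]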
campaignmonitor/createsend-python
lib/createsend/client.py
Client.get_primary_contact
python
def get_primary_contact(self): response = self._get(self.uri_for('primarycontact')) return json_to_py(response)
retrieves the primary contact for this client
https://github.com/campaignmonitor/createsend-python/blob/7399f44a90507f2c555b83d176904bb261983959/lib/createsend/client.py#L155-L158
from __future__ import absolute_import import json from createsend.createsend import CreateSendBase from createsend.utils import json_to_py class Client(CreateSendBase): def __init__(self, auth=None, client_id=None): self.client_id = client_id super(Client, self).__init__(auth) def create(self, company, timezone, country): body = { "CompanyName": company, "TimeZone": timezone, "Country": country} response = self._post("/clients.json", json.dumps(body)) self.client_id = json_to_py(response) return self.client_id def details(self): response = self._get("/clients/%s.json" % self.client_id) return json_to_py(response) def campaigns(self): response = self._get(self.uri_for("campaigns")) return json_to_py(response) def scheduled(self): response = self._get(self.uri_for("scheduled")) return json_to_py(response) def drafts(self): response = self._get(self.uri_for("drafts")) return json_to_py(response) def lists(self): response = self._get(self.uri_for("lists")) return json_to_py(response) def lists_for_email(self, email_address): params = {"email": email_address} response = self._get(self.uri_for("listsforemail"), params=params) return json_to_py(response) def segments(self): response = self._get(self.uri_for("segments")) return json_to_py(response) def suppressionlist(self, page=1, page_size=1000, order_field="email", order_direction="asc"): params = { "page": page, "pagesize": page_size, "orderfield": order_field, "orderdirection": order_direction} response = self._get(self.uri_for("suppressionlist"), params=params) return json_to_py(response) def suppress(self, email): body = { "EmailAddresses": [email] if isinstance(email, str) else email} response = self._post(self.uri_for("suppress"), json.dumps(body)) def unsuppress(self, email): params = {"email": email} response = self._put(self.uri_for("unsuppress"), body=" ", params=params) def templates(self): response = self._get(self.uri_for("templates")) return json_to_py(response) def set_basics(self, company, timezone, country): body = { "CompanyName": company, "TimeZone": timezone, "Country": country} response = self._put(self.uri_for('setbasics'), json.dumps(body)) def set_payg_billing(self, currency, can_purchase_credits, client_pays, markup_percentage, markup_on_delivery=0, markup_per_recipient=0, markup_on_design_spam_test=0): body = { "Currency": currency, "CanPurchaseCredits": can_purchase_credits, "ClientPays": client_pays, "MarkupPercentage": markup_percentage, "MarkupOnDelivery": markup_on_delivery, "MarkupPerRecipient": markup_per_recipient, "MarkupOnDesignSpamTest": markup_on_design_spam_test} response = self._put(self.uri_for('setpaygbilling'), json.dumps(body)) def set_monthly_billing(self, currency, client_pays, markup_percentage, monthly_scheme=None): body = { "Currency": currency, "ClientPays": client_pays, "MarkupPercentage": markup_percentage} if monthly_scheme is not None: body["MonthlyScheme"] = monthly_scheme response = self._put(self.uri_for( 'setmonthlybilling'), json.dumps(body)) def transfer_credits(self, credits, can_use_my_credits_when_they_run_out): body = { "Credits": credits, "CanUseMyCreditsWhenTheyRunOut": can_use_my_credits_when_they_run_out} response = self._post(self.uri_for('credits'), json.dumps(body)) return json_to_py(response) def people(self): response = self._get(self.uri_for('people')) return json_to_py(response)
MIT License
tensorflow/graphics
tensorflow_graphics/math/sampling.py
logspace_1d
python
def logspace_1d(near: TensorLike,
                far: TensorLike,
                num_samples: int,
                base: float = 10.0,
                name="logspace_1d") -> tf.Tensor:
    with tf.name_scope(name):
        near = tf.convert_to_tensor(near)
        far = tf.convert_to_tensor(far)

        shape.compare_batch_dimensions(
            tensors=(tf.expand_dims(near, axis=-1),
                     tf.expand_dims(far, axis=-1)),
            tensor_names=("near", "far"),
            last_axes=-1,
            broadcast_compatible=True)

        linspace = tf.linspace(near, far, num_samples, axis=-1)
        return tf.math.pow(base, linspace)
Sample evenly spaced numbers from an interval on a log scale.

Args:
  near: A tensor of shape `[A1, ... An]` containing the starting points of
    the sampling interval.
  far: A tensor of shape `[A1, ... An]` containing the ending points of the
    sampling interval.
  num_samples: The number M of points to be sampled.
  base: The logarithmic base.
  name: A name for this op that defaults to "logspace_1d".

Returns:
  A tensor of shape `[A1, ..., An, M]` indicating the M points on the ray
https://github.com/tensorflow/graphics/blob/d0817aec7dee35635814e925a59d83955459d93c/tensorflow_graphics/math/sampling.py#L159-L188
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape from tensorflow_graphics.util.type_alias import TensorLike def regular_1d(near: TensorLike, far: TensorLike, num_samples: int, name="regular_1d") -> tf.Tensor: with tf.name_scope(name): near = tf.convert_to_tensor(near) far = tf.convert_to_tensor(far) shape.compare_batch_dimensions( tensors=(tf.expand_dims(near, axis=-1), tf.expand_dims(far, axis=-1)), tensor_names=("near", "far"), last_axes=-1, broadcast_compatible=True) return tf.linspace(near, far, num_samples, axis=-1) def regular_inverse_1d(near: TensorLike, far: TensorLike, num_samples: int, name="regular_inverse_1d") -> tf.Tensor: with tf.name_scope(name): near = tf.convert_to_tensor(near) far = tf.convert_to_tensor(far) shape.compare_batch_dimensions( tensors=(tf.expand_dims(near, axis=-1), tf.expand_dims(far, axis=-1)), tensor_names=("near", "far"), last_axes=-1, broadcast_compatible=True) return 1. / tf.linspace(1. / near, 1. / far, num_samples, axis=-1) def uniform_1d(near: TensorLike, far: TensorLike, num_samples: int, name="uniform_1d") -> tf.Tensor: with tf.name_scope(name): near = tf.convert_to_tensor(near) far = tf.convert_to_tensor(far) shape.compare_batch_dimensions( tensors=(tf.expand_dims(near, axis=-1), tf.expand_dims(far, axis=-1)), tensor_names=("near", "far"), last_axes=-1, broadcast_compatible=True) target_shape = tf.concat([tf.shape(near), [num_samples]], axis=-1) random_samples = tf.random.uniform(target_shape, minval=tf.expand_dims(near, -1), maxval=tf.expand_dims(far, -1)) return tf.sort(random_samples, axis=-1) def stratified_1d(near: TensorLike, far: TensorLike, num_samples: int, name="stratified_1d") -> tf.Tensor: with tf.name_scope(name): near = tf.convert_to_tensor(near) far = tf.convert_to_tensor(far) shape.compare_batch_dimensions( tensors=(tf.expand_dims(near, axis=-1), tf.expand_dims(far, axis=-1)), tensor_names=("near", "far"), last_axes=-1, broadcast_compatible=True) bin_borders = tf.linspace(0.0, 1.0, num_samples + 1, axis=-1) bin_below = bin_borders[..., :-1] bin_above = bin_borders[..., 1:] target_shape = tf.concat([tf.shape(near), [num_samples]], axis=-1) random_point_in_bin = tf.random.uniform(target_shape) z_values = bin_below + (bin_above - bin_below) * random_point_in_bin z_values = (tf.expand_dims(near, -1) * (1. - z_values) + tf.expand_dims(far, -1) * z_values) return z_values
Apache License 2.0
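A minimal usage sketch for the `logspace_1d` sample above; the bounds and sample count below are illustrative assumptions, and it presumes the function is importable from the tensorflow_graphics/math/sampling.py module shown in the record.

import tensorflow as tf

# Hypothetical per-ray bounds: exponents from 0 to 2 (row 0) and 0 to 3 (row 1).
near = tf.constant([0.0, 0.0])
far = tf.constant([2.0, 3.0])

# Per the body above this returns base ** linspace(near, far, 8), i.e. 8 points
# per row whose exponents are evenly spaced -- roughly 1..100 and 1..1000 here.
samples = logspace_1d(near, far, num_samples=8)   # shape [2, 8]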
compas-dev/compas
src/compas/datastructures/mesh/transformations.py
mesh_transform
python
def mesh_transform(mesh, transformation):
    vertices = list(mesh.vertices())
    xyz = [mesh.vertex_coordinates(vertex) for vertex in vertices]
    xyz[:] = transform_points(xyz, transformation)
    for index, vertex in enumerate(vertices):
        mesh.vertex_attributes(vertex, 'xyz', xyz[index])
Transform a mesh.

Parameters
----------
mesh : compas.datastructures.Mesh
    The mesh.
transformation : compas.geometry.Transformation
    The transformation.

Notes
-----
The mesh is modified in-place.

Examples
--------
>>> from compas.datastructures import Mesh
>>> from compas.geometry import matrix_from_axis_and_angle
>>> mesh = Mesh.from_polyhedron(6)
>>> T = matrix_from_axis_and_angle([0, 0, 1], math.pi / 4)
>>> tmesh = mesh.copy()
>>> mesh_transform(tmesh, T)
https://github.com/compas-dev/compas/blob/d795a8bfe9f21ffa124d09e37e9c0ed2e3520057/src/compas/datastructures/mesh/transformations.py#L14-L42
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

from compas.geometry import transform_points


__all__ = [
    'mesh_transform',
    'mesh_transformed',
]
MIT License
zeaphoo/postmodel
postmodel/ext/callflow.py
register_postmodel
python
def register_postmodel(
    app: CallFlow,
    default_db_url,
    extra_db_urls = {},
    modules: Optional[Dict[str, List[str]]] = None,
    generate_schemas: bool = False,
) -> None:
    @app.before_start
    async def init_postmodel():
        await Postmodel.init(default_db_url, modules=modules, extra_db_urls=extra_db_urls)
        await logger.info("Postmodel started.")
        if generate_schemas:
            await logger.info("Postmodel generating schema")
            await Postmodel.generate_schemas()

    @app.after_stop
    async def close_postmodel():
        await Postmodel.close()
        await logger.info("Postmodel shutdown.")
Registers ``before_server_start`` and ``after_server_stop`` hooks to
set up and tear down Postmodel inside a CallFlow webserver.

You can configure it using ``(db_url, modules)``.

Parameters
----------
app:
    The CallFlow app.
db_url:
    Use a DB_URL string. See :ref:`db_url`.
modules:
    Dictionary of ``key``: [``list_of_modules``] that define "apps" and
    modules that should be discovered for models.
generate_schemas:
    True to generate schema immediately. Only useful for dev environments.
https://github.com/zeaphoo/postmodel/blob/727a0a4f12d313fc83488ebecf6368847c6c97a6/postmodel/ext/callflow.py#L10-L47
import logging
from typing import Dict, List, Optional

from callflow import CallFlow

from postmodel import Postmodel
from basepy.asynclog import logger
MIT License
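A rough sketch of wiring the `register_postmodel` helper above into an app. The database URL and module mapping are placeholders, and the no-argument `CallFlow()` construction is an assumption; only the `register_postmodel` call itself comes from the record.

from callflow import CallFlow
from postmodel.ext.callflow import register_postmodel  # module path as shown above

app = CallFlow()  # assumed constructor; consult callflow itself for the real signature

# Placeholder connection string and model module -- replace with real values.
register_postmodel(
    app,
    default_db_url="postgres://user:pass@localhost:5432/mydb",
    modules={"models": ["myapp.models"]},
    generate_schemas=True,  # per the docstring, only useful for dev environments
)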
migalkin/stare
data_loaders/load.py
load_wd50k_33_statements
python
def load_wd50k_33_statements(maxlen: int) -> Dict:
    wd50k_DIR = PARSED_DATA_DIR / 'wd50k_33'

    with open(wd50k_DIR / 'train_statements.pkl', 'rb') as f:
        train_statements = pickle.load(f)
    with open(wd50k_DIR / 'valid_statements.pkl', 'rb') as f:
        valid_statements = pickle.load(f)
    with open(wd50k_DIR / 'test_statements.pkl', 'rb') as f:
        test_statements = pickle.load(f)

    statement_entities, statement_predicates = _get_uniques_(train_data=train_statements,
                                                             valid_data=valid_statements,
                                                             test_data=test_statements)
    st_entities = ['__na__'] + statement_entities
    st_predicates = ['__na__'] + statement_predicates

    entoid = {pred: i for i, pred in enumerate(st_entities)}
    prtoid = {pred: i for i, pred in enumerate(st_predicates)}

    train, valid, test = [], [], []
    for st in train_statements:
        id_st = []
        for i, uri in enumerate(st):
            id_st.append(entoid[uri] if i % 2 is 0 else prtoid[uri])
        train.append(id_st)
    for st in valid_statements:
        id_st = []
        for i, uri in enumerate(st):
            id_st.append(entoid[uri] if i % 2 is 0 else prtoid[uri])
        valid.append(id_st)
    for st in test_statements:
        id_st = []
        for i, uri in enumerate(st):
            id_st.append(entoid[uri] if i % 2 is 0 else prtoid[uri])
        test.append(id_st)

    train, valid, test = _pad_statements_(train, maxlen), _pad_statements_(valid, maxlen), _pad_statements_(
        test, maxlen)

    return {"train": train, "valid": valid, "test": test,
            "n_entities": len(st_entities), "n_relations": len(st_predicates),
            'e2id': entoid, 'r2id': prtoid}
Pull up data from parsed data (thanks magic mike!) and preprocess it to death.

:return: dict
https://github.com/migalkin/stare/blob/f40a5ee082d61851477e9870c21e991c7d91deb3/data_loaders/load.py#L421-L469
import json from tqdm import tqdm from functools import partial from typing import List, Union, Dict, Callable import numpy as np import pickle from utils.utils import PARSED_DATA_DIR, KNOWN_DATASETS from pathlib import Path from utils.utils_mytorch import FancyDict import random def _conv_to_our_format_(data, filter_literals=True): conv_data = [] dropped_statements = 0 dropped_quals = 0 for datum in tqdm(data): try: conv_datum = [] head, tail, rel_h, rel_t = None, None, None, None for rel, val in datum.items(): if rel[-2:] == '_h' and type(val) is str: head = val rel_h = rel[:-2] if rel[-2:] == '_t' and type(val) is str: tail = val rel_t = rel[:-2] if filter_literals and "http://" in tail: dropped_statements += 1 raise Exception assert head and tail and rel_h and rel_t, f"Weird data point. Some essentials not found. Quitting\nD:{datum}" assert rel_h == rel_t, f"Weird data point. Head and Tail rels are different. Quitting\nD: {datum}" datum.pop(rel_h + '_h') datum.pop(rel_t + '_t') datum.pop('N') conv_datum += [head, rel_h, tail] for k, v in datum.items(): for _v in v: if filter_literals and "http://" in _v: dropped_quals += 1 continue conv_datum += [k, _v] conv_data.append(tuple(conv_datum)) except Exception: continue print(f"\n Dropped {dropped_statements} statements and {dropped_quals} quals with literals \n ") return conv_data def _conv_to_our_quint_format_(data, filter_literals=True): conv_data = [] dropped_statements = 0 dropped_quals = 0 for datum in tqdm(data): try: conv_datum = [] head, tail, rel_h, rel_t = None, None, None, None for rel, val in datum.items(): if rel[-2:] == '_h' and type(val) is str: head = val rel_h = rel[:-2] if rel[-2:] == '_t' and type(val) is str: tail = val rel_t = rel[:-2] if filter_literals and "http://" in tail: dropped_statements += 1 raise Exception assert head and tail and rel_h and rel_t, f"Weird data point. Some essentials not found. Quitting\nD:{datum}" assert rel_h == rel_t, f"Weird data point. Head and Tail rels are different. 
Quitting\nD: {datum}" datum.pop(rel_h + '_h') datum.pop(rel_t + '_t') datum.pop('N') conv_datum += [head, rel_h, tail, None, None] if len(datum.items()) == 0: conv_data.append(tuple(conv_datum)) else: for k, v in datum.items(): conv_datum[3] = k for _v in v: if filter_literals and "http://" in _v: dropped_quals += 1 continue conv_datum[4] = _v conv_data.append(tuple(conv_datum)) except Exception: continue print(f"\n Dropped {dropped_statements} statements and {dropped_quals} quals with literals \n ") return conv_data def _conv_jf17k_to_quints(data): result = [] for statement in data: ents = statement[0::2] rels = statement[1::2] if len(rels) == 1: result.append(statement) else: s, p, o = statement[0], statement[1], statement[2] qual_rel = rels[1:] qual_ent = ents[2:] for i in range(len(qual_rel)): result.append([s, p, o, qual_rel[i], qual_ent[i]]) return result def _get_uniques_(train_data: List[tuple], valid_data: List[tuple], test_data: List[tuple]) -> ( list, list): statement_entities, statement_predicates = [], [] for statement in train_data + valid_data + test_data: statement_entities += statement[::2] statement_predicates += statement[1::2] statement_entities = sorted(list(set(statement_entities))) statement_predicates = sorted(list(set(statement_predicates))) return statement_entities, statement_predicates def _pad_statements_(data: List[list], maxlen: int) -> List[list]: result = [ statement + [0] * (maxlen - len(statement)) if len(statement) < maxlen else statement[ :maxlen] for statement in data] return result def clean_literals(data: List[list]) -> List[list]: result = [] for triple in data: if "http://" not in triple[2]: result.append(triple) return result def remove_dups(data: List[list]) -> List[list]: new_l = [] for datum in tqdm(data): if datum not in new_l: new_l.append(datum) return new_l def load_wd50k_quints() -> Dict: wd50k_DIR = PARSED_DATA_DIR / 'wd50k' with open(wd50k_DIR / 'train_quints.pkl', 'rb') as f: train_quints = pickle.load(f) with open(wd50k_DIR / 'valid_quints.pkl', 'rb') as f: valid_quints = pickle.load(f) with open(wd50k_DIR / 'test_quints.pkl', 'rb') as f: test_quints = pickle.load(f) quints_entities, quints_predicates = [], [] for quint in train_quints + valid_quints + test_quints: quints_entities += [quint[0], quint[2]] if quint[4]: quints_entities.append(quint[4]) quints_predicates.append(quint[1]) if quint[3]: quints_predicates.append(quint[3]) quints_entities = sorted(list(set(quints_entities))) quints_predicates = sorted(list(set(quints_predicates))) q_entities = ['__na__'] + quints_entities q_predicates = ['__na__'] + quints_predicates entoid = {pred: i for i, pred in enumerate(q_entities)} prtoid = {pred: i for i, pred in enumerate(q_predicates)} train = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]], prtoid[q[3]] if q[3] is not None else prtoid['__na__'], entoid[q[4]] if q[4] is not None else entoid['__na__']] for q in train_quints] valid = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]], prtoid[q[3]] if q[3] is not None else prtoid['__na__'], entoid[q[4]] if q[4] is not None else entoid['__na__']] for q in valid_quints] test = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]], prtoid[q[3]] if q[3] is not None else prtoid['__na__'], entoid[q[4]] if q[4] is not None else entoid['__na__']] for q in test_quints] return {"train": train, "valid": valid, "test": test, "n_entities": len(q_entities), "n_relations": len(q_predicates), 'e2id': entoid, 'r2id': prtoid} def load_wd50k_triples() -> Dict: wd50k_DIR = PARSED_DATA_DIR / 'wd50k' with open(wd50k_DIR / 
'train_triples.pkl', 'rb') as f: train_triples = pickle.load(f) with open(wd50k_DIR / 'valid_triples.pkl', 'rb') as f: valid_triples = pickle.load(f) with open(wd50k_DIR / 'test_triples.pkl', 'rb') as f: test_triples = pickle.load(f) triples_entities, triples_predicates = [], [] for triple in train_triples + valid_triples + test_triples: triples_entities += [triple[0], triple[2]] triples_predicates.append(triple[1]) triples_entities = ['__na__'] + sorted(list(set(triples_entities))) triples_predicates = ['__na__'] + sorted(list(set(triples_predicates))) entoid = {pred: i for i, pred in enumerate(triples_entities)} prtoid = {pred: i for i, pred in enumerate(triples_predicates)} train = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]]] for q in train_triples] valid = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]]] for q in valid_triples] test = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]]] for q in test_triples] return {"train": train, "valid": valid, "test": test, "n_entities": len(triples_entities), "n_relations": len(triples_predicates), 'e2id': entoid, 'r2id': prtoid} def load_wd50k_statements(maxlen: int) -> Dict: wd50k_DIR = PARSED_DATA_DIR / 'wd50k' with open(wd50k_DIR / 'train_statements.pkl', 'rb') as f: train_statements = pickle.load(f) with open(wd50k_DIR / 'valid_statements.pkl', 'rb') as f: valid_statements = pickle.load(f) with open(wd50k_DIR / 'test_statements.pkl', 'rb') as f: test_statements = pickle.load(f) statement_entities, statement_predicates = _get_uniques_(train_data=train_statements, valid_data=valid_statements, test_data=test_statements) st_entities = ['__na__'] + statement_entities st_predicates = ['__na__'] + statement_predicates entoid = {pred: i for i, pred in enumerate(st_entities)} prtoid = {pred: i for i, pred in enumerate(st_predicates)} train, valid, test = [], [], [] for st in train_statements: id_st = [] for i, uri in enumerate(st): id_st.append(entoid[uri] if i % 2 is 0 else prtoid[uri]) train.append(id_st) for st in valid_statements: id_st = [] for i, uri in enumerate(st): id_st.append(entoid[uri] if i % 2 is 0 else prtoid[uri]) valid.append(id_st) for st in test_statements: id_st = [] for i, uri in enumerate(st): id_st.append(entoid[uri] if i % 2 is 0 else prtoid[uri]) test.append(id_st) train, valid, test = _pad_statements_(train, maxlen), _pad_statements_(valid, maxlen), _pad_statements_( test, maxlen) return {"train": train, "valid": valid, "test": test, "n_entities": len(st_entities), "n_relations": len(st_predicates), 'e2id': entoid, 'r2id': prtoid} def load_wd50k_33_quints() -> Dict: wd50k_DIR = PARSED_DATA_DIR / 'wd50k_33' with open(wd50k_DIR / 'train_quints.pkl', 'rb') as f: train_quints = pickle.load(f) with open(wd50k_DIR / 'valid_quints.pkl', 'rb') as f: valid_quints = pickle.load(f) with open(wd50k_DIR / 'test_quints.pkl', 'rb') as f: test_quints = pickle.load(f) quints_entities, quints_predicates = [], [] for quint in train_quints + valid_quints + test_quints: quints_entities += [quint[0], quint[2]] if quint[4]: quints_entities.append(quint[4]) quints_predicates.append(quint[1]) if quint[3]: quints_predicates.append(quint[3]) quints_entities = sorted(list(set(quints_entities))) quints_predicates = sorted(list(set(quints_predicates))) q_entities = ['__na__'] + quints_entities q_predicates = ['__na__'] + quints_predicates entoid = {pred: i for i, pred in enumerate(q_entities)} prtoid = {pred: i for i, pred in enumerate(q_predicates)} train = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]], prtoid[q[3]] if q[3] is not None else prtoid['__na__'], entoid[q[4]] 
if q[4] is not None else entoid['__na__']] for q in train_quints] valid = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]], prtoid[q[3]] if q[3] is not None else prtoid['__na__'], entoid[q[4]] if q[4] is not None else entoid['__na__']] for q in valid_quints] test = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]], prtoid[q[3]] if q[3] is not None else prtoid['__na__'], entoid[q[4]] if q[4] is not None else entoid['__na__']] for q in test_quints] return {"train": train, "valid": valid, "test": test, "n_entities": len(q_entities), "n_relations": len(q_predicates), 'e2id': entoid, 'r2id': prtoid} def load_wd50k_33_triples() -> Dict: wd50k_DIR = PARSED_DATA_DIR / 'wd50k_33' with open(wd50k_DIR / 'train_triples.pkl', 'rb') as f: train_triples = pickle.load(f) with open(wd50k_DIR / 'valid_triples.pkl', 'rb') as f: valid_triples = pickle.load(f) with open(wd50k_DIR / 'test_triples.pkl', 'rb') as f: test_triples = pickle.load(f) triples_entities, triples_predicates = [], [] for triple in train_triples + valid_triples + test_triples: triples_entities += [triple[0], triple[2]] triples_predicates.append(triple[1]) triples_entities = ['__na__'] + sorted(list(set(triples_entities))) triples_predicates = ['__na__'] + sorted(list(set(triples_predicates))) entoid = {pred: i for i, pred in enumerate(triples_entities)} prtoid = {pred: i for i, pred in enumerate(triples_predicates)} train = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]]] for q in train_triples] valid = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]]] for q in valid_triples] test = [[entoid[q[0]], prtoid[q[1]], entoid[q[2]]] for q in test_triples] return {"train": train, "valid": valid, "test": test, "n_entities": len(triples_entities), "n_relations": len(triples_predicates), 'e2id': entoid, 'r2id': prtoid}
MIT License
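A call sketch for `load_wd50k_33_statements` above. It assumes the parsed wd50k_33 pickles already exist under PARSED_DATA_DIR; the maxlen value is arbitrary, and the commented shapes follow the return dict shown in the function body.

# Requires data_loaders.load to be importable and the wd50k_33 pickles to be present.
data = load_wd50k_33_statements(maxlen=15)

data['train'][0]                          # one statement, id-encoded and zero-padded to length 15
data['n_entities'], data['n_relations']   # vocabulary sizes, including the '__na__' padding token
data['e2id'], data['r2id']                # entity / relation -> integer id maps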
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_object_meta.py
V1ObjectMeta.finalizers
python
def finalizers(self, finalizers):
    self._finalizers = finalizers
Sets the finalizers of this V1ObjectMeta.

Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.  # noqa: E501

:param finalizers: The finalizers of this V1ObjectMeta.  # noqa: E501
:type: list[str]
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_object_meta.py#L257-L266
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1ObjectMeta(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'annotations': 'dict(str, str)', 'cluster_name': 'str', 'creation_timestamp': 'datetime', 'deletion_grace_period_seconds': 'int', 'deletion_timestamp': 'datetime', 'finalizers': 'list[str]', 'generate_name': 'str', 'generation': 'int', 'labels': 'dict(str, str)', 'managed_fields': 'list[V1ManagedFieldsEntry]', 'name': 'str', 'namespace': 'str', 'owner_references': 'list[V1OwnerReference]', 'resource_version': 'str', 'self_link': 'str', 'uid': 'str' } attribute_map = { 'annotations': 'annotations', 'cluster_name': 'clusterName', 'creation_timestamp': 'creationTimestamp', 'deletion_grace_period_seconds': 'deletionGracePeriodSeconds', 'deletion_timestamp': 'deletionTimestamp', 'finalizers': 'finalizers', 'generate_name': 'generateName', 'generation': 'generation', 'labels': 'labels', 'managed_fields': 'managedFields', 'name': 'name', 'namespace': 'namespace', 'owner_references': 'ownerReferences', 'resource_version': 'resourceVersion', 'self_link': 'selfLink', 'uid': 'uid' } def __init__(self, annotations=None, cluster_name=None, creation_timestamp=None, deletion_grace_period_seconds=None, deletion_timestamp=None, finalizers=None, generate_name=None, generation=None, labels=None, managed_fields=None, name=None, namespace=None, owner_references=None, resource_version=None, self_link=None, uid=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._annotations = None self._cluster_name = None self._creation_timestamp = None self._deletion_grace_period_seconds = None self._deletion_timestamp = None self._finalizers = None self._generate_name = None self._generation = None self._labels = None self._managed_fields = None self._name = None self._namespace = None self._owner_references = None self._resource_version = None self._self_link = None self._uid = None self.discriminator = None if annotations is not None: self.annotations = annotations if cluster_name is not None: self.cluster_name = cluster_name if creation_timestamp is not None: self.creation_timestamp = creation_timestamp if deletion_grace_period_seconds is not None: self.deletion_grace_period_seconds = deletion_grace_period_seconds if deletion_timestamp is not None: self.deletion_timestamp = deletion_timestamp if finalizers is not None: self.finalizers = finalizers if generate_name is not None: self.generate_name = generate_name if generation is not None: self.generation = generation if labels is not None: self.labels = labels if managed_fields is not None: self.managed_fields = managed_fields if name is not None: self.name = name if namespace is not None: self.namespace = namespace if owner_references is not None: self.owner_references = owner_references if resource_version is not None: self.resource_version = resource_version if self_link is not None: self.self_link = self_link if uid is not None: self.uid = uid @property def annotations(self): return self._annotations @annotations.setter def annotations(self, annotations): self._annotations = annotations @property def cluster_name(self): return self._cluster_name @cluster_name.setter def cluster_name(self, cluster_name): 
self._cluster_name = cluster_name @property def creation_timestamp(self): return self._creation_timestamp @creation_timestamp.setter def creation_timestamp(self, creation_timestamp): self._creation_timestamp = creation_timestamp @property def deletion_grace_period_seconds(self): return self._deletion_grace_period_seconds @deletion_grace_period_seconds.setter def deletion_grace_period_seconds(self, deletion_grace_period_seconds): self._deletion_grace_period_seconds = deletion_grace_period_seconds @property def deletion_timestamp(self): return self._deletion_timestamp @deletion_timestamp.setter def deletion_timestamp(self, deletion_timestamp): self._deletion_timestamp = deletion_timestamp @property def finalizers(self): return self._finalizers @finalizers.setter
Apache License 2.0
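A tiny sketch of the `finalizers` setter above in use; the metadata names and finalizer string are placeholders, and the import path is an assumption rather than something stated in the record.

from kubernetes_asyncio.client import V1ObjectMeta  # import path assumed

meta = V1ObjectMeta(name='my-config', namespace='default')  # placeholder names
meta.finalizers = ['example.com/cleanup']  # assignment goes through the setter shown above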
target/grease
tgt_grease/enterprise/Model/KafkaSource.py
KafkaSource.get_configs
python
def get_configs(self):
    self.ioc.getLogger().info("Kafka configs loaded")
    return self.conf.get_source('kafka')
Gets all Configs with the source 'kafka'

Returns:
    list[dict]: A list of all kafka config dicts
https://github.com/target/grease/blob/7ebf3df71d5c80a8ed9df44d9b64b735a9d0f899/tgt_grease/enterprise/Model/KafkaSource.py#L362-L370
import json from time import time import threading from multiprocessing import Pipe import kafka from kafka import KafkaConsumer from tgt_grease.core import GreaseContainer from tgt_grease.enterprise.Model.CentralScheduling import Scheduling from .Configuration import PrototypeConfig MIN_BACKLOG = 50 MAX_BACKLOG = 200 SLEEP_TIME = 5 MAX_CONSUMERS = 32 class KafkaSource(object): def __init__(self, ioc=None): if ioc and isinstance(ioc, GreaseContainer): self.ioc = ioc else: self.ioc = GreaseContainer() self.conf = PrototypeConfig(self.ioc) self.configs = [] def run(self, config=None): if config: self.configs = [config] else: self.configs = self.get_configs() if not self.validate_configs(self.configs): self.ioc.getLogger().error("One or more Kafka Configs are invalid, stopping.") return False threads = [] for conf in self.configs: threads.append(self.create_consumer_manager_thread(conf)) while threads: threads = list(filter(lambda x: x.is_alive(), threads)) self.ioc.getLogger().critical("All Kafka consumer managers have died, stopping.") return False def create_consumer_manager_thread(self, config): KafkaSource.sleep(SLEEP_TIME) thread = threading.Thread(target=KafkaSource.consumer_manager, args=(self.ioc, config,)) thread.daemon = False thread.start() self.ioc.getLogger().info("Kafka consumer manager thread started for config: {0}".format(config.get("name"))) return thread @staticmethod def consumer_manager(ioc, config): monitor_consumer = KafkaSource.create_consumer(ioc, config) threads = [KafkaSource.create_consumer_thread(ioc, config)] while threads: KafkaSource.reallocate_consumers(ioc, config, monitor_consumer, threads) threads = list(filter(lambda x: x[0].is_alive(), threads)) return False @staticmethod def create_consumer_thread(ioc, config): parent_conn, child_conn = Pipe() thread = threading.Thread(target=KafkaSource.consume, args=(ioc, config, child_conn,)) thread.daemon = True thread.start() ioc.getLogger().info("Kafka consumer thread started for config: {0}".format(config.get("name"))) return thread, parent_conn @staticmethod def consume(ioc, config, pipe): consumer = KafkaSource.create_consumer(ioc, config) for msg in consumer: if pipe.poll(): ioc.getLogger().trace("Kill signal received, stopping", trace=True) return False message_dict = KafkaSource.parse_message(ioc, config, msg) if message_dict: KafkaSource.send_to_scheduling(ioc, config, message_dict) return False @staticmethod def sleep(sleep_sec): wake_time = time() + sleep_sec while time() < wake_time: continue @staticmethod def create_consumer(ioc, config): consumer = None while not consumer: try: consumer = KafkaConsumer( group_id=config.get('name'), *config.get('topics'), **{'bootstrap_servers': ",".join(config.get('servers'))} ) except kafka.errors.NoBrokersAvailable: ioc.getLogger().error("No Kafka brokers available for config: {0}, retrying.".format(config.get('name'))) KafkaSource.sleep(SLEEP_TIME) ioc.getLogger().info("Kafka consumer created under group_id: {0}".format(config.get('name'))) KafkaSource.sleep(SLEEP_TIME) return consumer @staticmethod def parse_message(ioc, config, message): try: message = json.loads(message.value, strict=False) ioc.getLogger().trace("Message successfully loaded", trace=True) except ValueError: ioc.getLogger().trace("Failed to unload message", trace=True) return {} final = {} for key, alias in config.get("key_aliases", {}).items(): pointer = message for sub_key in key.split(config.get("key_sep", ".")): if not isinstance(pointer, dict) or sub_key not in pointer: 
ioc.getLogger().trace("Subkey: {0} missing from message".format(sub_key), trace=True) return {} pointer = pointer[sub_key] final[alias] = str(pointer) ioc.getLogger().trace("Message succesfully parsed", trace=True) return final @staticmethod def reallocate_consumers(ioc, config, monitor_consumer, threads): min_backlog = config.get("min_backlog", MIN_BACKLOG) max_backlog = config.get("max_backlog", MAX_BACKLOG) max_consumers = config.get("max_consumers", MAX_CONSUMERS) backlog1 = KafkaSource.get_backlog(ioc, monitor_consumer) KafkaSource.sleep(SLEEP_TIME) backlog2 = KafkaSource.get_backlog(ioc, monitor_consumer) if backlog1 > max_backlog and backlog2 > max_backlog and len(threads) < max_consumers: threads.append(KafkaSource.create_consumer_thread(ioc, config)) ioc.getLogger().info("Backlog max reached, spawning a new consumer for {0}".format(config.get('name')), verbose=True) return 1 elif backlog1 <= min_backlog and backlog2 <= min_backlog and len(threads) > 1: KafkaSource.kill_consumer_thread(ioc, threads[0]) ioc.getLogger().info("Backlog min reached, killing a consumer for {0}".format(config.get('name')), verbose=True) return -1 ioc.getLogger().info("No reallocation needed for {0}".format(config.get('name'))) return 0 @staticmethod def kill_consumer_thread(ioc, thread_tup): thread_tup[1].send("STOP") ioc.getLogger().trace("Kill signal sent to consumer thread", trace=True) KafkaSource.sleep(SLEEP_TIME) @staticmethod def get_backlog(ioc, consumer): if not consumer.assignment(): ioc.getLogger().trace("Assigning consumer to topic", trace=True) consumer.poll() partitions = consumer.assignment() if not partitions: ioc.getLogger().error("No partitions found for kafka consumer") return -1. try: current_offsets = [consumer.position(part) for part in partitions] end_offsets = list(consumer.end_offsets(partitions).values()) except kafka.errors.KafkaTimeoutError: ioc.getLogger().error("KafkaTimeout during backlog check") return -1. except kafka.errors.UnsupportedVersionError: ioc.getLogger().error("This version of kafka does not support backlog lookups") return -1. if not current_offsets or not end_offsets or len(current_offsets) != len(end_offsets): ioc.getLogger().error("Backlog check failed for kafka consumer - invalid offsets") return -1. return float(sum(end_offsets) - sum(current_offsets)) / len(partitions) @staticmethod def send_to_scheduling(ioc, config, message): scheduler = Scheduling(ioc) if not message: return False if scheduler.scheduleDetection(config.get('source'), config.get('name'), message): ioc.getLogger().trace( "Data scheduled for detection from source [{0}]".format(config.get('source')), trace=True ) return True else: ioc.getLogger().error("Scheduling failed for kafka source document!", notify=False) return False
MIT License
magenta/ddsp
ddsp/spectral_ops.py
compute_mfcc
python
def compute_mfcc(audio,
                 lo_hz=20.0,
                 hi_hz=8000.0,
                 fft_size=1024,
                 mel_bins=128,
                 mfcc_bins=13,
                 overlap=0.75,
                 pad_end=True,
                 sample_rate=16000):
    logmel = compute_logmel(
        audio,
        lo_hz=lo_hz,
        hi_hz=hi_hz,
        bins=mel_bins,
        fft_size=fft_size,
        overlap=overlap,
        pad_end=pad_end,
        sample_rate=sample_rate)
    mfccs = tf.signal.mfccs_from_log_mel_spectrograms(logmel)
    return mfccs[..., :mfcc_bins]
Calculate Mel-frequency Cepstral Coefficients.
https://github.com/magenta/ddsp/blob/56266e9c255019df050a3c20255caa2beaa912ac/ddsp/spectral_ops.py#L121-L141
import crepe from ddsp.core import safe_log from ddsp.core import tf_float32 import gin import librosa import numpy as np import tensorflow.compat.v2 as tf CREPE_SAMPLE_RATE = 16000 _CREPE_FRAME_SIZE = 1024 F0_RANGE = 127.0 LD_RANGE = 120.0 def stft(audio, frame_size=2048, overlap=0.75, pad_end=True): assert frame_size * overlap % 2.0 == 0.0 audio = tf_float32(audio) if len(audio.shape) == 3: audio = tf.squeeze(audio, axis=-1) s = tf.signal.stft( signals=audio, frame_length=int(frame_size), frame_step=int(frame_size * (1.0 - overlap)), fft_length=int(frame_size), pad_end=pad_end) return s def stft_np(audio, frame_size=2048, overlap=0.75, pad_end=True): assert frame_size * overlap % 2.0 == 0.0 hop_size = int(frame_size * (1.0 - overlap)) is_2d = (len(audio.shape) == 2) if pad_end: n_samples_initial = int(audio.shape[-1]) n_frames = int(np.ceil(n_samples_initial / hop_size)) n_samples_final = (n_frames - 1) * hop_size + frame_size pad = n_samples_final - n_samples_initial padding = ((0, 0), (0, pad)) if is_2d else ((0, pad),) audio = np.pad(audio, padding, 'constant') def stft_fn(y): return librosa.stft(y=y, n_fft=int(frame_size), hop_length=hop_size, center=False).T s = np.stack([stft_fn(a) for a in audio]) if is_2d else stft_fn(audio) return s @gin.register def compute_mag(audio, size=2048, overlap=0.75, pad_end=True): mag = tf.abs(stft(audio, frame_size=size, overlap=overlap, pad_end=pad_end)) return tf_float32(mag) @gin.register def compute_mel(audio, lo_hz=0.0, hi_hz=8000.0, bins=64, fft_size=2048, overlap=0.75, pad_end=True, sample_rate=16000): mag = compute_mag(audio, fft_size, overlap, pad_end) num_spectrogram_bins = int(mag.shape[-1]) linear_to_mel_matrix = tf.signal.linear_to_mel_weight_matrix( bins, num_spectrogram_bins, sample_rate, lo_hz, hi_hz) mel = tf.tensordot(mag, linear_to_mel_matrix, 1) mel.set_shape(mag.shape[:-1].concatenate(linear_to_mel_matrix.shape[-1:])) return mel @gin.register def compute_logmag(audio, size=2048, overlap=0.75, pad_end=True): return safe_log(compute_mag(audio, size, overlap, pad_end)) @gin.register def compute_logmel(audio, lo_hz=80.0, hi_hz=7600.0, bins=64, fft_size=2048, overlap=0.75, pad_end=True, sample_rate=16000): mel = compute_mel(audio, lo_hz, hi_hz, bins, fft_size, overlap, pad_end, sample_rate) return safe_log(mel) @gin.register
Apache License 2.0
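A small usage sketch for `compute_mfcc` above; the random tensor is a stand-in for real 16 kHz audio, and the function is assumed importable from the ddsp/spectral_ops.py module shown in the record.

import tensorflow.compat.v2 as tf

# One second of fake mono audio at 16 kHz, batch size 2 (stand-in for real data).
audio = tf.random.uniform([2, 16000], minval=-1.0, maxval=1.0)

mfccs = compute_mfcc(audio, mfcc_bins=13)
# Shape [2, n_frames, 13]: 13 cepstral coefficients per STFT frame, where
# n_frames depends on fft_size and overlap (1024-sample frames, 75% overlap here).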
mrgambal/vulyk
vulyk/bootstrap/__init__.py
init_app
python
def init_app(name):
    key = 'app'

    if not hasattr(init_app, key):
        app = flask.Flask(name)
        app.config.from_object('vulyk.settings')

        try:
            app.config.from_object('local_settings')
        except ImportError:
            pass

        app.template_folder = app.config.get('TEMPLATES_FOLDER', 'templates')
        app.static_folder = app.config.get('STATIC_FOLDER', 'static')

        _logging.init_logger(app=app)
        app.logger.info('STARTING.')

        db = MongoEngine(app)
        app.logger.debug('Database is available at %s:%s',
                         app.config['MONGODB_SETTINGS'].get('HOST', 'localhost'),
                         app.config['MONGODB_SETTINGS'].get('PORT', 27017))

        _assets.init(app)
        _social_login.init_social_login(app, db)

        if app.config.get('ENABLE_ADMIN', False):
            from . import _admin
            app.admin = _admin.init_admin(app)

        _blueprints.init_blueprints(app)

        setattr(init_app, key, app)
        app.logger.info('Vulyk bootstrapping complete.')

    return getattr(init_app, key)
:param name: application alias
:type name: str

:return: Bootstrapped cached application instance
:rtype: flask.Flask
https://github.com/mrgambal/vulyk/blob/4ea617bb9a1c4778ce6dfa084c53e2667d037f67/vulyk/bootstrap/__init__.py#L20-L66
import flask
from flask_mongoengine import MongoEngine

from . import _assets, _logging, _social_login, _blueprints
from ._tasks import init_plugins

__all__ = [
    'init_app',
    'init_plugins'
]
BSD 3-Clause New or Revised License
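A short sketch of how the cached `init_app` factory above might be used; the module name passed in is a placeholder assumption, and the caching behaviour follows directly from the `hasattr`/`setattr` logic in the body.

# Assuming init_app is importable from vulyk.bootstrap as shown above.
app = init_app('vulyk.app')       # first call builds and caches the Flask app
same_app = init_app('ignored')    # later calls return the cached instance; name is ignored
assert app is same_app

if __name__ == '__main__':
    app.run()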
dials/dials
algorithms/refinement/parameterisation/scan_varying_prediction_parameters.py
ScanVaryingPredictionParameterisation._get_goniometer_parameterisation
python
def _get_goniometer_parameterisation(self, experiment_id):
    param_set = self._exp_to_param[experiment_id]
    gp = None
    if param_set.gonio_param is not None:
        gp = self._goniometer_parameterisations[param_set.gonio_param]
    return gp
Return the goniometer parameterisation for the requested experiment number (or None if the goniometer in that experiment is not parameterised)
https://github.com/dials/dials/blob/a2cb71bf410e179b92554bcce2e21388e1dc25d1/algorithms/refinement/parameterisation/scan_varying_prediction_parameters.py#L227-L236
import math from collections import namedtuple from scitbx import matrix from dials.algorithms.refinement.parameterisation.prediction_parameters import ( SparseGradientVectorMixin, XYPhiPredictionParameterisation, ) from dials.array_family import flex class StateDerivativeCache: def __init__(self, parameterisations=None): if parameterisations is None: parameterisations = [] self._cache = dict.fromkeys(parameterisations) self._Pair = namedtuple("Pair", ["derivative", "iselection"]) self.clear() self._nref = 0 def build_gradients(self, parameterisation, isel=None, imatch=None): entry = self._cache[parameterisation] shape = None for e in entry: if e: shape = e[0].derivative.n break if shape is None: raise TypeError("No model state derivatives found") if shape == (3, 1): arr_type = flex.vec3_double null = (0, 0, 0) elif shape == (3, 3): arr_type = flex.mat3_double null = (0, 0, 0, 0, 0, 0, 0, 0, 0) else: raise TypeError("Unrecognised model state derivative type") for p_data in entry: ds_dp = arr_type(self._nref, null) for pair in p_data: ds_dp.set_selected(pair.iselection, pair.derivative) if imatch is not None: ds_dp = ds_dp.select(imatch) if isel is not None: ds_dp = ds_dp.select(isel) yield ds_dp def clear(self): for p in self._cache: self._cache[p] = [[] for i in range(p.num_free())] def append(self, parameterisation, iparam, derivative, iselection): l1 = self._cache[parameterisation] l2 = l1[iparam] l2.append(self._Pair(derivative, iselection)) @property def nref(self): return self._nref @nref.setter def nref(self, value): self._nref = value class ScanVaryingPredictionParameterisation(XYPhiPredictionParameterisation): def __init__( self, experiments, detector_parameterisations=None, beam_parameterisations=None, xl_orientation_parameterisations=None, xl_unit_cell_parameterisations=None, goniometer_parameterisations=None, ): if detector_parameterisations is None: detector_parameterisations = [] if beam_parameterisations is None: beam_parameterisations = [] if xl_orientation_parameterisations is None: xl_orientation_parameterisations = [] if xl_unit_cell_parameterisations is None: xl_unit_cell_parameterisations = [] if goniometer_parameterisations is None: goniometer_parameterisations = [] self._varying_detectors = any( hasattr(p, "num_sets") for p in detector_parameterisations ) self._varying_beams = any( hasattr(p, "num_sets") for p in beam_parameterisations ) self._varying_xl_orientations = any( hasattr(p, "num_sets") for p in xl_orientation_parameterisations ) self._varying_xl_unit_cells = any( hasattr(p, "num_sets") for p in xl_unit_cell_parameterisations ) self._varying_goniometers = any( hasattr(p, "num_sets") for p in goniometer_parameterisations ) to_cache = [] if self._varying_detectors: to_cache.extend(detector_parameterisations) if self._varying_beams: to_cache.extend(beam_parameterisations) if self._varying_xl_orientations: to_cache.extend(xl_orientation_parameterisations) if self._varying_xl_unit_cells: to_cache.extend(xl_unit_cell_parameterisations) if self._varying_goniometers: to_cache.extend(goniometer_parameterisations) self._derivative_cache = StateDerivativeCache(to_cache) super().__init__( experiments, detector_parameterisations=detector_parameterisations, beam_parameterisations=beam_parameterisations, xl_orientation_parameterisations=xl_orientation_parameterisations, xl_unit_cell_parameterisations=xl_unit_cell_parameterisations, goniometer_parameterisations=goniometer_parameterisations, ) self.set_scan_varying_errors = False def 
_get_xl_orientation_parameterisation(self, experiment_id): param_set = self._exp_to_param[experiment_id] xl_op = None if param_set.xl_ori_param is not None: xl_op = self._xl_orientation_parameterisations[param_set.xl_ori_param] return xl_op def _get_xl_unit_cell_parameterisation(self, experiment_id): param_set = self._exp_to_param[experiment_id] xl_ucp = None if param_set.xl_uc_param is not None: xl_ucp = self._xl_unit_cell_parameterisations[param_set.xl_uc_param] return xl_ucp def _get_beam_parameterisation(self, experiment_id): param_set = self._exp_to_param[experiment_id] bp = None if param_set.beam_param is not None: bp = self._beam_parameterisations[param_set.beam_param] return bp def _get_detector_parameterisation(self, experiment_id): param_set = self._exp_to_param[experiment_id] dp = None if param_set.det_param is not None: dp = self._detector_parameterisations[param_set.det_param] return dp
BSD 3-Clause New or Revised License
unidata/siphon
siphon/catalog.py
IndexableMapping.__getitem__
python
def __getitem__(self, item):
    try:
        item + ''
        return super(IndexableMapping, self).__getitem__(item)
    except TypeError:
        return list(self.values())[item]
Return an item either by index or name.
https://github.com/unidata/siphon/blob/feb7f60260d157c1c282469f755c3998ef6a6e0f/siphon/catalog.py#L32-L38
from collections import OrderedDict
from datetime import datetime
import logging
import re
import warnings
import xml.etree.ElementTree as ET

try:
    from urlparse import urljoin, urlparse
except ImportError:
    from urllib.parse import urljoin, urlparse

from .http_util import session_manager
from .metadata import TDSCatalogMetadata

logging.basicConfig(level=logging.ERROR)
log = logging.getLogger(__name__)


class IndexableMapping(OrderedDict):
BSD 3-Clause New or Revised License
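A tiny sketch of the dual lookup behaviour of `IndexableMapping.__getitem__` above, using made-up keys and values.

# Made-up dataset names, just to show name- and position-based access.
catalogs = IndexableMapping([('GFS', 'gfs-dataset'), ('NAM', 'nam-dataset')])

catalogs['GFS']   # string key: `item + ''` succeeds, normal dict lookup -> 'gfs-dataset'
catalogs[1]       # integer index: `1 + ''` raises TypeError -> list(values())[1] -> 'nam-dataset'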
henniggroup/gasp-python
gasp/development.py
RedundancyGuard.set_all_to_defaults
python
def set_all_to_defaults(self):
    self.lattice_length_tol = self.default_lattice_length_tol
    self.lattice_angle_tol = self.default_lattice_angle_tol
    self.site_tol = self.default_site_tol
    self.use_primitive_cell = self.default_use_primitive_cell
    self.attempt_supercell = self.default_attempt_supercell
    self.rmsd_tol = self.default_rmsd_tol
    self.epa_diff = self.default_epa_diff
Sets all the redundancy parameters to default values.
https://github.com/henniggroup/gasp-python/blob/0c8d993c82e0e1c69a05b3c34bbb2fcbbdbb7f07/gasp/development.py#L882-L893
from __future__ import division, unicode_literals, print_function from pymatgen.core.composition import Composition from pymatgen.core.periodic_table import Element, DummySpecie from pymatgen.core.structure import Molecule from pymatgen.analysis.phase_diagram import CompoundPhaseDiagram from pymatgen.analysis.phase_diagram import PDEntry from pymatgen.analysis.structure_matcher import StructureMatcher from pymatgen.analysis.molecule_matcher import IsomorphismMolAtomMapper, MoleculeMatcher from pymatgen.analysis.structure_matcher import ElementComparator try: import openbabel as ob except ImportError: ob = None import warnings import math class Constraints(object): def __init__(self, constraints_parameters, composition_space): if composition_space.objective_function == 'epa': self.default_min_num_atoms = max( 2, int(composition_space.endpoints[0].num_atoms)) else: self.default_min_num_atoms = 2 self.default_max_num_atoms = 30 self.default_min_lattice_length = 0.5 self.default_max_lattice_length = 20 self.default_min_lattice_angle = 40 self.default_max_lattice_angle = 140 self.default_allow_endpoints = True self.default_mid_factor = 0.6 if constraints_parameters in (None, 'default'): self.set_all_to_defaults(composition_space) else: if 'min_num_atoms' not in constraints_parameters: self.min_num_atoms = self.default_min_num_atoms elif constraints_parameters['min_num_atoms'] in (None, 'default'): self.min_num_atoms = self.default_min_num_atoms else: self.min_num_atoms = constraints_parameters['min_num_atoms'] if 'max_num_atoms' not in constraints_parameters: self.max_num_atoms = self.default_max_num_atoms elif constraints_parameters['max_num_atoms'] in (None, 'default'): self.max_num_atoms = self.default_max_num_atoms else: self.max_num_atoms = constraints_parameters['max_num_atoms'] if 'min_lattice_length' not in constraints_parameters: self.min_lattice_length = self.default_min_lattice_length elif constraints_parameters['min_lattice_length'] in (None, 'default'): self.min_lattice_length = self.default_min_lattice_length else: self.min_lattice_length = constraints_parameters[ 'min_lattice_length'] if 'max_lattice_length' not in constraints_parameters: self.max_lattice_length = self.default_max_lattice_length elif constraints_parameters['max_lattice_length'] in (None, 'default'): self.max_lattice_length = self.default_max_lattice_length else: self.max_lattice_length = constraints_parameters['max_lattice_length'] if 'min_lattice_angle' not in constraints_parameters: self.min_lattice_angle = self.default_min_lattice_angle elif constraints_parameters['min_lattice_angle'] in (None, 'default'): self.min_lattice_angle = self.default_min_lattice_angle else: self.min_lattice_angle = constraints_parameters['min_lattice_angle'] if 'max_lattice_angle' not in constraints_parameters: self.max_lattice_angle = self.default_max_lattice_angle elif constraints_parameters['max_lattice_angle'] in (None, 'default'): self.max_lattice_angle = self.default_max_lattice_angle else: self.max_lattice_angle = constraints_parameters[ 'max_lattice_angle'] if 'allow_endpoints' not in constraints_parameters: self.allow_endpoints = self.default_allow_endpoints elif constraints_parameters['allow_endpoints'] in (None, 'default'): self.allow_endpoints = self.default_allow_endpoints else: self.allow_endpoints = constraints_parameters[ 'allow_endpoints'] if 'per_species_mids' not in constraints_parameters: self.set_all_mids_to_defaults(composition_space) elif constraints_parameters['per_species_mids'] in (None, 'default'): 
self.set_all_mids_to_defaults(composition_space) else: self.per_species_mids = constraints_parameters[ 'per_species_mids'] for key in self.per_species_mids: if self.per_species_mids[key] in (None, 'default'): elements = key.split() radius1 = Element(elements[0]).atomic_radius radius2 = Element(elements[1]).atomic_radius self.per_species_mids[key] = self.default_mid_factor*( radius1 + radius2) self.set_some_mids_to_defaults(composition_space) self.check_num_atoms_range(composition_space) def set_all_to_defaults(self, composition_space): self.min_num_atoms = self.default_min_num_atoms self.max_num_atoms = self.default_max_num_atoms self.min_lattice_length = self.default_min_lattice_length self.max_lattice_length = self.default_max_lattice_length self.min_lattice_angle = self.default_min_lattice_angle self.max_lattice_angle = self.default_max_lattice_angle self.allow_endpoints = self.default_allow_endpoints self.set_all_mids_to_defaults(composition_space) def set_all_mids_to_defaults(self, composition_space): elements = composition_space.get_all_elements() self.per_species_mids = {} for i in range(0, len(elements)): for j in range(i, len(elements)): self.per_species_mids[ str(elements[i].symbol + " " + elements[j].symbol) ] = self.default_mid_factor*(elements[i].atomic_radius + elements[j].atomic_radius) def set_some_mids_to_defaults(self, composition_space): elements = composition_space.get_all_elements() missing_pairs = [] for i in range(0, len(elements)): for j in range(i, len(elements)): test_key1 = elements[i].symbol + " " + elements[j].symbol test_key2 = elements[j].symbol + " " + elements[i].symbol if test_key1 not in self.per_species_mids and test_key2 not in self.per_species_mids: missing_pairs.append(test_key1) for pair in missing_pairs: p = pair.split() self.per_species_mids[str(pair)] = self.default_mid_factor*( Element(p[0]).atomic_radius + Element(p[1]).atomic_radius) def get_max_mid(self): max_mid = 0 for key in self.per_species_mids: if self.per_species_mids[key] > max_mid: max_mid = self.per_species_mids[key] return max_mid def check_num_atoms_range(self, composition_space): if len(composition_space.endpoints) == 1: atoms_per_comp = composition_space.endpoints[0].reduced_composition.num_atoms bottom = int(math.ceil(self.min_num_atoms/atoms_per_comp)) top = int(math.floor(self.max_num_atoms/atoms_per_comp)) if top < bottom: print('The range defined by the minimum and maximum number of ' 'atoms constraints does not contain an integer multiple ' 'of the number of atoms in the specified composition.') print('Please use the "min_num_atoms" and "max_num_atoms" ' 'keywords in the Constraints block to set a valid range ' 'for the allowed number of atoms.') print('Quitting...') quit() class Developer(object): def __init__(self, developer_parameters, geometry): self.default_niggli = True if geometry.shape == 'bulk': self.default_scale_density = True else: self.default_scale_density = False if developer_parameters in (None, 'default'): self.niggli = self.default_niggli self.scale_density = self.default_scale_density else: if 'niggli' not in developer_parameters: self.niggli = self.default_niggli elif developer_parameters['niggli'] in (None, 'default'): self.niggli = self.default_niggli else: self.niggli = developer_parameters['niggli'] if 'scale_density' not in developer_parameters: self.scale_density = self.default_scale_density elif developer_parameters['scale_density'] in (None, 'default'): self.scale_density = self.default_scale_density else: self.scale_density = 
developer_parameters['scale_density'] def develop(self, organism, composition_space, constraints, geometry, pool): if not self.satisfies_num_atoms_constraints(organism, constraints): return False if not self.is_in_composition_space(organism, composition_space, constraints, pool): return False if self.niggli: if not self.niggli_reduction(organism, geometry, constraints): return False if self.scale_density and len( pool.promotion_set) > 0 and organism.epa is None: if not self.scale_volume(organism, composition_space, pool): return False if not self.satisfies_lattice_constraints(organism, constraints): return False if not self.satisfies_mids_constraints(organism, constraints): return False if not self.satisfies_geometry_constraints(organism, geometry): return False return True def satisfies_num_atoms_constraints(self, organism, constraints): if len(organism.cell.sites) > constraints.max_num_atoms: print("Organism {} failed max number of atoms constraint ".format( organism.id)) return False if len(organism.cell.sites) < constraints.min_num_atoms: print("Organism {} failed min number of atoms constraint ".format( organism.id)) return False return True def is_in_composition_space(self, organism, composition_space, constraints, pool): if composition_space.objective_function == 'epa': return self.is_in_composition_space_epa(organism, composition_space) elif composition_space.objective_function == 'pd': return self.is_in_composition_space_pd(organism, composition_space, constraints, pool) def is_in_composition_space_epa(self, organism, composition_space): reduced_composition = composition_space.endpoints[ 0].reduced_composition org_reduced_composition = organism.composition.reduced_composition if not reduced_composition.almost_equals(org_reduced_composition): print("Organism {} has incorrect composition ".format(organism.id)) return False return True def is_in_composition_space_pd(self, organism, composition_space, constraints, pool): pdentries = [] for endpoint in composition_space.endpoints: pdentries.append(PDEntry(endpoint, -10)) pdentries.append(PDEntry(organism.composition, -10)) composition_checker = CompoundPhaseDiagram( pdentries, composition_space.endpoints) if len(composition_checker.transform_entries( pdentries, composition_space.endpoints)[0]) == len( composition_space.endpoints): print('Organism {} lies outside the composition space '.format( organism.id)) return False if not constraints.allow_endpoints and len(pool.to_list()) > 0: for endpoint in composition_space.endpoints: if endpoint.almost_equals( organism.composition.reduced_composition): print('Organism {} is at a composition space ' 'endpoint '.format(organism.id)) return False return True def niggli_reduction(self, organism, geometry, constraints): if geometry.shape == 'bulk': if not organism.cell.reduce_cell(): print('Niggli cell reduction failed on organism {} during ' 'development '.format(organism.id)) return False elif geometry.shape == 'sheet': if not organism.cell.reduce_sheet_cell(geometry, constraints): print('2D Niggli cell reduction failed on organism {} ' 'during development '.format(organism.id)) return False return True def scale_volume(self, organism, composition_space, pool): if composition_space.objective_function == 'epa': return self.scale_volume_epa(organism, pool) elif composition_space.objective_function == 'pd': return self.scale_volume_pd(organism, composition_space, pool) def scale_volume_epa(self, organism, pool): vpa_sum = 0 for org in pool.promotion_set: vpa_sum += 
org.cell.volume/len(org.cell.sites) vpa_mean = vpa_sum/len(pool.promotion_set) num_atoms = len(organism.cell.sites) new_vol = vpa_mean*num_atoms with warnings.catch_warnings(): warnings.simplefilter('ignore') organism.cell.scale_lattice(new_vol) if str(organism.cell.lattice.a) == 'nan' or organism.cell.lattice.a > 100: print('Volume scaling failed on organism {} during ' 'development '.format(organism.id)) return False return True def scale_volume_pd(self, organism, composition_space, pool): pdentries = [] for org in pool.promotion_set: pdentries.append(PDEntry(org.composition, org.total_energy)) compound_pd = CompoundPhaseDiagram(pdentries, composition_space.endpoints) transformed_entry = compound_pd.transform_entries( [PDEntry(organism.composition, 10)], composition_space.endpoints) transformed_list = str(transformed_entry[0][0]).split() del transformed_list[0] popped = '' while popped != 'with': popped = transformed_list.pop() symbols = [] amounts = [] for entry in transformed_list: split_entry = entry.split('0+') symbols.append(split_entry[0]) amounts.append(float(split_entry[1])) dummy_species_amounts = {} for i in range(len(symbols)): dummy_species_amounts[DummySpecie(symbol=symbols[i])] = amounts[i] dummy_comp = Composition(dummy_species_amounts) decomp = compound_pd.get_decomposition(dummy_comp) fractions = [] comps = [] for item in decomp: fractions.append(decomp[item]) first_split = str(item).split(',') second_split = first_split[0].split() while second_split[0] != 'composition': del second_split[0] del second_split[0] comp_string = '' for symbol in second_split: comp_string += str(symbol) comps.append(Composition(comp_string)) vpa_mean = 0 for i in range(len(comps)): for org in pool.promotion_set: if (comps[i].reduced_composition).almost_equals( org.composition.reduced_composition): vpa_mean += (org.cell.volume/len( org.cell.sites))*fractions[i] num_atoms = len(organism.cell.sites) new_vol = vpa_mean*num_atoms with warnings.catch_warnings(): warnings.simplefilter('ignore') organism.cell.scale_lattice(new_vol) if str(organism.cell.lattice.a) == 'nan' or organism.cell.lattice.a > 100: print('Volume scaling failed on organism {} during ' 'development '.format(organism.id)) return False return True def satisfies_lattice_constraints(self, organism, constraints): lengths = organism.cell.lattice.abc for length in lengths: if length > constraints.max_lattice_length: print('Organism {} failed max lattice length ' 'constraint '.format(organism.id)) return False elif length < constraints.min_lattice_length: print('Organism {} failed min lattice length ' 'constraint '.format(organism.id)) return False angles = organism.cell.lattice.angles for angle in angles: if angle > constraints.max_lattice_angle: print('Organism {} failed max lattice angle ' 'constraint '.format(organism.id)) return False elif angle < constraints.min_lattice_angle: print('Organism {} failed min lattice angle ' 'constraint '.format(organism.id)) return False return True def satisfies_mids_constraints(self, organism, constraints): organism.cell.merge_sites(mode='delete') species_symbols = organism.cell.symbol_set for site in organism.cell.sites: for species_symbol in species_symbols: test_key1 = species_symbol + " " + site.specie.symbol test_key2 = site.specie.symbol + " " + species_symbol if test_key1 in constraints.per_species_mids: mid = constraints.per_species_mids[test_key1] elif test_key2 in constraints.per_species_mids: mid = constraints.per_species_mids[test_key2] neighbors = organism.cell.get_neighbors(site, mid) 
for neighbor in neighbors: if neighbor[0].specie.symbol == species_symbol: print('Organism {} failed per-species minimum ' 'interatomic distance constraint '.format( organism.id)) return False return True def satisfies_geometry_constraints(self, organism, geometry): if geometry.get_size(organism.cell) > geometry.max_size: print("Organism {} failed max size constraint ".format( organism.id)) return False if geometry.get_size(organism.cell) < geometry.min_size: print("Organism {} failed min size constraint ".format( organism.id)) return False return True class RedundancyGuard(object): def __init__(self, redundancy_parameters, geometry): self.default_lattice_length_tol = 0.05 self.default_lattice_angle_tol = 2 self.default_site_tol = 0.1 self.default_use_primitive_cell = True self.default_attempt_supercell = True self.default_rmsd_tol = 0.1 self.default_epa_diff = 0.0 if redundancy_parameters in (None, 'default'): self.set_all_to_defaults() else: if 'lattice_length_tol' not in redundancy_parameters: self.lattice_length_tol = self.default_lattice_length_tol elif redundancy_parameters['lattice_length_tol'] in (None, 'default'): self.lattice_length_tol = self.default_lattice_length_tol else: self.lattice_length_tol = redundancy_parameters[ 'lattice_length_tol'] if 'lattice_angle_tol' not in redundancy_parameters: self.lattice_angle_tol = self.default_lattice_angle_tol elif redundancy_parameters['lattice_angle_tol'] in (None, 'default'): self.lattice_angle_tol = self.default_lattice_angle_tol else: self.lattice_angle_tol = redundancy_parameters[ 'lattice_angle_tol'] if 'site_tol' not in redundancy_parameters: self.site_tol = self.default_site_tol elif redundancy_parameters['site_tol'] in (None, 'default'): self.site_tol = self.default_site_tol else: self.site_tol = redundancy_parameters['site_tol'] if 'use_primitive_cell' not in redundancy_parameters: self.use_primitive_cell = self.default_use_primitive_cell elif redundancy_parameters['use_primitive_cell'] in (None, 'default'): self.use_primitive_cell = self.default_use_primitive_cell else: self.use_primitive_cell = redundancy_parameters[ 'use_primitive_cell'] if 'attempt_supercell' not in redundancy_parameters: self.attempt_supercell = self.default_attempt_supercell elif redundancy_parameters['attempt_supercell'] in (None, 'default'): self.attempt_supercell = self.default_attempt_supercell else: self.attempt_supercell = redundancy_parameters[ 'attempt_supercell'] if 'rmsd_tol' not in redundancy_parameters: self.rmsd_tol = self.default_rmsd_tol elif redundancy_parameters['rmsd_tol'] in (None, 'default'): self.rmsd_tol = self.default_rmsd_tol else: self.rmsd_tol = redundancy_parameters['rmsd_tol'] if 'epa_diff' not in redundancy_parameters: self.epa_diff = self.default_epa_diff elif redundancy_parameters['epa_diff'] in (None, 'default'): self.epa_diff = self.default_epa_diff else: self.epa_diff = redundancy_parameters['epa_diff'] self.structure_matcher = StructureMatcher( self.lattice_length_tol, self.site_tol, self.lattice_angle_tol, self.use_primitive_cell, False, self.attempt_supercell, False, ElementComparator()) if geometry.shape == 'cluster' or geometry.shape == 'wire': iso_mol_atom_mapper = IsomorphismMolAtomMapper() self.molecule_matcher = MoleculeMatcher(self.rmsd_tol, iso_mol_atom_mapper) ob.obErrorLog.SetOutputLevel(0)
MIT License
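The scale_volume logic in the record above rescales a candidate cell to the mean volume per atom of the promotion set. Below is a minimal sketch of that calculation in plain Python, independent of GASP and pymatgen; the pool_cells structure is a hypothetical stand-in for the pool, not part of the code above.

def target_volume(pool_cells, num_atoms_in_candidate):
    # Mean volume per atom over the reference pool, then scaled to the
    # candidate's atom count -- mirrors vpa_mean * num_atoms above.
    vpa_mean = sum(vol / n_atoms for vol, n_atoms in pool_cells) / len(pool_cells)
    return vpa_mean * num_atoms_in_candidate

# Example: three relaxed cells given as (volume, number of atoms).
pool_cells = [(120.0, 8), (185.0, 12), (240.0, 16)]
print(target_volume(pool_cells, 10))  # target volume for a 10-atom candidate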
sgherbst/msdsl
msdsl/expr/expr.py
concatenate
python
def concatenate(operands):
    operands = wrap_constants(operands)
    format_cls = get_highest_format_cls(operands)
    assert issubclass(format_cls, UIntFormat)
    if len(operands) == 0:
        raise ValueError('Concatenation requires at least one operand.')
    elif len(operands) == 1:
        return operands[0]
    else:
        return Concatenate(operands)
Concatenate a list of operands given in *operands*.

:param operands: Operands that shall be concatenated
:return: All operands concatenated
https://github.com/sgherbst/msdsl/blob/e38d5ecdb88b3574bda62f22a4f91ce3e4173d12/msdsl/expr/expr.py#L562-L583
from functools import reduce from typing import List, Tuple from numbers import Number, Integral, Real from math import floor, ceil from copy import deepcopy from msdsl.expr.format import RealFormat, SIntFormat, UIntFormat, Format, IntFormat def wrap_constant(operand): if isinstance(operand, Integral): if operand < 0: return SIntConstant(operand) else: return UIntConstant(operand) elif isinstance(operand, Real): return RealConstant(operand) else: return operand def wrap_constants(operands): return [wrap_constant(operand) for operand in operands] def get_highest_format_cls(operands): if any(isinstance(operand.format_, RealFormat) for operand in operands): return RealFormat elif any(isinstance(operand.format_, SIntFormat) for operand in operands): return SIntFormat elif any(isinstance(operand.format_, UIntFormat) for operand in operands): return UIntFormat else: raise Exception('Cannot determine highest format class.') def promote_operand(operand, promoted_cls): if issubclass(promoted_cls, UIntFormat): return to_uint(operand) elif issubclass(promoted_cls, SIntFormat): return to_sint(operand) elif issubclass(promoted_cls, RealFormat): return to_real(operand) else: raise Exception('Unknown format type: ' + promoted_cls.__name__) def promote_operands(operands, promoted_cls): return [promote_operand(operand=operand, promoted_cls=promoted_cls) for operand in operands] class ModelExpr: def __init__(self, format_): self.format_ = format_ def __add__(self, other): return sum_op([self, other]) def __mul__(self, other): return prod_op([self, other]) def __sub__(self, other): return self.__add__(-other) def __truediv__(self, other): if isinstance(other, Number): return (1.0/other)*self else: raise NotImplementedError def __radd__(self, other): return self.__add__(other) def __rmul__(self, other): return self.__mul__(other) def __rsub__(self, other): return (-self).__add__(other) def __neg__(self): return -1*self def __pos__(self): return self def __invert__(self): return BitwiseInv(self) def __and__(self, other): return BitwiseAnd([self, other]) def __or__(self, other): return BitwiseOr([self, other]) def __xor__(self, other): return BitwiseXor([self, other]) def __rshift__(self, other): return ArithmeticRightShift(self, other) def __lshift__(self, other): return ArithmeticLeftShift(self, other) def __getitem__(self, item): return BitwiseAccess(self, item) def __le__(self, other): return LessThanOrEquals(self, other) def __lt__(self, other): return LessThan(self, other) def __ge__(self, other): return GreaterThanOrEquals(self, other) def __gt__(self, other): return GreaterThan(self, other) def __eq__(self, other): return EqualTo(self, other) def __ne__(self, other): return NotEqualTo(self, other) class ModelOperator(ModelExpr): def __init__(self, operands: List, format_): super().__init__(format_=format_) self.operands = wrap_constants(operands) class UnaryOperator(ModelOperator): def __init__(self, operand, format_): super().__init__(operands=[operand], format_=format_) @property def operand(self): return self.operands[0] class BinaryOperator(ModelOperator): def __init__(self, lhs, rhs, format_): super().__init__(operands=[lhs, rhs], format_=format_) @property def lhs(self): return self.operands[0] @property def rhs(self): return self.operands[1] class ComparisonOperator(BinaryOperator): comp_op = None def __init__(self, lhs, rhs): lhs = wrap_constant(lhs) rhs = wrap_constant(rhs) format_cls = get_highest_format_cls([lhs, rhs]) lhs, rhs = promote_operands([lhs, rhs], format_cls) 
super().__init__(lhs=lhs, rhs=rhs, format_=UIntFormat(width=1)) def __str__(self): return f'{self.lhs} {self.comp_op} {self.rhs}' class ArithmeticOperator(ModelOperator): initial = None def __init__(self, operands): format_ = reduce(self.function, [operand.format_ for operand in operands]) super().__init__(operands=operands, format_=format_) @classmethod def function(cls, a, b): raise NotImplementedError @classmethod def merge_with_same_operator(cls, operands): new_operands = [] for operand in operands: if isinstance(operand, cls): new_operands.extend(operand.operands) else: new_operands.append(operand) return new_operands @classmethod def merge_constants(cls, operands, format_cls): new_operands = [] const_term = cls.initial for operand in operands: if isinstance(operand, Constant): const_term = cls.function(const_term, operand.value) else: new_operands.append(operand) if const_term != cls.initial: new_operands.append(Constant(value=const_term, format_=format_cls.from_value(const_term))) return new_operands @classmethod def flatten(cls, operands): if len(operands) == 0: return wrap_constant(cls.initial) elif len(operands) == 1: return operands[0] else: return cls(operands) class BitwiseOperator(ModelOperator): def __init__(self, operands): operands = wrap_constants(operands) assert all(isinstance(operand.format_, UIntFormat) for operand in operands), 'Bitwise operations only currently support unsigned operands.' width = max(operand.format_.width for operand in operands) super().__init__(operands=operands, format_=UIntFormat(width=width)) def sum_op(operands): operands = list(operands) if len(operands) == 0: operands = [0] operands = wrap_constants(operands) format_cls = get_highest_format_cls(operands) operands = promote_operands(operands, format_cls) operands = Sum.merge_with_same_operator(operands) operands = Sum.merge_constants(operands, format_cls) return Sum.flatten(operands) class Sum(ArithmeticOperator): initial = 0 @classmethod def function(cls, a, b): return a + b def __str__(self): return '(' + '+'.join(str(operand) for operand in self.operands) + ')' def prod_op(operands): operands = wrap_constants(operands) format_cls = get_highest_format_cls(operands) operands = promote_operands(operands, format_cls) operands = Product.merge_with_same_operator(operands) operands = Product.merge_constants(operands, format_cls) operands = Product.check_for_zero(operands) return Product.flatten(operands) class Product(ArithmeticOperator): initial = 1 @classmethod def function(cls, a, b): return a * b @classmethod def check_for_zero(cls, operands): if any(((isinstance(operand, Constant) and operand.value == 0) or (isinstance(operand, Array) and operand.all_zeros)) for operand in operands): return [wrap_constant(0)] else: return operands def __str__(self): return '(' + '*'.join(str(operand) for operand in self.operands) + ')' def min_op(operands): operands = wrap_constants(operands) format_cls = get_highest_format_cls(operands) operands = promote_operands(operands, format_cls) operands = Min.merge_with_same_operator(operands) operands = Min.merge_constants(operands, format_cls) return Min.flatten(operands) class Min(ArithmeticOperator): initial = +float('inf') @classmethod def function(cls, a, b): if isinstance(a, Format) and isinstance(b, Format): return a.min_with(b) elif isinstance(a, Number) and isinstance(b, Number): return min(a, b) else: raise Exception('Min can only be applied to numbers and Format operands at this time.') def __str__(self): return 'min(' + ', '.join(str(operand) for 
operand in self.operands) + ')' def max_op(operands): operands = wrap_constants(operands) format_cls = get_highest_format_cls(operands) operands = promote_operands(operands, format_cls) operands = Max.merge_with_same_operator(operands) operands = Max.merge_constants(operands, format_cls) return Max.flatten(operands) class Max(ArithmeticOperator): initial = -float('inf') @classmethod def function(cls, a, b): if isinstance(a, Format) and isinstance(b, Format): return a.max_with(b) elif isinstance(a, Number) and isinstance(b, Number): return max(a, b) else: raise Exception('Max can only be applied to numbers and Format operands at this time.') def __str__(self): return 'max(' + ', '.join(str(operand) for operand in self.operands) + ')' class BitwiseAnd(BitwiseOperator): def __str__(self): return '(' + '&'.join(str(operand) for operand in self.operands) + ')' class BitwiseOr(BitwiseOperator): def __str__(self): return '(' + '|'.join(str(operand) for operand in self.operands) + ')' class BitwiseXor(BitwiseOperator): def __str__(self): return '(' + '^'.join(str(operand) for operand in self.operands) + ')' class BitwiseInv(UnaryOperator): def __init__(self, operand): operand = wrap_constant(operand) assert isinstance(operand.format_, UIntFormat), 'Bitwise inversion only currently support unsigned operands.' super().__init__(operand=operand, format_=operand.format_) def __str__(self): return f'(~{self.operand})' class ArithmeticShift(UnaryOperator): shift_op = None @classmethod def function(cls, operand, shift: Integral): raise NotImplementedError @classmethod def compute_output_width(cls, in_format: IntFormat, shift: Integral): raise NotImplementedError def __init__(self, operand, shift: Integral): operand = wrap_constant(operand) assert isinstance(operand.format_, (UIntFormat, SIntFormat)), f'{self.__class__.__name__} only supports integer operands.' width = self.compute_output_width(in_format=operand.format_, shift=shift) min_val = self.function(operand=operand.format_.min_val, shift=shift) max_val = self.function(operand=operand.format_.max_val, shift=shift) if isinstance(operand.format_, UIntFormat): format_ = UIntFormat(width=width, min_val=min_val, max_val=max_val) elif isinstance(operand.format_, SIntFormat): format_ = SIntFormat(width=width, min_val=min_val, max_val=max_val) else: raise Exception('Unknown format type.') self.shift = shift super().__init__(operand=operand, format_=format_) def __str__(self): return f'({self.operand}{self.shift_op}{self.shift})' class ArithmeticLeftShift(ArithmeticShift): shift_op = '<<<' @classmethod def function(cls, operand, shift: Integral): return (operand << shift) @classmethod def compute_output_width(cls, in_format: IntFormat, shift: Integral): return (in_format.width + shift) class ArithmeticRightShift(ArithmeticShift): shift_op = '>>>' @classmethod def function(cls, operand, shift: Integral): return (operand >> shift) @classmethod def compute_output_width(cls, in_format: IntFormat, shift: Integral): return max(in_format.width - shift, 1) class BitwiseAccess(UnaryOperator): def __init__(self, operand, key): operand = wrap_constant(operand) assert isinstance(operand.format_, (UIntFormat, SIntFormat)), f'{self.__class__.__name__} only supports integer operands.' if isinstance(key, Integral): msb = key lsb = key elif isinstance(key, slice): msb = key.start lsb = key.stop else: raise Exception(f'Unknown indexing type: {key.__class__.__name__}') assert isinstance(msb, Integral), 'MSB must be an integer.' 
assert 0 <= msb < operand.format_.width, f'MSB value out of range: {msb} (input width is {operand.format_.width})' assert isinstance(lsb, Integral), 'LSB must be an integer.' assert 0 <= lsb < operand.format_.width, f'LSB value out of range: {lsb} (input width is {operand.format_.width})' assert lsb <= msb, 'LSB must be less than or equal to MSB.' width = msb - lsb + 1 if isinstance(operand.format_, UIntFormat): format_ = UIntFormat(width=width) elif isinstance(operand.format_, SIntFormat): format_ = SIntFormat(width=width) else: raise Exception('Unknown format type.') self.msb = msb self.lsb = lsb super().__init__(operand=operand, format_=format_) def __str__(self): return f'({self.operand}[{self.msb}:{self.lsb}])' class LessThan(ComparisonOperator): comp_op = '<' class LessThanOrEquals(ComparisonOperator): comp_op = '<=' class GreaterThan(ComparisonOperator): comp_op = '>' class GreaterThanOrEquals(ComparisonOperator): comp_op = '>=' class EqualTo(ComparisonOperator): comp_op = '==' class NotEqualTo(ComparisonOperator): comp_op = '!='
MIT License
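A minimal usage sketch for concatenate, assuming msdsl is installed and that wrap_constant and concatenate are importable from msdsl.expr.expr as the record above suggests; the behavior of the UIntConstant and Concatenate classes is inferred from that context rather than shown here.

from msdsl.expr.expr import concatenate, wrap_constant

a = wrap_constant(5)  # non-negative integer -> unsigned constant (UIntFormat)
b = wrap_constant(3)

single = concatenate([a])     # a single operand is returned unchanged
joined = concatenate([a, b])  # two or more operands -> a Concatenate node
print(type(joined).__name__)  # expected (per the source above): 'Concatenate'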
pabigot/pyxb
pyxb/namespace/resolution.py
_NamespaceResolution_mixin._replaceComponent_csc
python
def _replaceComponent_csc (self, existing_def, replacement_def):
    try:
        index = self.__unresolvedComponents.index(existing_def)
        if (replacement_def is None) or (replacement_def in self.__unresolvedComponents):
            del self.__unresolvedComponents[index]
        else:
            assert isinstance(replacement_def, _Resolvable_mixin)
            self.__unresolvedComponents[index] = replacement_def
        if existing_def in self.__unresolvedDependents:
            del self.__unresolvedDependents[existing_def]
    except ValueError:
        pass
    return getattr(super(_NamespaceResolution_mixin, self), '_replaceComponent_csc',
                   lambda *args, **kw: replacement_def)(existing_def, replacement_def)
Replace a component definition if present in the list of unresolved components.
https://github.com/pabigot/pyxb/blob/14737c23a125fd12c954823ad64fc4497816fae3/pyxb/namespace/resolution.py#L176-L193
import logging import pyxb import pyxb.utils.utility from pyxb.namespace import archive, utility from pyxb.utils import six _log = logging.getLogger(__name__) class _Resolvable_mixin (pyxb.cscRoot): _TraceResolution = False def isResolved (self): raise NotImplementedError("_Resolvable_mixin.isResolved in %s"% (type(self).__name__,)) def _resolve (self): raise NotImplementedError("_Resolvable_mixin._resolve in %s"% (type(self).__name__,)) def _queueForResolution (self, why=None, depends_on=None): if (why is not None) and self._TraceResolution: _log.info('Resolution delayed for %s: %s\n\tDepends on: %s', self, why, depends_on) self._namespaceContext().queueForResolution(self, depends_on) class _NamespaceResolution_mixin (pyxb.cscRoot): __importedNamespaces = None __referencedNamespaces = None __unresolvedComponents = None __unresolvedDependents = None def _reset (self): getattr(super(_NamespaceResolution_mixin, self), '_reset', lambda *args, **kw: None)() self.__unresolvedComponents = [] self.__unresolvedDependents = {} self.__importedNamespaces = set() self.__referencedNamespaces = set() def _getState_csc (self, kw): kw.update({ 'importedNamespaces': self.__importedNamespaces, 'referencedNamespaces': self.__referencedNamespaces, }) return getattr(super(_NamespaceResolution_mixin, self), '_getState_csc', lambda _kw: _kw)(kw) def _setState_csc (self, kw): self.__importedNamespaces = kw['importedNamespaces'] self.__referencedNamespaces = kw['referencedNamespaces'] return getattr(super(_NamespaceResolution_mixin, self), '_setState_csc', lambda _kw: self)(kw) def importNamespace (self, namespace): self.__importedNamespaces.add(namespace) return self def _referenceNamespace (self, namespace): self._activate() self.__referencedNamespaces.add(namespace) return self def importedNamespaces (self): return frozenset(self.__importedNamespaces) def _transferReferencedNamespaces (self, module_record): assert isinstance(module_record, archive.ModuleRecord) module_record._setReferencedNamespaces(self.__referencedNamespaces) self.__referencedNamespaces.clear() def referencedNamespaces (self): return frozenset(self.__referencedNamespaces) def queueForResolution (self, resolvable, depends_on=None): assert isinstance(resolvable, _Resolvable_mixin) if not resolvable.isResolved(): assert depends_on is None or isinstance(depends_on, _Resolvable_mixin) self.__unresolvedComponents.append(resolvable) if depends_on is not None and not depends_on.isResolved(): from pyxb.xmlschema import structures assert isinstance(depends_on, _Resolvable_mixin) assert isinstance(depends_on, structures._NamedComponent_mixin) self.__unresolvedDependents.setdefault(resolvable, set()).add(depends_on) return resolvable def needsResolution (self): return self.__unresolvedComponents is not None
Apache License 2.0
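_replaceComponent_csc ends with the call-super-chain idiom getattr(super(...), '_replaceComponent_csc', fallback)(...), which delegates up the MRO only if some other base class defines the method and otherwise falls back to a default. A generic sketch of that idiom follows; the class and method names are hypothetical and not part of PyXB.

class Base(object):
    pass  # deliberately does not define handle_replace

class Mixin(Base):
    def handle_replace(self, old, new):
        # Do this class's share of the work here, then delegate up the MRO;
        # if no other base defines handle_replace, fall back to returning new.
        return getattr(super(Mixin, self), 'handle_replace',
                       lambda *args, **kw: new)(old, new)

print(Mixin().handle_replace('existing', 'replacement'))  # -> 'replacement'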
awslabs/aws-data-wrangler
awswrangler/athena/_read.py
read_sql_table
python
def read_sql_table(
    table: str,
    database: str,
    ctas_approach: bool = True,
    categories: Optional[List[str]] = None,
    chunksize: Optional[Union[int, bool]] = None,
    s3_output: Optional[str] = None,
    workgroup: Optional[str] = None,
    encryption: Optional[str] = None,
    kms_key: Optional[str] = None,
    keep_files: bool = True,
    ctas_database_name: Optional[str] = None,
    ctas_temp_table_name: Optional[str] = None,
    ctas_bucketing_info: Optional[Tuple[List[str], int]] = None,
    use_threads: Union[bool, int] = True,
    boto3_session: Optional[boto3.Session] = None,
    max_cache_seconds: int = 0,
    max_cache_query_inspections: int = 50,
    max_remote_cache_entries: int = 50,
    max_local_cache_entries: int = 100,
    data_source: Optional[str] = None,
    s3_additional_kwargs: Optional[Dict[str, Any]] = None,
    pyarrow_additional_kwargs: Optional[Dict[str, Any]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    table = catalog.sanitize_table_name(table=table)
    return read_sql_query(
        sql=f'SELECT * FROM "{table}"',
        database=database,
        data_source=data_source,
        ctas_approach=ctas_approach,
        categories=categories,
        chunksize=chunksize,
        s3_output=s3_output,
        workgroup=workgroup,
        encryption=encryption,
        kms_key=kms_key,
        keep_files=keep_files,
        ctas_database_name=ctas_database_name,
        ctas_temp_table_name=ctas_temp_table_name,
        ctas_bucketing_info=ctas_bucketing_info,
        use_threads=use_threads,
        boto3_session=boto3_session,
        max_cache_seconds=max_cache_seconds,
        max_cache_query_inspections=max_cache_query_inspections,
        max_remote_cache_entries=max_remote_cache_entries,
        max_local_cache_entries=max_local_cache_entries,
        s3_additional_kwargs=s3_additional_kwargs,
        pyarrow_additional_kwargs=pyarrow_additional_kwargs,
    )
Extract the full table from AWS Athena and return the results as a Pandas DataFrame. **Related tutorial:** - `Amazon Athena <https://aws-data-wrangler.readthedocs.io/en/2.12.1/ tutorials/006%20-%20Amazon%20Athena.html>`_ - `Athena Cache <https://aws-data-wrangler.readthedocs.io/en/2.12.1/ tutorials/019%20-%20Athena%20Cache.html>`_ - `Global Configurations <https://aws-data-wrangler.readthedocs.io/en/2.12.1/ tutorials/021%20-%20Global%20Configurations.html>`_ **There are two approaches to be defined through the ctas_approach parameter:** **1** - ctas_approach=True (Default): Wraps the query with a CTAS and then reads the table data as parquet directly from s3. PROS: - Faster for mid and big result sizes. - Can handle some level of nested types. CONS: - Requires create/delete table permissions on Glue. - Does not support timestamp with time zone. - Does not support columns with repeated names. - Does not support columns with undefined data types. - A temporary table will be created and then deleted immediately. **2** - ctas_approach=False: Does a regular query on Athena and parses the regular CSV result on s3. PROS: - Faster for small result sizes (less latency). - Does not require create/delete table permissions on Glue. - Supports timestamp with time zone. CONS: - Slower for big results (but still faster than other libraries that use the regular Athena API). - Does not handle nested types at all. Note ---- The resulting DataFrame (or every DataFrame in the returned Iterator for chunked queries) has a `query_metadata` attribute, which brings the query result metadata returned by `Boto3/Athena <https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services /athena.html#Athena.Client.get_query_execution>`_ . For a practical example check out the `related tutorial <https://aws-data-wrangler.readthedocs.io/en/2.12.1/ tutorials/024%20-%20Athena%20Query%20Metadata.html>`_! Note ---- Valid encryption modes: [None, 'SSE_S3', 'SSE_KMS']. `P.S. 'CSE_KMS' is not supported.` Note ---- Creates the default Athena bucket if it doesn't exist and s3_output is None. (E.g. s3://aws-athena-query-results-ACCOUNT-REGION/) Note ---- `chunksize` argument (Memory Friendly) (i.e. batching): Returns an Iterable of DataFrames instead of a regular DataFrame. There are two batching strategies: - If **chunksize=True**, a new DataFrame will be returned for each file in the query result. - If **chunksize=INTEGER**, Wrangler will iterate on the data by number of rows equal to the received INTEGER. `P.S.` `chunksize=True` is faster and uses less memory while `chunksize=INTEGER` is more precise in the number of rows for each DataFrame. `P.P.S.` If `ctas_approach=False` and `chunksize=True`, you will always receive an iterator with a single DataFrame because regular Athena queries only produce a single output file. Note ---- In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count(). Parameters ---------- table : str Table name. database : str AWS Glue/Athena database name. ctas_approach: bool Wraps the query using a CTAS, and reads the resulting parquet data on S3. If false, reads the regular CSV on S3. categories: List[str], optional List of column names that should be returned as pandas.Categorical. Recommended for memory restricted environments. chunksize : Union[int, bool], optional If passed, will split the data into an Iterable of DataFrames (Memory friendly). If `True`, wrangler will iterate on the data by files in the most efficient way without guarantee of chunksize. 
If an `INTEGER` is passed, Wrangler will iterate on the data by number of rows equal to the received INTEGER. s3_output : str, optional AWS S3 path. workgroup : str, optional Athena workgroup. encryption : str, optional Valid values: [None, 'SSE_S3', 'SSE_KMS']. Notice: 'CSE_KMS' is not supported. kms_key : str, optional For SSE-KMS, this is the KMS key ARN or ID. keep_files : bool Should Wrangler delete or keep the staging files produced by Athena? ctas_database_name : str, optional The name of the alternative database where the CTAS temporary table is stored. If None, the default `database` is used. ctas_temp_table_name : str, optional The name of the temporary table and also the directory name on S3 where the CTAS result is stored. If None, it will use the following random pattern: `f"temp_table_{uuid.uuid4().hex}"`. On S3 this directory will be under the pattern: `f"{s3_output}/{ctas_temp_table_name}/"`. ctas_bucketing_info: Tuple[List[str], int], optional Tuple consisting of the column names used for bucketing as the first element and the number of buckets as the second element. Only `str`, `int` and `bool` are supported as column data types for bucketing. use_threads : bool, int True to enable concurrent requests, False to disable multiple threads. If enabled, os.cpu_count() will be used as the max number of threads. If an integer is provided, the specified number is used. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receives None. max_cache_seconds: int Wrangler can look up in Athena's history if this table has been read before. If so, and its completion time is less than `max_cache_seconds` before now, wrangler skips query execution and just returns the same results as last time. If cached results are valid, wrangler ignores the `ctas_approach`, `s3_output`, `encryption`, `kms_key`, `keep_files` and `ctas_temp_table_name` params. If reading cached data fails for any reason, execution falls back to the usual query run path. max_cache_query_inspections : int Max number of queries that will be inspected from the history to try to find some result to reuse. The bigger the number of inspections, the bigger the latency for non-cached queries. Only takes effect if max_cache_seconds > 0. max_remote_cache_entries : int Max number of queries that will be retrieved from AWS for cache inspection. The bigger the number of inspections, the bigger the latency for non-cached queries. Only takes effect if max_cache_seconds > 0 and the default value is 50. max_local_cache_entries : int Max number of queries for which metadata will be cached locally. This will reduce the latency and also enables keeping more than `max_remote_cache_entries` available for the cache. This value should not be smaller than max_remote_cache_entries. Only takes effect if max_cache_seconds > 0 and the default value is 100. data_source : str, optional Data Source / Catalog name. If None, 'AwsDataCatalog' will be used by default. s3_additional_kwargs : Optional[Dict[str, Any]] Forwarded to botocore requests. e.g. s3_additional_kwargs={'RequestPayer': 'requester'} pyarrow_additional_kwargs : Optional[Dict[str, Any]] Forwarded to the ParquetFile class or when converting an Arrow table to Pandas; currently only a "coerce_int96_timestamp_unit" or "timestamp_as_object" argument will be considered. If reading parquet files where you cannot convert a timestamp to pandas Timestamp[ns], consider setting timestamp_as_object=True to allow for timestamp units > NS. 
If reading parquet data that still uses INT96 (like Athena outputs) you can use coerce_int96_timestamp_unit to specify what timestamp unit to encode INT96 to (by default this is "ns", if you know the output parquet came from a system that encodes timestamp to a particular unit then set this to that same unit e.g. coerce_int96_timestamp_unit="ms"). Returns ------- Union[pd.DataFrame, Iterator[pd.DataFrame]] Pandas DataFrame or Generator of Pandas DataFrames if chunksize is passed. Examples -------- >>> import awswrangler as wr >>> df = wr.athena.read_sql_table(table="...", database="...") >>> scanned_bytes = df.query_metadata["Statistics"]["DataScannedInBytes"]
https://github.com/awslabs/aws-data-wrangler/blob/f82b7e12d4126ec63f739f6f172139ed2e7d73ac/awswrangler/athena/_read.py#L886-L1116
import csv import datetime import logging import re import sys import uuid from typing import Any, Dict, Iterator, List, Match, NamedTuple, Optional, Tuple, Union import boto3 import botocore.exceptions import pandas as pd from awswrangler import _utils, catalog, exceptions, s3 from awswrangler._config import apply_configs from awswrangler._data_types import cast_pandas_with_athena_types from awswrangler.athena._utils import ( _apply_query_metadata, _empty_dataframe_response, _get_query_metadata, _get_s3_output, _get_workgroup_config, _LocalMetadataCacheManager, _QueryMetadata, _start_query_execution, _WorkGroupConfig, ) _logger: logging.Logger = logging.getLogger(__name__) class _CacheInfo(NamedTuple): has_valid_cache: bool file_format: Optional[str] = None query_execution_id: Optional[str] = None query_execution_payload: Optional[Dict[str, Any]] = None def _extract_ctas_manifest_paths(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]: bucket_name, key_path = _utils.parse_path(path) client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session) body: bytes = client_s3.get_object(Bucket=bucket_name, Key=key_path)["Body"].read() return [x for x in body.decode("utf-8").split("\n") if x != ""] def _fix_csv_types_generator( dfs: Iterator[pd.DataFrame], parse_dates: List[str], binaries: List[str] ) -> Iterator[pd.DataFrame]: for df in dfs: yield _fix_csv_types(df=df, parse_dates=parse_dates, binaries=binaries) def _add_query_metadata_generator( dfs: Iterator[pd.DataFrame], query_metadata: _QueryMetadata ) -> Iterator[pd.DataFrame]: for df in dfs: df = _apply_query_metadata(df=df, query_metadata=query_metadata) yield df def _fix_csv_types(df: pd.DataFrame, parse_dates: List[str], binaries: List[str]) -> pd.DataFrame: if len(df.index) > 0: for col in parse_dates: df[col] = df[col].dt.date.replace(to_replace={pd.NaT: None}) for col in binaries: df[col] = df[col].str.encode(encoding="utf-8") return df def _delete_after_iterate( dfs: Iterator[pd.DataFrame], paths: List[str], use_threads: Union[bool, int], boto3_session: boto3.Session, s3_additional_kwargs: Optional[Dict[str, str]], ) -> Iterator[pd.DataFrame]: for df in dfs: yield df s3.delete_objects( path=paths, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs ) def _prepare_query_string_for_comparison(query_string: str) -> str: query_string = "".join(query_string.split()).strip("()").lower() query_string = query_string[:-1] if query_string.endswith(";") else query_string return query_string def _compare_query_string(sql: str, other: str) -> bool: comparison_query = _prepare_query_string_for_comparison(query_string=other) _logger.debug("sql: %s", sql) _logger.debug("comparison_query: %s", comparison_query) if sql == comparison_query: return True return False def _get_last_query_infos( max_remote_cache_entries: int, boto3_session: Optional[boto3.Session] = None, workgroup: Optional[str] = None, ) -> List[Dict[str, Any]]: client_athena: boto3.client = _utils.client(service_name="athena", session=boto3_session) page_size = 50 args: Dict[str, Union[str, Dict[str, int]]] = { "PaginationConfig": {"MaxItems": max_remote_cache_entries, "PageSize": page_size} } if workgroup is not None: args["WorkGroup"] = workgroup paginator = client_athena.get_paginator("list_query_executions") uncached_ids = [] for page in paginator.paginate(**args): _logger.debug("paginating Athena's queries history...") query_execution_id_list: List[str] = page["QueryExecutionIds"] for 
query_execution_id in query_execution_id_list: if query_execution_id not in _cache_manager: uncached_ids.append(query_execution_id) if uncached_ids: new_execution_data = [] for i in range(0, len(uncached_ids), page_size): new_execution_data.extend( client_athena.batch_get_query_execution(QueryExecutionIds=uncached_ids[i : i + page_size]).get( "QueryExecutions" ) ) _cache_manager.update_cache(new_execution_data) return _cache_manager.sorted_successful_generator() def _parse_select_query_from_possible_ctas(possible_ctas: str) -> Optional[str]: possible_ctas = possible_ctas.lower() parquet_format_regex: str = r"format\s*=\s*\'parquet\'\s*," is_parquet_format: Optional[Match[str]] = re.search(pattern=parquet_format_regex, string=possible_ctas) if is_parquet_format is not None: unstripped_select_statement_regex: str = r"\s+as\s+\(*(select|with).*" unstripped_select_statement_match: Optional[Match[str]] = re.search( unstripped_select_statement_regex, possible_ctas, re.DOTALL ) if unstripped_select_statement_match is not None: stripped_select_statement_match: Optional[Match[str]] = re.search( r"(select|with).*", unstripped_select_statement_match.group(0), re.DOTALL ) if stripped_select_statement_match is not None: return stripped_select_statement_match.group(0) return None def _check_for_cached_results( sql: str, boto3_session: boto3.Session, workgroup: Optional[str], max_cache_seconds: int, max_cache_query_inspections: int, max_remote_cache_entries: int, ) -> _CacheInfo: if max_cache_seconds <= 0: return _CacheInfo(has_valid_cache=False) num_executions_inspected: int = 0 comparable_sql: str = _prepare_query_string_for_comparison(sql) current_timestamp: datetime.datetime = datetime.datetime.now(datetime.timezone.utc) _logger.debug("current_timestamp: %s", current_timestamp) for query_info in _get_last_query_infos( max_remote_cache_entries=max_remote_cache_entries, boto3_session=boto3_session, workgroup=workgroup, ): query_execution_id: str = query_info["QueryExecutionId"] query_timestamp: datetime.datetime = query_info["Status"]["CompletionDateTime"] _logger.debug("query_timestamp: %s", query_timestamp) if (current_timestamp - query_timestamp).total_seconds() > max_cache_seconds: return _CacheInfo( has_valid_cache=False, query_execution_id=query_execution_id, query_execution_payload=query_info ) statement_type: Optional[str] = query_info.get("StatementType") if statement_type == "DDL" and query_info["Query"].startswith("CREATE TABLE"): parsed_query: Optional[str] = _parse_select_query_from_possible_ctas(possible_ctas=query_info["Query"]) if parsed_query is not None: if _compare_query_string(sql=comparable_sql, other=parsed_query): return _CacheInfo( has_valid_cache=True, file_format="parquet", query_execution_id=query_execution_id, query_execution_payload=query_info, ) elif statement_type == "DML" and not query_info["Query"].startswith("INSERT"): if _compare_query_string(sql=comparable_sql, other=query_info["Query"]): return _CacheInfo( has_valid_cache=True, file_format="csv", query_execution_id=query_execution_id, query_execution_payload=query_info, ) num_executions_inspected += 1 _logger.debug("num_executions_inspected: %s", num_executions_inspected) if num_executions_inspected >= max_cache_query_inspections: return _CacheInfo(has_valid_cache=False) return _CacheInfo(has_valid_cache=False) def _fetch_parquet_result( query_metadata: _QueryMetadata, keep_files: bool, categories: Optional[List[str]], chunksize: Optional[int], use_threads: Union[bool, int], boto3_session: boto3.Session, 
s3_additional_kwargs: Optional[Dict[str, Any]], temp_table_fqn: Optional[str] = None, pyarrow_additional_kwargs: Optional[Dict[str, Any]] = None, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: ret: Union[pd.DataFrame, Iterator[pd.DataFrame]] chunked: Union[bool, int] = False if chunksize is None else chunksize _logger.debug("chunked: %s", chunked) if query_metadata.manifest_location is None: return _empty_dataframe_response(bool(chunked), query_metadata) manifest_path: str = query_metadata.manifest_location metadata_path: str = manifest_path.replace("-manifest.csv", ".metadata") _logger.debug("manifest_path: %s", manifest_path) _logger.debug("metadata_path: %s", metadata_path) paths: List[str] = _extract_ctas_manifest_paths(path=manifest_path, boto3_session=boto3_session) if not paths: if not temp_table_fqn: raise exceptions.EmptyDataFrame("Query would return untyped, empty dataframe.") database, temp_table_name = map(lambda x: x.replace('"', ""), temp_table_fqn.split(".")) dtype_dict = catalog.get_table_types(database=database, table=temp_table_name, boto3_session=boto3_session) df = pd.DataFrame(columns=list(dtype_dict.keys())) df = cast_pandas_with_athena_types(df=df, dtype=dtype_dict) df = _apply_query_metadata(df=df, query_metadata=query_metadata) return df ret = s3.read_parquet( path=paths, use_threads=use_threads, boto3_session=boto3_session, chunked=chunked, categories=categories, ignore_index=True, pyarrow_additional_kwargs=pyarrow_additional_kwargs, ) if chunked is False: ret = _apply_query_metadata(df=ret, query_metadata=query_metadata) else: ret = _add_query_metadata_generator(dfs=ret, query_metadata=query_metadata) paths_delete: List[str] = paths + [manifest_path, metadata_path] _logger.debug("type(ret): %s", type(ret)) if chunked is False: if keep_files is False: s3.delete_objects( path=paths_delete, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) return ret if keep_files is False: return _delete_after_iterate( dfs=ret, paths=paths_delete, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) return ret def _fetch_csv_result( query_metadata: _QueryMetadata, keep_files: bool, chunksize: Optional[int], use_threads: Union[bool, int], boto3_session: boto3.Session, s3_additional_kwargs: Optional[Dict[str, Any]], ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: _chunksize: Optional[int] = chunksize if isinstance(chunksize, int) else None _logger.debug("_chunksize: %s", _chunksize) if query_metadata.output_location is None or query_metadata.output_location.endswith(".csv") is False: chunked = _chunksize is not None return _empty_dataframe_response(chunked, query_metadata) path: str = query_metadata.output_location _logger.debug("Start CSV reading from %s", path) ret = s3.read_csv( path=[path], dtype=query_metadata.dtype, parse_dates=query_metadata.parse_timestamps, converters=query_metadata.converters, quoting=csv.QUOTE_ALL, keep_default_na=False, na_values=["", "NaN"], chunksize=_chunksize, skip_blank_lines=False, use_threads=False, boto3_session=boto3_session, ) _logger.debug("Start type casting...") _logger.debug(type(ret)) if _chunksize is None: df = _fix_csv_types(df=ret, parse_dates=query_metadata.parse_dates, binaries=query_metadata.binaries) df = _apply_query_metadata(df=df, query_metadata=query_metadata) if keep_files is False: s3.delete_objects( path=[path, f"{path}.metadata"], use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) 
return df dfs = _fix_csv_types_generator(dfs=ret, parse_dates=query_metadata.parse_dates, binaries=query_metadata.binaries) dfs = _add_query_metadata_generator(dfs=dfs, query_metadata=query_metadata) if keep_files is False: return _delete_after_iterate( dfs=dfs, paths=[path, f"{path}.metadata"], use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) return dfs def _resolve_query_with_cache( cache_info: _CacheInfo, categories: Optional[List[str]], chunksize: Optional[Union[int, bool]], use_threads: Union[bool, int], session: Optional[boto3.Session], s3_additional_kwargs: Optional[Dict[str, Any]], pyarrow_additional_kwargs: Optional[Dict[str, Any]] = None, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: _logger.debug("cache_info:\n%s", cache_info) if cache_info.query_execution_id is None: raise RuntimeError("Trying to resolve with cache but w/o any query execution ID.") query_metadata: _QueryMetadata = _get_query_metadata( query_execution_id=cache_info.query_execution_id, boto3_session=session, categories=categories, query_execution_payload=cache_info.query_execution_payload, metadata_cache_manager=_cache_manager, ) if cache_info.file_format == "parquet": return _fetch_parquet_result( query_metadata=query_metadata, keep_files=True, categories=categories, chunksize=chunksize, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, pyarrow_additional_kwargs=pyarrow_additional_kwargs, ) if cache_info.file_format == "csv": return _fetch_csv_result( query_metadata=query_metadata, keep_files=True, chunksize=chunksize, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, ) raise exceptions.InvalidArgumentValue(f"Invalid data type: {cache_info.file_format}.") def _resolve_query_without_cache_ctas( sql: str, database: Optional[str], data_source: Optional[str], s3_output: Optional[str], keep_files: bool, chunksize: Union[int, bool, None], categories: Optional[List[str]], encryption: Optional[str], workgroup: Optional[str], kms_key: Optional[str], wg_config: _WorkGroupConfig, alt_database: Optional[str], name: Optional[str], ctas_bucketing_info: Optional[Tuple[List[str], int]], use_threads: Union[bool, int], s3_additional_kwargs: Optional[Dict[str, Any]], boto3_session: boto3.Session, pyarrow_additional_kwargs: Optional[Dict[str, Any]] = None, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: path: str = f"{s3_output}/{name}" ext_location: str = "\n" if wg_config.enforced is True else f",\n external_location = '{path}'\n" fully_qualified_name: str = f'"{alt_database}"."{name}"' if alt_database else f'"{database}"."{name}"' bucketing_str = ( (f",\n" f" bucketed_by = ARRAY{ctas_bucketing_info[0]},\n" f" bucket_count = {ctas_bucketing_info[1]}") if ctas_bucketing_info else "" ) sql = ( f"CREATE TABLE {fully_qualified_name}\n" f"WITH(\n" f" format = 'Parquet',\n" f" parquet_compression = 'SNAPPY'" f"{bucketing_str}" f"{ext_location}" f") AS\n" f"{sql}" ) _logger.debug("sql: %s", sql) try: query_id: str = _start_query_execution( sql=sql, wg_config=wg_config, database=database, data_source=data_source, s3_output=s3_output, workgroup=workgroup, encryption=encryption, kms_key=kms_key, boto3_session=boto3_session, ) except botocore.exceptions.ClientError as ex: error: Dict[str, Any] = ex.response["Error"] if error["Code"] == "InvalidRequestException" and "Exception parsing query" in error["Message"]: raise exceptions.InvalidCtasApproachQuery( "Is not possible to wrap this query into a CTAS 
statement. Please use ctas_approach=False." ) if error["Code"] == "InvalidRequestException" and "extraneous input" in error["Message"]: raise exceptions.InvalidCtasApproachQuery( "Is not possible to wrap this query into a CTAS statement. Please use ctas_approach=False." ) raise ex _logger.debug("query_id: %s", query_id) try: query_metadata: _QueryMetadata = _get_query_metadata( query_execution_id=query_id, boto3_session=boto3_session, categories=categories, metadata_cache_manager=_cache_manager, ) except exceptions.QueryFailed as ex: msg: str = str(ex) if "Column name" in msg and "specified more than once" in msg: raise exceptions.InvalidCtasApproachQuery( f"Please, define distinct names for your columns OR pass ctas_approach=False. Root error message: {msg}" ) if "Column name not specified" in msg: raise exceptions.InvalidArgumentValue( "Please, define all columns names in your query. (E.g. 'SELECT MAX(col1) AS max_col1, ...')" ) if "Column type is unknown" in msg: raise exceptions.InvalidArgumentValue( "Please, don't leave undefined columns types in your query. You can cast to ensure it. " "(E.g. 'SELECT CAST(NULL AS INTEGER) AS MY_COL, ...')" ) raise ex return _fetch_parquet_result( query_metadata=query_metadata, keep_files=keep_files, categories=categories, chunksize=chunksize, use_threads=use_threads, s3_additional_kwargs=s3_additional_kwargs, boto3_session=boto3_session, temp_table_fqn=fully_qualified_name, pyarrow_additional_kwargs=pyarrow_additional_kwargs, ) def _resolve_query_without_cache_regular( sql: str, database: Optional[str], data_source: Optional[str], s3_output: Optional[str], keep_files: bool, chunksize: Union[int, bool, None], categories: Optional[List[str]], encryption: Optional[str], workgroup: Optional[str], kms_key: Optional[str], wg_config: _WorkGroupConfig, use_threads: Union[bool, int], s3_additional_kwargs: Optional[Dict[str, Any]], boto3_session: boto3.Session, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: _logger.debug("sql: %s", sql) query_id: str = _start_query_execution( sql=sql, wg_config=wg_config, database=database, data_source=data_source, s3_output=s3_output, workgroup=workgroup, encryption=encryption, kms_key=kms_key, boto3_session=boto3_session, ) _logger.debug("query_id: %s", query_id) query_metadata: _QueryMetadata = _get_query_metadata( query_execution_id=query_id, boto3_session=boto3_session, categories=categories, metadata_cache_manager=_cache_manager, ) return _fetch_csv_result( query_metadata=query_metadata, keep_files=keep_files, chunksize=chunksize, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) def _resolve_query_without_cache( sql: str, database: str, data_source: Optional[str], ctas_approach: bool, categories: Optional[List[str]], chunksize: Union[int, bool, None], s3_output: Optional[str], workgroup: Optional[str], encryption: Optional[str], kms_key: Optional[str], keep_files: bool, ctas_database_name: Optional[str], ctas_temp_table_name: Optional[str], ctas_bucketing_info: Optional[Tuple[List[str], int]], use_threads: Union[bool, int], s3_additional_kwargs: Optional[Dict[str, Any]], boto3_session: boto3.Session, pyarrow_additional_kwargs: Optional[Dict[str, Any]] = None, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: wg_config: _WorkGroupConfig = _get_workgroup_config(session=boto3_session, workgroup=workgroup) _s3_output: str = _get_s3_output(s3_output=s3_output, wg_config=wg_config, boto3_session=boto3_session) _s3_output = _s3_output[:-1] if _s3_output[-1] == "/" else 
_s3_output if ctas_approach is True: if ctas_temp_table_name is not None: name: str = catalog.sanitize_table_name(ctas_temp_table_name) else: name = f"temp_table_{uuid.uuid4().hex}" try: return _resolve_query_without_cache_ctas( sql=sql, database=database, data_source=data_source, s3_output=_s3_output, keep_files=keep_files, chunksize=chunksize, categories=categories, encryption=encryption, workgroup=workgroup, kms_key=kms_key, wg_config=wg_config, alt_database=ctas_database_name, name=name, ctas_bucketing_info=ctas_bucketing_info, use_threads=use_threads, s3_additional_kwargs=s3_additional_kwargs, boto3_session=boto3_session, pyarrow_additional_kwargs=pyarrow_additional_kwargs, ) finally: catalog.delete_table_if_exists( database=ctas_database_name or database, table=name, boto3_session=boto3_session ) return _resolve_query_without_cache_regular( sql=sql, database=database, data_source=data_source, s3_output=_s3_output, keep_files=keep_files, chunksize=chunksize, categories=categories, encryption=encryption, workgroup=workgroup, kms_key=kms_key, wg_config=wg_config, use_threads=use_threads, s3_additional_kwargs=s3_additional_kwargs, boto3_session=boto3_session, ) @apply_configs def read_sql_query( sql: str, database: str, ctas_approach: bool = True, categories: Optional[List[str]] = None, chunksize: Optional[Union[int, bool]] = None, s3_output: Optional[str] = None, workgroup: Optional[str] = None, encryption: Optional[str] = None, kms_key: Optional[str] = None, keep_files: bool = True, ctas_database_name: Optional[str] = None, ctas_temp_table_name: Optional[str] = None, ctas_bucketing_info: Optional[Tuple[List[str], int]] = None, use_threads: Union[bool, int] = True, boto3_session: Optional[boto3.Session] = None, max_cache_seconds: int = 0, max_cache_query_inspections: int = 50, max_remote_cache_entries: int = 50, max_local_cache_entries: int = 100, data_source: Optional[str] = None, params: Optional[Dict[str, Any]] = None, s3_additional_kwargs: Optional[Dict[str, Any]] = None, pyarrow_additional_kwargs: Optional[Dict[str, Any]] = None, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: if ctas_approach and data_source not in (None, "AwsDataCatalog"): raise exceptions.InvalidArgumentCombination( "Queries with ctas_approach=True (default) does not support " "data_source values different than None and 'AwsDataCatalog'. " "Please check the related tutorial for more details " "(https://github.com/awslabs/aws-data-wrangler/blob/main/" "tutorials/006%20-%20Amazon%20Athena.ipynb)" ) chunksize = sys.maxsize if ctas_approach is False and chunksize is True else chunksize session: boto3.Session = _utils.ensure_session(session=boto3_session) if params is None: params = {} for key, value in params.items(): sql = sql.replace(f":{key};", str(value)) max_remote_cache_entries = min(max_remote_cache_entries, max_local_cache_entries) _cache_manager.max_cache_size = max_local_cache_entries cache_info: _CacheInfo = _check_for_cached_results( sql=sql, boto3_session=session, workgroup=workgroup, max_cache_seconds=max_cache_seconds, max_cache_query_inspections=max_cache_query_inspections, max_remote_cache_entries=max_remote_cache_entries, ) _logger.debug("cache_info:\n%s", cache_info) if cache_info.has_valid_cache is True: _logger.debug("Valid cache found. 
Retrieving...") try: return _resolve_query_with_cache( cache_info=cache_info, categories=categories, chunksize=chunksize, use_threads=use_threads, session=session, s3_additional_kwargs=s3_additional_kwargs, pyarrow_additional_kwargs=pyarrow_additional_kwargs, ) except Exception as e: _logger.error(e) _logger.debug("Corrupted cache. Continuing to execute query...") return _resolve_query_without_cache( sql=sql, database=database, data_source=data_source, ctas_approach=ctas_approach, categories=categories, chunksize=chunksize, s3_output=s3_output, workgroup=workgroup, encryption=encryption, kms_key=kms_key, keep_files=keep_files, ctas_database_name=ctas_database_name, ctas_temp_table_name=ctas_temp_table_name, ctas_bucketing_info=ctas_bucketing_info, use_threads=use_threads, s3_additional_kwargs=s3_additional_kwargs, boto3_session=session, pyarrow_additional_kwargs=pyarrow_additional_kwargs, ) @apply_configs
Apache License 2.0
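A short usage sketch for read_sql_table with chunking and the query cache; the table and database names are hypothetical, and working AWS credentials plus an existing Glue table are assumed.

import awswrangler as wr

# chunksize=True yields one DataFrame per result file (memory friendly);
# max_cache_seconds lets Wrangler reuse a recent identical query result.
for df in wr.athena.read_sql_table(
    table="my_table",        # hypothetical table name
    database="my_database",  # hypothetical Glue database
    chunksize=True,
    max_cache_seconds=900,
):
    print(len(df), "rows in this chunk")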
atmtools/typhon
typhon/plots/plots.py
plot_bitfield
python
def plot_bitfield(ax, X, Y, bitfield, flag_dict, cmap,
                  cax=None,
                  pcolor_args={}, colorbar_args={},
                  unflagged="unflagged",
                  joiner=", "):
    unique_values = np.unique(
        bitfield.data if isinstance(bitfield, np.ma.MaskedArray) else bitfield)
    if not 0 in unique_values:
        unique_values = np.concatenate([[0], unique_values])
    labels = {v: joiner.join(flag_dict[x] for x in flag_dict.keys() if v&x)
              or unflagged
              for v in unique_values}
    trans = dict(enumerate(unique_values))
    new = bitfield.copy()
    for (to, fr) in trans.items():
        if isinstance(new, np.ma.MaskedArray):
            new.data[new.data==fr] = to
        else:
            new[new==fr] = to
    formatter = FuncFormatter(
        lambda val, loc: labels[trans[val]])
    img = ax.pcolor(X, Y, new,
                    cmap=get_cmap(cmap, unique_values.size),
                    **pcolor_args)
    cb = ax.figure.colorbar(img, cax=cax,
                            ticks=list(trans.keys()),
                            format=formatter,
                            **colorbar_args)
    img.set_clim(min(trans.keys())-0.5, max(trans.keys())+0.5)
    return (img, cb)
Plot a bitfield of categories with pcolor The numeric values in a bitfield are not directly meaningful. Rather, the relevant information is whether a particular bit is set. This function plots a 2-D bitfield using pcolor, then displays each unique combination of flags (or the absence of any flags) as a distinct category, and shows the corresponding labels in the colourbar. This assumes that, even when there are many possible flags, only a small subset of combinations of flags actually occurs within the data. Should this exceed a dozen or so, then the colorbar/legend will become particularly crowded. Note that a colorbar may not be the optimal legend to show alongside categorical data but as this function already exists it is more convenient to exploit than others. Currently this function only works with pcolor, not with pcolormesh, scatter, or other plotting functions for 3-D data. See https://gist.github.com/jakevdp/8a992f606899ac24b711 for an illustration of what the result may look like, although that is for the case of a scatter rather than pcolor plot. Parameters: ax (Axes): Axes (or subclass thereof, such as GeoAxes) to plot in. X (ndarray): X-values for bitfield. Interpretation as for pcolor. Y (ndarray): Y-values for bitfield. Interpretation as for pcolor. bitfield (ndarray): Bitfield to be plotted. flag_dict (Mapping[int, str]): Mapping of flag values to their meanings. Keys should be powers of 2. For example, {1: "DO_NOT_USE", 2: "BAD_GEOLOCATION", 4: "BAD_TIME"}. cmap (str): Colourmap to use. This needs to be passed here because it needs to be converted to be discrete corresponding to the number of unique values. I recommend to choose a qualitative colourmap such as Set1, Set2, or Set3. cax (Axes): Optional. If given, put colorbar here. pcolor_args (Mapping): Extra arguments to be passed to pcolor. colorbar_args (Mapping): Extra arguments to be passed to colorbar. unflagged (str): Label to use for unflagged values. Defaults to "unflagged". joiner (str): How to join different flags. Returns: (AxesImage, Colorbar) that were generated
https://github.com/atmtools/typhon/blob/815dcb1d7cb2718ffe81cd08386739438e7782cc/typhon/plots/plots.py#L898-L1018
import collections from datetime import datetime import itertools import math import warnings import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from matplotlib.ticker import FuncFormatter from matplotlib.cm import get_cmap import scipy.stats as stats from typhon.plots import formatter from typhon.math import stats as tpstats __all__ = [ 'binned_statistic', 'plot_distribution_as_percentiles', 'heatmap', 'histogram', 'scatter_density_plot_matrix', 'diff_histogram', 'profile_p', 'profile_p_log', 'profile_z', 'channels', 'colored_bars', 'plot_bitfield', ] def binned_statistic( x, y, bins=20, ax=None, ptype=None, pargs=None, **kwargs ): if ax is None: ax = plt.gca() if pargs is None: pargs = {} if ptype is None or ptype == "scatter": default = { "statistic": "median", "bins": bins, **kwargs, } statistics, bin_edges, bin_ind = stats.binned_statistic( x, values=y, **default ) bin_width = (bin_edges[1] - bin_edges[0]) bin_centers = bin_edges[1:] - bin_width / 2 plot = ax.plot(bin_centers, statistics, **pargs) elif ptype == "boxplot": bin_lefts = np.linspace(x.min(), x.max(), bins) bins_indices = np.digitize(x, bin_lefts) plot = ax.boxplot( [y[bins_indices == i] for i in range(bins)], **pargs ) bin_width = (bin_lefts[1] - bin_lefts[0]) bin_centers = bin_lefts[1:] + bin_width / 2 ax.set_xticklabels([f"{center:.1f}" for center in bin_centers]) else: raise ValueError(f"Unknown plot type {ptype}!") return plot def plot_distribution_as_percentiles(ax, x, y, nbins=10, bins=None, ptiles=(5, 25, 50, 75, 95), linestyles=(":", "--", "-", "--", ":"), ptile_to_legend=True, label=None, **kwargs): if bins is None: bins = np.linspace(x.min(), x.max(), nbins) scores = tpstats.get_distribution_as_percentiles(x, y, bins, ptiles) d_ls = collections.defaultdict(list) locallab = None for (ls, pt) in zip(linestyles, ptiles): d_ls[ls].append(pt) for i in range(len(ptiles)): if label is not None: if math.isclose(ptiles[i], 50): locallab = label + " (median)" else: if ptile_to_legend and linestyles[i] in d_ls: locallab = label + " (p-{:s})".format( "/".join("{:d}".format(x) for x in d_ls.pop(linestyles[i]))) else: locallab = None else: label = None ax.plot(bins, scores[:, i], linestyle=linestyles[i], label=locallab, **kwargs) def heatmap(x, y, bins=20, bisectrix=True, ax=None, **kwargs): if ax is None: ax = plt.gca() kwargs_defaults = { 'cmap': plt.get_cmap('Greys', 8), 'rasterized': True, } kwargs_defaults.update(kwargs) N, xedges, yedges, img = ax.hist2d(x, y, bins, **kwargs_defaults) if bisectrix: ax.plot((x.min(), x.max()), (x.min(), x.max()), color='red', linestyle='--', linewidth=2) return img def histogram(data, kind=None, ax=None, **kwargs): if ax is None: ax = plt.gca() if kind == "points" or kind == "line": hist_keys = {"bins", "range", "normed", "weights", "density"} hist_kwargs = {key: value for key, value in kwargs.items() if key in hist_keys} y, bin_edges = np.histogram(data, **hist_kwargs) bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1]) plot_kwargs = {key: value for key, value in kwargs.items() if key not in hist_keys} if kind == "points": ax.scatter(bin_centers, y, **plot_kwargs) elif kind == "line": ax.plot(bin_centers, y, '-', **plot_kwargs) elif kind is None or kind == "standard": ax.hist(data, **kwargs) else: raise ValueError(f"Unknown kind of histogram: {kind}!") def scatter_density_plot_matrix( M=None, hist_kw={}, hexbin_kw={"mincnt": 1, "cmap": "viridis"}, plot_dist_kw={"color": "tan", "ptiles": [5, 25, 50, 75, 95], "linestyles": [":", "--", "-", "--", 
":"], "linewidth": 1.5}, ranges={}, units=None, **kwargs): if M is None: M = np.empty( dtype=[(k, v.dtype) for (k, v) in kwargs.items()], shape=kwargs.copy().popitem()[1].shape) for (k, v) in kwargs.items(): M[k] = v elif not M.dtype.fields: MM = np.empty( dtype=",".join([M.dtype.descr[0][1]]*M.shape[1]), shape=M.shape[0]) for i in range(M.shape[1]): MM["f{:d}".format(i)] = M[:, i] M = MM if len(M.dtype.fields) > 20: raise ValueError( "You've given me {:d} fields to plot. " "That would result in {:d} subplots. I refuse to take " "more than 20 fields.".format(len(M.dtype.fields), len(M.dtype.fields)**2)) if units is None: units = {} N = len(M.dtype.names) (f, ax_all) = plt.subplots(N, N, figsize=(4+3*N, 4+3*N)) for ((x_i, x_f), (y_i, y_f)) in itertools.product( enumerate(M.dtype.names), repeat=2): a = ax_all[y_i, x_i] x = M[x_f] y = M[y_f] if x_i == y_i: rng = ranges.get(x_f, (x.min(), x.max())) a.hist(x, range=rng, **hist_kw) else: rng = (ranges.get(x_f, (x.min(), x.max())), ranges.get(y_f, (y.min(), y.max()))) inrange = ((x >= rng[0][0]) & (x <= rng[0][1]) & (y >= rng[1][0]) & (y <= rng[1][1])) if not inrange.any(): warnings.warn( "Combination {:s}/{:s} has no valid values".format( x_f, y_f), RuntimeWarning) continue x = x[inrange] y = y[inrange] a.hexbin(x, y, extent=[rng[0][0], rng[0][1], rng[1][0], rng[1][1]], **hexbin_kw) plot_distribution_as_percentiles( a, x, y, **plot_dist_kw) a.set_xlim(rng[0]) a.set_ylim(rng[1]) if x_i == 0: a.set_ylabel( "{:s} [{:s}]".format(y_f, units[y_f]) if y_f in units else "{:s} [{:~}]".format(y_f, y.u) if hasattr(y, "u") else y_f) if y_i == N-1: a.set_xlabel( "{:s} [{:s}]".format(x_f, units[x_f]) if x_f in units else "{:s} [{:~}]".format(x_f, x.u) if hasattr(x, "u") else x_f) return f def profile_p(p, x, ax=None, **kwargs): if ax is None: ax = plt.gca() formatter.set_yaxis_formatter(formatter.HectoPascalFormatter(), ax=ax) if ax.get_subplotspec().is_first_col(): ax.set_ylabel('Pressure [hPa]') ret = ax.plot(x, p, **kwargs) if hasattr(ax.yaxis, 'set_inverted'): ax.yaxis.set_inverted(True) elif not ax.yaxis_inverted(): ax.invert_yaxis() return ret def profile_p_log(p, x, ax=None, **kwargs): if ax is None: ax = plt.gca() ax.set_yscale('log') ret = profile_p(p, x, ax=ax, **kwargs) formatter.set_yaxis_formatter(formatter.HectoPascalLogFormatter(), ax=ax) return ret def profile_z(z, x, ax=None, **kwargs): if ax is None: ax = plt.gca() zmin = np.min((np.min(z), *ax.get_ylim())) zmax = np.max((np.max(z), *ax.get_ylim())) ax.set_ylim(zmin, zmax) if ax.is_first_col(): ax.set_ylabel('Height [km]') ret = ax.plot(x, z, **kwargs) formatter.set_yaxis_formatter(formatter.ScalingFormatter('kilo', '{x:g}'), ax=ax) return ret def diff_histogram(array1, array2, ax=None, plot_args=None, **hist_args): if ax is None: ax = plt.gca() bins_are_timestamps = False if array1.dtype.type == np.datetime64 or array1.dtype.type == datetime: array1 = array1.astype("M8[ns]").astype("float") bins_are_timestamps = True if array2.dtype.type == np.datetime64 or array2.dtype.type == datetime: array2 = array2.astype("M8[ns]").astype("float") bins_are_timestamps = True if "range" in hist_args: range_array = np.array(hist_args["range"]) if range_array.dtype.type == np.datetime64 or isinstance(range_array.item(0), datetime): range_array = range_array.astype("M8[ns]").astype("float") hist_args["range"] = range_array.tolist() elif "bins" not in hist_args and "range" not in hist_args: start = min(array1.min(), array2.min()) end = max(array1.max(), array2.max()) hist_args["range"] = [start, end] y1, 
bins1 = np.histogram(array1, **hist_args) y2, bins2 = np.histogram(array2, **hist_args) if not np.allclose(bins1, bins2): raise ValueError("Arrays could not be grouped into the same bins!") if bins_are_timestamps: bins1 = bins1.astype("M8[ns]") diff = y1 - y2 if plot_args is None: plot_args = {} bar_plot = ax.step(bins1[:-1], diff, **plot_args) return bar_plot, bins1, diff def channels(met_mm_backend, ylim=None, ax=None, **kwargs): if ax is None: ax = plt.gca() if ylim is None: ylim = ax.get_ylim() ymin, ymax = ylim def plot_band(center, width, ymin, ymax): xy = (center - 0.5 * width, ymin) height = ymax - ymin return ax.add_patch(Rectangle(xy, width, height, **kwargs)) band_centers = [] band_widths = [] for center, off1, off2, width in met_mm_backend: if off1 == 0: band_centers += [center] band_widths += [width] else: band_centers += [center - off1, center + off1] band_widths += [width, width] if off2 != 0: for bc in band_centers[-2:]: band_centers += [bc - off2, bc + off2] band_widths += [width, width] patches = [] for center, width in zip(band_centers, band_widths): patches.append(plot_band(center, width, ymin, ymax)) return patches def colored_bars(x, y, c=None, cmap=None, vmin=None, vmax=None, ax=None, **kwargs): if ax is None: ax = plt.gca() if c is None: c = y cmap = plt.get_cmap(cmap) absmax = np.max(np.abs(c)) vmin = -absmax if vmin is None else vmin vmax = absmax if vmax is None else vmax norm = plt.Normalize(vmin=vmin, vmax=vmax) sm = plt.cm.ScalarMappable(norm=norm, cmap=cmap) sm.set_array(c.flat) ret = ax.bar(x, y, color=cmap(norm(c)), **kwargs) return sm, ret
MIT License
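A brief, hedged usage sketch for two of the typhon.plots helpers quoted in the context above (heatmap and binned_statistic). The import path assumes typhon.plots re-exports the names listed in __all__, and the sample data is invented for illustration.

import numpy as np
import matplotlib.pyplot as plt
from typhon.plots import binned_statistic, heatmap  # assumed re-export of plots.py __all__

rng = np.random.default_rng(0)
x = rng.normal(size=5000)
y = x + rng.normal(scale=0.5, size=5000)

fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 4))
heatmap(x, y, bins=30, ax=ax0)                      # 2-D histogram plus the red bisectrix
binned_statistic(x, y, bins=20, ax=ax1,             # per-bin median of y, drawn as a line
                 pargs={"marker": "o"})
plt.show()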
bitsofbinary/yarabuilder
yarabuilder/yararule.py
YaraStrings.__init__
python
def __init__(self):
    self.raw_strings = []
    self.strings = collections.OrderedDict()
    self.number_of_strings = 0
    self.number_of_anonymous_strings = 0
    self.valid_str_types = ["text", "hex", "regex"]
    self.logger = logging.getLogger(__name__)
Constructor for YaraStrings
https://github.com/bitsofbinary/yarabuilder/blob/76107825ce869a534705b9ab834e108b4b051802/yarabuilder/yararule.py#L475-L484
import collections import logging class YaraCommentEnabledClass: def __init__(self): self.yara_comment = YaraComment() def add_comment(self, comment, position="inline"): if position == "above": self.yara_comment.above.append(comment) elif position == "inline": self.yara_comment.inline = comment elif position == "below": self.yara_comment.below.append(comment) def build_comments(self, raw, whitespace=" "): if self.yara_comment.above: for above_comment in reversed(self.yara_comment.above): raw = "// %s\n%s%s%s" % (above_comment, whitespace, whitespace, raw,) if self.yara_comment.inline: raw = "%s // %s" % (raw, self.yara_comment.inline) if self.yara_comment.below: for below_comment in self.yara_comment.below: raw = "%s\n%s%s// %s" % (raw, whitespace, whitespace, below_comment,) return raw class YaraComment: def __init__(self): self.above = [] self.inline = None self.below = [] def get_yara_comment(self): yara_comment = {} if self.above: yara_comment["above"] = self.above if self.inline: yara_comment["inline"] = self.inline if self.below: yara_comment["below"] = self.below return yara_comment def set_yara_comment(self, yara_comment): if "above" in yara_comment: self.above = yara_comment["above"] if "inline" in yara_comment: self.inline = yara_comment["inline"] if "below" in yara_comment: self.below = yara_comment["below"] class YaraMetaEntry(YaraCommentEnabledClass): def __init__(self, name, value, position, meta_type="text"): self.name = name self.value = value self.position = position self.meta_type = meta_type self.yara_comment = YaraComment() self.raw_meta_entry = None def build_meta_entry(self, whitespace=" "): if self.meta_type == "text": self.raw_meta_entry = '%s = "%s"' % (self.name, self.value) elif self.meta_type == "int": self.raw_meta_entry = "%s = %d" % (self.name, self.value) elif self.meta_type == "bool": if self.value: self.raw_meta_entry = "%s = true" % self.name else: self.raw_meta_entry = "%s = false" % self.name self.raw_meta_entry = self.build_comments( self.raw_meta_entry, whitespace=whitespace ) def get_yara_meta_entry(self): meta_entry = { "name": self.name, "value": self.value, "position": self.position, "meta_type": self.meta_type, } if ( self.yara_comment.above or self.yara_comment.inline or self.yara_comment.below ): meta_entry["comment"] = self.yara_comment.get_yara_comment() return meta_entry def set_yara_meta_entry(self, yara_meta_entry): if not all( k in yara_meta_entry for k in ("name", "value", "position", "meta_type") ): raise KeyError("Meta entry does not have the correct keys") self.name = yara_meta_entry["name"] self.value = yara_meta_entry["value"] self.position = yara_meta_entry["position"] self.meta_type = yara_meta_entry["meta_type"] if "comment" in yara_meta_entry: self.yara_comment.set_yara_comment(yara_meta_entry["comment"]) class YaraMeta: def __init__(self, logger=None): self.meta = collections.OrderedDict() self.raw_meta = [] self.number_of_meta_entries = 0 self.valid_meta_types = ["text", "int", "bool"] self.logger = logger or logging.getLogger(__name__) def add_meta(self, name, value, meta_type="text"): if meta_type not in self.valid_meta_types: self.logger.warning( 'Invalid meta_type provided ("%s"), defaulting to "text"', meta_type ) meta_type = "text" if name not in self.meta: self.meta[name] = [] self.meta[name].append( YaraMetaEntry(name, value, self.number_of_meta_entries, meta_type=meta_type) ) self.number_of_meta_entries += 1 return len(self.meta[name]) - 1 def build_meta(self, whitespace=" "): self.raw_meta = [None] * 
self.number_of_meta_entries for meta_entries in self.meta.values(): for meta_entry in meta_entries: meta_entry.build_meta_entry(whitespace=whitespace) self.raw_meta[meta_entry.position] = meta_entry.raw_meta_entry def get_yara_meta(self): yara_meta = collections.OrderedDict() for name, meta_entry_list in self.meta.items(): yara_meta_entries = [] for meta_entry in meta_entry_list: yara_meta_entries.append(meta_entry.get_yara_meta_entry()) yara_meta[name] = yara_meta_entries return yara_meta def set_yara_meta(self, yara_meta): for meta_entry_name, yara_meta_value in yara_meta.items(): if meta_entry_name not in self.meta: self.meta[meta_entry_name] = [] for yara_meta_entry in yara_meta_value: temp_yara_meta_entry = YaraMetaEntry(None, None, None) temp_yara_meta_entry.set_yara_meta_entry(yara_meta_entry) self.meta[meta_entry_name].append(temp_yara_meta_entry) class YaraString(YaraCommentEnabledClass): def __init__( self, name, value, str_type="text", is_anonymous=False, regex_flags=None, newline_after=False, ): self.name = name self.value = value self.str_type = str_type self.modifiers = [] self.is_anonymous = is_anonymous self.regex_flags = regex_flags self.newline_after = newline_after self.raw_string = None self.yara_comment = YaraComment() def build_string(self, whitespace=" "): self.raw_string = "$" if not self.is_anonymous: self.raw_string += self.name self.raw_string += " = " if self.str_type == "text": self.raw_string += '"%s"' % self.value elif self.str_type == "hex": self.raw_string += "{%s}" % self.value elif self.str_type == "regex": if self.regex_flags: self.raw_string += "/%s/%s" % (self.value, self.regex_flags) else: self.raw_string += "/%s/" % self.value if self.modifiers: for modifier in self.modifiers: self.raw_string += " %s" % modifier self.raw_string = self.build_comments(self.raw_string, whitespace=whitespace) if self.newline_after: self.raw_string += "\n" def get_yara_string(self): yara_string = { "name": self.name, "value": self.value, "str_type": self.str_type, "is_anonymous": self.is_anonymous, } if self.modifiers: yara_string["modifiers"] = self.modifiers if self.regex_flags: yara_string["regex_flags"] = self.regex_flags if ( self.yara_comment.above or self.yara_comment.inline or self.yara_comment.below ): yara_string["comment"] = self.yara_comment.get_yara_comment() return yara_string def set_yara_string(self, yara_string): if not all( k in yara_string for k in ("name", "value", "str_type", "is_anonymous") ): raise KeyError("String does not have the correct keys") self.name = yara_string["name"] self.value = yara_string["value"] self.str_type = yara_string["str_type"] self.is_anonymous = yara_string["is_anonymous"] if "modifiers" in yara_string: self.modifiers = yara_string["modifiers"] if "comment" in yara_string: self.yara_comment.set_yara_comment(yara_string["comment"]) if "regex_flags" in yara_string: self.regex_flags = yara_string["regex_flags"] class YaraStrings:
MIT License
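A minimal sketch based only on the YaraStrings/YaraString code quoted above; the import path is assumed from the file path yarabuilder/yararule.py.

from yarabuilder.yararule import YaraString, YaraStrings  # import path assumed

strings = YaraStrings()
print(strings.number_of_strings)        # 0 (freshly initialised container)

s = YaraString("my_string", "suspicious text", str_type="text")
s.modifiers.append("wide")
s.build_string()
print(s.raw_string)                     # $my_string = "suspicious text" wide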
nccgroup/blesuite
blesuite/connection_manager.py
BLEConnectionManager.get_gatt_server
python
def get_gatt_server(self):
    return self.gatt_server
Retrieve the GATT server for the BLEConnectionManager instance.

:return: GATT Server
:rtype: blesuite.pybt.gatt.Server
https://github.com/nccgroup/blesuite/blob/0e2e534f7d9beaa8e9d76223959a60b064f61e22/blesuite/connection_manager.py#L897-L904
from blesuite.pybt.roles import LECentral, LEPeripheral from blesuite.pybt.core import Connection from blesuite.pybt.gatt import UUID, AttributeDatabase, Server from blesuite.pybt.gap import GAP from blesuite.gatt_procedures import gatt_procedure_write_handle, gatt_procedure_write_handle_async, gatt_procedure_read_handle, gatt_procedure_read_handle_async, gatt_procedure_read_uuid, gatt_procedure_read_uuid_async, gatt_procedure_discover_primary_services, gatt_procedure_discover_secondary_services, gatt_procedure_discover_characteristics, gatt_procedure_discover_includes, gatt_procedure_discover_descriptors, gatt_procedure_prepare_write_handle, gatt_procedure_prepare_write_handle_async, gatt_procedure_execute_write, gatt_procedure_execute_write_async, gatt_procedure_write_command_handle, gatt_procedure_read_multiple_handles, gatt_procedure_read_multiple_handles_async, gatt_procedure_read_blob_handle, gatt_procedure_read_blob_handle_async from blesuite.smart_scan import blesuite_smart_scan from blesuite.entities.gatt_device import BLEDevice from blesuite.event_handler import BTEventHandler import logging import gevent import os logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) ROLE_CENTRAL = 0x00 ROLE_PERIPHERAL = 0x01 PUBLIC_DEVICE_ADDRESS = 0x00 RANDOM_DEVICE_ADDRESS = 0x01 class BLEConnection(object): def __init__(self, address, address_type, connection_handle=None): self.address = address self.address_type = address_type self.connection_handle = connection_handle self.interval_min = None self.interval_max = None self.mtu = 23 def __repr__(self): return '<{} address={}, type={}>'.format( self.__class__.__name__, self.address, {0: "random", 1: "public"}.get(self.address_type, "Unknown") ) class BLEConnectionManager(object): def __init__(self, adapter, role, our_address_type="public", random_address=None, psm=0, mtu=23, gatt_server=None, event_handler=None, att_operation_event_hook=None, att_security_event_hook=None): self.role_name = role self.adapter = adapter self.requester = None self.responses = [] self.response_counter = 0 self.psm = psm self.mtu = mtu self.gatt_server = gatt_server self.event_handler = event_handler self.att_operation_event_hook = att_operation_event_hook self.att_security_event_hook = att_security_event_hook self.address = None self.our_address_type_name = our_address_type if self.our_address_type_name.lower() == "random": self.our_address_type = RANDOM_DEVICE_ADDRESS else: self.our_address_type = PUBLIC_DEVICE_ADDRESS if self.our_address_type == RANDOM_DEVICE_ADDRESS and random_address is None: self.random_address = ':'.join(map(lambda x: x.encode('hex'), os.urandom(6))) elif self.our_address_type == RANDOM_DEVICE_ADDRESS: self.random_address = random_address else: self.random_address = None self.central = None self.stack_connection = None self.connections = [] if role is 'central': logger.debug("creating central") self._create_central() logger.debug("creating PyBT connection") self._create_stack_connection(ROLE_CENTRAL) logger.debug("creating listeners") self._start_listeners() elif role is 'peripheral': logger.debug("creating peripheral role") self._create_peripheral() logger.debug("creating PyBT connection") self._create_stack_connection(ROLE_PERIPHERAL) logger.debug("creating listeners") self._start_listeners() else: logger.error("Unknown role: %s" % role) raise RuntimeError("Unknown role: %s" % role) self.address = self.role.stack.addr def __enter__(self): return self def __del__(self): if self.stack_connection is not None: for 
connection in self.connections: if self.stack_connection.is_connected(connection.connection_handle): self.stack_connection.disconnect(connection.connection_handle, 0x16) self.stack_connection.destroy() self.stack_connection = None def __exit__(self, exc_type, exc_val, exc_tb): logger.debug("Exiting bleConnectionManager. exc_type:%s exc_val:%s exc_tb:%s" % (exc_type, exc_val, exc_tb)) if self.stack_connection is not None: self.stack_connection.destroy() self.stack_connection = None if self.role is not None: self.role.destroy() self.role = None def _create_central(self): if self.adapter is None: self.role = LECentral(address_type=self.our_address_type, random=self.random_address, att_operation_event_hook=self.att_operation_event_hook) else: self.role = LECentral(adapter=self.adapter, address_type=self.our_address_type, random=self.random_address, att_operation_event_hook=self.att_operation_event_hook) def _create_peripheral(self): if self.gatt_server is None: self.attribute_db = AttributeDatabase(event_handler=self.att_security_event_hook) self.gatt_server = Server(self.attribute_db) self.gatt_server.set_mtu(self.mtu) if self.adapter is None: self.role = LEPeripheral(self.gatt_server, mtu=self.mtu, address_type=self.our_address_type, random=self.random_address, att_operation_event_hook=self.att_operation_event_hook) else: self.role = LEPeripheral(self.gatt_server, adapter=self.adapter, mtu=self.mtu, address_type=self.our_address_type, random=self.random_address, att_operation_event_hook=self.att_operation_event_hook) def _create_stack_connection(self, role_type): if self.event_handler is None: self.event_handler = BTEventHandler(self) self.stack_connection = Connection(self.role, role_type, self.event_handler) def _start_listeners(self): self.stack_connection.start() def get_address(self): return self.address def get_discovered_devices(self): return self.stack_connection.seen def set_event_handler(self, event_class): logger.debug("Trying to set event handler") self.event_handler = event_class if self.stack_connection.socket_handler is not None: logger.debug("Stack connection found, setting event handler") self.stack_connection.set_event_handler(event_class) return True return False def set_att_operation_hook(self, event_class): logger.debug("Trying to set ATT operation hook") self.att_operation_event_hook = event_class self.role.att.event_handler = self.att_operation_event_hook return True def set_att_security_hook(self, event_class): logger.debug("Trying to set ATT security hook") self.att_security_event_hook = event_class if self.gatt_server is None: logger.debug("No GATT server running, setting security hook failed.") return False self.gatt_server.db.att_security_hooks = self.att_security_event_hook return True def is_connected(self, connection): return self.stack_connection.is_connected(connection.connection_handle) def init_connection(self, address, address_type): address = address.upper() if address_type == "public": address_type = PUBLIC_DEVICE_ADDRESS elif address_type == "private": address_type = RANDOM_DEVICE_ADDRESS ble_connection = BLEConnection(address, address_type) self.connections.append(ble_connection) return ble_connection def get_bleconnection_from_connection_handle(self, connection_handle): for connection in self.connections: if connection.connection_handle is not None and connection.connection_handle == connection_handle: return connection return None def connect(self, ble_connection, timeout=15): import time start = time.time() if not 
self.stack_connection.is_connected(ble_connection.connection_handle): request = self.stack_connection.connect(ble_connection.connection_handle, ble_connection.address, kind=ble_connection.address_type) while not request.has_response(): if timeout is not None and time.time() - start >= timeout: logger.debug("Connection failed: Connection timeout reached.") return False logger.debug("Is not connected") gevent.sleep(1) ble_connection.connection_handle = request.response.conn_handle logger.debug("Connected") return True def disconnect(self, connection, reason=0x16): self.stack_connection.disconnect(connection.connection_handle, reason) def pair(self, ble_connection, timeout=15): import time self.initiate_pairing(ble_connection) start = time.time() while not self.role.smp.get_connection_encryption_status(ble_connection.connection_handle): if self.role.smp.did_pairing_fail(ble_connection.address): logger.debug("Pairing Failed") return False if timeout is not None and time.time() - start >= timeout: return False logger.debug("Pairing in progress. Pairing Failed: %s " % self.role.smp.did_pairing_fail(ble_connection.address)) gevent.sleep(1) logger.debug("Paired") return True def initiate_pairing(self, ble_connection): if not self.is_connected(ble_connection): self.connect(ble_connection) self.role.smp.send_pairing_request(ble_connection.address, ble_connection.connection_handle) def is_pairing_in_progress(self, ble_connection): return self.role.smp.is_pairing_in_progress(ble_connection.address) def did_pairing_fail(self, ble_connection): return self.role.smp.did_pairing_fail(ble_connection.address) def is_connection_encrypted(self, ble_connection): return self.role.smp.get_connection_encryption_status(ble_connection.connection_handle) def resume_connection_encryption(self, ble_connection): result = self.role.smp.initiate_encryption_with_existing_keys(ble_connection.address, ble_connection.address_type, ble_connection.connection_handle, self.address, self.our_address_type, self.role) return result def get_security_manager_long_term_key_database(self): return self.role.smp.long_term_key_db def add_key_to_security_manager_long_term_key_database(self, address, address_type, ltk, ediv, rand, irk, csrk, security_mode, security_level): self.role.smp.long_term_key_db.add_long_term_key_entry(address, address_type, ltk, ediv, rand, irk, csrk, security_mode, security_level) def export_security_manager_long_term_key_database_for_storage(self): ltk_db = self.role.smp.long_term_key_db.get_long_term_key_database() for entry in ltk_db: temp = entry['address'] if temp is not None: temp = temp.encode('hex') entry['address'] = temp temp = entry['ltk'] if temp is not None: temp = temp.encode('hex') entry['ltk'] = temp temp = entry['rand'] if temp is not None: temp = temp.encode('hex') entry['rand'] = temp temp = entry['irk'] if temp is not None: temp = temp.encode('hex') entry['irk'] = temp temp = entry['csrk'] if temp is not None: temp = temp.encode('hex') entry['csrk'] = temp return ltk_db def import_long_term_key_database_to_security_manager(self, long_term_key_database): import blesuite.utils.validators as validator for entry in long_term_key_database: keys = entry.keys() if 'address' in keys: peer_address = entry['address'].decode('hex') else: peer_address = "00" * 6 if 'address_type' in keys: peer_address_type = entry['address_type'] else: peer_address_type = 0 if 'ltk' in keys: ltk = validator.validate_ltk(entry['ltk']).decode('hex') else: raise validator.InvalidSMLTK(None) if 'ediv' in keys: ediv = 
entry['ediv'] else: ediv = 0 if 'rand' in keys: rand = validator.validate_rand(entry['rand']).decode('hex') else: rand = '\x00' * 8 if 'irk' in keys: irk = validator.validate_irk(entry['irk']).decode('hex') else: irk = '\x00' * 16 if 'csrk' in keys: csrk = validator.validate_csrk(entry['csrk']).decode('hex') else: csrk = '\x00' * 16 if 'security_mode' in keys: mode = entry['security_mode'] else: mode = 1 if 'security_level' in keys: level = entry['security_level'] else: level = 1 mode, level = validator.validate_att_security_mode(mode, level) self.role.smp.long_term_key_db.add_long_term_key_entry(peer_address, peer_address_type, ltk, ediv, rand, irk, csrk, mode, level) def get_security_manager_protocol_default_pairing_parameters(self): return self.role.smp.get_default_pairing_parameters() def set_security_manager_protocol_default_pairing_parameters(self, default_io_cap=0x03, default_oob=0x00, default_mitm=0x00, default_bond=0x01, default_lesc=0x00, default_keypress=0x00, default_ct2=0x01, default_rfu=0x00, default_max_key_size=16, default_initiator_key_distribution=0x01, default_responder_key_distribution=0x01): self.role.smp.set_default_pairing_parameters(default_io_cap, default_oob, default_mitm, default_bond, default_lesc, default_keypress, default_ct2, default_rfu, default_max_key_size, default_initiator_key_distribution, default_responder_key_distribution) def get_security_manager_protocol_pairing_parameters_for_connection(self, ble_connection): return self.role.smp.get_pairing_parameters_for_connection(ble_connection.address) def set_security_manager_protocol_pairing_parameters_for_connection(self, ble_connection, io_cap=0x03, oob=0x00, mitm=0x00, bond=0x01, lesc=0x00, keypress=0x0, ct2=0x01, rfu=0x00, max_key_size=16, initiator_key_distribution=0x01, responder_key_distribution=0x01): return self.role.smp.set_pairing_parameters_for_connection(ble_connection.address, io_cap, oob, mitm, bond, lesc, keypress, ct2, rfu, max_key_size, initiator_key_distribution, responder_key_distribution) def decode_gap_data(self, data): gap = GAP() try: gap.decode(data) except Exception as e: if "Data too short" in str(e): logger.debug("Data too short, leaving off malformed data") else: raise e return gap def generate_gap_data_dict(self, gap): return gap.gap_dict() def scan(self, timeout): import time self.start_scan() start = time.time() * 1000 logger.debug("Starting sleep loop") while ((time.time() * 1000) - start) < timeout: logger.debug("Scanning...") gevent.sleep(1) self.stop_scan() logger.debug("Done scanning!") discovered_devices = self.get_discovered_devices() return discovered_devices def start_scan(self): self.stack_connection.scan("on") def stop_scan(self): self.stack_connection.scan("off") def advertise_and_wait_for_connection(self): self.start_advertising() while self.is_advertising(): gevent.sleep(1) if len(self.stack_connection.connection_statuses.keys()) > 0: connection_handle = self.stack_connection.connection_statuses.keys()[0] peer_address = self.stack_connection.peer_addresses_by_connection_handle[connection_handle] peer_address_type = self.stack_connection.connected_addr_type_by_connection_handle[connection_handle] return True, BLEConnection(peer_address, peer_address_type, connection_handle=connection_handle) else: logger.error("Advertising stopped and no connections are present. 
Something went wrong.") return False, None def start_advertising(self): self.stack_connection.start_advertising() def stop_advertising(self): self.stack_connection.stop_advertising() def is_advertising(self): return self.stack_connection.is_advertising() def set_advertising_data(self, data): self.stack_connection.set_advertising_data(data) def set_scan_response_data(self, data): self.stack_connection.set_scan_response_data(data) def set_advertising_parameters(self, advertisement_type, channel_map, interval_min, interval_max, destination_addr, destination_addr_type): self.stack_connection.set_advertising_parameters(advertisement_type, channel_map, interval_min, interval_max, destination_addr, destination_addr_type) def set_local_name(self, name, enforce_null_termination=True): if enforce_null_termination: if len(name) != 248: padding = 248 - len(name) name = name + ('\0' * padding) self.stack_connection.set_local_name(name)
MIT License
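A hedged sketch of reaching get_gatt_server() through the peripheral role, following the __init__ and __enter__ code quoted above. The adapter index 0 is a placeholder, a real BLE adapter is needed at runtime, and the library targets Python 2 era tooling.

from blesuite.connection_manager import BLEConnectionManager

# adapter index 0 is a placeholder; 'peripheral' makes __init__ build a GATT server internally
with BLEConnectionManager(0, 'peripheral') as manager:
    gatt_server = manager.get_gatt_server()
    print(gatt_server)   # blesuite.pybt.gatt.Server created in _create_peripheral()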
onshape-public/onshape-clients
python/onshape_client/oas/models/bt_parameter_spec_reference_part_studio1256.py
BTParameterSpecReferencePartStudio1256.openapi_types
python
def openapi_types():
    return {
        "allowed_insertable_types": ([str],),
        "bt_type": (str,),
        "computed_configuration_inputs": (
            [bt_computed_configuration_input_spec2525.BTComputedConfigurationInputSpec2525],
        ),
        "max_number_of_picks": (int,),
        "additional_localized_strings": (int,),
        "column_name": (str,),
        "default_value": (btm_parameter1.BTMParameter1,),
        "icon_uri": (str,),
        "localizable_name": (str,),
        "localized_name": (str,),
        "parameter_id": (str,),
        "parameter_name": (str,),
        "strings_to_localize": ([str],),
        "ui_hint": (str,),
        "ui_hints": ([str],),
        "visibility_condition": (
            bt_parameter_visibility_condition177.BTParameterVisibilityCondition177,
        ),
    }
This must be a class method so a model may have properties that are of type self; this ensures that we don't create a cyclic import.

Returns:
    openapi_types (dict): The key is the attribute name and the value is the attribute type.
https://github.com/onshape-public/onshape-clients/blob/20843a00c628e516e7219e17a23ec4ef2bf9f16f/python/onshape_client/oas/models/bt_parameter_spec_reference_part_studio1256.py#L138-L170
from __future__ import absolute_import import re import sys import six import nulltype from onshape_client.oas.model_utils import ( ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) try: from onshape_client.oas.models import bt_computed_configuration_input_spec2525 except ImportError: bt_computed_configuration_input_spec2525 = sys.modules[ "onshape_client.oas.models.bt_computed_configuration_input_spec2525" ] try: from onshape_client.oas.models import bt_parameter_spec_reference2789 except ImportError: bt_parameter_spec_reference2789 = sys.modules[ "onshape_client.oas.models.bt_parameter_spec_reference2789" ] try: from onshape_client.oas.models import ( bt_parameter_spec_reference_part_studio1256_all_of, ) except ImportError: bt_parameter_spec_reference_part_studio1256_all_of = sys.modules[ "onshape_client.oas.models.bt_parameter_spec_reference_part_studio1256_all_of" ] try: from onshape_client.oas.models import bt_parameter_visibility_condition177 except ImportError: bt_parameter_visibility_condition177 = sys.modules[ "onshape_client.oas.models.bt_parameter_visibility_condition177" ] try: from onshape_client.oas.models import btm_parameter1 except ImportError: btm_parameter1 = sys.modules["onshape_client.oas.models.btm_parameter1"] class BTParameterSpecReferencePartStudio1256(ModelComposed): allowed_values = { ("allowed_insertable_types",): { "SOLID": "SOLID", "SURFACE": "SURFACE", "WIRE": "WIRE", "MESH": "MESH", "SKETCH": "SKETCH", "FLATTENED_SHEET_METAL": "FLATTENED_SHEET_METAL", "ENTIRE_PART_STUDIO": "ENTIRE_PART_STUDIO", "CONSTRUCTION_PLANE": "CONSTRUCTION_PLANE", "COMPOSITE_PART": "COMPOSITE_PART", "UNKNOWN": "UNKNOWN", }, ("ui_hints",): { "OPPOSITE_DIRECTION": "OPPOSITE_DIRECTION", "ALWAYS_HIDDEN": "ALWAYS_HIDDEN", "SHOW_CREATE_SELECTION": "SHOW_CREATE_SELECTION", "CONTROL_VISIBILITY": "CONTROL_VISIBILITY", "NO_PREVIEW_PROVIDED": "NO_PREVIEW_PROVIDED", "REMEMBER_PREVIOUS_VALUE": "REMEMBER_PREVIOUS_VALUE", "DISPLAY_SHORT": "DISPLAY_SHORT", "ALLOW_FEATURE_SELECTION": "ALLOW_FEATURE_SELECTION", "MATE_CONNECTOR_AXIS_TYPE": "MATE_CONNECTOR_AXIS_TYPE", "PRIMARY_AXIS": "PRIMARY_AXIS", "SHOW_EXPRESSION": "SHOW_EXPRESSION", "OPPOSITE_DIRECTION_CIRCULAR": "OPPOSITE_DIRECTION_CIRCULAR", "SHOW_LABEL": "SHOW_LABEL", "HORIZONTAL_ENUM": "HORIZONTAL_ENUM", "UNCONFIGURABLE": "UNCONFIGURABLE", "MATCH_LAST_ARRAY_ITEM": "MATCH_LAST_ARRAY_ITEM", "COLLAPSE_ARRAY_ITEMS": "COLLAPSE_ARRAY_ITEMS", "INITIAL_FOCUS_ON_EDIT": "INITIAL_FOCUS_ON_EDIT", "INITIAL_FOCUS": "INITIAL_FOCUS", "DISPLAY_CURRENT_VALUE_ONLY": "DISPLAY_CURRENT_VALUE_ONLY", "READ_ONLY": "READ_ONLY", "PREVENT_CREATING_NEW_MATE_CONNECTORS": "PREVENT_CREATING_NEW_MATE_CONNECTORS", "FIRST_IN_ROW": "FIRST_IN_ROW", "ALLOW_QUERY_ORDER": "ALLOW_QUERY_ORDER", "PREVENT_ARRAY_REORDER": "PREVENT_ARRAY_REORDER", "UNKNOWN": "UNKNOWN", }, } validations = {} additional_properties_type = None @staticmethod
MIT License
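Since the context above decorates openapi_types() with @staticmethod, it can be called on the class itself to inspect the declared attribute types. A small hedged sketch:

from onshape_client.oas.models.bt_parameter_spec_reference_part_studio1256 import (
    BTParameterSpecReferencePartStudio1256,
)

types = BTParameterSpecReferencePartStudio1256.openapi_types()
print(types["max_number_of_picks"])   # (int,)
print(sorted(types))                  # every declared attribute name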
openstack/senlin
senlin/db/sqlalchemy/api.py
node_ids_by_cluster
python
def node_ids_by_cluster(context, cluster_id, filters=None):
    with session_for_read() as session:
        query = session.query(models.Node.id).filter_by(cluster_id=cluster_id)
        if filters:
            query = utils.exact_filter(query, models.Node, filters)
        return [n[0] for n in query.all()]
An internal API for getting node IDs.
https://github.com/openstack/senlin/blob/390779ca1e08f819683e79993696f945f1c0393e/senlin/db/sqlalchemy/api.py#L305-L312
import datetime import sys import threading import time from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils as sa_utils from oslo_log import log as logging from oslo_utils import timeutils import osprofiler.sqlalchemy import sqlalchemy from sqlalchemy.orm import joinedload from sqlalchemy.sql.expression import func from senlin.common import consts from senlin.common import exception from senlin.db.sqlalchemy import migration from senlin.db.sqlalchemy import models from senlin.db.sqlalchemy import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF _main_context_manager = None _CONTEXT = threading.local() cfg.CONF.import_opt('database_retry_limit', 'senlin.conf') cfg.CONF.import_opt('database_retry_interval', 'senlin.conf') cfg.CONF.import_opt('database_max_retry_interval', 'senlin.conf') def _get_main_context_manager(): global _main_context_manager if not _main_context_manager: _main_context_manager = enginefacade.transaction_context() cfg.CONF.import_group('profiler', 'senlin.conf') if cfg.CONF.profiler.enabled: if cfg.CONF.profiler.trace_sqlalchemy: eng = _main_context_manager.writer.get_engine() osprofiler.sqlalchemy.add_tracing(sqlalchemy, eng, "db") return _main_context_manager def get_engine(): return _get_main_context_manager().writer.get_engine() def session_for_read(): return _get_main_context_manager().reader.using(_CONTEXT) def session_for_write(): return _get_main_context_manager().writer.using(_CONTEXT) def get_backend(): return sys.modules[__name__] def retry_on_deadlock(f): return oslo_db_api.wrap_db_retry( retry_on_deadlock=True, max_retries=CONF.database_retry_limit, retry_interval=CONF.database_retry_interval, inc_retry_interval=True, max_retry_interval=CONF.database_max_retry_interval)(f) def query_by_short_id(context, model_query, model, short_id, project_safe=True): q = model_query() q = q.filter(model.id.like('%s%%' % short_id)) q = utils.filter_query_by_project(q, project_safe, context) if q.count() == 1: return q.first() elif q.count() == 0: return None else: raise exception.MultipleChoices(arg=short_id) def query_by_name(context, model_query, name, project_safe=True): q = model_query() q = q.filter_by(name=name) q = utils.filter_query_by_project(q, project_safe, context) if q.count() == 1: return q.first() elif q.count() == 0: return None else: raise exception.MultipleChoices(arg=name) def cluster_model_query(): with session_for_read() as session: query = session.query(models.Cluster).options( joinedload(models.Cluster.nodes), joinedload(models.Cluster.profile), joinedload(models.Cluster.policies) ) return query @retry_on_deadlock def cluster_create(context, values): with session_for_write() as session: cluster_ref = models.Cluster() cluster_ref.update(values) session.add(cluster_ref) return cluster_get(context, cluster_ref.id) def cluster_get(context, cluster_id, project_safe=True): cluster = cluster_model_query().get(cluster_id) if cluster is None: return None return utils.check_resource_project(context, cluster, project_safe) def cluster_get_by_name(context, name, project_safe=True): return query_by_name(context, cluster_model_query, name, project_safe=project_safe) def cluster_get_by_short_id(context, short_id, project_safe=True): return query_by_short_id(context, cluster_model_query, models.Cluster, short_id, project_safe=project_safe) def _query_cluster_get_all(context, project_safe=True): query = cluster_model_query() 
query = utils.filter_query_by_project(query, project_safe, context) return query def cluster_get_all(context, limit=None, marker=None, sort=None, filters=None, project_safe=True): query = _query_cluster_get_all(context, project_safe=project_safe) if filters: query = utils.exact_filter(query, models.Cluster, filters) keys, dirs = utils.get_sort_params(sort, consts.CLUSTER_INIT_AT) if marker: marker = cluster_model_query().get(marker) return sa_utils.paginate_query(query, models.Cluster, limit, keys, marker=marker, sort_dirs=dirs).all() @retry_on_deadlock def cluster_next_index(context, cluster_id): with session_for_write() as session: cluster = session.query(models.Cluster).with_for_update().get( cluster_id) if cluster is None: return 0 next_index = cluster.next_index cluster.next_index = cluster.next_index + 1 cluster.save(session) return next_index def cluster_count_all(context, filters=None, project_safe=True): query = _query_cluster_get_all(context, project_safe=project_safe) query = utils.exact_filter(query, models.Cluster, filters) return query.count() @retry_on_deadlock def cluster_update(context, cluster_id, values): with session_for_write() as session: cluster = session.query( models.Cluster).with_for_update().get(cluster_id) if not cluster: raise exception.ResourceNotFound(type='cluster', id=cluster_id) cluster.update(values) cluster.save(session) @retry_on_deadlock def cluster_delete(context, cluster_id): with session_for_write() as session: cluster = session.query(models.Cluster).get(cluster_id) if cluster is None: raise exception.ResourceNotFound(type='cluster', id=cluster_id) query = session.query(models.Node).filter_by(cluster_id=cluster_id) nodes = query.all() if len(nodes) != 0: for node in nodes: session.delete(node) for cp in cluster.policies: session.delete(cp) session.delete(cluster) def node_model_query(): with session_for_read() as session: query = session.query(models.Node).options( joinedload(models.Node.profile) ) return query @retry_on_deadlock def node_create(context, values): with session_for_write() as session: node = models.Node() node.update(values) session.add(node) return node def node_get(context, node_id, project_safe=True): node = node_model_query().get(node_id) if not node: return None return utils.check_resource_project(context, node, project_safe) def node_get_by_name(context, name, project_safe=True): return query_by_name(context, node_model_query, name, project_safe=project_safe) def node_get_by_short_id(context, short_id, project_safe=True): return query_by_short_id(context, node_model_query, models.Node, short_id, project_safe=project_safe) def _query_node_get_all(context, project_safe=True, cluster_id=None): query = node_model_query() if cluster_id is not None: query = query.filter_by(cluster_id=cluster_id) query = utils.filter_query_by_project(query, project_safe, context) return query def node_get_all(context, cluster_id=None, limit=None, marker=None, sort=None, filters=None, project_safe=True): query = _query_node_get_all(context, project_safe=project_safe, cluster_id=cluster_id) if filters: query = utils.exact_filter(query, models.Node, filters) keys, dirs = utils.get_sort_params(sort, consts.NODE_INIT_AT) if marker: marker = node_model_query().get(marker) return sa_utils.paginate_query(query, models.Node, limit, keys, marker=marker, sort_dirs=dirs).all() def node_get_all_by_cluster(context, cluster_id, filters=None, project_safe=True): query = _query_node_get_all(context, cluster_id=cluster_id, project_safe=project_safe) if filters: query = 
utils.exact_filter(query, models.Node, filters) return query.all()
Apache License 2.0
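A hedged sketch of calling this internal helper: the request context and cluster ID come from the caller, and the 'status' filter key is only an illustration of the exact-match filters (it assumes the Node model exposes such a column).

from senlin.db.sqlalchemy import api as db_api

def collect_active_node_ids(context, cluster_id):
    # filters are applied with utils.exact_filter(); 'status' is an assumed column name
    return db_api.node_ids_by_cluster(context, cluster_id,
                                      filters={'status': 'ACTIVE'})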
mozillazg/bustard
tests/httpbin/core.py
cache
python
def cache(request):
    is_conditional = (
        request.headers.get('If-Modified-Since') or
        request.headers.get('If-None-Match')
    )
    if is_conditional is None:
        response = view_get(request)
        response.headers['Last-Modified'] = http_date()
        response.headers['ETag'] = uuid.uuid4().hex
        return response
    else:
        return status_code(304)
Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise.
https://github.com/mozillazg/bustard/blob/bd7b47f3ba5440cf6ea026c8b633060fedeb80b7/tests/httpbin/core.py#L505-L520
import base64 import json import os import random import time import uuid from bustard.app import Bustard from bustard.http import ( Response, Headers, jsonify as bustard_jsonify, redirect ) from bustard.utils import json_dumps_default from werkzeug.datastructures import WWWAuthenticate from werkzeug.http import http_date from werkzeug.serving import run_simple from six.moves import range as xrange from . import filters from .helpers import ( get_headers, status_code, get_dict, get_request_range, check_basic_auth, check_digest_auth, secure_cookie, H, ROBOT_TXT, ANGRY_ASCII ) from .utils import weighted_choice from .structures import CaseInsensitiveDict ENV_COOKIES = ( '_gauges_unique', '_gauges_unique_year', '_gauges_unique_month', '_gauges_unique_day', '_gauges_unique_hour', '__utmz', '__utma', '__utmb' ) def jsonify(*args, **kwargs): response = bustard_jsonify(*args, **kwargs) if not response.data.endswith(b'\n'): response.data += b'\n' return response tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates') app = Bustard(__name__, template_dir=tmpl_dir) render_template = app.render_template url_for = app.url_for @app.after_request def set_cors_headers(request, response): response.headers['Access-Control-Allow-Origin'] = ( request.headers.get('Origin', '*') ) response.headers['Access-Control-Allow-Credentials'] = 'true' if request.method == 'OPTIONS': response.headers['Access-Control-Allow-Methods'] = ( 'GET, POST, PUT, DELETE, PATCH, OPTIONS' ) response.headers['Access-Control-Max-Age'] = '3600' if request.headers.get('Access-Control-Request-Headers') is not None: response.headers['Access-Control-Allow-Headers'] = ( request.headers['Access-Control-Request-Headers'] ) return response @app.route('/') def view_landing_page(request): tracking_enabled = 'HTTPBIN_TRACKING' in os.environ return render_template('index.html', request=request, tracking_enabled=tracking_enabled) @app.route('/html') def view_html_page(request): return render_template('moby.html') @app.route('/robots.txt') def view_robots_page(request): response = Response() response.content = ROBOT_TXT response.content_type = 'text/plain' return response @app.route('/deny') def view_deny_page(request): response = Response() response.content = ANGRY_ASCII response.content_type = 'text/plain' return response @app.route('/ip') def view_origin(request): return jsonify(origin=request.headers.get('X-Forwarded-For', request.remote_addr)) @app.route('/headers') def view_headers(request): return jsonify(get_dict(request, 'headers')) @app.route('/user-agent') def view_user_agent(request): headers = get_headers(request) return jsonify({'user-agent': headers['user-agent']}) @app.route('/get', methods=('GET', 'OPTIONS')) def view_get(request): return jsonify(get_dict(request, 'url', 'args', 'headers', 'origin')) @app.route('/post', methods=('POST',)) def view_post(request): return jsonify(get_dict(request, 'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')) @app.route('/put', methods=('PUT',)) def view_put(request): return jsonify(get_dict(request, 'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')) @app.route('/patch', methods=('PATCH',)) def view_patch(request): return jsonify(get_dict(request, 'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')) @app.route('/delete', methods=('DELETE',)) def view_delete(request): return jsonify(get_dict(request, 'url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')) @app.route('/gzip') @filters.gzip def 
view_gzip_encoded_content(request): return jsonify(get_dict(request, 'origin', 'headers', method=request.method, gzipped=True)) @app.route('/deflate') @filters.deflate def view_deflate_encoded_content(request): return jsonify(get_dict(request, 'origin', 'headers', method=request.method, deflated=True)) @app.route('/redirect/<int:n>') def redirect_n_times(request, n): n = int(n) assert n > 0 absolute = request.args.get('absolute', 'false').lower() == 'true' if n == 1: return redirect(app.url_for('view_get', _request=request, _external=absolute)) if absolute: return _redirect(request, 'absolute', n, True) else: return _redirect(request, 'relative', n, False) def _redirect(request, kind, n, external): return redirect(url_for('{0}_redirect_n_times'.format(kind), n=n - 1, _external=external, _request=request)) @app.route('/redirect-to') def redirect_to(request): args = CaseInsensitiveDict(request.args.items()) response = Response('') response.status_code = 302 response.headers['Location'] = args['url'].encode('utf-8') return response @app.route('/relative-redirect/<int:n>') def relative_redirect_n_times(request, n): n = int(n) assert n > 0 response = Response('') response.status_code = 302 if n == 1: response.headers['Location'] = url_for('view_get') return response response.headers['Location'] = app.url_for( 'relative_redirect_n_times', n=n - 1 ) return response @app.route('/absolute-redirect/<int:n>') def absolute_redirect_n_times(request, n): n = int(n) assert n > 0 if n == 1: return redirect(app.url_for('view_get', _request=request, _external=True)) return _redirect(request, 'absolute', n, True) @app.route('/stream/<int:n>') def stream_n_messages(request, n): n = int(n) response = get_dict(request, 'url', 'args', 'headers', 'origin') n = min(n, 100) def generate_stream(): for i in range(n): response['id'] = i yield json.dumps(response, default=json_dumps_default) + '\n' return Response(generate_stream(), headers={ 'Content-Type': 'application/json', }) @app.route('/status/<codes>', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE']) def view_status_code(request, codes): if ',' not in codes: code = int(codes) return status_code(code) choices = [] for choice in codes.split(','): if ':' not in choice: code = choice weight = 1 else: code, weight = choice.split(':') choices.append((int(code), float(weight))) code = weighted_choice(choices) return status_code(code) @app.route('/response-headers') def response_headers(request): headers = Headers(request.args.to_dict()) response = jsonify(headers) while True: content_len_shown = response.headers['Content-Length'] d = {} for key in response.headers.keys(): value = response.headers.get_all(key) if len(value) == 1: value = value[0] d[key] = value response = jsonify(d) for key, value in headers.to_list(): response.headers.add(key, value) if response.headers['Content-Length'] == content_len_shown: break return response @app.route('/cookies') def view_cookies(request, hide_env=True): cookies = dict(request.cookies.items()) if hide_env and ('show_env' not in request.args): for key in ENV_COOKIES: try: del cookies[key] except KeyError: pass return jsonify(cookies=cookies) @app.route('/forms/post') def view_forms_post(request): return render_template('forms-post.html') @app.route('/cookies/set/<name>/<value>') def set_cookie(request, name, value): r = app.make_response(redirect(url_for('view_cookies'))) r.set_cookie(key=name, value=value, secure=secure_cookie(request)) return r @app.route('/cookies/set') def set_cookies(request): cookies = 
dict(request.args.items()) r = app.make_response(redirect(url_for('view_cookies'))) for key, value in cookies.items(): r.set_cookie(key=key, value=value, secure=secure_cookie(request)) return r @app.route('/cookies/delete') def delete_cookies(request): cookies = dict(request.args.items()) r = app.make_response(redirect(url_for('view_cookies'))) for key, value in cookies.items(): r.delete_cookie(key=key) return r @app.route('/basic-auth/<user>/<passwd>') def basic_auth(request, user='user', passwd='passwd'): if not check_basic_auth(request, user, passwd): return status_code(401) return jsonify(authenticated=True, user=user) @app.route('/hidden-basic-auth/<user>/<passwd>') def hidden_basic_auth(request, user='user', passwd='passwd'): if not check_basic_auth(request, user, passwd): return status_code(404) return jsonify(authenticated=True, user=user) @app.route('/digest-auth/<qop>/<user>/<passwd>') def digest_auth(request, qop=None, user='user', passwd='passwd'): if qop not in ('auth', 'auth-int'): qop = None if 'Authorization' not in request.headers or not check_digest_auth(user, passwd) or 'Cookie' not in request.headers: response = app.make_response('') response.status_code = 401 nonce = H(b''.join([ getattr(request, 'remote_addr', u'').encode('ascii'), b':', str(time.time()).encode('ascii'), b':', os.urandom(10) ])) opaque = H(os.urandom(10)) auth = WWWAuthenticate('digest') auth.set_digest('me@kennethreitz.com', nonce, opaque=opaque, qop=('auth', 'auth-int') if qop is None else (qop, )) response.headers['WWW-Authenticate'] = auth.to_header() response.headers['Set-Cookie'] = 'fake=fake_value' return response return jsonify(authenticated=True, user=user) @app.route('/delay/<delay>') def delay_response(request, delay): delay = min(float(delay), 10) time.sleep(delay) return jsonify(get_dict(request, 'url', 'args', 'form', 'data', 'origin', 'headers', 'files')) @app.route('/drip') def drip(request): args = CaseInsensitiveDict(request.args.items()) duration = float(args.get('duration', 2)) numbytes = int(args.get('numbytes', 10)) code = int(args.get('code', 200)) pause = duration / numbytes delay = float(args.get('delay', 0)) if delay > 0: time.sleep(delay) def generate_bytes(): for i in xrange(numbytes): yield u'*'.encode('utf-8') time.sleep(pause) response = Response(generate_bytes(), headers={ 'Content-Type': 'application/octet-stream', 'Content-Length': str(numbytes), }) response.status_code = code return response @app.route('/base64/<value>') def decode_base64(request, value): encoded = value.encode('utf-8') return base64.urlsafe_b64decode(encoded).decode('utf-8') @app.route('/cache', methods=('GET',))
MIT License
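A hedged HTTP-level sketch of the conditional behaviour. It assumes the test app is being served locally (the host and port are placeholders) and uses the third-party requests package purely for illustration.

import requests

base = "http://127.0.0.1:5000"          # placeholder address for the locally served test app

first = requests.get(base + "/cache")
assert first.status_code == 200
etag = first.headers["ETag"]            # uuid4 hex set by the unconditional branch

second = requests.get(base + "/cache", headers={"If-None-Match": etag})
assert second.status_code == 304        # conditional request hits the 304 branch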
rlworkgroup/garage
src/garage/torch/_functions.py
soft_update_model
python
def soft_update_model(target_model, source_model, tau):
    for target_param, param in zip(target_model.parameters(),
                                   source_model.parameters()):
        target_param.data.copy_(target_param.data * (1.0 - tau) +
                                param.data * tau)
Update the target model's parameters towards those of the source model (soft update). # noqa: D417

Args:
    target_model (garage.torch.Policy/garage.torch.QFunction): Target model to update.
    source_model (garage.torch.Policy/QFunction): Source network to update.
    tau (float): Interpolation parameter for doing the soft target update.
https://github.com/rlworkgroup/garage/blob/3a578852c392cecde5b7c9786aa182d74f6df1d4/src/garage/torch/_functions.py#L284-L302
import copy import math import warnings import torch from torch import nn import torch.nn.functional as F _USE_GPU = False _DEVICE = None _GPU_ID = 0 def zero_optim_grads(optim, set_to_none=True): if not set_to_none: optim.zero_grad() return for group in optim.param_groups: for param in group['params']: param.grad = None def compute_advantages(discount, gae_lambda, max_episode_length, baselines, rewards): adv_filter = torch.full((1, 1, 1, max_episode_length - 1), discount * gae_lambda, dtype=torch.float) adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1) deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines) deltas = F.pad(deltas, (0, max_episode_length - 1)).unsqueeze(0).unsqueeze(0) advantages = F.conv2d(deltas, adv_filter, stride=1).reshape(rewards.shape) return advantages def pad_to_last(nums, total_length, axis=-1, val=0): tensor = torch.Tensor(nums) axis = (axis + len(tensor.shape)) if axis < 0 else axis if len(tensor.shape) <= axis: raise IndexError('axis {} is out of range {}'.format( axis, tensor.shape)) padding_config = [0, 0] * len(tensor.shape) padding_idx = abs(axis - len(tensor.shape)) * 2 - 1 padding_config[padding_idx] = max(total_length - tensor.shape[axis], val) return F.pad(tensor, padding_config) def filter_valids(tensor, valids): return [tensor[i][:valid] for i, valid in enumerate(valids)] def np_to_torch(array): tensor = torch.from_numpy(array) if tensor.dtype != torch.float32: tensor = tensor.float() return tensor.to(global_device()) def list_to_tensor(data): return torch.as_tensor(data, dtype=torch.float32, device=global_device()) def as_torch_dict(array_dict): for key, value in array_dict.items(): array_dict[key] = np_to_torch(value) return array_dict def torch_to_np(tensors): value_out = tuple(v.cpu().numpy() for v in tensors) return value_out def flatten_batch(tensor): return tensor.reshape((-1, ) + tensor.shape[2:]) def flatten_to_single_vector(tensor): N = tensor.shape[0] return tensor.view(N, -1) def update_module_params(module, new_params): named_modules = dict(module.named_modules()) def update(m, name, param): del m._parameters[name] setattr(m, name, param) m._parameters[name] = param for name, new_param in new_params.items(): if '.' in name: module_name, param_name = tuple(name.rsplit('.', 1)) if module_name in named_modules: update(named_modules[module_name], param_name, new_param) else: update(module, name, new_param)
MIT License
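A self-contained sketch of the soft (Polyak) update on two small torch modules; only the signature quoted above is assumed, and the import path may differ if garage does not re-export the helper from garage.torch.

import torch
from torch import nn
from garage.torch import soft_update_model   # re-export assumed; otherwise garage.torch._functions

target = nn.Linear(4, 2)
source = nn.Linear(4, 2)

soft_update_model(target, source, tau=0.005)
# every target parameter is now 0.995 * target + 0.005 * source
print(next(target.parameters())[0, :2])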
robertjoosten/maya-timeline-marker
scripts/timeline_marker/ui.py
TimelineMarker.add_from_ui
python
def add_from_ui(self):
    colour = self.picker.property("rgb")
    comment = self.comment.text()
    for frame in utils.get_timeline_range():
        self.add(frame, colour, comment)
    self.comment.setText("")
Get the frame, colour and comment arguments from the UI and add marker(s).
https://github.com/robertjoosten/maya-timeline-marker/blob/5ebed07470d5f61bfb52375b84c86b40a23f1675/scripts/timeline_marker/ui.py#L335-L349
import json from maya import mel from maya import cmds from maya.api import OpenMaya from PySide2 import QtWidgets, QtGui, QtCore from timeline_marker import utils TIMELINE_MARKER = "timeline-marker" TIMELINE_MARKER_OLD = "timelineMarker" class TimelineMark(object): slots = ("colour", "comment", ) def __init__(self, colour=(0, 255, 0), comment=""): self.colour = colour self.comment = comment class TimelineMarker(QtWidgets.QWidget): instance = None def __init__(self, parent): super(TimelineMarker, self).__init__(parent) self.start = None self.end = None self.total = None self.step = None self.data = {} self.range = None self.callbacks = [] scale_factor = self.logicalDpiX() / 96.0 self.menu = utils.get_timeline_menu() self.menu_actions = [] self.menu_actions.append(self.menu.addSeparator()) self.comment = QtWidgets.QLineEdit(self.menu) self.comment.setPlaceholderText("marker comment...") action = QtWidgets.QWidgetAction(self.menu) action.setDefaultWidget(self.comment) self.menu.addAction(action) self.menu_actions.append(action) self.menu_actions.append(self.menu.addSeparator()) self.picker = self.menu.addAction("Pick Color") self.picker.setProperty("rgb", [0, 255, 0]) self.picker.triggered.connect(self.display_picker) pixmap = QtGui.QPixmap(12 * scale_factor, 12 * scale_factor) pixmap.fill(QtGui.QColor(0, 255, 0)) self.picker.setIcon(QtGui.QIcon(pixmap)) self.menu_actions.append(self.picker) self.menu_actions.append(self.menu.addSeparator()) add_action = self.menu.addAction("Add Marker") add_action.triggered.connect(self.add_from_ui) remove_action = self.menu.addAction("Delete Selected Marker") remove_action.triggered.connect(self.remove_from_ui) clear_action = self.menu.addAction("Delete All Markers") clear_action.triggered.connect(self.clear) self.menu_actions.extend([add_action, remove_action, clear_action]) self.menu_actions.append(self.menu.addSeparator()) self.move = self.menu.addAction("Move With Time Control") self.move.setCheckable(True) self.move.setChecked(False) self.menu_actions.append(self.move) self.load_from_scene() self.register_callbacks() @classmethod def get_instance(cls): if cls.instance is None: raise RuntimeError("TimelineMarker has no instance, initialize first.") return cls.instance def paintEvent(self, event): self.start = cmds.playbackOptions(query=True, minTime=True) self.end = cmds.playbackOptions(query=True, maxTime=True) self.total = self.width() self.step = (self.total - (self.total * 0.01)) / (self.end - self.start + 1) if not self.data: return painter = QtGui.QPainter(self) pen = QtGui.QPen() pen.setWidth(self.step) for frame, frame_data in self.data.items(): if not self.start <= frame <= self.end: continue r, g, b = frame_data.colour pen.setColor(QtGui.QColor(r, g, b, 50)) pos = (frame - self.start + 0.5) * self.step + (self.total * 0.005) line = QtCore.QLineF(QtCore.QPointF(pos, 0), QtCore.QPointF(pos, 100)) painter.setPen(pen) painter.drawLine(line) return super(TimelineMarker, self).paintEvent(event) def event(self, event): if event.type() == QtCore.QEvent.ToolTip: QtWidgets.QToolTip.hideText() frame = int(((event.x() - (self.total * 0.005)) / self.step) + self.start) frame_data = self.data.get(frame) if frame_data is not None: QtWidgets.QToolTip.showText(event.globalPos(), frame_data.comment, self) return super(TimelineMarker, self).event(event) def update(self): self.write_to_scene() super(TimelineMarker, self).update() def deleteLater(self): for action in self.menu_actions: action.deleteLater() self.remove_callbacks() super(TimelineMarker, 
self).deleteLater() def display_picker(self): colour = self.picker.property("rgb") colour = QtGui.QColor(colour[0], colour[1], colour[2]) dialog = QtWidgets.QColorDialog.getColor(colour, self.menu) if not dialog.isValid(): return colour = [dialog.red(), dialog.green(), dialog.blue()] pixmap = QtGui.QPixmap(12, 12) pixmap.fill(QtGui.QColor(colour[0], colour[1], colour[2])) self.picker.setProperty("rgb", colour) self.picker.setIcon(QtGui.QIcon(pixmap)) def press_command_callback(self, *args): timeline_path = utils.get_timeline_path() cmds.timeControl(timeline_path, edit=True, beginScrub=True) range_visible = cmds.timeControl(timeline_path, query=True, rangeVisible=True) if range_visible and self.move.isChecked(): self.range = utils.get_timeline_range() else: self.range = None def release_command_callback(self, *args): timeline_path = utils.get_timeline_path() cmds.timeControl(timeline_path, edit=True, endScrub=True) if not self.range or not self.move.isChecked(): return start_range = self.range[:] end_range = utils.get_timeline_range() self.range = None start_length = len(start_range) end_length = len(end_range) range_visible = cmds.timeControl(timeline_path, query=True, rangeVisible=True) if (start_length == 1 and end_length != 1) or not range_visible: return matches = {frame: self.data[frame] for frame in start_range if frame in self.data} for frame, frame_data in matches.items(): if start_length == 1: frame_remapped = end_range[0] else: frame_remapped = int( utils.remap( frame, input_min=start_range[0], input_max=start_range[-1], output_min=end_range[0], output_max=end_range[-1] ) ) if frame == frame_remapped: continue self.data[frame_remapped] = frame_data self.data.pop(frame, None) self.update() @classmethod def add(cls, frame, colour, comment): instance = cls.get_instance() instance.data[frame] = TimelineMark(colour, comment) instance.update() @classmethod def set(cls, frames, colours, comments): instance = cls.get_instance() instance.data.clear() for frame, colour, comment in zip(frames, colours, comments): instance.data[frame] = TimelineMark(colour, comment) instance.update() @classmethod def remove(cls, *frames): instance = cls.get_instance() for frame in frames: instance.data.pop(frame, None) instance.update() @classmethod def clear(cls): instance = cls.get_instance() instance.data.clear() instance.update()
MIT License
mi2rl/mi2rlnet
medimodule/Chest/module.py
EnhanceCTClassifier._preprocessing
python
def _preprocessing(self, path: str) -> Tuple[np.array, np.array]:
    ds = pydicom.dcmread(path)
    if ds.file_meta.TransferSyntaxUID == '1.2.840.10008.1.2.4.70':
        ds.decompress()
    imgo = ds.pixel_array
    try:
        intercept = ds.RescaleIntercept
        tmp_img = imgo.astype('float32')
        tmp_img2 = np.copy(tmp_img)

        window_width = 450.
        window_level = 550.
        lower = (window_level-window_width)
        upper = (window_level+window_width)
        tmp_img -= (lower - intercept)
        tmp_img /= (upper + 1024)
        tmp_img[tmp_img < 0] = 0.
        tmp_img[tmp_img > 1] = 1.
        tmp_img = cv2.resize(tmp_img, (256, 256), interpolation=cv2.INTER_AREA)
        tmp_img = tmp_img[:,:,None]

        tmp_img2[tmp_img2 == -2000] = 0.
        tmp_img2 -= -1024. - intercept
        tmp_img2 /= 4096
        tmp_img2 = cv2.resize(tmp_img2, (256, 256), interpolation=cv2.INTER_AREA)
        tmp_img2 = tmp_img2[:,:,None]

        img = np.concatenate((tmp_img, tmp_img2), axis=-1)
        img = np.expand_dims(img, axis=0)
    except:
        print('please check your image modality, you have to input CT')
        raise

    return imgo, img
Args:
    (string) path : dicom path

Return:
    (numpy ndarray) imgo : original image
    (numpy ndarray) img : preprocessed image
https://github.com/mi2rl/mi2rlnet/blob/158a74985074f95fcd6a345c310903936dd2adbe/medimodule/Chest/module.py#L108-L156
import os import sys import cv2 import json import pydicom import warnings import numpy as np import SimpleITK as sitk from typing import Tuple, Optional, List, Dict from medimodule.utils import Checker from medimodule.base import BaseModule from medimodule.Chest.models import LungSeg from medimodule.Chest.models import LRDet from medimodule.Chest.models import ViewCls from medimodule.Chest.models import EnhanceCls from medimodule.Chest.models.lr_mark_detection_model.anchors import anchors_for_shape class ViewpointClassifier(BaseModule): def __init__(self, weight_path: Optional[str] = None): self.model = ViewCls() if weight_path: self.model.load_weights(weight_path) self.labels = ['PA', 'Lateral', 'Others'] def _preprocessing(self, path: str) -> Tuple[np.array, np.array]: imgo = np.squeeze(sitk.GetArrayFromImage(sitk.ReadImage(path))) img = cv2.resize(imgo, (512, 512), interpolation=cv2.INTER_LINEAR) img = img.astype(np.float32) img -= np.min(img) img /= np.percentile(img, 99) img[img > 1] = 1 img *= (2**8-1) img = img.astype(np.uint8) img = img[None,...,None] return imgo, img def predict( self, path: str, save_path: Optional[str] = None ) -> Tuple[np.array, str]: path = os.path.abspath(path) imgo, img = self._preprocessing(path) result = self.model.predict(img) result = np.argmax(result) result = self.labels[result] if save_path: if ".txt" in save_path: with open(save_path, "w") as f: f.write(result) else: warnings.warn( "ONLY txt format is allowed in this module." "If you want other format, use a custom code.", UserWarning) return (imgo, result) class EnhanceCTClassifier(BaseModule): def __init__(self, weight_path: Optional[str] = None): self.model = EnhanceCls() if weight_path: self.model.load_weights(weight_path) self.labels = ['Non-Enhanced', 'Enhanced']
Apache License 2.0
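For reference, the two-channel normalisation used in _preprocessing above can be reproduced on a synthetic array; the intercept value below is a hypothetical stand-in for the DICOM RescaleIntercept, and resizing to 256x256 is omitted.

import numpy as np

intercept = -1024.0                        # assumed RescaleIntercept for illustration
raw = np.random.randint(0, 3000, (512, 512)).astype('float32')

window_width, window_level = 450.0, 550.0
lower, upper = window_level - window_width, window_level + window_width

ch1 = raw.copy()                           # windowed channel
ch1 -= (lower - intercept)
ch1 /= (upper + 1024)
ch1 = np.clip(ch1, 0.0, 1.0)

ch2 = raw.copy()                           # full-range channel
ch2[ch2 == -2000] = 0.0
ch2 -= -1024.0 - intercept
ch2 /= 4096

img = np.stack([ch1, ch2], axis=-1)        # (512, 512, 2); the module resizes each channel to 256x256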
racker/rackspace-monitoring
test/__init__.py
MockHttp._example
python
def _example(self, method, url, body, headers):
    return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'},
            httplib.responses[httplib.OK])
Return a simple message and header, regardless of input.
https://github.com/racker/rackspace-monitoring/blob/8a9929e5fd51826c0a392e21bc55acb2aefe54f7/test/__init__.py#L183-L188
import random import unittest from cgi import parse_qs from libcloud.utils.py3 import httplib from libcloud.utils.py3 import StringIO from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import u XML_HEADERS = {'content-type': 'application/xml'} class LibcloudTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): self._visited_urls = [] self._executed_mock_methods = [] super(LibcloudTestCase, self).__init__(*args, **kwargs) def setUp(self): self._visited_urls = [] self._executed_mock_methods = [] def _add_visited_url(self, url): self._visited_urls.append(url) def _add_executed_mock_method(self, method_name): self._executed_mock_methods.append(method_name) def assertExecutedMethodCount(self, expected): actual = len(self._executed_mock_methods) self.assertEqual(actual, expected, 'expected %d, but %d mock methods were executed' % (expected, actual)) class multipleresponse(object): count = 0 func = None def __init__(self, f): self.func = f def __call__(self, *args, **kwargs): ret = self.func(self.func.__class__, *args, **kwargs) response = ret[self.count] self.count = self.count + 1 return response class MockResponse(object): headers = {} body = StringIO() status = 0 reason = '' version = 11 def __init__(self, status, body, headers=None, reason=None): self.status = status self.body = StringIO(u(body)) self.headers = headers or self.headers self.reason = reason or self.reason def read(self, *args, **kwargs): return self.body.read(*args, **kwargs) def getheader(self, name, *args, **kwargs): return self.headers.get(name, *args, **kwargs) def getheaders(self): return list(self.headers.items()) def msg(self): raise NotImplemented class BaseMockHttpObject(object): def _get_method_name(self, type, use_param, qs, path): meth_name = path.replace('/', '_').replace('.', '_').replace('-', '_') if type: meth_name = '%s_%s' % (meth_name, self.type) if use_param: param = qs[self.use_param][0].replace('.', '_').replace('-', '_') meth_name = '%s_%s' % (meth_name, param) return meth_name class MockHttp(BaseMockHttpObject): responseCls = MockResponse host = None port = None response = None type = None use_param = None test = None def __init__(self, host, port, *args, **kwargs): self.host = host self.port = port def request(self, method, url, body=None, headers=None, raw=False): parsed = urlparse.urlparse(url) scheme, netloc, path, params, query, fragment = parsed qs = parse_qs(query) if path.endswith('/'): path = path[:-1] meth_name = self._get_method_name(type=self.type, use_param=self.use_param, qs=qs, path=path) meth = getattr(self, meth_name) if self.test and isinstance(self.test, LibcloudTestCase): self.test._add_visited_url(url=url) self.test._add_executed_mock_method(method_name=meth_name) status, body, headers, reason = meth(method, url, body, headers) self.response = self.responseCls(status, body, headers, reason) def getresponse(self): return self.response def connect(self): pass def close(self): pass
Apache License 2.0
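A dispatch sketch: MockHttp._get_method_name maps the URL path '/example' to the mock method '_example'; the import paths, host, and port below are assumptions for illustration.

from libcloud.utils.py3 import httplib
from test import MockHttp          # the module shown above; adjust the import to your layout

class ExampleMockHttp(MockHttp):
    def _example(self, method, url, body, headers):
        return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'},
                httplib.responses[httplib.OK])

conn = ExampleMockHttp('localhost', 8080)
conn.request('GET', '/example')            # path '/example' resolves to method '_example'
response = conn.getresponse()
assert response.status == httplib.OK
assert response.getheader('X-Foo') == 'libcloud'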
basilfx/kippo-extra
kippo_extra/commands/which.py
command_which.call
python
def call(self):
    if not len(self.args) or 'PATH' not in self.env:
        return

    for f in self.args:
        for path in self.env['PATH'].split(':'):
            resolved = self.fs.resolve_path(f, path)

            if self.fs.exists(resolved):
                self.writeln("%s/%s" % (path, f))
                continue
Look up all the arguments on PATH and print each (first) result
https://github.com/basilfx/kippo-extra/blob/a03f8b94877efcb7fbf458226b73ed06ce732b4a/kippo_extra/commands/which.py#L12-L26
from kippo_extra.utils import ExtendedHoneyPotCommand

commands = {}


class command_which(ExtendedHoneyPotCommand):
    resolve_args = False
MIT License
ota-insight/djangosaml2idp
djangosaml2idp/processors.py
BaseProcessor.enable_multifactor
python
def enable_multifactor(self, user) -> bool:
    return False
Check if this user should use a second authentication system
https://github.com/ota-insight/djangosaml2idp/blob/0b4325782a6fd2c034677b5923041b5df10087ec/djangosaml2idp/processors.py#L101-L104
import hashlib import logging from typing import Dict, Type from django.conf import settings from django.contrib.auth import get_user_model from django.core.exceptions import ImproperlyConfigured, ValidationError from django.utils.module_loading import import_string from django.utils.translation import gettext as _ from saml2.saml import (NAMEID_FORMAT_EMAILADDRESS, NAMEID_FORMAT_ENCRYPTED, NAMEID_FORMAT_ENTITY, NAMEID_FORMAT_KERBEROS, NAMEID_FORMAT_PERSISTENT, NAMEID_FORMAT_TRANSIENT, NAMEID_FORMAT_UNSPECIFIED, NAMEID_FORMAT_WINDOWSDOMAINQUALIFIEDNAME, NAMEID_FORMAT_X509SUBJECTNAME) from .models import ServiceProvider, PersistentId logger = logging.getLogger(__name__) User = get_user_model() class NameIdBuilder: format_mappings = { NAMEID_FORMAT_UNSPECIFIED: 'get_nameid_unspecified', NAMEID_FORMAT_TRANSIENT: 'get_nameid_transient', NAMEID_FORMAT_PERSISTENT: 'get_nameid_persistent', NAMEID_FORMAT_EMAILADDRESS: 'get_nameid_email', NAMEID_FORMAT_X509SUBJECTNAME: None, NAMEID_FORMAT_WINDOWSDOMAINQUALIFIEDNAME: None, NAMEID_FORMAT_KERBEROS: None, NAMEID_FORMAT_ENTITY: None, NAMEID_FORMAT_ENCRYPTED: None } @classmethod def _get_nameid_opaque(cls, user_id: str, salt: bytes = b'', *args, **kwargs) -> str: salted_value = user_id.encode() + salt opaque = hashlib.sha256(salted_value) return opaque.hexdigest() @classmethod def get_nameid_persistent(cls, user_id: str, sp: ServiceProvider, user: settings.AUTH_USER_MODEL, *args, **kwargs) -> str: return str(PersistentId.objects.get_or_create(sp=sp, user=user)[0].persistent_id) @classmethod def get_nameid_email(cls, user_id: str, user: settings.AUTH_USER_MODEL = None, **kwargs) -> str: if '@' not in user_id: raise Exception(f"user_id {user_id} does not contain the '@' symbol, so is not a valid NameID Email address format.") return user_id @classmethod def get_nameid_transient(cls, user_id: str, **kwargs) -> str: raise NotImplementedError('Not implemented yet') @classmethod def get_nameid_unspecified(cls, user_id: str, **kwargs) -> str: return user_id @classmethod def get_nameid(cls, user_id: str, nameid_format: str, **kwargs) -> str: method = cls.format_mappings.get(nameid_format) if not method: raise NotImplementedError(f'{nameid_format} has not been mapped in NameIdBuilder.format_mappings') if not hasattr(cls, method): raise NotImplementedError(f'{nameid_format} has not been implemented NameIdBuilder methods') name_id = getattr(cls, method)(user_id, **kwargs) return name_id class BaseProcessor: def __init__(self, entity_id: str): self._entity_id = entity_id def has_access(self, request) -> bool: return True
Apache License 2.0
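A sketch of overriding the hook in a deployment-specific processor; the class name and the is_staff rule are illustrative assumptions, not part of the package.

class StaffOnlyProcessor(BaseProcessor):
    def enable_multifactor(self, user) -> bool:
        # require a second factor for staff accounts only
        return bool(getattr(user, "is_staff", False))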
avyfain/conway
conway/game.py
Board.constrain
python
def constrain(self):
    self.points = set(cell for cell in self.points if self.contains(cell))
    return self
Removes any points outside of the cell's bounding box.
https://github.com/avyfain/conway/blob/94f40ec2815bfe4c5ffc37efff92558856ce693b/conway/game.py#L85-L90
from itertools import chain, product NEIGHBOR_SET = ((0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, 0), (1, -1), (1, 1)) def neighbors(point): x, y = point return set((x + dx, y + dy) for dx, dy in NEIGHBOR_SET) class Board(object): def __init__(self, pattern, size=100): self.size = size -1 self.points = set() self.pattern = pattern self.frame_num = 0 self.frames = [] i, j = 0, 0 for char in pattern: if char == '\n': i = 0 j += 1 elif char == '1': i += 1 elif char == '0': self.points.add((i, j)) i += 1 def __hash__(self): return hash(frozenset(self.points)) def __eq__(self, other): return other and hash(self) == hash(other) def advance(self): newstate = set() recalc = self.points | set(chain(*(neighbors(point) for point in self.points))) for point in recalc: count = len(self.points & neighbors(point)) if count == 3 or (count == 2 and point in self.points): newstate.add(point) self.points = newstate return self def center(self): center_x, center_y = (sum(dim)/len(self.points) - self.size/2 for dim in zip(*self.points)) self.points = {(x-center_x, y-center_y) for x, y in self.points} return self
MIT License
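A usage sketch (import path assumed from the repository layout); note that in the pattern string a '0' marks a live cell and a '1' an empty one, and that constrain() relies on the Board.contains() bounding-box check from the full module.

from conway.game import Board

board = Board("111\n000\n111", size=10)    # one live row of three cells (a blinker)
board.advance()                            # the blinker flips to a vertical line
board.constrain()                          # drop anything outside the 10x10 box
print(sorted(board.points))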
nttpc/anymotion-cli
anymotion_cli/settings.py
Settings._to_int_with_check
python
def _to_int_with_check(
    self, value: Union[str, int], name: str, min_value: int
) -> int:
    try:
        x = int(value)
    except ValueError:
        message = f"The {name} value is invalid: {value}"
        raise SettingsValueError(message)

    if x < min_value:
        th = min_value - 1
        message = f"The {name} value must be greater than {th}: {x}"
        raise SettingsValueError(message)

    return x
Convert value to int.

Returns:
    The converted value.

Raises:
    SettingsValueError: If conversion is not possible or value is less than min_value.
https://github.com/nttpc/anymotion-cli/blob/e18656aad3972350f1ed846a5fee6c512b54cfcd/anymotion_cli/settings.py#L139-L160
import os from configparser import ConfigParser, SectionProxy from distutils.util import strtobool from pathlib import Path from typing import Any, Optional, Tuple, Union from .config import ( API_URL, IS_DOWNLOAD, IS_OPEN, POLLING_INTERVAL, TIMEOUT, get_app_dir, ) from .exceptions import SettingsValueError class Settings(object): def __init__(self, profile_name: str, use_env: bool = True): settings_dir = get_app_dir() config_file = settings_dir / "config" credentials_file = settings_dir / "credentials" self._config = _Profile(config_file, profile_name) self._credentials = _Profile(credentials_file, profile_name) self._env = _Environment(use_env) @property def is_ok(self) -> bool: return self.client_id is not None and self.client_secret is not None @property def client_id(self) -> Optional[str]: value_from_env = self._env.anymotion_client_id value_from_file = self._credentials.anymotion_client_id return value_from_env or value_from_file @property def client_secret(self) -> Optional[str]: value_from_env = self._env.anymotion_client_secret value_from_file = self._credentials.anymotion_client_secret return value_from_env or value_from_file @property def api_url(self) -> str: return self._config.anymotion_api_url or API_URL @property def interval(self) -> int: interval = self._config.polling_interval or POLLING_INTERVAL return self._to_int_with_check(interval, "polling_interval", 1) @property def timeout(self) -> int: timeout = self._config.timeout or TIMEOUT return self._to_int_with_check(timeout, "timeout", 1) @property def is_download(self) -> Optional[bool]: is_download = self._config.is_download or IS_DOWNLOAD return self._to_optional_bool_with_check(is_download, "is_download") @property def is_open(self) -> Optional[bool]: is_open = self._config.is_open or IS_OPEN return self._to_optional_bool_with_check(is_open, "is_open") def write_config(self, api_url: str) -> None: if api_url == self.api_url: return if api_url is None or "anymotion" not in api_url: raise SettingsValueError("api_url is invald.") self._config.anymotion_api_url = api_url self._config.save() def write_credentials(self, client_id: str, client_secret: str) -> None: if client_id is None or client_secret is None: raise SettingsValueError("client_id or client_secret is invald.") self._credentials.anymotion_client_id = client_id self._credentials.anymotion_client_secret = client_secret self._credentials.save()
MIT License
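Expected behaviour of the helper above; the import paths and the pre-built settings instance are assumptions for illustration.

from anymotion_cli.settings import Settings
from anymotion_cli.exceptions import SettingsValueError

settings = Settings("default", use_env=False)
settings._to_int_with_check("5", "timeout", 1)     # -> 5
try:
    settings._to_int_with_check("abc", "timeout", 1)
except SettingsValueError as exc:
    print(exc)   # The timeout value is invalid: abc
try:
    settings._to_int_with_check(0, "timeout", 1)
except SettingsValueError as exc:
    print(exc)   # The timeout value must be greater than 0: 0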
armandmcqueen/tensorpack-mask-rcnn
tensorpack/input_source/input_source.py
QueueInput.refill_queue
python
def refill_queue(self):
    self.thread.pause()

    opt = tf.RunOptions()
    opt.timeout_in_ms = 2000
    sess = tf.get_default_session()
    try:
        while True:
            sess.run(self._dequeue_op, options=opt)
    except tf.errors.DeadlineExceededError:
        pass

    self.thread.reinitialize_dataflow()
    self.thread.resume()
Clear the queue, then call dataflow.__iter__() again and fill into the queue.
https://github.com/armandmcqueen/tensorpack-mask-rcnn/blob/a98bb06401c5c76f98ad77349218fe3f0fe17076/tensorpack/input_source/input_source.py#L227-L245
import threading from contextlib import contextmanager from itertools import chain import tensorflow as tf from six.moves import range, zip from ..callbacks.base import Callback, CallbackFactory from ..callbacks.graph import RunOp from ..dataflow import DataFlow, MapData, RepeatedData from ..tfutils.common import get_op_tensor_name from ..tfutils.dependency import dependency_of_fetches from ..tfutils.summary import add_moving_summary from ..tfutils.tower import get_current_tower_context from ..utils import logger from ..utils.concurrency import ShareSessionThread from .input_source_base import InputSource try: from tensorflow.python.ops.data_flow_ops import StagingArea except ImportError: pass __all__ = ['PlaceholderInput', 'FeedInput', 'FeedfreeInput', 'QueueInput', 'BatchQueueInput', 'DummyConstantInput', 'TensorInput', 'ZMQInput', 'TFDatasetInput', 'StagingInput'] def _get_reset_callback(df): return CallbackFactory(setup_graph=lambda _: df.reset_state()) def _make_feeds(placeholders, datapoint): assert len(datapoint) == len(placeholders), "Size of datapoint and placeholders are different: {} != {}".format( len(datapoint), len(placeholders)) if isinstance(datapoint, (list, tuple)): return dict(zip(placeholders, datapoint)) elif isinstance(datapoint, dict): ret = {p: datapoint[p.op.name] for p in placeholders} return ret else: raise TypeError("Got a datapoint of type {}!".format(type(datapoint))) class PlaceholderInput(InputSource): def __init__(self): pass def _setup(self, inputs): self._all_placehdrs = [v.build_placeholder_reuse() for v in inputs] def _get_input_tensors(self): return self._all_placehdrs class FeedInput(InputSource): class _FeedCallback(Callback): def __init__(self, ds, placeholders): self._ds = ds self._itr = self._ds.__iter__() self._placeholders = placeholders def _before_run(self, _): dp = next(self._itr) assert len(dp) == len(self._placeholders), "[FeedInput] datapoints and inputs are of different length!" feed = _make_feeds(self._placeholders, dp) return tf.train.SessionRunArgs(fetches=[], feed_dict=feed) def _reset(self): self._itr = self._ds.__iter__() def __init__(self, ds, infinite=True): if not isinstance(ds, DataFlow): raise ValueError("FeedInput takes a DataFlow! 
Got {}".format(ds)) self.ds = ds if infinite: self._iter_ds = RepeatedData(self.ds, -1) else: self._iter_ds = self.ds def _size(self): return len(self.ds) def _setup(self, inputs): self._all_placehdrs = [v.build_placeholder_reuse() for v in inputs] self._cb = self._FeedCallback(self._iter_ds, self._all_placehdrs) def _get_input_tensors(self): return self._all_placehdrs def _reset_state(self): self._cb._reset() def _get_callbacks(self): return [self._cb, _get_reset_callback(self._iter_ds)] class FeedfreeInput(InputSource): def _reset_state(self): pass class EnqueueThread(ShareSessionThread): def __init__(self, queue, ds, placehdrs): super(EnqueueThread, self).__init__() self.name = 'EnqueueThread ' + queue.name self.daemon = True self.dataflow = ds self.queue = queue self.placehdrs = placehdrs self.op = self.queue.enqueue(self.placehdrs) self.close_op = self.queue.close(cancel_pending_enqueues=True) self._running = threading.Event() self._running.set() def run(self): with self.default_sess(): try: self.reinitialize_dataflow() while True: if not self._running.is_set(): self._running.wait() dp = next(self._itr) feed = _make_feeds(self.placehdrs, dp) self.op.run(feed_dict=feed) except (tf.errors.CancelledError, tf.errors.OutOfRangeError): pass except Exception as e: if isinstance(e, RuntimeError) and 'closed Session' in str(e): pass else: logger.exception("Exception in {}:".format(self.name)) finally: try: self.close_op.run() except Exception: pass logger.info("{} Exited.".format(self.name)) def reinitialize_dataflow(self): self._itr = self.dataflow.__iter__() def pause(self): self._running.clear() def resume(self): self._running.set() class QueueInput(FeedfreeInput): def __init__(self, ds, queue=None): if not isinstance(ds, DataFlow): raise ValueError("QueueInput takes a DataFlow! Got {}".format(ds)) self.queue = queue self.ds = ds self._inf_ds = RepeatedData(ds, -1) self._started = False def _size(self): return len(self.ds) def _setup(self, inputs): self._input_placehdrs = [v.build_placeholder_reuse() for v in inputs] assert len(self._input_placehdrs) > 0, "QueueInput has to be used with some inputs!" with self.cached_name_scope(): if self.queue is None: self.queue = tf.FIFOQueue( 50, [x.dtype for x in self._input_placehdrs], name='input_queue') logger.info("Setting up the queue '{}' for CPU prefetching ...".format(self.queue.name)) self.thread = EnqueueThread(self.queue, self._inf_ds, self._input_placehdrs) self._dequeue_op = self.queue.dequeue(name='dequeue_for_reset')
Apache License 2.0
modoboa/modoboa-amavis
modoboa_amavis/management/commands/qcleanup.py
Command.add_arguments
python
def add_arguments(self, parser):
    parser.add_argument(
        "--debug", action="store_true", default=False,
        help="Activate debug output")
    parser.add_argument(
        "--verbose", action="store_true", default=False,
        help="Display informational messages")
Add extra arguments to command line.
https://github.com/modoboa/modoboa-amavis/blob/18e5a210ac2eb007ce28d70675f4188d93e1b822/modoboa_amavis/management/commands/qcleanup.py#L20-L27
from __future__ import print_function, unicode_literals

import time

from django.core.management.base import BaseCommand
from django.db.models import Count

from modoboa.parameters import tools as param_tools

from ...models import Maddr, Msgrcpt, Msgs
from ...modo_extension import Amavis


class Command(BaseCommand):

    args = ""
    help = "Amavis quarantine cleanup"
MIT License
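The flags registered above map onto Django's management command API; running inside a configured Modoboa project is assumed.

from django.core.management import call_command

call_command("qcleanup", debug=True, verbose=True)
# shell equivalent: python manage.py qcleanup --debug --verbose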
ooici/pyon
pyon/ion/resregistry.py
ResourceRegistry.read_attachment
python
def read_attachment(self, attachment_id='', include_content=False):
    attachment = self.read(attachment_id)
    if not isinstance(attachment, Attachment):
        raise Inconsistent("Object in datastore must be Attachment, not %s" % type(attachment))

    if include_content:
        attachment.content = self.rr_store.read_attachment(
            attachment_id, attachment_name=self.DEFAULT_ATTACHMENT_NAME)
        if attachment.attachment_type == AttachmentType.BLOB:
            if type(attachment.content) is not str:
                raise BadRequest("Attachment content must be str")

    return attachment
Returns the metadata of an attachment. Unless indicated otherwise the content returned is only a name to the actual attachment content.
https://github.com/ooici/pyon/blob/122c629290d27f32f2f41dafd5c12469295e8acf/pyon/ion/resregistry.py#L395-L411
__author__ = 'Michael Meisinger' from pyon.core import bootstrap from pyon.core.bootstrap import IonObject, CFG from pyon.core.exception import BadRequest, NotFound, Inconsistent from pyon.core.object import IonObjectBase from pyon.core.registry import getextends from pyon.datastore.datastore import DataStore from pyon.datastore.datastore_query import DatastoreQueryBuilder, DQ from pyon.ion.event import EventPublisher from pyon.ion.identifier import create_unique_resource_id, create_unique_association_id from pyon.ion.resource import LCS, LCE, PRED, RT, AS, OT, get_restype_lcsm, is_resource, ExtendedResourceContainer, lcstate, lcsplit, Predicates, create_access_args from pyon.ion.process import get_ion_actor_id from pyon.util.containers import get_ion_ts from pyon.util.log import log from interface.objects import Attachment, AttachmentType, ResourceModificationType class ResourceRegistry(object): DEFAULT_ATTACHMENT_NAME = 'resource.attachment' def __init__(self, datastore_manager=None, container=None): self.container = container or bootstrap.container_instance datastore_manager = datastore_manager or self.container.datastore_manager self.rr_store = datastore_manager.get_datastore(DataStore.DS_RESOURCES, DataStore.DS_PROFILE.RESOURCES) self.name = 'container_resource_registry' self.id = 'container_resource_registry' self.event_pub = EventPublisher() self.superuser_actors = None def start(self): pass def stop(self): self.close() def close(self): self.rr_store.close() def create(self, object=None, actor_id=None, object_id=None, attachments=None): if object is None: raise BadRequest("Object not present") if not isinstance(object, IonObjectBase): raise BadRequest("Object is not an IonObject") if not is_resource(object): raise BadRequest("Object is not a Resource") if "_id" in object: raise BadRequest("Object must not contain _id") if "_rev" in object: raise BadRequest("Object must not contain _rev") lcsm = get_restype_lcsm(object.type_) object.lcstate = lcsm.initial_state if lcsm else LCS.DEPLOYED object.availability = lcsm.initial_availability if lcsm else AS.AVAILABLE cur_time = get_ion_ts() object.ts_created = cur_time object.ts_updated = cur_time if object_id is None: new_res_id = create_unique_resource_id() else: new_res_id = object_id res = self.rr_store.create(object, new_res_id, attachments=attachments) res_id, rev = res if actor_id and actor_id != 'anonymous': log.debug("Associate resource_id=%s with owner=%s", res_id, actor_id) self.create_association(res_id, PRED.hasOwner, actor_id) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceModifiedEvent", origin=res_id, origin_type=object.type_, sub_type="CREATE", mod_type=ResourceModificationType.CREATE) return res def create_mult(self, res_list, actor_id=None): cur_time = get_ion_ts() id_list = [] for resobj in res_list: lcsm = get_restype_lcsm(resobj.type_) resobj.lcstate = lcsm.initial_state if lcsm else LCS.DEPLOYED resobj.availability = lcsm.initial_availability if lcsm else AS.AVAILABLE resobj.ts_created = cur_time resobj.ts_updated = cur_time id_list.append(resobj._id if "_id" in resobj else create_unique_resource_id()) res = self.rr_store.create_mult(res_list, id_list, allow_ids=True) rid_list = [(rid, rrv) for success, rid, rrv in res] if actor_id and actor_id != 'anonymous': assoc_list = [] for resobj, (rid, rrv) in zip(res_list, rid_list): resobj._id = rid assoc_list.append((resobj, PRED.hasOwner, actor_id)) self.create_association_mult(assoc_list) for resobj, 
(rid, rrv) in zip(res_list, rid_list): self.event_pub.publish_event(event_type="ResourceModifiedEvent", origin=rid, origin_type=resobj.type_, mod_type=ResourceModificationType.CREATE) return rid_list def read(self, object_id='', rev_id=''): if not object_id: raise BadRequest("The object_id parameter is an empty string") return self.rr_store.read(object_id, rev_id) def read_mult(self, object_ids=None, strict=True): if object_ids is None: raise BadRequest("The object_ids parameter is empty") return self.rr_store.read_mult(object_ids, strict=strict) def update(self, object): if object is None: raise BadRequest("Object not present") if not hasattr(object, "_id") or not hasattr(object, "_rev"): raise BadRequest("Object does not have required '_id' or '_rev' attribute") res_obj = self.read(object._id) object.ts_updated = get_ion_ts() if res_obj.lcstate != object.lcstate or res_obj.availability != object.availability: log.warn("Cannot modify %s life cycle state or availability in update current=%s/%s given=%s/%s. " + "DO NOT REUSE THE SAME OBJECT IN CREATE THEN UPDATE", type(res_obj).__name__, res_obj.lcstate, res_obj.availability, object.lcstate, object.availability) object.lcstate = res_obj.lcstate object.availability = res_obj.availability self.event_pub.publish_event(event_type="ResourceModifiedEvent", origin=object._id, origin_type=object.type_, sub_type="UPDATE", mod_type=ResourceModificationType.UPDATE) return self.rr_store.update(object) def delete(self, object_id='', del_associations=False): res_obj = self.read(object_id) if not res_obj: raise NotFound("Resource %s does not exist" % object_id) if not del_associations: self._delete_owners(object_id) if del_associations: assoc_ids = self.find_associations(anyside=object_id, id_only=True) self.rr_store.delete_doc_mult(assoc_ids, object_type="Association") elif self._is_in_association(object_id): log.warn("Deleting object %s that still has associations" % object_id) res = self.rr_store.delete(object_id) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceModifiedEvent", origin=res_obj._id, origin_type=res_obj.type_, sub_type="DELETE", mod_type=ResourceModificationType.DELETE) return res def _delete_owners(self, resource_id): owners, assocs = self.rr_store.find_objects(resource_id, PRED.hasOwner, RT.ActorIdentity, id_only=True) for aid in assocs: self.delete_association(aid) def retire(self, resource_id): return self.execute_lifecycle_transition(resource_id, LCE.RETIRE) def lcs_delete(self, resource_id): res_obj = self.read(resource_id) old_state = res_obj.lcstate if old_state == LCS.DELETED: raise BadRequest("Resource id=%s already DELETED" % (resource_id)) res_obj.lcstate = LCS.DELETED res_obj.ts_updated = get_ion_ts() updres = self.rr_store.update(res_obj) log.debug("retire(res_id=%s). Change %s_%s to %s_%s", resource_id, old_state, res_obj.availability, res_obj.lcstate, res_obj.availability) assocs = self.find_associations(anyside=resource_id, id_only=False) for assoc in assocs: assoc.retired = True if assocs: self.rr_store.update_mult(assocs) log.debug("lcs_delete(res_id=%s). 
Retired %s associations", resource_id, len(assocs)) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceLifecycleEvent", origin=res_obj._id, origin_type=res_obj.type_, sub_type="%s.%s" % (res_obj.lcstate, res_obj.availability), lcstate=res_obj.lcstate, availability=res_obj.availability, lcstate_before=old_state, availability_before=res_obj.availability) def execute_lifecycle_transition(self, resource_id='', transition_event=''): if transition_event == LCE.DELETE: return self.lcs_delete(resource_id) res_obj = self.read(resource_id) old_lcstate = res_obj.lcstate old_availability = res_obj.availability if transition_event == LCE.RETIRE: if res_obj.lcstate == LCS.RETIRED or res_obj.lcstate == LCS.DELETED: raise BadRequest("Resource id=%s, type=%s, lcstate=%s, availability=%s has no transition for event %s" % ( resource_id, res_obj.type_, old_lcstate, old_availability, transition_event)) res_obj.lcstate = LCS.RETIRED else: restype = res_obj.type_ restype_workflow = get_restype_lcsm(restype) if not restype_workflow: raise BadRequest("Resource id=%s type=%s has no lifecycle" % (resource_id, restype)) new_lcstate = restype_workflow.get_lcstate_successor(old_lcstate, transition_event) new_availability = restype_workflow.get_availability_successor(old_availability, transition_event) if not new_lcstate and not new_availability: raise BadRequest("Resource id=%s, type=%s, lcstate=%s, availability=%s has no transition for event %s" % ( resource_id, restype, old_lcstate, old_availability, transition_event)) if new_lcstate: res_obj.lcstate = new_lcstate if new_availability: res_obj.availability = new_availability res_obj.ts_updated = get_ion_ts() self.rr_store.update(res_obj) log.debug("execute_lifecycle_transition(res_id=%s, event=%s). 
Change %s_%s to %s_%s", resource_id, transition_event, old_lcstate, old_availability, res_obj.lcstate, res_obj.availability) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceLifecycleEvent", origin=res_obj._id, origin_type=res_obj.type_, sub_type="%s.%s" % (res_obj.lcstate, res_obj.availability), lcstate=res_obj.lcstate, availability=res_obj.availability, lcstate_before=old_lcstate, availability_before=old_availability, transition_event=transition_event) return "%s_%s" % (res_obj.lcstate, res_obj.availability) def set_lifecycle_state(self, resource_id='', target_lcstate=''): if not target_lcstate: raise BadRequest("Bad life-cycle state %s" % target_lcstate) if target_lcstate.startswith(LCS.DELETED): self.lcs_delete(resource_id) if target_lcstate.startswith(LCS.RETIRED): self.execute_lifecycle_transition(resource_id, LCE.RETIRE) res_obj = self.read(resource_id) old_lcstate = res_obj.lcstate old_availability = res_obj.availability restype = res_obj.type_ restype_workflow = get_restype_lcsm(restype) if not restype_workflow: raise BadRequest("Resource id=%s type=%s has no lifecycle" % (resource_id, restype)) if '_' in target_lcstate: target_lcs, target_av = lcsplit(target_lcstate) if target_lcs not in LCS: raise BadRequest("Unknown life-cycle state %s" % target_lcs) if target_av and target_av not in AS: raise BadRequest("Unknown life-cycle availability %s" % target_av) elif target_lcstate in LCS: target_lcs, target_av = target_lcstate, res_obj.availability elif target_lcstate in AS: target_lcs, target_av = res_obj.lcstate, target_lcstate else: raise BadRequest("Unknown life-cycle state %s" % target_lcstate) lcs_successors = restype_workflow.get_lcstate_successors(old_lcstate) av_successors = restype_workflow.get_availability_successors(old_availability) found_lcs, found_av = target_lcs in lcs_successors.values(), target_av in av_successors.values() if not found_lcs and not found_av: raise BadRequest("Target state %s not reachable for resource in state %s_%s" % ( target_lcstate, old_lcstate, old_availability)) res_obj.lcstate = target_lcs res_obj.availability = target_av res_obj.ts_updated = get_ion_ts() updres = self.rr_store.update(res_obj) log.debug("set_lifecycle_state(res_id=%s, target=%s). 
Change %s_%s to %s_%s", resource_id, target_lcstate, old_lcstate, old_availability, res_obj.lcstate, res_obj.availability) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceLifecycleEvent", origin=res_obj._id, origin_type=res_obj.type_, sub_type="%s.%s" % (res_obj.lcstate, res_obj.availability), lcstate=res_obj.lcstate, availability=res_obj.availability, lcstate_before=old_lcstate, availability_before=old_availability) def create_attachment(self, resource_id='', attachment=None, actor_id=None): if attachment is None: raise BadRequest("Object not present") if not isinstance(attachment, Attachment): raise BadRequest("Object is not an Attachment") attachment.object_id = resource_id if resource_id else "" attachment.attachment_size = -1 attachment_content = None if attachment.attachment_type == AttachmentType.BLOB: if type(attachment.content) is not str: raise BadRequest("Attachment content must be str") attachment.attachment_size = len(attachment.content) attachment_content = attachment.content elif attachment.attachment_type == AttachmentType.ASCII: if type(attachment.content) is not str: raise BadRequest("Attachment content must be str") attachment.attachment_size = len(attachment.content) attachment_content = attachment.content elif attachment.attachment_type == AttachmentType.OBJECT: raise BadRequest("AttachmentType.OBJECT is not supported currently") elif attachment.attachment_type == AttachmentType.REFERENCE: if not isinstance(attachment.content, basestring): raise BadRequest("Attachment content must be binary string") attachment.attachment_size = len(attachment.content) attachment_content = attachment.content else: raise BadRequest("Unknown attachment-type: %s" % attachment.attachment_type) attachment.content = '' content = dict(data=attachment_content, content_type=attachment.content_type) att_id, _ = self.create(attachment, attachments={self.DEFAULT_ATTACHMENT_NAME: content}, actor_id=actor_id) if resource_id: self.create_association(resource_id, PRED.hasAttachment, att_id) return att_id
BSD 2-Clause Simplified License
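A usage sketch, assuming `rr` is a started ResourceRegistry and `att_id` was returned earlier by create_attachment:

meta = rr.read_attachment(att_id)                        # metadata only, content stays ''
full = rr.read_attachment(att_id, include_content=True)  # content loaded from the datastore
print(full.attachment_size, full.content_type)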
python-acoustics/python-acoustics
acoustics/room.py
mean_alpha
python
def mean_alpha(alphas, surfaces):
    return np.average(alphas, axis=0, weights=surfaces)
Calculate mean of absorption coefficients.

:param alphas: Absorption coefficients :math:`\\alpha`.
:param surfaces: Surfaces :math:`S`.
https://github.com/python-acoustics/python-acoustics/blob/fbc87454422c41e1a39e282d7680126a6d8014dd/acoustics/room.py#L19-L26
import numpy as np
from scipy.io import wavfile
from scipy import stats

from acoustics.utils import _is_1d
from acoustics.signal import bandpass
from acoustics.bands import (_check_band_type, octave_low, octave_high,
                             third_low, third_high)

SOUNDSPEED = 343.0
BSD 3-Clause New or Revised License
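Worked example: two materials over three frequency bands, with surface areas as weights (the import path follows the module path shown above).

import numpy as np
from acoustics.room import mean_alpha

alphas = np.array([[0.10, 0.20, 0.30],   # material A
                   [0.60, 0.70, 0.80]])  # material B
surfaces = np.array([30.0, 10.0])        # surface areas in m^2

mean_alpha(alphas, surfaces)
# array([0.225, 0.325, 0.425])   e.g. (30*0.10 + 10*0.60) / 40 = 0.225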
largelymfs/topical_word_embeddings
TWE-1/gensim/corpora/svmlightcorpus.py
SvmLightCorpus.doc2line
python
def doc2line(doc, label=0):
    pairs = ' '.join("%i:%s" % (termid + 1, termval) for termid, termval in doc)
    return "%s %s\n" % (label, pairs)
Output the document in SVMlight format, as a string. Inverse function to `line2doc`.
https://github.com/largelymfs/topical_word_embeddings/blob/1ae3d15d0afcd3fcd39cc81eec4ad9463413a9f6/TWE-1/gensim/corpora/svmlightcorpus.py#L126-L131
from __future__ import with_statement import logging from gensim import utils from gensim.corpora import IndexedCorpus logger = logging.getLogger('gensim.corpora.svmlightcorpus') class SvmLightCorpus(IndexedCorpus): def __init__(self, fname, store_labels=True): IndexedCorpus.__init__(self, fname) logger.info("loading corpus from %s" % fname) self.fname = fname self.length = None self.store_labels = store_labels self.labels = [] def __iter__(self): lineno = -1 self.labels = [] with utils.smart_open(self.fname) as fin: for lineno, line in enumerate(fin): doc = self.line2doc(line) if doc is not None: if self.store_labels: self.labels.append(doc[1]) yield doc[0] self.length = lineno + 1 @staticmethod def save_corpus(fname, corpus, id2word=None, labels=False, metadata=False): logger.info("converting corpus to SVMlight format: %s" % fname) offsets = [] with utils.smart_open(fname, 'wb') as fout: for docno, doc in enumerate(corpus): label = labels[docno] if labels else 0 offsets.append(fout.tell()) fout.write(utils.to_utf8(SvmLightCorpus.doc2line(doc, label))) return offsets def docbyoffset(self, offset): with utils.smart_open(self.fname) as f: f.seek(offset) return self.line2doc(f.readline())[0] def line2doc(self, line): line = utils.to_unicode(line) line = line[: line.find('#')].strip() if not line: return None parts = line.split() if not parts: raise ValueError('invalid line format in %s' % self.fname) target, fields = parts[0], [part.rsplit(':', 1) for part in parts[1:]] doc = [(int(p1) - 1, float(p2)) for p1, p2 in fields if p1 != 'qid'] return doc, target @staticmethod
MIT License
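The SVMlight line produced for a small sparse document; term ids are 0-based on input and written out 1-based.

doc = [(0, 1), (2, 3), (5, 0.5)]
line = SvmLightCorpus.doc2line(doc, label=1)
# line == "1 1:1 3:3 6:0.5\n"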
opentoallctf/ota-challenge-bot
util/slack_wrapper.py
SlackWrapper.invite_user
python
def invite_user(self, users, channel, is_private=False):
    users = [users] if not type(users) == list else users
    api_call = "conversations.invite"

    return self.client.api_call(api_call, channel=channel, users=users)
Invite the given user(s) to the given channel.
https://github.com/opentoallctf/ota-challenge-bot/blob/6deea8c059d28ddb86dce277158a39a5ad9517e4/util/slack_wrapper.py#L35-L42
import json
import time

from slackclient import SlackClient

from util.util import load_json


class SlackWrapper:
    def __init__(self, api_key):
        self.api_key = api_key
        self.client = SlackClient(self.api_key)
        self.connected = self.client.rtm_connect(auto_reconnect=True)
        self.server = None
        self.username = None
        self.user_id = None

        if self.connected:
            self.server = self.client.server
            self.username = self.server.username
            self.user_id = self.server.login_data.get("self").get("id")

    def read(self):
        return self.client.rtm_read()
MIT License
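A usage sketch: a single user id is wrapped into a list before the API call; the token and ids below are placeholders.

slack = SlackWrapper("xoxb-placeholder-token")
slack.invite_user("U123ABC", channel="C456DEF")                  # one user
slack.invite_user(["U123ABC", "U789GHI"], channel="C456DEF")     # several users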
pawamoy/pytkdocs
src/pytkdocs/serializer.py
serialize_annotated_object
python
def serialize_annotated_object(obj: AnnotatedObject) -> dict:
    return {"description": obj.description, "annotation": annotation_to_string(obj.annotation)}
Serialize an instance of [`AnnotatedObject`][pytkdocs.parsers.docstrings.base.AnnotatedObject].

Arguments:
    obj: The object to serialize.

Returns:
    A JSON-serializable dictionary.
https://github.com/pawamoy/pytkdocs/blob/a298616c2db23833b4877c67725cbbfde5a43c78/src/pytkdocs/serializer.py#L75-L85
import inspect import re from typing import Any, Match, Optional, Pattern from pytkdocs.objects import Object, Source from pytkdocs.parsers.docstrings.base import AnnotatedObject, Attribute, Parameter, Section try: from typing import GenericMeta except ImportError: class GenericMeta(type): RE_OPTIONAL: Pattern = re.compile(r"Union\[(.+), NoneType\]") RE_FORWARD_REF: Pattern = re.compile(r"_?ForwardRef\('([^']+)'\)") def rebuild_optional(match: Match) -> str: group = match.group(1) brackets_level = 0 for char in group: if char == "," and brackets_level == 0: return f"Union[{group}]" if char == "[": brackets_level += 1 elif char == "]": brackets_level -= 1 return f"Optional[{group}]" def annotation_to_string(annotation: Any) -> str: if annotation is inspect.Signature.empty: return "" if inspect.isclass(annotation) and not isinstance(annotation, GenericMeta): string = annotation.__name__ else: string = str(annotation).replace("typing.", "") string = RE_FORWARD_REF.sub(lambda match: match.group(1), string) string = RE_OPTIONAL.sub(rebuild_optional, string) return string
ISC License
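Illustrative input and output; the AnnotatedObject(annotation, description) construction is an assumption about the parser base class.

from typing import Optional

obj = AnnotatedObject(Optional[str], "The object to serialize.")
serialize_annotated_object(obj)
# {'description': 'The object to serialize.', 'annotation': 'Optional[str]'}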
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_10/api/volume_snapshots_api.py
VolumeSnapshotsApi.api210_volume_snapshots_tags_get_with_http_info
python
def api210_volume_snapshots_tags_get_with_http_info(
    self,
    authorization=None,
    x_request_id=None,
    continuation_token=None,
    filter=None,
    limit=None,
    namespaces=None,
    offset=None,
    resource_destroyed=None,
    resource_ids=None,
    resource_names=None,
    sort=None,
    total_item_count=None,
    async_req=False,
    _return_http_data_only=False,
    _preload_content=True,
    _request_timeout=None,
):
    if namespaces is not None:
        if not isinstance(namespaces, list):
            namespaces = [namespaces]
    if resource_ids is not None:
        if not isinstance(resource_ids, list):
            resource_ids = [resource_ids]
    if resource_names is not None:
        if not isinstance(resource_names, list):
            resource_names = [resource_names]
    if sort is not None:
        if not isinstance(sort, list):
            sort = [sort]
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}

    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]

    if 'limit' in params and params['limit'] < 1:
        raise ValueError("Invalid value for parameter `limit` when calling `api210_volume_snapshots_tags_get`, must be a value greater than or equal to `1`")
    if 'offset' in params and params['offset'] < 0:
        raise ValueError("Invalid value for parameter `offset` when calling `api210_volume_snapshots_tags_get`, must be a value greater than or equal to `0`")
    collection_formats = {}
    path_params = {}

    query_params = []
    if 'continuation_token' in params:
        query_params.append(('continuation_token', params['continuation_token']))
    if 'filter' in params:
        query_params.append(('filter', params['filter']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'namespaces' in params:
        query_params.append(('namespaces', params['namespaces']))
        collection_formats['namespaces'] = 'csv'
    if 'offset' in params:
        query_params.append(('offset', params['offset']))
    if 'resource_destroyed' in params:
        query_params.append(('resource_destroyed', params['resource_destroyed']))
    if 'resource_ids' in params:
        query_params.append(('resource_ids', params['resource_ids']))
        collection_formats['resource_ids'] = 'csv'
    if 'resource_names' in params:
        query_params.append(('resource_names', params['resource_names']))
        collection_formats['resource_names'] = 'csv'
    if 'sort' in params:
        query_params.append(('sort', params['sort']))
        collection_formats['sort'] = 'csv'
    if 'total_item_count' in params:
        query_params.append(('total_item_count', params['total_item_count']))

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']

    form_params = []
    local_var_files = {}

    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.api_client.call_api(
        '/api/2.10/volume-snapshots/tags', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TagGetResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
List tags

Displays the list of tags.

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.api210_volume_snapshots_tags_get_with_http_info(async_req=True)
>>> result = thread.get()

:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] namespaces: A comma-separated list of namespaces.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param bool resource_destroyed: If set to `true`, returns only objects from destroyed resources. Returns an error if the name of a live resource is specified in the `resource_names` query parameter. If set to `false`, returns only objects from live resources. Returns an error if the name of a destroyed resource is specified in the `resource_names` query parameter.
:param list[str] resource_ids: A comma-separated list of resource IDs. The `resource_ids` and `resource_names` parameters cannot be provided together.
:param list[str] resource_names: A comma-separated list of resource names. The `resource_ids` and `resource_names` parameters cannot be provided together.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts.
:return: TagGetResponse
         If the method is called asynchronously, returns the request thread.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_10/api/volume_snapshots_api.py#L713-L851
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class VolumeSnapshotsApi(object): def __init__(self, api_client): self.api_client = api_client def api210_volume_snapshots_delete_with_http_info( self, authorization=None, x_request_id=None, ids=None, names=None, replication_snapshot=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if ids is not None: if not isinstance(ids, list): ids = [ids] if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'ids' in params: query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'replication_snapshot' in params: query_params.append(('replication_snapshot', params['replication_snapshot'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/volume-snapshots', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_volume_snapshots_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, destroyed=None, filter=None, ids=None, limit=None, names=None, offset=None, sort=None, source_ids=None, source_names=None, total_item_count=None, total_only=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if ids is not None: if not isinstance(ids, list): ids = [ids] if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] if source_ids is not None: if not isinstance(source_ids, list): source_ids = [source_ids] if source_names is not None: if not isinstance(source_names, list): source_names = [source_names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api210_volume_snapshots_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api210_volume_snapshots_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', 
params['continuation_token'])) if 'destroyed' in params: query_params.append(('destroyed', params['destroyed'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'ids' in params: query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'source_ids' in params: query_params.append(('source_ids', params['source_ids'])) collection_formats['source_ids'] = 'csv' if 'source_names' in params: query_params.append(('source_names', params['source_names'])) collection_formats['source_names'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', params['total_item_count'])) if 'total_only' in params: query_params.append(('total_only', params['total_only'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/volume-snapshots', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='VolumeSnapshotGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_volume_snapshots_patch_with_http_info( self, volume_snapshot=None, authorization=None, x_request_id=None, ids=None, names=None, replication_snapshot=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if ids is not None: if not isinstance(ids, list): ids = [ids] if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if volume_snapshot is None: raise TypeError("Missing the required parameter `volume_snapshot` when calling `api210_volume_snapshots_patch`") collection_formats = {} path_params = {} query_params = [] if 'ids' in params: query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'replication_snapshot' in params: query_params.append(('replication_snapshot', params['replication_snapshot'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if 'volume_snapshot' in params: body_params = params['volume_snapshot'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = 
self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/volume-snapshots', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='VolumeSnapshotResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_volume_snapshots_post_with_http_info( self, volume_snapshot=None, authorization=None, x_request_id=None, on=None, source_ids=None, source_names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if source_ids is not None: if not isinstance(source_ids, list): source_ids = [source_ids] if source_names is not None: if not isinstance(source_names, list): source_names = [source_names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if volume_snapshot is None: raise TypeError("Missing the required parameter `volume_snapshot` when calling `api210_volume_snapshots_post`") collection_formats = {} path_params = {} query_params = [] if 'on' in params: query_params.append(('on', params['on'])) if 'source_ids' in params: query_params.append(('source_ids', params['source_ids'])) collection_formats['source_ids'] = 'csv' if 'source_names' in params: query_params.append(('source_names', params['source_names'])) collection_formats['source_names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if 'volume_snapshot' in params: body_params = params['volume_snapshot'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/volume-snapshots', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='VolumeSnapshotResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_volume_snapshots_tags_batch_put_with_http_info( self, tag=None, authorization=None, x_request_id=None, resource_ids=None, resource_names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if tag is not None: if not isinstance(tag, list): tag = [tag] if resource_ids is not None: if not isinstance(resource_ids, list): resource_ids = [resource_ids] if resource_names is not None: if not isinstance(resource_names, list): resource_names = [resource_names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if tag is None: raise TypeError("Missing the required parameter `tag` when calling `api210_volume_snapshots_tags_batch_put`") collection_formats = {} path_params = {} query_params = [] if 
'resource_ids' in params: query_params.append(('resource_ids', params['resource_ids'])) collection_formats['resource_ids'] = 'csv' if 'resource_names' in params: query_params.append(('resource_names', params['resource_names'])) collection_formats['resource_names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if 'tag' in params: body_params = params['tag'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/volume-snapshots/tags/batch', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TagResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_volume_snapshots_tags_delete_with_http_info( self, authorization=None, x_request_id=None, keys=None, namespaces=None, resource_ids=None, resource_names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if keys is not None: if not isinstance(keys, list): keys = [keys] if namespaces is not None: if not isinstance(namespaces, list): namespaces = [namespaces] if resource_ids is not None: if not isinstance(resource_ids, list): resource_ids = [resource_ids] if resource_names is not None: if not isinstance(resource_names, list): resource_names = [resource_names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'keys' in params: query_params.append(('keys', params['keys'])) collection_formats['keys'] = 'csv' if 'namespaces' in params: query_params.append(('namespaces', params['namespaces'])) collection_formats['namespaces'] = 'csv' if 'resource_ids' in params: query_params.append(('resource_ids', params['resource_ids'])) collection_formats['resource_ids'] = 'csv' if 'resource_names' in params: query_params.append(('resource_names', params['resource_names'])) collection_formats['resource_names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/volume-snapshots/tags', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
BSD 2-Clause Simplified License
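A hedged usage sketch for the generated volume-snapshot endpoint methods shown in the context above; `snapshots_api` and the body shape are assumptions about how the surrounding client wires this class up, not part of the record itself.

# Hypothetical sketch: `snapshots_api` is assumed to be an instance of this generated API
# class bound to a configured api_client; the body shape passed as `volume_snapshot`
# is also an assumption (the real client uses a model object).
patch_body = {"destroyed": True}
resp = snapshots_api.api210_volume_snapshots_patch_with_http_info(
    volume_snapshot=patch_body,
    names=["vol1.snap1"],
)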
stonybrooknlp/haqae
EncDec.py
Attention.__init__
python
def __init__(self, dim, use_cuda=True):
    super(Attention, self).__init__()
    if isinstance(dim, tuple):
        self.query_dim, self.memory_dim, self.output_dim = dim
    else:
        self.query_dim = self.memory_dim = self.output_dim = dim
    self.linear_in = nn.Linear(self.query_dim, self.memory_dim, bias=False)
    self.linear_out = nn.Linear(self.query_dim + self.memory_dim, self.output_dim, bias=False)
    self.use_cuda = use_cuda
Args:
    dim (int or Tuple): dimension of the query vector and memory* vectors if an int; if a tuple, it should be (query dim, memory dim, output_dim)

*Note: I do not condone the usage of this term, it just seems to be unfortunately common, so eh, I'll use it.
https://github.com/stonybrooknlp/haqae/blob/b90e1d922f8472edc22e47e32326f4fd694ce4f5/EncDec.py#L159-L174
import torch import torch.nn as nn import numpy as np import math from torch.autograd import Variable import torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence class EncDecBase(nn.Module): def __init__(self, emb_size, hidden_size, embeddings=None, cell_type="GRU", layers=1, bidir=True, use_cuda=True): super(EncDecBase, self).__init__() self.emb_size = emb_size self.hidden_size = hidden_size self.embeddings = embeddings self.layers = layers self.bidir = bidir self.cell_type = cell_type self.use_cuda = use_cuda if cell_type == "LSTM": self.rnn = nn.LSTM(self.emb_size, self.hidden_size, self.layers, bidirectional=self.bidir, batch_first=True) else: self.rnn = nn.GRU(self.emb_size, self.hidden_size, self.layers, bidirectional=self.bidir, batch_first=True) def forward(input, hidden): raise NotImplementedError def initHidden(self, batch_size): dirs = 2 if self.bidir else 1 if self.cell_type == "LSTM": hidden = (Variable(torch.zeros(batch_size, self.layers*dirs, self.hidden_size)), Variable(torch.zeros(self.layers*dirs, batch_size, self.hidden_size))) else: hidden = Variable(torch.zeros(self.layers*dirs, batch_size, self.hidden_size)) if self.use_cuda: return hidden.cuda() else: return hidden class Encoder(EncDecBase): def forward(self, input, hidden, seq_lens, use_packed=True): out = self.embeddings(input).view(input.shape[0], input.shape[1], -1) if use_packed: packed_input = pack_padded_sequence(out, seq_lens.cpu().numpy(), batch_first=True) self.rnn.flatten_parameters() packed_out, hidden = self.rnn(packed_input, hidden) enc_out, _ = pad_packed_sequence(packed_out, batch_first=True) else: enc_out, hidden = self.rnn(out, hidden) return enc_out, hidden class Decoder(EncDecBase): def __init__(self, emb_size, hidden_size, embeddings=None, cell_type="GRU", layers=1, attn_dim=-1, use_cuda=True, dropout=0.0): if attn_dim is None: attn_mem_dim = 2*hidden_size attndim = hidden_size else: attn_mem_dim, attndim = attn_dim bidir = False super(Decoder, self).__init__(emb_size + attndim, hidden_size, embeddings, cell_type, layers, bidir, use_cuda) self.attn_dim = attndim self.input_feed = None self.attention = Attention((hidden_size, attn_mem_dim, self.attn_dim), use_cuda=self.use_cuda) if dropout > 0: print("Using a Dropout Value of {} in the decoder".format(dropout)) self.drop = nn.Dropout(dropout) else: self.drop = None def reset_feed_(self): del self.input_feed self.input_feed = None def init_feed_(self, feed): if self.input_feed is None: self.input_feed = feed def forward(self, input, hidden, memory): if self.drop is None: out = self.embeddings(input).view(input.shape[0], -1) else: out = self.drop(self.embeddings(input).view(input.shape[0], -1)) dec_input = torch.cat([out, self.input_feed], dim=1).unsqueeze(dim=1) self.rnn.flatten_parameters() rnn_output, hidden = self.rnn(dec_input, hidden) rnn_output=torch.squeeze(rnn_output, dim=1) dec_output, scores = self.attention(rnn_output, memory) if self.drop is not None: dec_output = self.drop(dec_output) self.input_feed = dec_output return dec_output, hidden class Attention(nn.Module):
Apache License 2.0
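A small usage sketch for the Attention module above; the call signature mirrors the Decoder's `self.attention(rnn_output, memory)` from the context, while the concrete tensor shapes are assumptions.

# Sketch only: dims follow the (query_dim, memory_dim, output_dim) tuple form.
import torch

attn = Attention((128, 256, 64), use_cuda=False)
query = torch.randn(8, 128)          # (batch, query_dim), assumed layout
memory = torch.randn(8, 20, 256)     # (batch, src_len, memory_dim), assumed layout
output, scores = attn(query, memory)  # mirrors the Decoder's call pattern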
rlbot/rlbot
src/main/python/rlbot/agents/base_agent.py
BaseAgent.set_game_state
python
def set_game_state(self, game_state: GameState):
    self.__game_state_func(game_state)
CHEAT: Change the rocket league game to the given game_state
https://github.com/rlbot/rlbot/blob/b2d06110b0c8541a85605b6c00dcb898e7f35946/src/main/python/rlbot/agents/base_agent.py#L152-L154
from typing import Optional from urllib.parse import ParseResult as URL from flatbuffers import Builder from rlbot.agents.rlbot_runnable import RLBotRunnable, LOCATIONS_HEADER, DETAILS_HEADER from rlbot.botmanager.helper_process_request import HelperProcessRequest from rlbot.matchcomms.client import MatchcommsClient from rlbot.messages.flat import MatchSettings, ControllerState, PlayerInput from rlbot.parsing.custom_config import ConfigObject from rlbot.utils.game_state_util import GameState from rlbot.utils.logging_utils import get_logger from rlbot.utils.rendering.rendering_manager import RenderingManager from rlbot.utils.structures.ball_prediction_struct import BallPrediction from rlbot.utils.structures.game_data_struct import GameTickPacket, FieldInfoPacket from rlbot.utils.structures.legacy_data_v3 import convert_to_legacy_v3 from rlbot.utils.structures.quick_chats import QuickChats from rlbot.utils.structures.rigid_body_struct import RigidBodyTick BOT_CONFIG_MODULE_HEADER = LOCATIONS_HEADER BOT_CONFIG_AGENT_HEADER = 'Bot Parameters' BOT_CONFIG_DETAILS_HEADER = DETAILS_HEADER PYTHON_FILE_KEY = 'python_file' SUPPORTS_STANDALONE = 'supports_standalone' LOADOUT_GENERATOR_FILE_KEY = 'loadout_generator' LOGO_FILE_KEY = 'logo_file' LOOKS_CONFIG_KEY = 'looks_config' BOT_NAME_KEY = "name" SUPPORTS_EARLY_START_KEY = "supports_early_start" MAXIMUM_TICK_RATE_PREFERENCE_KEY = "maximum_tick_rate_preference" class SimpleControllerState: def __init__(self, steer: float = 0.0, throttle: float = 0.0, pitch: float = 0.0, yaw: float = 0.0, roll: float = 0.0, jump: bool = False, boost: bool = False, handbrake: bool = False, use_item: bool = False): self.steer = steer self.throttle = throttle self.pitch = pitch self.yaw = yaw self.roll = roll self.jump = jump self.boost = boost self.handbrake = handbrake self.use_item = use_item def to_flatbuffer(self, player_index: int) -> Builder: builder = Builder(100) ControllerState.ControllerStateStart(builder) ControllerState.ControllerStateAddSteer(builder, self.steer) ControllerState.ControllerStateAddThrottle(builder, self.throttle) ControllerState.ControllerStateAddPitch(builder, self.pitch) ControllerState.ControllerStateAddYaw(builder, self.yaw) ControllerState.ControllerStateAddRoll(builder, self.roll) ControllerState.ControllerStateAddJump(builder, self.jump) ControllerState.ControllerStateAddBoost(builder, self.boost) ControllerState.ControllerStateAddHandbrake(builder, self.handbrake) cs_offset = ControllerState.ControllerStateEnd(builder) PlayerInput.PlayerInputStart(builder) PlayerInput.PlayerInputAddPlayerIndex(builder, player_index) PlayerInput.PlayerInputAddControllerState(builder, cs_offset) pi_offset = PlayerInput.PlayerInputEnd(builder) builder.Finish(pi_offset) return builder class BaseAgent(RLBotRunnable): team = None index = None __quick_chat_func = None __field_info_func = None __game_state_func = None __get_rigid_body_tick_func = None __match_settings_func = None renderer: RenderingManager = None matchcomms_root: URL = None def __init__(self, name, team, index): super().__init__(name) self.team = team self.index = index self.logger = get_logger(f'bot{index}') self.spawn_id = -1 def get_output(self, game_tick_packet: GameTickPacket) -> SimpleControllerState: return SimpleControllerState() def send_quick_chat(self, team_only, quick_chat): if quick_chat == QuickChats.CHAT_NONE or quick_chat is None: return self.__quick_chat_func(team_only, quick_chat) def handle_quick_chat(self, index, team, quick_chat): pass def get_field_info(self): return 
self.__field_info_func() def get_rigid_body_tick(self) -> RigidBodyTick: return self.__get_rigid_body_tick_func()
MIT License
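A sketch of calling set_game_state from inside a bot's get_output, assuming the state helpers in rlbot.utils.game_state_util; the coordinates are illustrative.

# Sketch: pins the ball above the center of the field.
from rlbot.utils.game_state_util import GameState, BallState, Physics, Vector3

def get_output(self, packet):
    ball_state = BallState(physics=Physics(location=Vector3(0, 0, 800)))
    self.set_game_state(GameState(ball=ball_state))
    return SimpleControllerState()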
duguyue100/pyaer
pyaer/dvs128.py
DVS128.get_bias
python
def get_bias(self):
    bias_obj = {}
    bias_obj["cas"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_CAS)
    bias_obj["injGnd"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_INJGND)
    bias_obj["reqPd"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_REQPD)
    bias_obj["puX"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_PUX)
    bias_obj["diffOff"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_DIFFOFF)
    bias_obj["req"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_REQ)
    bias_obj["refr"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_REFR)
    bias_obj["puY"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_PUY)
    bias_obj["diffOn"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_DIFFON)
    bias_obj["diff"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_DIFF)
    bias_obj["foll"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_FOLL)
    bias_obj["Pr"] = self.get_config(
        libcaer.DVS128_CONFIG_BIAS,
        libcaer.DVS128_CONFIG_BIAS_PR)

    if self.noise_filter is not None:
        bias_obj["noise_filter_configs"] = self.noise_filter.get_bias()

    return bias_obj
Get bias settings.

# Returns

bias_obj: `dict`
    dictionary that contains DVS128 current bias settings.
https://github.com/duguyue100/pyaer/blob/5ca6980072780042072c197734ffbce7a96682ea/pyaer/dvs128.py#L226-L275
from __future__ import print_function, absolute_import from builtins import range import numpy as np from pyaer import libcaer from pyaer.device import USBDevice from pyaer.filters import DVSNoise from pyaer import utils class DVS128(USBDevice): def __init__(self, device_id=1, bus_number_restrict=0, dev_address_restrict=0, serial_number="", noise_filter=False): super(DVS128, self).__init__() self.open(device_id, bus_number_restrict, dev_address_restrict, serial_number) self.obtain_device_info(self.handle) self.filter_noise = noise_filter if noise_filter is True: self.noise_filter = DVSNoise(self.dvs_size_X, self.dvs_size_Y) else: self.noise_filter = None def set_noise_filter(self, noise_filter): if noise_filter is not None: self.noise_filter = noise_filter def enable_noise_filter(self): if self.filter_noise is False: self.filter_noise = True if self.noise_filter is None: self.noise_filter = DVSNoise(self.dvs_size_X, self.dvs_size_Y) def disable_noise_filter(self): if self.filter_noise is True: self.filter_noise = False def obtain_device_info(self, handle): if handle is not None: info = libcaer.caerDVS128InfoGet(handle) self.device_id = info.deviceID self.device_serial_number = info.deviceSerialNumber self.device_usb_bus_number = info.deviceUSBBusNumber self.device_usb_device_address = info.deviceUSBDeviceAddress self.device_string = info.deviceString self.firmware_version = info.firmwareVersion self.device_is_master = info.deviceIsMaster self.dvs_size_X = info.dvsSizeX self.dvs_size_Y = info.dvsSizeY def open(self, device_id=1, bus_number_restrict=0, dev_address_restrict=0, serial_number=""): super(DVS128, self).open( libcaer.CAER_DEVICE_DVS128, device_id, bus_number_restrict, dev_address_restrict, serial_number) def set_bias_from_json(self, file_path, verbose=False): bias_obj = utils.load_dvs_bias(file_path, verbose) self.set_bias(bias_obj) def set_bias(self, bias_obj): self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_CAS, bias_obj["cas"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_INJGND, bias_obj["injGnd"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_REQPD, bias_obj["reqPd"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_PUX, bias_obj["puX"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_DIFFOFF, bias_obj["diffOff"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_REQ, bias_obj["req"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_REFR, bias_obj["refr"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_PUY, bias_obj["puY"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_DIFFON, bias_obj["diffOn"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_DIFF, bias_obj["diff"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_FOLL, bias_obj["foll"]) self.set_config(libcaer.DVS128_CONFIG_BIAS, libcaer.DVS128_CONFIG_BIAS_PR, bias_obj["Pr"]) if self.filter_noise is True: self.noise_filter.set_bias(bias_obj["noise_filter_configs"])
MIT License
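A minimal sketch of reading the bias dictionary back from an attached camera; it assumes a DVS128 device is connected.

# Sketch: requires a connected DVS128; keys follow the dictionary built in get_bias().
device = DVS128(noise_filter=False)
bias = device.get_bias()
print(bias["diffOn"], bias["diffOff"])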
nukesor/ultimate-poll-bot
pollbot/sentry.py
Sentry.__init__
python
def __init__(self):
    if config["logging"]["sentry_enabled"]:
        self.initialized = True
        sentry_sdk.init(
            config["logging"]["sentry_token"],
        )
Construct a new Sentry wrapper.
https://github.com/nukesor/ultimate-poll-bot/blob/cf8558921e326a91b91c1af526899a0f8889a7fc/pollbot/sentry.py#L35-L41
import traceback

import sentry_sdk
from sentry_sdk import configure_scope
from telegram.error import NetworkError, TimedOut

from pollbot.config import config


def ignore_job_exception(exception):
    if type(exception) is TimedOut:
        return True

    if type(exception) is NetworkError:
        return True

    return False


class Sentry(object):
    initialized = False
MIT License
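A sketch of constructing the wrapper; initialization only happens when `sentry_enabled` is set in the pollbot config.

# Sketch: relies on pollbot.config.config providing logging.sentry_enabled / sentry_token.
sentry = Sentry()
if sentry.initialized:
    print("Sentry error reporting is active")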
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1alpha1_audit_sink_list.py
V1alpha1AuditSinkList.metadata
python
def metadata(self, metadata):
    self._metadata = metadata
Sets the metadata of this V1alpha1AuditSinkList.

:param metadata: The metadata of this V1alpha1AuditSinkList.  # noqa: E501
:type: V1ListMeta
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1alpha1_audit_sink_list.py#L151-L159
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1alpha1AuditSinkList(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[V1alpha1AuditSink]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def items(self): return self._items @items.setter def items(self, items): if self.local_vars_configuration.client_side_validation and items is None: raise ValueError("Invalid value for `items`, must not be `None`") self._items = items @property def kind(self): return self._kind @kind.setter def kind(self, kind): self._kind = kind @property def metadata(self): return self._metadata @metadata.setter
Apache License 2.0
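A sketch of the setter in use; the V1ListMeta import path is assumed to be exported by the same generated client package.

# Sketch: V1ListMeta is assumed to be available from kubernetes_asyncio.client.
from kubernetes_asyncio.client import V1ListMeta

audit_list = V1alpha1AuditSinkList(items=[])
audit_list.metadata = V1ListMeta(resource_version="12345")
print(audit_list.metadata.resource_version)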
morriswmz/doatools.py
doatools/model/arrays.py
GridBasedArrayDesign.element_indices
python
def element_indices(self):
    return self._element_indices.copy()
Retrieves the element indices. You are not supposed to modify the returned array.
https://github.com/morriswmz/doatools.py/blob/9469db201e0418aef6b97583ef54b6fec2769502/doatools/model/arrays.py#L528-L533
from math import gcd from ..utils.math import cartesian import numpy as np import warnings import copy from .array_elements import ISOTROPIC_SCALAR_SENSOR from .perturbations import LocationErrors, GainErrors, PhaseErrors, MutualCoupling class ArrayDesign: def __init__(self, locations, name, perturbations=[], element=ISOTROPIC_SCALAR_SENSOR): if not isinstance(locations, np.ndarray): locations = np.array(locations) if locations.ndim > 2: raise ValueError('Expecting a 1D vector or a 2D matrix.') if locations.ndim == 1: locations = locations.reshape((-1, 1)) elif locations.shape[1] > 3: raise ValueError('Array can only be 1D, 2D or 3D.') self._locations = locations self._name = name self._element = element self._perturbations = {} self._add_perturbation_from_list(self._parse_input_perturbations(perturbations)) @property def name(self): return self._name @property def size(self): return self._locations.shape[0] @property def output_size(self): return self.size @property def element_locations(self): return self._locations.copy() @property def actual_element_locations(self): locations = self._locations for p in self._perturbations.values(): locations = p.perturb_sensor_locations(locations) return locations @property def element(self): return self._element @property def is_perturbed(self): return len(self._perturbations) > 0 @property def ndim(self): return self._locations.shape[1] @property def actual_ndim(self): return self.actual_element_locations.shape[1] def has_perturbation(self, ptype): return ptype in self._perturbations def is_perturbation_known(self, ptype): return self._perturbations[ptype].is_known def get_perturbation_params(self, ptype): return self._perturbations[ptype].params @property def perturbations(self): return list(self._perturbations.values()) def _add_perturbation_from_list(self, perturbations, raise_on_override=True): for p in perturbations: applicable, msg = p.is_applicable_to(self) if not applicable: raise RuntimeError(msg) p_class = p.__class__ if p_class in self._perturbations and raise_on_override: raise RuntimeError( 'Cannot have more than one perturbations of the same type. ' 'Attempting to add another perturbation of the type {0}.' 
.format(p_class.__name__) ) self._perturbations[p_class] = p def _parse_input_perturbations(self, perturbations): if isinstance(perturbations, dict): factories = { 'location_errors': (lambda p, k: LocationErrors(p, k)), 'gain_errors': (lambda p, k: GainErrors(p, k)), 'phase_errors': (lambda p, k: PhaseErrors(p, k)), 'mutual_coupling': (lambda p, k: MutualCoupling(p, k)) } perturbations = [factories[k](v[0], v[1]) for k, v in perturbations.items()] return perturbations def get_perturbed_copy(self, perturbations, new_name=None): array = self.get_perturbation_free_copy(new_name) new_perturbations = self._parse_input_perturbations(perturbations) array._perturbations = self._perturbations.copy() array._add_perturbation_from_list(new_perturbations, False) return array def get_perturbation_free_copy(self, new_name=None): if new_name is None: new_name = self._name array = copy.copy(self) array._perturbations = {} array._name = new_name return array def steering_matrix(self, sources, wavelength, compute_derivatives=False, perturbations='all', flatten=True): if perturbations == 'all': perturb_list = self._perturbations.values() elif perturbations == 'known': perturb_list = [v for v in self._perturbations.values() if v.is_known] elif perturbations == 'none': perturb_list = [] else: raise ValueError('Perturbation can only be "all", "known", or "none".') if not self._element.is_isotropic or not self._element.is_scalar: require_spatial_response = True if compute_derivatives: raise RuntimeError( 'Derivative computation is not supported when the array ' 'elements are non-isotropic or non-scalar.' ) else: require_spatial_response = False actual_locations = self._locations for p in perturb_list: actual_locations = p.perturb_sensor_locations(actual_locations) T = sources.phase_delay_matrix(actual_locations, wavelength, compute_derivatives) if compute_derivatives: A = np.exp(1j * T[0]) DA = [A * (1j * X) for X in T[1:]] else: A = np.exp(1j * T) DA = [] if require_spatial_response: if sources.is_far_field: r, az, el = sources.calc_spherical_coords(np.zeros((1, 1))) else: r, az, el = sources.calc_spherical_coords(actual_locations) S = self._element.calc_spatial_response(r, az, el) A = S * A for p in perturb_list: A, DA = p.perturb_steering_matrix(A, DA) if A.ndim > 2 and flatten: A = A.reshape((-1, sources.size)) if compute_derivatives: return (A,) + tuple(DA) else: return A class GridBasedArrayDesign(ArrayDesign): def __init__(self, indices, d0=None, name=None, bases=None, **kwargs): if bases is None: if np.isscalar(d0): d0 = np.full((indices.shape[1],), d0, dtype=np.float_) else: d0 = np.array(d0, dtype=np.float_) if d0.ndim > 1 or indices.shape[1] < d0.size: raise ValueError( 'd0 must be a scalar or a list-like object of length {0}.' .format(indices.shape[1]) ) bases = np.eye(d0.size) * d0 else: if indices.shape[1] != bases.shape[0]: raise ValueError( 'The number of rows of the bases matrix does not match ' 'the number of columns of the indices ({0} != {1}).' .format(bases.shape[0], indices.shape[1]) ) d0 = np.linalg.norm(bases, ord=2, axis=1) super().__init__(indices @ bases, name, **kwargs) self._element_indices = indices self._bases = bases self._d0 = d0 @property def d0(self): return self._d0 @property def bases(self): return self._bases @property
MIT License
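A sketch of element_indices on a small grid-based design, using the GridBasedArrayDesign constructor shown in the context; the spacing and element count are illustrative.

import numpy as np

# Sketch: a 4-element 1D grid with spacing d0 = 0.5 (illustrative values).
design = GridBasedArrayDesign(np.arange(4).reshape(-1, 1), d0=0.5, name="demo-grid")
indices = design.element_indices   # a copy; editing it does not alter the design
print(indices.ravel())             # [0 1 2 3]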
blackducksoftware/hub-rest-api-python
blackduck/CustomFields.py
get_custom_fields
python
def get_custom_fields(self, object_name):
    assert object_name in self.supported_cf_object_types(), "You must supply a supported object name that is in {}".format(self.supported_cf_object_types())

    url = self._get_cf_object_url(object_name) + "/fields"

    response = self.execute_get(url)
    return response.json()
Get the custom field (definition) for a given object type, e.g. Project, Project Version, Component, etc.
https://github.com/blackducksoftware/hub-rest-api-python/blob/9a409dc52b4cbe0d6405134f03a91783f8a0363e/blackduck/CustomFields.py#L80-L88
import logging import requests import json from operator import itemgetter import urllib.parse logger = logging.getLogger(__name__) def _get_cf_url(self): return self.get_apibase() + "/custom-fields/objects" def supported_cf_object_types(self): if not hasattr(self, "_cf_object_types"): logger.debug("retrieving object types") self._cf_object_types = [cfo['name'] for cfo in self.get_cf_objects().get('items', [])] return self._cf_object_types def get_cf_objects(self): url = self._get_cf_url() if not hasattr(self, "_cf_objects"): logger.debug("retrieving objects") response = self.execute_get(url) self._cf_objects = response.json() return self._cf_objects def _get_cf_object_url(self, object_name): for cf_object in self.get_cf_objects().get('items', []): if cf_object['name'].lower() == object_name.lower(): return cf_object['_meta']['href'] def get_cf_object(self, object_name): assert object_name in self.supported_cf_object_types(), "Object name {} not one of the supported types ({})".format(object_name, self.supported_cf_object_types()) object_url = self._get_cf_object_url(object_name) response = self.execute_get(object_url) return response.json() def _get_cf_obj_rel_path(self, object_name): return object_name.lower().replace(" ", "-") def create_cf(self, object_name, field_type, description, label, position, active=True, initial_options=[]): assert isinstance(position, int) and position >= 0, "position must be an integer that is greater than or equal to 0" assert field_type in ["BOOLEAN", "DATE", "DROPDOWN", "MULTISELECT", "RADIO", "TEXT", "TEXTAREA"] types_using_initial_options = ["DROPDOWN", "MULTISELECT", "RADIO"] post_url = self._get_cf_object_url(object_name) + "/fields" cf_object = self._get_cf_obj_rel_path(object_name) cf_request = { "active": active, "description": description, "label": label, "position": position, "type": field_type, } if field_type in types_using_initial_options and initial_options: cf_request.update({"initialOptions": initial_options}) response = self.execute_post(post_url, data=cf_request) return response def delete_cf(self, object_name, field_id): assert object_name in self.supported_cf_object_types(), "You must supply a supported object name that is in {}".format(self.supported_cf_object_types()) delete_url = self._get_cf_object_url(object_name) + "/fields/{}".format(field_id) return self.execute_delete(delete_url)
Apache License 2.0
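A hedged usage sketch; `hub` is assumed to be an authenticated client object (e.g. a HubInstance from the blackduck package) that these helpers are bound to.

# Sketch: response keys are read defensively since the exact payload shape may vary.
fields = hub.get_custom_fields("Project")
for field in fields.get("items", []):
    print(field.get("label"), field.get("type"))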
baderlab/saber
saber/embeddings.py
Embeddings._prepare_embedding_matrix
python
def _prepare_embedding_matrix(self, embedding_idx, load_all=False):
    type_to_idx = None
    if load_all:
        type_to_idx = self._generate_type_to_idx(embedding_idx)
        self.token_map = type_to_idx['word']

    embedding_matrix = np.zeros((len(self.token_map), self.dimension))
    for word, i in self.token_map.items():
        token_embedding = embedding_idx.get(word)
        if token_embedding is not None:
            embedding_matrix[i] = token_embedding

    return embedding_matrix, type_to_idx
Returns an embedding matrix containing all pre-trained embeddings in `embedding_idx`.

Creates an embedding matrix from `embedding_idx`, where the ith row contains the embedding for the word with value i in `self.token_map`. If no embedding exists for a given word in `embedding_idx`, the zero vector is used instead.

Args:
    embedding_idx (dict): A dictionary mapping words to their embeddings.
    load_all (bool): True if all embeddings should be loaded. False if only words that appear in `self.token_map` should be loaded. Defaults to False.

Returns:
    A matrix whose ith row corresponds to the word embedding for the word with value i in `self.token_map`.
https://github.com/baderlab/saber/blob/876be6bfdb1bc5b18cbcfa848c94b0d20c940f02/saber/embeddings.py#L74-L106
import numpy as np from gensim.models import KeyedVectors from . import constants from .preprocessor import Preprocessor class Embeddings(object): def __init__(self, filepath, token_map, **kwargs): self.filepath = filepath self.token_map = token_map self.matrix = None self.num_found = None self.num_embed = None self.dimension = None for key, value in kwargs.items(): setattr(self, key, value) def load(self, binary=True, load_all=False): embedding_idx = self._prepare_embedding_index(binary) self.num_found, self.dimension = len(embedding_idx), len(list(embedding_idx.values())[0]) self.matrix, type_to_idx = self._prepare_embedding_matrix(embedding_idx, load_all) self.num_embed = self.matrix.shape[0] return type_to_idx def _prepare_embedding_index(self, binary=True): limit = 10000 if self.__dict__.get("debug", False) else None vectors = KeyedVectors.load_word2vec_format(self.filepath, binary=binary, limit=limit) embedding_idx = {word: vectors[word] for word in vectors.vocab} return embedding_idx
MIT License
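A sketch of driving _prepare_embedding_matrix through the public load() call from the context; the file path and token map are placeholders.

# Sketch: binary=True expects a word2vec-format binary file at the placeholder path.
token_map = {"the": 0, "cell": 1, "protein": 2}
emb = Embeddings(filepath="path/to/embeddings.bin", token_map=token_map)
emb.load(binary=True, load_all=False)
print(emb.matrix.shape)   # (len(token_map), embedding dimension)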
airspeed-velocity/asv
asv/console.py
Log.add_padded
python
def add_padded(self, msg):
    if self._prev_message is None:
        self.info(msg)
        return

    padding_length = util.get_terminal_width() - len(self._prev_message) - 14 - 1 - len(msg)
    if WIN:
        padding_length -= 1
    padding = " "*padding_length

    self._prev_message = None
    self.add(" {0}{1}".format(padding, msg))
Final part of two-part info message. Should be preceded by a call to info/warn/...(msg, reserve_space=True)
https://github.com/airspeed-velocity/asv/blob/9d5af5713357ccea00a518758fae6822cc69f539/asv/console.py#L385-L401
from __future__ import (absolute_import, division, print_function, unicode_literals) import io import contextlib import locale import logging import os import sys import textwrap import time import six from six.moves import xrange, input from . import util WIN = (os.name == "nt") def isatty(file): if hasattr(file, 'isatty'): return file.isatty() return False def _color_text(text, color): color_mapping = { 'black': '0;30', 'red': '0;31', 'green': '0;32', 'brown': '0;33', 'blue': '0;34', 'magenta': '0;35', 'cyan': '0;36', 'lightgrey': '0;37', 'default': '0;39', 'darkgrey': '1;30', 'lightred': '1;31', 'lightgreen': '1;32', 'yellow': '1;33', 'lightblue': '1;34', 'lightmagenta': '1;35', 'lightcyan': '1;36', 'white': '1;37'} color_code = color_mapping.get(color, '0;39') return '\033[{0}m{1}\033[0m'.format(color_code, text) _unicode_translations = { ord('μ'): 'u', ord('·'): '-', ord('±'): '~' } def _write_with_fallback(s, fileobj): if not isinstance(s, six.text_type): raise ValueError("Input string is not a Unicode string") if six.PY3: try: fileobj.write(s) return except UnicodeError: pass enc = locale.getpreferredencoding() try: b = s.encode(enc) except UnicodeError: s = s.translate(_unicode_translations) b = s.encode(enc, errors='replace') fileobj.flush() fileobj.buffer.write(b) else: enc = locale.getpreferredencoding() try: b = s.encode(enc) except UnicodeError: for key, val in _unicode_translations.iteritems(): s = s.replace(unichr(key), val) b = s.encode(enc, errors='replace') fileobj.write(b) def color_print(*args, **kwargs): file = kwargs.get('file', sys.stdout) end = kwargs.get('end', '\n') if isatty(file) and not WIN: for i in xrange(0, len(args), 2): msg = args[i] if i + 1 == len(args): color = '' else: color = args[i + 1] if color: msg = _color_text(msg, color) _write_with_fallback(msg, file) _write_with_fallback(end, file) else: for i in xrange(0, len(args), 2): msg = args[i] _write_with_fallback(msg, file) _write_with_fallback(end, file) def get_answer_default(prompt, default, use_defaults=False): color_print("{0} [{1}]: ".format(prompt, default), end='') if use_defaults: return default x = input() if x.strip() == '': return default return x def truncate_left(s, l): if len(s) > l: return '...' 
+ s[-(l - 3):] else: return s class Log(object): def __init__(self): self._indent = 1 self._total = 0 self._count = 0 self._logger = logging.getLogger() self._needs_newline = False self._last_dot = time.time() def _stream_formatter(self, record): if self._needs_newline: color_print('') parts = record.msg.split('\n', 1) first_line = parts[0] if len(parts) == 1: rest = None else: rest = parts[1] indent = self._indent + 1 continued = getattr(record, 'continued', False) if self._total: progress_msg = '[{0:6.02f}%] '.format( (float(self._count) / self._total) * 100.0) if not continued: color_print(progress_msg, end='') indent += len(progress_msg) if not continued: color_print('·' * self._indent, end='') color_print(' ', end='') else: color_print(' ' * indent, end='') if hasattr(record, 'color'): color = record.color elif record.levelno < logging.DEBUG: color = 'default' elif record.levelno < logging.INFO: color = 'default' elif record.levelno < logging.WARN: if self._indent == 1: color = 'green' elif self._indent == 2: color = 'blue' else: color = 'default' elif record.levelno < logging.ERROR: color = 'brown' else: color = 'red' spaces = ' ' * indent color_print(first_line, color, end='') if rest is not None: color_print('') detail = textwrap.dedent(rest) for line in detail.split('\n'): color_print(spaces, end='') color_print(line) self._needs_newline = True sys.stdout.flush() @contextlib.contextmanager def indent(self): self._indent += 1 yield self._indent -= 1 def dot(self): if isatty(sys.stdout): if time.time() > self._last_dot + 1.0: color_print('.', 'darkgrey', end='') sys.stdout.flush() self._last_dot = time.time() def set_nitems(self, n): try: self._total = util.ceildiv(n * self._total, self._total - self._count) self._count = self._total - n except ZeroDivisionError: self._total = n self._count = 0 def step(self): self._count = min(self._total, self._count + 1) def enable(self, verbose=False): sh = logging.StreamHandler() sh.emit = self._stream_formatter self._logger.addHandler(sh) if verbose: self._logger.setLevel(logging.DEBUG) else: self._logger.setLevel(logging.INFO) @contextlib.contextmanager def set_level(self, level): orig_level = self._logger.level if not self.is_debug_enabled(): self._logger.setLevel(level) try: yield finally: self._logger.setLevel(orig_level) def is_debug_enabled(self): return self._logger.getEffectiveLevel() <= logging.DEBUG def _message(self, routine, message, reserve_space=False, color=None, continued=False): kwargs = {} extra = {} if color is not None: extra['color'] = color if continued: extra['continued'] = True if extra: kwargs['extra'] = extra if reserve_space: max_width = max(16, util.get_terminal_width() - 33) message = truncate_left(message, max_width) self._prev_message = message routine(message, **kwargs) def info(self, *args, **kwargs): self._message(self._logger.info, *args, **kwargs) def warning(self, *args, **kwargs): self._message(self._logger.warning, *args, **kwargs) def debug(self, *args, **kwargs): self._message(self._logger.debug, *args, **kwargs) def error(self, *args, **kwargs): self._message(self._logger.error, *args, **kwargs) def add(self, msg): if self._needs_newline: _write_with_fallback(msg, sys.stdout) sys.stdout.flush() else: self.info(msg)
BSD 3-Clause New or Revised License
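A sketch of the two-part message pattern the docstring describes, using a fresh Log instance rather than the module-level logger.

# Sketch: the first call reserves space, add_padded() right-pads the second part.
log = Log()
log.enable(verbose=False)
log.info("Benchmarking environment", reserve_space=True)
log.add_padded("ok")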
jupyterhub/binderhub
binderhub/_version.py
get_config
python
def get_config():
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = ""
    cfg.parentdir_prefix = "binderhub-"
    cfg.versionfile_source = "binderhub/_version.py"
    cfg.verbose = False
    return cfg
Create, populate and return the VersioneerConfig() object.
https://github.com/jupyterhub/binderhub/blob/ce0bb859ed25ddc58c0336643bd02bbf05a83386/binderhub/_version.py#L37-L48
import errno
import os
import re
import subprocess
import sys


def get_keywords():
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords


class VersioneerConfig:
BSD 3-Clause New or Revised License
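A sketch showing the configuration object the helper returns.

# Sketch: attributes mirror the assignments inside get_config().
cfg = get_config()
print(cfg.VCS, cfg.style, cfg.parentdir_prefix)   # git pep440 binderhub-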
jefflirion/python-androidtv
androidtv/basetv/basetv_async.py
BaseTVAsync.key_8
python
async def key_8(self):
    await self._key(constants.KEY_8)
Send 8 keypress.
https://github.com/jefflirion/python-androidtv/blob/f5396699f308992a59c39b71643134e49ca4f34a/androidtv/basetv/basetv_async.py#L590-L592
import logging from .basetv import BaseTV from .. import constants from ..adb_manager.adb_manager_async import ADBPythonAsync, ADBServerAsync _LOGGER = logging.getLogger(__name__) class BaseTVAsync(BaseTV): def __init__(self, host, port=5555, adbkey='', adb_server_ip='', adb_server_port=5037, state_detection_rules=None, signer=None): if not adb_server_ip: adb = ADBPythonAsync(host, port, adbkey, signer) else: adb = ADBServerAsync(host, port, adb_server_ip, adb_server_port) BaseTV.__init__(self, adb, host, port, adbkey, adb_server_ip, adb_server_port, state_detection_rules) async def adb_shell(self, cmd): return await self._adb.shell(self._remove_adb_shell_prefix(cmd)) async def adb_pull(self, local_path, device_path): return await self._adb.pull(local_path, device_path) async def adb_push(self, local_path, device_path): return await self._adb.push(local_path, device_path) async def adb_screencap(self): return await self._adb.screencap() async def adb_connect(self, always_log_errors=True, auth_timeout_s=constants.DEFAULT_AUTH_TIMEOUT_S): if isinstance(self._adb, ADBPythonAsync): return await self._adb.connect(always_log_errors, auth_timeout_s) return await self._adb.connect(always_log_errors) async def adb_close(self): await self._adb.close() async def get_device_properties(self): properties = await self._adb.shell(constants.CMD_MANUFACTURER + " && " + constants.CMD_MODEL + " && " + constants.CMD_SERIALNO + " && " + constants.CMD_VERSION + " && " + constants.CMD_MAC_WLAN0 + " && " + constants.CMD_MAC_ETH0) self._parse_device_properties(properties) return self.device_properties async def audio_output_device(self): stream_music = await self._get_stream_music() return self._audio_output_device(stream_music) async def audio_state(self): audio_state_response = await self._adb.shell(constants.CMD_AUDIO_STATE) return self._audio_state(audio_state_response) async def awake(self): return await self._adb.shell(constants.CMD_AWAKE + constants.CMD_SUCCESS1_FAILURE0) == '1' async def current_app(self): current_app_response = await self._adb.shell(self._cmd_current_app) return self._current_app(current_app_response) async def get_hdmi_input(self): return self._get_hdmi_input(await self._adb.shell(constants.CMD_HDMI_INPUT)) async def get_installed_apps(self): installed_apps_response = await self._adb.shell(constants.CMD_INSTALLED_APPS) self.installed_apps = self._get_installed_apps(installed_apps_response) return self.installed_apps async def is_volume_muted(self): stream_music = await self._get_stream_music() return self._is_volume_muted(stream_music) async def media_session_state(self): media_session_state_response = await self._adb.shell(constants.CMD_MEDIA_SESSION_STATE_FULL) _, media_session_state = self._current_app_media_session_state(media_session_state_response) return media_session_state async def screen_on(self): return await self._adb.shell(constants.CMD_SCREEN_ON + constants.CMD_SUCCESS1_FAILURE0) == '1' async def volume(self): stream_music = await self._get_stream_music() audio_output_device = self._audio_output_device(stream_music) return self._volume(stream_music, audio_output_device) async def volume_level(self): volume = await self.volume() return self._volume_level(volume) async def wake_lock_size(self): wake_lock_size_response = await self._adb.shell(constants.CMD_WAKE_LOCK_SIZE) return self._wake_lock_size(wake_lock_size_response) async def _get_stream_music(self, stream_music_raw=None): if not stream_music_raw: stream_music_raw = await self._adb.shell(constants.CMD_STREAM_MUSIC) 
return self._parse_stream_music(stream_music_raw) async def _send_intent(self, pkg, intent, count=1): cmd = 'monkey -p {} -c {} {}; echo $?'.format(pkg, intent, count) res = await self._adb.shell(cmd) if res is None: return {} res = res.strip().split("\r\n") retcode = res[-1] output = "\n".join(res[:-1]) return {"output": output, "retcode": retcode} async def launch_app(self, app): await self._adb.shell(self._cmd_launch_app.format(app)) async def stop_app(self, app): return await self._adb.shell("am force-stop {0}".format(app)) async def start_intent(self, uri): await self._adb.shell("am start -a android.intent.action.VIEW -d {}".format(uri)) async def _key(self, key): await self._adb.shell('input keyevent {0}'.format(key)) async def power(self): await self._key(constants.KEY_POWER) async def sleep(self): await self._key(constants.KEY_SLEEP) async def home(self): await self._key(constants.KEY_HOME) async def up(self): await self._key(constants.KEY_UP) async def down(self): await self._key(constants.KEY_DOWN) async def left(self): await self._key(constants.KEY_LEFT) async def right(self): await self._key(constants.KEY_RIGHT) async def enter(self): await self._key(constants.KEY_ENTER) async def back(self): await self._key(constants.KEY_BACK) async def menu(self): await self._key(constants.KEY_MENU) async def mute_volume(self): await self._key(constants.KEY_MUTE) async def media_play(self): await self._key(constants.KEY_PLAY) async def media_pause(self): await self._key(constants.KEY_PAUSE) async def media_play_pause(self): await self._key(constants.KEY_PLAY_PAUSE) async def media_stop(self): await self._key(constants.KEY_STOP) async def media_next_track(self): await self._key(constants.KEY_NEXT) async def media_previous_track(self): await self._key(constants.KEY_PREVIOUS) async def space(self): await self._key(constants.KEY_SPACE) async def key_0(self): await self._key(constants.KEY_0) async def key_1(self): await self._key(constants.KEY_1) async def key_2(self): await self._key(constants.KEY_2) async def key_3(self): await self._key(constants.KEY_3) async def key_4(self): await self._key(constants.KEY_4) async def key_5(self): await self._key(constants.KEY_5) async def key_6(self): await self._key(constants.KEY_6) async def key_7(self): await self._key(constants.KEY_7)
MIT License
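A sketch of sending the keypress from an asyncio program; the host address is a placeholder.

import asyncio

# Sketch: connect over ADB first, then issue the keypress.
async def press_eight():
    atv = BaseTVAsync("192.168.0.111")
    await atv.adb_connect()
    await atv.key_8()
    await atv.adb_close()

asyncio.run(press_eight())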
anand-p-r/fmi-hass-custom
sensor.py
FMIBestConditionSensor.update
python
def update(self):
    if self._fmi is None:
        _LOGGER.debug("FMI: Coordinator is not available")
        return

    if self._fmi.current is None:
        _LOGGER.debug("FMI: Sensor _FMI Current Forecast is unavailable")
        return

    if self.type == "place":
        self._state = self._fmi.current.place
        self._icon = "mdi:city-variant"
        return

    source_data = None

    if self._fmi.time_step == 1:
        source_data = self._fmi.current.data
    else:
        if self._fmi.forecast is None:
            _LOGGER.debug("FMI: Sensor _FMI Hourly Forecast is unavailable")
            return

        if len(self._fmi.forecast.forecasts) > 1:
            curr_min = datetime.now().minute
            if curr_min >= 30:
                source_data = self._fmi.forecast.forecasts[1]
            else:
                source_data = self._fmi.forecast.forecasts[0]

    if source_data is None:
        _LOGGER.debug("FMI: Sensor Source data is unavailable")
        return

    wind_direction = "Unavailable"
    if source_data.wind_direction is not None:
        wind_direction = get_wind_direction_string(source_data.wind_direction.value)

    if self.type == "forecast_time":
        self._state = source_data.time.astimezone(tz.tzlocal())
        self._icon = "mdi:av-timer"
    elif self.type == "weather":
        self._state = get_weather_symbol(source_data.symbol.value)
    elif self.type == "temperature":
        self._state = source_data.temperature.value
        self._icon = "mdi:thermometer"
    elif self.type == "wind_speed":
        self._state = source_data.wind_speed.value
        self._icon = "mdi:weather-windy"
    elif self.type == "wind_direction":
        self._state = wind_direction
        self._icon = "mdi:weather-windy"
    elif self.type == "wind_gust":
        self._state = source_data.wind_gust.value
        self._icon = "mdi:weather-windy"
    elif self.type == "humidity":
        self._state = source_data.humidity.value
        self._icon = "mdi:water"
    elif self.type == "clouds":
        self._state = source_data.cloud_cover.value
        self._icon = "mdi:weather-cloudy"
    elif self.type == "rain":
        self._state = source_data.precipitation_amount.value
        self._icon = "mdi:weather-pouring"
    elif self.type == "time":
        self._state = self._fmi.best_state
        self._icon = "mdi:av-timer"
    else:
        self._state = None
Get the latest data from FMI and update the states.
https://github.com/anand-p-r/fmi-hass-custom/blob/9b8a6319c6a5970afed4deb5069377d246709732/sensor.py#L184-L259
from datetime import datetime from dateutil import tz from homeassistant.const import ( ATTR_ATTRIBUTION, ATTR_LOCATION, ATTR_TEMPERATURE, ATTR_TIME, SPEED_METERS_PER_SECOND, TEMP_CELSIUS, PERCENTAGE ) from homeassistant.const import CONF_NAME from homeassistant.helpers.update_coordinator import CoordinatorEntity from .const import ( CONF_LIGHTNING, _LOGGER, ATTRIBUTION, DOMAIN, ATTR_HUMIDITY, ATTR_WIND_SPEED, ATTR_PRECIPITATION, ATTR_DISTANCE, ATTR_STRIKES, ATTR_PEAK_CURRENT, ATTR_CLOUD_COVER, ATTR_ELLIPSE_MAJOR, COORDINATOR ) from .utils import ( get_weather_symbol ) SENSOR_TYPES = { "place": ["Place", None], "weather": ["Condition", None], "temperature": ["Temperature", TEMP_CELSIUS], "wind_speed": ["Wind Speed", SPEED_METERS_PER_SECOND], "wind_direction": ["Wind Direction", ""], "wind_gust": ["Wind Gust", SPEED_METERS_PER_SECOND], "humidity": ["Humidity", PERCENTAGE], "clouds": ["Cloud Coverage", PERCENTAGE], "rain": ["Rain", "mm/hr"], "forecast_time": ["Time", None], "time": ["Best Time Of Day", None], } SENSOR_LIGHTNING_TYPES = { "lightning": ["Lightning Strikes", None] } SENSOR_MAREO_TYPES = { "sea_level": ["Sea Level", "cm"] } PARALLEL_UPDATES = 1 async def async_setup_entry(hass, config_entry, async_add_entities): name = config_entry.data[CONF_NAME] lightning_mode = config_entry.options.get(CONF_LIGHTNING, False) coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR] entity_list = [] for sensor_type in SENSOR_TYPES: entity_list.append( FMIBestConditionSensor( name, coordinator, sensor_type ) ) if lightning_mode: for sensor_type in SENSOR_LIGHTNING_TYPES: entity_list.append( FMILightningStrikesSensor(name, coordinator, sensor_type)) for sensor_type in SENSOR_MAREO_TYPES: entity_list.append( FMIMareoSensor(name, coordinator, sensor_type)) async_add_entities(entity_list, False) def get_wind_direction_string(wind_direction_in_deg): if wind_direction_in_deg is not None: if wind_direction_in_deg <=23: return "N" elif wind_direction_in_deg > 338: return "N" elif (23 < wind_direction_in_deg <= 68): return "NE" elif (68 < wind_direction_in_deg <= 113): return "E" elif (113 < wind_direction_in_deg <= 158): return "SE" elif (158 < wind_direction_in_deg <= 203): return "S" elif (203 < wind_direction_in_deg <= 248): return "SW" elif (248 < wind_direction_in_deg <= 293): return "W" elif (293 < wind_direction_in_deg <= 338): return "NW" else: return "Unavailable" return "Unavailable" class FMIBestConditionSensor(CoordinatorEntity): def __init__(self, name, coordinator, sensor_type): super().__init__(coordinator) self.client_name = name self._name = SENSOR_TYPES[sensor_type][0] self._fmi = coordinator self._state = None self._icon = None self.type = sensor_type self._unit_of_measurement = SENSOR_TYPES[sensor_type][1] self.update() @property def name(self): if self._fmi is not None: if self._fmi.current is not None: return f"{self._fmi.current.place} {self._name}" return self._name @property def unique_id(self): @property def state(self): self.update() return self._state @property def icon(self): return self._icon @property def unit_of_measurement(self): return self._unit_of_measurement @property def device_state_attributes(self): if self.type == "time": if self._fmi is not None: if self._fmi.current is not None: return { ATTR_LOCATION: self._fmi.current.place, ATTR_TIME: self._fmi.best_time, ATTR_TEMPERATURE: self._fmi.best_temperature, ATTR_HUMIDITY: self._fmi.best_humidity, ATTR_PRECIPITATION: self._fmi.best_precipitation, ATTR_WIND_SPEED: self._fmi.best_wind_speed, 
ATTR_ATTRIBUTION: ATTRIBUTION, } return {ATTR_ATTRIBUTION: ATTRIBUTION}
MIT License
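A rough sketch of exercising update() outside Home Assistant; `coordinator` is assumed to be the integration's FMI update coordinator, and the sensor type must be one of the SENSOR_TYPES keys.

# Sketch: sensor_type must be a key of SENSOR_TYPES, e.g. "temperature".
sensor = FMIBestConditionSensor("Helsinki", coordinator, "temperature")
sensor.update()
print(sensor.state, sensor.unit_of_measurement)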
spunkybot/spunkybot
spunky.py
LogParser.handle_exit
python
def handle_exit(self, line):
    logger.debug("Exit: %s", line)
    self.handle_awards()
    self.allow_cmd_teams = True
    self.stats_reset(store_score=True)
Handle the exit of a match: show awards, store the user score in the database and reset statistics.
https://github.com/spunkybot/spunkybot/blob/a546e3ac13294de38f7edabf27ee2d286be28a23/spunky.py#L807-L814
__version__ = '1.13.0.dev0' import os import time import sqlite3 import math import textwrap import random import ConfigParser import logging.handlers from Queue import Queue from threading import Thread from threading import RLock import lib.pygeoip as pygeoip import lib.schedule as schedule from lib.pyquake3 import PyQuake3 logger = logging.getLogger('spunkybot') logger.setLevel(logging.DEBUG) logger.propagate = False BOT_PLAYER_NUM = 1022 RCON_DELAY = 0.3 COMMANDS = {'help': {'desc': 'display all available commands', 'syntax': '^7Usage: ^2!help', 'level': 0, 'short': 'h'}, 'forgive': {'desc': 'forgive a player for team killing', 'syntax': '^7Usage: ^2!forgive ^7[<name>]', 'level': 0, 'short': 'f'}, 'forgiveall': {'desc': 'forgive all team kills', 'syntax': '^7Usage: ^2!forgiveall', 'level': 0, 'short': 'fa'}, 'forgivelist': {'desc': 'list all players who killed you', 'syntax': '^7Usage: ^2!forgivelist', 'level': 0, 'short': 'fl'}, 'forgiveprev': {'desc': 'forgive last team kill', 'syntax': '^7Usage: ^2!forgiveprev', 'level': 0, 'short': 'fp'}, 'grudge': {'desc': 'grudge a player for team killing, a grudged player will not be forgiven', 'syntax': '^7Usage: ^2!grudge ^7[<name>]', 'level': 0}, 'bombstats': {'desc': 'display Bomb stats', 'syntax': '^7Usage: ^2!bombstats', 'level': 0}, 'ctfstats': {'desc': 'display Capture the Flag stats', 'syntax': '^7Usage: ^2!ctfstats', 'level': 0}, 'freezestats': {'desc': 'display freeze/thawout stats', 'syntax': '^7Usage: ^2!freezestats', 'level': 0}, 'hestats': {'desc': 'display HE grenade kill stats', 'syntax': '^7Usage: ^2!hestats', 'level': 0}, 'hits': {'desc': 'display hit stats', 'syntax': '^7Usage: ^2!hits', 'level': 0}, 'hs': {'desc': 'display headshot counter', 'syntax': '^7Usage: ^2!hs', 'level': 0}, 'knife': {'desc': 'display knife kill stats', 'syntax': '^7Usage: ^2!knife', 'level': 0}, 'register': {'desc': 'register yourself as a basic user', 'syntax': '^7Usage: ^2!register', 'level': 0}, 'spree': {'desc': 'display current kill streak', 'syntax': '^7Usage: ^2!spree', 'level': 0}, 'stats': {'desc': 'display current map stats', 'syntax': '^7Usage: ^2!stats', 'level': 0}, 'teams': {'desc': 'balance teams', 'syntax': '^7Usage: ^2!teams', 'level': 0}, 'time': {'desc': 'display the current server time', 'syntax': '^7Usage: ^2!time', 'level': 0}, 'regtest': {'desc': 'display current user status', 'syntax': '^7Usage: ^2!regtest', 'level': 1}, 'xlrstats': {'desc': 'display full player statistics', 'syntax': '^7Usage: ^2!xlrstats ^7[<name>]', 'level': 1}, 'xlrtopstats': {'desc': 'display the top players', 'syntax': '^7Usage: ^2!xlrtopstats', 'level': 1, 'short': 'topstats'}, 'admintest': {'desc': 'display current admin status', 'syntax': '^7Usage: ^2!admintest', 'level': 20}, 'country': {'desc': 'get the country of a player', 'syntax': '^7Usage: ^2!country ^7<name>', 'level': 20}, 'lastmaps': {'desc': 'list the last played maps', 'syntax': '^7Usage: ^2!lastmaps', 'level': 20}, 'lastvote': {'desc': 'display information about the last called vote', 'syntax': '^7Usage: ^2!lastvote', 'level': 20}, 'leveltest': {'desc': 'get the admin level for a given player or myself', 'syntax': '^7Usage: ^2!leveltest ^7[<name>]', 'level': 20, 'short': 'lt'}, 'list': {'desc': 'list all connected players', 'syntax': '^7Usage: ^2!list', 'level': 20}, 'locate': {'desc': 'display geolocation info of a player', 'syntax': '^7Usage: ^2!locate ^7<name>', 'level': 20, 'short': 'lc'}, 'mute': {'desc': 'mute or un-mute a player', 'syntax': '^7Usage: ^2!mute ^7<name> [<duration>]', 
'level': 20}, 'nextmap': {'desc': 'display the next map in rotation', 'syntax': '^7Usage: ^2!nextmap', 'level': 20}, 'poke': {'desc': 'notify a player that he needs to move', 'syntax': '^7Usage: ^2!poke ^7<name>', 'level': 20}, 'seen': {'desc': 'display when a player was last seen', 'syntax': '^7Usage: ^2!seen ^7<name>', 'level': 20}, 'shuffleteams': {'desc': 'shuffle the teams', 'syntax': '^7Usage: ^2!shuffleteams', 'level': 20, 'short': 'shuffle'}, 'spec': {'desc': 'move yourself to spectator', 'syntax': '^7Usage: ^2!spec', 'level': 20, 'short': 'sp'}, 'warn': {'desc': 'warn player', 'syntax': '^7Usage: ^2!warn ^7<name> [<reason>]', 'level': 20, 'short': 'w'}, 'warninfo': {'desc': 'display how many warnings a player has', 'syntax': '^7Usage: ^2!warninfo ^7<name>', 'level': 20, 'short': 'wi'}, 'warnremove': {'desc': "remove a player's last warning", 'syntax': '^7Usage: ^2!warnremove ^7<name>', 'level': 20, 'short': 'wr'}, 'warns': {'desc': 'list the warnings', 'syntax': '^7Usage: ^2!warns', 'level': 20}, 'warntest': {'desc': 'test a warning', 'syntax': '^7Usage: ^2!warntest ^7<warning>', 'level': 20}, 'admins': {'desc': 'list all the online admins', 'syntax': '^7Usage: ^2!admins', 'level': 40}, 'afk': {'desc': 'force a player to spec, because he is away from keyboard', 'syntax': '^7Usage: ^2!afk ^7<name>', 'level': 40}, 'aliases': {'desc': 'list the aliases of a player', 'syntax': '^7Usage: ^2!aliases ^7<name>', 'level': 40, 'short': 'alias'}, 'bigtext': {'desc': 'display big message on screen', 'syntax': '^7Usage: ^2!bigtext ^7<text>', 'level': 40}, 'exit': {'desc': 'display last disconnected player', 'syntax': '^7Usage: ^2!exit', 'level': 40}, 'find': {'desc': 'display the slot number of a player', 'syntax': '^7Usage: ^2!find ^7<name>', 'level': 40}, 'force': {'desc': 'force a player to the given team', 'syntax': '^7Usage: ^2!force ^7<name> <blue/red/spec> [<lock>]', 'level': 40}, 'kick': {'desc': 'kick a player', 'syntax': '^7Usage: ^2!kick ^7<name> <reason>', 'level': 40, 'short': 'k'}, 'nuke': {'desc': 'nuke a player', 'syntax': '^7Usage: ^2!nuke ^7<name>', 'level': 40}, 'regulars': {'desc': 'display the regular players online', 'syntax': '^7Usage: ^2!regulars', 'level': 40, 'short': 'regs'}, 'say': {'desc': 'say a message to all players', 'syntax': '^7Usage: ^2!say ^7<text>', 'level': 40, 'short': '!!'}, 'tell': {'desc': 'tell a message to a specific player', 'syntax': '^7Usage: ^2!tell ^7<name> <text>', 'level': 40}, 'tempban': {'desc': 'ban a player temporary for the given period of 1 sec to 3 days', 'syntax': '^7Usage: ^2!tempban ^7<name> <duration> [<reason>]', 'level': 40, 'short': 'tb'}, 'warnclear': {'desc': 'clear the player warnings', 'syntax': '^7Usage: ^2!warnclear ^7<name>', 'level': 40, 'short': 'wc'}, 'ban': {'desc': 'ban a player for several days', 'syntax': '^7Usage: ^2!ban ^7<name> <reason>', 'level': 60, 'short': 'b'}, 'baninfo': {'desc': 'display active bans of a player', 'syntax': '^7Usage: ^2!baninfo ^7<name>', 'level': 60, 'short': 'bi'}, 'ci': {'desc': 'kick player with connection interrupt', 'syntax': '^7Usage: ^2!ci ^7<name>', 'level': 60}, 'forgiveclear': {'desc': "clear a player's team kills", 'syntax': '^7Usage: ^2!forgiveclear ^7[<name>]', 'level': 60, 'short': 'fc'}, 'forgiveinfo': {'desc': "display a player's team kills", 'syntax': '^7Usage: ^2!forgiveinfo ^7<name>', 'level': 60, 'short': 'fi'}, 'ping': {'desc': 'display the ping of a player', 'syntax': '^7Usage: ^2!ping ^7<name>', 'level': 60}, 'id': {'desc': 'show the IP, guid and authname of a 
player', 'syntax': '^7Usage: ^2!id ^7<name>', 'level': 60}, 'kickbots': {'desc': 'kick all bots', 'syntax': '^7Usage: ^2!kickbots', 'level': 60, 'short': 'kb'}, 'rain': {'desc': 'enables or disables rain', 'syntax': '^7Usage: ^2!rain ^7<on/off>', 'level': 60}, 'scream': {'desc': 'scream a message in different colors to all players', 'syntax': '^7Usage: ^2!scream ^7<text>', 'level': 60}, 'slap': {'desc': 'slap a player (a number of times)', 'syntax': '^7Usage: ^2!slap ^7<name> [<amount>]', 'level': 60}, 'status': {'desc': 'report the status of the bot', 'syntax': '^7Usage: ^2!status', 'level': 60}, 'swap': {'desc': 'swap teams for player A and B', 'syntax': '^7Usage: ^2!swap ^7<name1> [<name2>]', 'level': 60}, 'version': {'desc': 'display the version of the bot', 'syntax': '^7Usage: ^2!version', 'level': 60}, 'veto': {'desc': 'stop voting process', 'syntax': '^7Usage: ^2!veto', 'level': 60}, 'addbots': {'desc': 'add up to 4 bots to the game', 'syntax': '^7Usage: ^2!addbots', 'level': 80}, 'banall': {'desc': 'ban all players matching pattern', 'syntax': '^7Usage: ^2!banall ^7<pattern> [<reason>]', 'level': 80, 'short': 'ball'}, 'banlist': {'desc': 'display the last active 10 bans', 'syntax': '^7Usage: ^2!banlist', 'level': 80}, 'bots': {'desc': 'enables or disables bot support', 'syntax': '^7Usage: ^2!bots ^7<on/off>', 'level': 80}, 'cyclemap': {'desc': 'cycle to the next map', 'syntax': '^7Usage: ^2!cyclemap', 'level': 80}, 'exec': {'desc': 'execute given config file', 'syntax': '^7Usage: ^2!exec ^7<filename>', 'level': 80}, 'gear': {'desc': 'set allowed weapons', 'syntax': '^7Usage: ^2!gear ^7<default/all/knife/pistol/shotgun/sniper/magnum/mac>', 'level': 80}, 'instagib': {'desc': 'set Instagib mode', 'syntax': '^7Usage: ^2!instagib ^7<on/off>', 'level': 80}, 'kickall': {'desc': 'kick all players matching pattern', 'syntax': '^7Usage: ^2!kickall ^7<pattern> [<reason>]', 'level': 80, 'short': 'kall'}, 'kill': {'desc': 'kill a player', 'syntax': '^7Usage: ^2!kill ^7<name>', 'level': 80}, 'clear': {'desc': 'clear all player warnings', 'syntax': '^7Usage: ^2!clear', 'level': 80, 'short': 'kiss'}, 'lastadmin': {'desc': 'display the last disconnected admin', 'syntax': '^7Usage: ^2!lastadmin', 'level': 80}, 'lastbans': {'desc': 'list the last 4 bans', 'syntax': '^7Usage: ^2!lastbans', 'level': 80, 'short': 'bans'}, 'lookup': {'desc': 'search for a player in the database', 'syntax': '^7Usage: ^2!lookup ^7<name>', 'level': 80, 'short': 'l'}, 'makereg': {'desc': 'make a player a regular (Level 2) user', 'syntax': '^7Usage: ^2!makereg ^7<name>', 'level': 80, 'short': 'mr'}, 'map': {'desc': 'load given map', 'syntax': '^7Usage: ^2!map ^7<ut4_name>', 'level': 80}, 'mapcycle': {'desc': 'list the map rotation', 'syntax': '^7Usage: ^2!mapcycle', 'level': 80}, 'maps': {'desc': 'display all available maps', 'syntax': '^7Usage: ^2!maps', 'level': 80}, 'maprestart': {'desc': 'restart the map', 'syntax': '^7Usage: ^2!maprestart', 'level': 80, 'short': 'restart'}, 'moon': {'desc': 'activate low gravity mode (Moon mode)', 'syntax': '^7Usage: ^2!moon ^7<on/off>', 'level': 80, 'short': 'lowgravity'}, 'permban': {'desc': 'ban a player permanent', 'syntax': '^7Usage: ^2!permban ^7<name> <reason>', 'level': 80, 'short': 'pb'}, 'putgroup': {'desc': 'add a client to a group', 'syntax': '^7Usage: ^2!putgroup ^7<name> <group>', 'level': 80}, 'rebuild': {'desc': 'sync up all available maps', 'syntax': '^7Usage: ^2!rebuild', 'level': 80}, 'setgravity': {'desc': 'set the gravity (default: 800)', 'syntax': '^7Usage: 
^2!setgravity ^7<value>', 'level': 80}, 'setnextmap': {'desc': 'set the next map', 'syntax': '^7Usage: ^2!setnextmap ^7<ut4_name>', 'level': 80}, 'swapteams': {'desc': 'swap the current teams', 'syntax': '^7Usage: ^2!swapteams', 'level': 80}, 'unban': {'desc': 'unban a player from the database', 'syntax': '^7Usage: ^2!unban ^7<@ID>', 'level': 80}, 'unreg': {'desc': 'remove a player from the regular group', 'syntax': '^7Usage: ^2!unreg ^7<name>', 'level': 80}, 'bomb': {'desc': 'change gametype to Bomb', 'syntax': '^7Usage: ^2!bomb', 'level': 90}, 'ctf': {'desc': 'change gametype to Capture the Flag', 'syntax': '^7Usage: ^2!ctf', 'level': 90}, 'ffa': {'desc': 'change gametype to Free For All', 'syntax': '^7Usage: ^2!ffa', 'level': 90}, 'gungame': {'desc': 'change gametype to Gun Game', 'syntax': '^7Usage: ^2!gungame', 'level': 90}, 'jump': {'desc': 'change gametype to Jump', 'syntax': '^7Usage: ^2!jump', 'level': 90}, 'lms': {'desc': 'change gametype to Last Man Standing', 'syntax': '^7Usage: ^2!lms', 'level': 90}, 'tdm': {'desc': 'change gametype to Team Deathmatch', 'syntax': '^7Usage: ^2!tdm', 'level': 90}, 'ts': {'desc': 'change gametype to Team Survivor', 'syntax': '^7Usage: ^2!ts', 'level': 90}, 'ungroup': {'desc': 'remove admin level from a player', 'syntax': '^7Usage: ^2!ungroup ^7<name>', 'level': 90}, 'password': {'desc': 'set private server password', 'syntax': '^7Usage: ^2!password ^7[<password>]', 'level': 90}, 'reload': {'desc': 'reload map', 'syntax': '^7Usage: ^2!reload', 'level': 90}} REASONS = {'obj': 'go for objective', 'camp': 'stop camping', 'spam': 'do not spam, shut-up!', 'lang': 'bad language', 'racism': 'racism is not tolerated', 'ping': 'fix your ping', 'afk': 'away from keyboard', 'tk': 'stop team killing', 'sk': 'stop spawn killing', 'spec': 'spectator too long on full server', 'score': 'score too low for this server', 'ci': 'connection interrupted', '999': 'connection interrupted', 'whiner': 'stop complaining all the time', 'skill': 'skill too low for this server', 'name': 'do not use offensive names', 'wh': 'wallhack', 'insult': 'stop insulting', 'autojoin': 'use auto-join', 'abuse': 'stop abusing others', 'teams': 'keep the teams even'} class LogParser(object): def __init__(self): self.hit_points = {0: "HEAD", 1: "HEAD", 2: "HELMET", 3: "TORSO", 4: "VEST", 5: "LEFT_ARM", 6: "RIGHT_ARM", 7: "GROIN", 8: "BUTT", 9: "LEFT_UPPER_LEG", 10: "RIGHT_UPPER_LEG", 11: "LEFT_LOWER_LEG", 12: "RIGHT_LOWER_LEG", 13: "LEFT_FOOT", 14: "RIGHT_FOOT"} self.hit_item = {1: "UT_MOD_KNIFE", 2: "UT_MOD_BERETTA", 3: "UT_MOD_DEAGLE", 4: "UT_MOD_SPAS", 5: "UT_MOD_MP5K", 6: "UT_MOD_UMP45", 8: "UT_MOD_LR300", 9: "UT_MOD_G36", 10: "UT_MOD_PSG1", 14: "UT_MOD_SR8", 15: "UT_MOD_AK103", 17: "UT_MOD_NEGEV", 19: "UT_MOD_M4", 20: "UT_MOD_GLOCK", 21: "UT_MOD_COLT1911", 22: "UT_MOD_MAC11", 23: "UT_MOD_BLED"} self.death_cause = {1: "MOD_WATER", 3: "MOD_LAVA", 5: "UT_MOD_TELEFRAG", 6: "MOD_FALLING", 7: "UT_MOD_SUICIDE", 9: "MOD_TRIGGER_HURT", 10: "MOD_CHANGE_TEAM", 12: "UT_MOD_KNIFE", 13: "UT_MOD_KNIFE_THROWN", 14: "UT_MOD_BERETTA", 15: "UT_MOD_DEAGLE", 16: "UT_MOD_SPAS", 17: "UT_MOD_UMP45", 18: "UT_MOD_MP5K", 19: "UT_MOD_LR300", 20: "UT_MOD_G36", 21: "UT_MOD_PSG1", 22: "UT_MOD_HK69", 23: "UT_MOD_BLED", 24: "UT_MOD_KICKED", 25: "UT_MOD_HEGRENADE", 28: "UT_MOD_SR8", 30: "UT_MOD_AK103", 31: "UT_MOD_SPLODED", 32: "UT_MOD_SLAPPED", 33: "UT_MOD_SMITED", 34: "UT_MOD_BOMBED", 35: "UT_MOD_NUKED", 36: "UT_MOD_NEGEV", 37: "UT_MOD_HK69_HIT", 38: "UT_MOD_M4", 39: "UT_MOD_GLOCK", 40: "UT_MOD_COLT1911", 41: 
"UT_MOD_MAC11"} self.user_cmds = [] self.mod_cmds = [] self.admin_cmds = [] self.fulladmin_cmds = [] self.senioradmin_cmds = [] self.superadmin_cmds = [] self.shortcut_cmd = {} for key, value in COMMANDS.iteritems(): if 'short' in value: self.shortcut_cmd[value['short']] = key if value['level'] == 20: self.mod_cmds.append(key) self.admin_cmds.append(key) self.fulladmin_cmds.append(key) self.senioradmin_cmds.append(key) self.superadmin_cmds.append(key) elif value['level'] == 40: self.admin_cmds.append(key) self.fulladmin_cmds.append(key) self.senioradmin_cmds.append(key) self.superadmin_cmds.append(key) elif value['level'] == 60: self.fulladmin_cmds.append(key) self.senioradmin_cmds.append(key) self.superadmin_cmds.append(key) elif value['level'] == 80: self.senioradmin_cmds.append(key) self.superadmin_cmds.append(key) elif value['level'] >= 90: self.superadmin_cmds.append(key) else: self.user_cmds.append(key) self.mod_cmds.append(key) self.admin_cmds.append(key) self.fulladmin_cmds.append(key) self.senioradmin_cmds.append(key) self.superadmin_cmds.append(key) self.user_cmds.sort() self.mod_cmds.sort() self.admin_cmds.sort() self.fulladmin_cmds.sort() self.senioradmin_cmds.sort() self.superadmin_cmds.sort() logger.info("Starting logging : OK") games_log = CONFIG.get('server', 'log_file') self.ffa_lms_gametype = False self.ctf_gametype = False self.ts_gametype = False self.tdm_gametype = False self.bomb_gametype = False self.freeze_gametype = False self.ts_do_team_balance = False self.allow_cmd_teams = True self.urt_modversion = None self.game = None self.players_lock = RLock() self.firstblood = False self.firstnadekill = False self.firstknifekill = False self.firstteamkill = False self.last_disconnected_player = None self.last_admin = None self.allow_nextmap_vote = True self.failed_vote_timer = 0 self.last_vote = '' self.default_gear = '' self.tk_autokick = CONFIG.getboolean('bot', 'teamkill_autokick') if CONFIG.has_option('bot', 'teamkill_autokick') else True self.allow_tk_bots = CONFIG.getboolean('bot', 'allow_teamkill_bots') if CONFIG.has_option('bot', 'allow_teamkill_bots') else False self.noob_autokick = CONFIG.getboolean('bot', 'noob_autokick') if CONFIG.has_option('bot', 'noob_autokick') else False self.spawnkill_autokick = CONFIG.getboolean('bot', 'spawnkill_autokick') if CONFIG.has_option('bot', 'spawnkill_autokick') else False self.kill_spawnkiller = CONFIG.getboolean('bot', 'instant_kill_spawnkiller') if CONFIG.has_option('bot', 'instant_kill_spawnkiller') else False self.spawnkill_warn_time = CONFIG.getint('bot', 'spawnkill_warn_time') if CONFIG.has_option('bot', 'spawnkill_warn_time') else 3 self.max_ping = CONFIG.getint('bot', 'max_ping') if CONFIG.has_option('bot', 'max_ping') else 200 self.num_kick_specs = CONFIG.getint('bot', 'kick_spec_full_server') if CONFIG.has_option('bot', 'kick_spec_full_server') else 10 self.task_frequency = CONFIG.getint('bot', 'task_frequency') if CONFIG.has_option('bot', 'task_frequency') else 60 self.warn_expiration = CONFIG.getint('bot', 'warn_expiration') if CONFIG.has_option('bot', 'warn_expiration') else 240 self.bad_words_autokick = CONFIG.getint('bot', 'bad_words_autokick') if CONFIG.has_option('bot', 'bad_words_autokick') else 0 self.show_country_on_connect = CONFIG.getboolean('bot', 'show_country_on_connect') if CONFIG.has_option('bot', 'show_country_on_connect') else True self.show_first_kill_msg = CONFIG.getboolean('bot', 'show_first_kill') if CONFIG.has_option('bot', 'show_first_kill') else True self.show_hit_stats_msg = 
CONFIG.getboolean('bot', 'show_hit_stats_respawn') if CONFIG.has_option('bot', 'show_hit_stats_respawn') else True self.show_multikill_msg = CONFIG.getboolean('bot', 'show_multi_kill') if CONFIG.has_option('bot', 'show_multi_kill') else True self.teams_autobalancer = CONFIG.getboolean('bot', 'autobalancer') if CONFIG.has_option('bot', 'autobalancer') else False self.allow_cmd_teams_round_end = CONFIG.getboolean('bot', 'allow_teams_round_end') if CONFIG.has_option('bot', 'allow_teams_round_end') else False self.limit_nextmap_votes = CONFIG.getboolean('bot', 'limit_nextmap_votes') if CONFIG.has_option('bot', 'limit_nextmap_votes') else False self.vote_delay = CONFIG.getint('bot', 'vote_delay') if CONFIG.has_option('bot', 'vote_delay') else 0 self.spam_bomb_planted_msg = CONFIG.getboolean('bot', 'spam_bomb_planted') if CONFIG.has_option('bot', 'spam_bomb_planted') else False self.kill_survived_opponents = CONFIG.getboolean('bot', 'kill_survived_opponents') if CONFIG.has_option('bot', 'kill_survived_opponents') else False self.spam_knife_kills_msg = CONFIG.getboolean('bot', 'spam_knife_kills') if CONFIG.has_option('bot', 'spam_knife_kills') else False self.spam_nade_kills_msg = CONFIG.getboolean('bot', 'spam_nade_kills') if CONFIG.has_option('bot', 'spam_nade_kills') else False self.spam_headshot_hits_msg = CONFIG.getboolean('bot', 'spam_headshot_hits') if CONFIG.has_option('bot', 'spam_headshot_hits') else False self.reset_headshot_hits_mapcycle = CONFIG.getboolean('bot', 'reset_headshot_hits_mapcycle') if CONFIG.has_option('bot', 'reset_headshot_hits_mapcycle') else True self.reset_kill_spree_mapcycle = CONFIG.getboolean('bot', 'reset_kill_spree_mapcycle') if CONFIG.has_option('bot', 'reset_kill_spree_mapcycle') else True ban_duration = CONFIG.getint('bot', 'ban_duration') if CONFIG.has_option('bot', 'ban_duration') else 7 self.ban_duration = ban_duration if ban_duration > 0 else 1 self.support_lowgravity = CONFIG.getboolean('lowgrav', 'support_lowgravity') if CONFIG.has_option('lowgrav', 'support_lowgravity') else False self.gravity = CONFIG.getint('lowgrav', 'gravity') if CONFIG.has_option('lowgrav', 'gravity') else 800 self.explode_time = "40" logger.info("Configuration loaded : OK") curs.execute("SELECT COUNT(*) FROM `xlrstats` WHERE `admin_role` = 100") self.iamgod = True if int(curs.fetchone()[0]) < 1 else False logger.info("Connecting to Database: OK") logger.debug("Cmd !iamgod available : %s", self.iamgod) self.uptime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) if CONFIG.has_option('rules', 'show_rules') and CONFIG.getboolean('rules', 'show_rules'): self.output_rules = CONFIG.get('rules', 'display') if CONFIG.has_option('rules', 'display') else "chat" rules_frequency = CONFIG.getint('rules', 'rules_frequency') if CONFIG.has_option('rules', 'rules_frequency') else 90 self.rules_file = os.path.join(HOME, 'conf', 'rules.conf') self.rules_frequency = rules_frequency if rules_frequency > 0 else 10 if os.path.isfile(self.rules_file): self.thread_rotate() logger.info("Load rotating messages: OK") else: logger.error("ERROR: Rotating messages will be ignored, file '%s' has not been found", self.rules_file) try: self.log_file = open(games_log, 'r') except IOError: logger.error("ERROR: The Gamelog file '%s' has not been found", games_log) logger.error("*** Aborting Spunky Bot ***") else: self.log_file.seek(0, 2) logger.info("Parsing Gamelog file : %s", games_log) self.read_log() def thread_rotate(self): processor = Thread(target=self.rotating_messages) 
processor.setDaemon(True) processor.start() def rotating_messages(self): time.sleep(30) while 1: with open(self.rules_file, 'r') as filehandle: rotation_msg = filehandle.readlines() if not rotation_msg: break for line in rotation_msg: with self.players_lock: if "@admins" in line: self.game.rcon_say(self.get_admins_online()) elif "@admincount" in line: self.game.rcon_say(self.get_admin_count()) elif "@nextmap" in line: self.game.rcon_say(self.get_nextmap()) elif "@time" in line: self.game.rcon_say("^7Time: %s" % time.strftime("%H:%M", time.localtime(time.time()))) elif "@bigtext" in line: self.game.rcon_bigtext("^7%s" % line.split('@bigtext')[-1].strip()) else: if self.output_rules == 'chat': self.game.rcon_say("^2%s" % line.strip()) elif self.output_rules == 'bigtext': self.game.rcon_bigtext("^2%s" % line.strip()) else: self.game.send_rcon("^2%s" % line.strip()) time.sleep(self.rules_frequency) def find_game_start(self): seek_amount = 768 start_pos = self.log_file.tell() - seek_amount end_pos = start_pos + seek_amount try: self.log_file.seek(start_pos) except IOError: logger.error("ERROR: The games.log file is empty, ignoring game type and start") self.log_file.seek(0, 2) game_start = True else: game_start = False while not game_start: while self.log_file: line = self.log_file.readline() tmp = line.split() if len(tmp) > 1 and tmp[1] == "InitGame:": game_start = True if 'g_modversion\\4.3' in line: self.hit_item.update({23: "UT_MOD_FRF1", 24: "UT_MOD_BENELLI", 25: "UT_MOD_P90", 26: "UT_MOD_MAGNUM", 29: "UT_MOD_KICKED", 30: "UT_MOD_KNIFE_THROWN"}) self.death_cause.update({42: "UT_MOD_FRF1", 43: "UT_MOD_BENELLI", 44: "UT_MOD_P90", 45: "UT_MOD_MAGNUM", 46: "UT_MOD_TOD50", 47: "UT_MOD_FLAG", 48: "UT_MOD_GOOMBA"}) self.urt_modversion = 43 logger.info("Game modversion : 4.3") elif 'g_modversion\\4.2' in line: self.hit_item.update({23: "UT_MOD_BLED", 24: "UT_MOD_KICKED", 25: "UT_MOD_KNIFE_THROWN"}) self.death_cause.update({42: "UT_MOD_FLAG", 43: "UT_MOD_GOOMBA"}) self.urt_modversion = 42 logger.info("Game modversion : 4.2") elif 'g_modversion\\4.1' in line: self.hit_points = {0: "HEAD", 1: "HELMET", 2: "TORSO", 3: "KEVLAR", 4: "ARMS", 5: "LEGS", 6: "BODY"} self.hit_item.update({21: "UT_MOD_KICKED", 22: "UT_MOD_KNIFE_THROWN"}) self.death_cause.update({33: "UT_MOD_BOMBED", 34: "UT_MOD_NUKED", 35: "UT_MOD_NEGEV", 39: "UT_MOD_FLAG", 40: "UT_MOD_GOOMBA"}) self.urt_modversion = 41 logger.info("Game modversion : 4.1") if 'g_gametype\\0\\' in line or 'g_gametype\\1\\' in line or 'g_gametype\\9\\' in line or 'g_gametype\\11\\' in line: self.ffa_lms_gametype = True elif 'g_gametype\\7\\' in line: self.ctf_gametype = True elif 'g_gametype\\4\\' in line or 'g_gametype\\5\\' in line: self.ts_gametype = True elif 'g_gametype\\3\\' in line: self.tdm_gametype = True elif 'g_gametype\\8\\' in line: self.bomb_gametype = True elif 'g_gametype\\10\\' in line: self.freeze_gametype = True self.default_gear = line.split('g_gear\\')[-1].split('\\')[0] if 'g_gear\\' in line else "%s" % '' if self.urt_modversion > 41 else '0' if self.log_file.tell() > end_pos: break elif not line: break if self.log_file.tell() < seek_amount: self.log_file.seek(0, 0) else: cur_pos = start_pos - seek_amount end_pos = start_pos start_pos = cur_pos start_pos = max(start_pos, 0) self.log_file.seek(start_pos) def read_log(self): if self.task_frequency > 0: if self.task_frequency < 10: schedule.every(10).seconds.do(self.taskmanager) else: schedule.every(self.task_frequency).seconds.do(self.taskmanager) 
schedule.every(2).hours.do(self.remove_expired_db_entries) self.find_game_start() self.game = Game(self.urt_modversion) self.log_file.seek(0, 2) while self.log_file: schedule.run_pending() line = self.log_file.readline() if line: self.parse_line(line) else: if not self.game.live: self.game.go_live() time.sleep(.125) def remove_expired_db_entries(self): curs.execute("DELETE FROM `ban_points` WHERE `expires` < '{}'".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())))) conn.commit() def taskmanager(self): try: with self.players_lock: counter = self.game.get_number_players() for player in self.game.players.itervalues(): player_num = player.get_player_num() if player_num == BOT_PLAYER_NUM: continue player_name = player.get_name() player_admin_role = player.get_admin_role() if self.warn_expiration > 0 and player.get_warning() > 0 and player.get_last_warn_time() and player.get_last_warn_time() + self.warn_expiration < time.time(): player.clear_warning() if player.get_warning() > 2 and player_admin_role < 40: if 'spectator' in player.get_last_warn_msg(): kick_msg = reason = "spectator too long on full server" elif 'ping' in player.get_last_warn_msg(): kick_msg = "ping too high for this server ^7[^4%s^7]" % player.get_ping_value() reason = "fix your ping" elif 'score' in player.get_last_warn_msg(): kick_msg = reason = "score too low for this server" elif 'team killing' in player.get_last_warn_msg(): kick_msg = reason = "team killing over limit" player.add_ban_point('auto-kick for team killing', 600) else: kick_msg = reason = "too many warnings" self.game.rcon_say("^2%s ^7was kicked, %s" % (player_name, kick_msg)) self.game.kick_player(player_num, reason=reason) continue if self.num_kick_specs > 0 and player_admin_role < 20: if 'GTV-' in player_name: continue elif counter > self.num_kick_specs and player.get_team() == 3 and player.get_time_joined() < (time.time() - 30): player.add_warning(warning='spectator too long on full server', timer=False) logger.debug("%s is spectator too long on full server", player_name) warnmsg = "^1WARNING ^7[^3%d^7]: You are spectator too long on full server" % player.get_warning() self.game.rcon_tell(player_num, warnmsg, False) else: player.clear_specific_warning('spectator too long on full server') if self.noob_autokick and player_admin_role < 2 and player.get_ip_address() != '0.0.0.0': kills = player.get_kills() deaths = player.get_deaths() ratio = round(float(kills) / float(deaths), 2) if deaths > 0 else 1.0 if kills > 0 and ratio < 0.33: player.add_warning(warning='score too low for this server', timer=False) logger.debug("Score of %s is too low, ratio: %s", player_name, ratio) warnmsg = "^1WARNING ^7[^3%d^7]: Your score is too low for this server" % player.get_warning() self.game.rcon_tell(player_num, warnmsg, False) else: player.clear_specific_warning('score too low for this server') if player.get_warning() == 3 and player_admin_role < 40: self.game.rcon_say("^1ALERT: ^2%s ^7auto-kick from warnings if not cleared" % player_name) self.check_player_ping() except Exception as err: logger.error(err, exc_info=True) def check_player_ping(self): if self.max_ping > 0: self.game.quake.rcon_update() for player in self.game.quake.players: try: ping_value = player.ping gameplayer = self.game.players[player.num] except KeyError: continue else: if self.max_ping < ping_value < 999 and gameplayer.get_admin_role() < 40: gameplayer.add_high_ping(ping_value) self.game.rcon_tell(player.num, "^1WARNING ^7[^3%d^7]: Your ping is too high [^4%d^7]. 
^3The maximum allowed ping is %d." % (gameplayer.get_warning(), ping_value, self.max_ping), False) else: gameplayer.clear_specific_warning('fix your ping') def parse_line(self, string): line = string[7:] tmp = line.split(":", 1) line = tmp[1].strip() if len(tmp) > 1 else tmp[0].strip() option = {'InitGame': self.new_game, 'Warmup': self.handle_warmup, 'InitRound': self.handle_initround, 'Exit': self.handle_exit, 'say': self.handle_say, 'sayteam': self.handle_say, 'saytell': self.handle_saytell, 'ClientUserinfo': self.handle_userinfo, 'ClientUserinfoChanged': self.handle_userinfo_changed, 'ClientBegin': self.handle_begin, 'ClientDisconnect': self.handle_disconnect, 'SurvivorWinner': self.handle_teams_ts_mode, 'Kill': self.handle_kill, 'Hit': self.handle_hit, 'Freeze': self.handle_freeze, 'ThawOutFinished': self.handle_thawout, 'ClientSpawn': self.handle_spawn, 'Flag': self.handle_flag, 'FlagCaptureTime': self.handle_flagcapturetime, 'VotePassed': self.handle_vote_passed, 'VoteFailed': self.handle_vote_failed, 'Callvote': self.handle_callvote} try: action = tmp[0].strip() if action in option: option[action](line) elif 'Bomb' in action: self.handle_bomb(line) elif 'Pop' in action: self.handle_bomb_exploded() except (IndexError, KeyError): pass except Exception as err: logger.error(err, exc_info=True) def explode_line(self, line): arr = line.lstrip().lstrip('\\').split('\\') key = True key_val = None values = {} for item in arr: if key: key_val = item key = False else: values[key_val.rstrip()] = item.rstrip() key_val = None key = True return values def handle_vote_passed(self, line): if "g_nextmap" in line: self.game.next_mapname = line.split("g_nextmap")[-1].strip('"').strip() self.game.rcon_say("^7Vote to set next map to '%s' ^2passed" % self.game.next_mapname) self.allow_nextmap_vote = False elif "cyclemap" in line: self.game.rcon_say("^7Vote to cycle map ^2passed") elif "clientkickreason" in line: self.game.rcon_say("^7Vote to kick %s ^2passed" % self.game.players[int(line.split('"clientkickreason "')[-1].strip('"'))].get_name()) def handle_vote_failed(self, line): if "g_nextmap" in line: self.game.rcon_say("^7Vote to set next map to '%s' ^1failed" % line.split("g_nextmap")[-1].strip('"').strip()) if self.vote_delay: self.failed_vote_timer = time.time() + self.vote_delay elif "cyclemap" in line: self.game.rcon_say("^7Vote to cycle map ^1failed") elif "clientkickreason" in line: self.game.rcon_say("^7Vote to kick %s ^1failed" % self.game.players[int(line.split('"clientkickreason "')[-1].strip('"'))].get_name()) def handle_callvote(self, line): if "g_nextmap" in line: self.last_vote = "nextmap" elif "cyclemap" in line: self.last_vote = "cyclemap" elif "clientkickreason" in line: self.last_vote = "kick" spam_msg = True now = time.time() if "g_nextmap" in line and self.limit_nextmap_votes and not self.allow_nextmap_vote: self.game.send_rcon('veto') self.game.rcon_say("^7Voting for Next Map is disabled until the end of this map") spam_msg = False if "map" in line and self.failed_vote_timer > now: remaining_time = int(self.failed_vote_timer - now) self.game.send_rcon('veto') self.game.rcon_say("^7Map voting is disabled for ^2%d ^7seconds" % remaining_time) if spam_msg: self.game.rcon_bigtext("^7Press ^2F1 ^7or ^1F2 ^7to vote!") if self.game.get_last_maps() and ('"g_nextmap' in line or '"map' in line): self.game.rcon_say("^7Last Maps: ^3%s" % ", ".join(self.game.get_last_maps())) def new_game(self, line): self.ffa_lms_gametype = True if ('g_gametype\\0\\' in line or 'g_gametype\\1\\' in line or 
'g_gametype\\9\\' in line or 'g_gametype\\11\\' in line) else False self.ctf_gametype = True if 'g_gametype\\7\\' in line else False self.ts_gametype = True if ('g_gametype\\4\\' in line or 'g_gametype\\5\\' in line) else False self.tdm_gametype = True if 'g_gametype\\3\\' in line else False self.bomb_gametype = True if 'g_gametype\\8\\' in line else False self.freeze_gametype = True if 'g_gametype\\10\\' in line else False logger.debug("InitGame: Starting game...") self.game.rcon_clear() self.stats_reset() self.game.set_current_map() self.game.set_all_maps() if self.support_lowgravity: self.game.send_rcon("set g_gravity %d" % self.gravity) if self.bomb_gametype: detonation_timer = self.game.get_cvar('g_bombexplodetime') self.explode_time = detonation_timer or "40" self.last_disconnected_player = None self.allow_nextmap_vote = True self.failed_vote_timer = 0 def handle_spawn(self, line): player_num = int(line) with self.players_lock: self.game.players[player_num].set_alive(True) def handle_flagcapturetime(self, line): tmp = line.split(": ", 1) player_num = int(tmp[0]) action = tmp[1] if action.isdigit(): cap_time = round(float(action) / 1000, 2) logger.debug("Player %d captured the flag in %s seconds", player_num, cap_time) with self.players_lock: self.game.players[player_num].set_flag_capture_time(cap_time) def handle_warmup(self, line): logger.debug("Warmup... %s", line) self.allow_cmd_teams = True def handle_initround(self, _): logger.debug("InitRound: Round started...") if self.ctf_gametype: with self.players_lock: for player in self.game.players.itervalues(): player.reset_flag_stats() elif self.ts_gametype or self.bomb_gametype or self.freeze_gametype: if self.allow_cmd_teams_round_end: self.allow_cmd_teams = False
MIT License
openstack/zun
zun/criapi/api_pb2_grpc.py
ImageServiceServicer.RemoveImage
python
def RemoveImage(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
RemoveImage removes the image. This call is idempotent, and must not return an error if the image has already been removed.
https://github.com/openstack/zun/blob/7ed094696b75d2971d1a6d467bb95e2a641ad9ae/zun/criapi/api_pb2_grpc.py#L507-L514
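For orientation, here is a minimal, hypothetical override of this stub in a concrete servicer, returning the RemoveImageResponse message the stub classes already reference; the server wiring assumes the add_ImageServiceServicer_to_server helper that gRPC codegen normally emits next to these classes but that is not shown in the excerpt below:

    import grpc
    from concurrent import futures

    from zun.criapi import api_pb2 as cri_pb2
    from zun.criapi import api_pb2_grpc as cri_grpc


    class DummyImageService(cri_grpc.ImageServiceServicer):

        def RemoveImage(self, request, context):
            # Idempotent by contract: report success even if the image is already gone.
            return cri_pb2.RemoveImageResponse()


    server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
    # Assumed codegen helper, not shown in the generated excerpt below:
    cri_grpc.add_ImageServiceServicer_to_server(DummyImageService(), server)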
import grpc from zun.criapi import api_pb2 as zun_dot_criapi_dot_api__pb2 class RuntimeServiceStub(object): def __init__(self, channel): self.Version = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/Version', request_serializer=zun_dot_criapi_dot_api__pb2.VersionRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.VersionResponse.FromString, ) self.RunPodSandbox = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/RunPodSandbox', request_serializer=zun_dot_criapi_dot_api__pb2.RunPodSandboxRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.RunPodSandboxResponse.FromString, ) self.StopPodSandbox = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/StopPodSandbox', request_serializer=zun_dot_criapi_dot_api__pb2.StopPodSandboxRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.StopPodSandboxResponse.FromString, ) self.RemovePodSandbox = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/RemovePodSandbox', request_serializer=zun_dot_criapi_dot_api__pb2.RemovePodSandboxRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.RemovePodSandboxResponse.FromString, ) self.PodSandboxStatus = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/PodSandboxStatus', request_serializer=zun_dot_criapi_dot_api__pb2.PodSandboxStatusRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.PodSandboxStatusResponse.FromString, ) self.ListPodSandbox = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/ListPodSandbox', request_serializer=zun_dot_criapi_dot_api__pb2.ListPodSandboxRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ListPodSandboxResponse.FromString, ) self.CreateContainer = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/CreateContainer', request_serializer=zun_dot_criapi_dot_api__pb2.CreateContainerRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.CreateContainerResponse.FromString, ) self.StartContainer = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/StartContainer', request_serializer=zun_dot_criapi_dot_api__pb2.StartContainerRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.StartContainerResponse.FromString, ) self.StopContainer = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/StopContainer', request_serializer=zun_dot_criapi_dot_api__pb2.StopContainerRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.StopContainerResponse.FromString, ) self.RemoveContainer = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/RemoveContainer', request_serializer=zun_dot_criapi_dot_api__pb2.RemoveContainerRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.RemoveContainerResponse.FromString, ) self.ListContainers = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/ListContainers', request_serializer=zun_dot_criapi_dot_api__pb2.ListContainersRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ListContainersResponse.FromString, ) self.ContainerStatus = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/ContainerStatus', request_serializer=zun_dot_criapi_dot_api__pb2.ContainerStatusRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ContainerStatusResponse.FromString, ) self.UpdateContainerResources = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/UpdateContainerResources', 
request_serializer=zun_dot_criapi_dot_api__pb2.UpdateContainerResourcesRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.UpdateContainerResourcesResponse.FromString, ) self.ReopenContainerLog = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/ReopenContainerLog', request_serializer=zun_dot_criapi_dot_api__pb2.ReopenContainerLogRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ReopenContainerLogResponse.FromString, ) self.ExecSync = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/ExecSync', request_serializer=zun_dot_criapi_dot_api__pb2.ExecSyncRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ExecSyncResponse.FromString, ) self.Exec = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/Exec', request_serializer=zun_dot_criapi_dot_api__pb2.ExecRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ExecResponse.FromString, ) self.Attach = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/Attach', request_serializer=zun_dot_criapi_dot_api__pb2.AttachRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.AttachResponse.FromString, ) self.PortForward = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/PortForward', request_serializer=zun_dot_criapi_dot_api__pb2.PortForwardRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.PortForwardResponse.FromString, ) self.ContainerStats = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/ContainerStats', request_serializer=zun_dot_criapi_dot_api__pb2.ContainerStatsRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ContainerStatsResponse.FromString, ) self.ListContainerStats = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/ListContainerStats', request_serializer=zun_dot_criapi_dot_api__pb2.ListContainerStatsRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ListContainerStatsResponse.FromString, ) self.UpdateRuntimeConfig = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/UpdateRuntimeConfig', request_serializer=zun_dot_criapi_dot_api__pb2.UpdateRuntimeConfigRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.UpdateRuntimeConfigResponse.FromString, ) self.Status = channel.unary_unary( '/runtime.v1alpha2.RuntimeService/Status', request_serializer=zun_dot_criapi_dot_api__pb2.StatusRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.StatusResponse.FromString, ) class RuntimeServiceServicer(object): def Version(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RunPodSandbox(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def StopPodSandbox(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RemovePodSandbox(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PodSandboxStatus(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListPodSandbox(self, 
request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CreateContainer(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def StartContainer(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def StopContainer(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RemoveContainer(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListContainers(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ContainerStatus(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateContainerResources(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ReopenContainerLog(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ExecSync(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Exec(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Attach(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PortForward(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ContainerStats(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListContainerStats(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateRuntimeConfig(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Status(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_RuntimeServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'Version': grpc.unary_unary_rpc_method_handler( servicer.Version, request_deserializer=zun_dot_criapi_dot_api__pb2.VersionRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.VersionResponse.SerializeToString, ), 
'RunPodSandbox': grpc.unary_unary_rpc_method_handler( servicer.RunPodSandbox, request_deserializer=zun_dot_criapi_dot_api__pb2.RunPodSandboxRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.RunPodSandboxResponse.SerializeToString, ), 'StopPodSandbox': grpc.unary_unary_rpc_method_handler( servicer.StopPodSandbox, request_deserializer=zun_dot_criapi_dot_api__pb2.StopPodSandboxRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.StopPodSandboxResponse.SerializeToString, ), 'RemovePodSandbox': grpc.unary_unary_rpc_method_handler( servicer.RemovePodSandbox, request_deserializer=zun_dot_criapi_dot_api__pb2.RemovePodSandboxRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.RemovePodSandboxResponse.SerializeToString, ), 'PodSandboxStatus': grpc.unary_unary_rpc_method_handler( servicer.PodSandboxStatus, request_deserializer=zun_dot_criapi_dot_api__pb2.PodSandboxStatusRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.PodSandboxStatusResponse.SerializeToString, ), 'ListPodSandbox': grpc.unary_unary_rpc_method_handler( servicer.ListPodSandbox, request_deserializer=zun_dot_criapi_dot_api__pb2.ListPodSandboxRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.ListPodSandboxResponse.SerializeToString, ), 'CreateContainer': grpc.unary_unary_rpc_method_handler( servicer.CreateContainer, request_deserializer=zun_dot_criapi_dot_api__pb2.CreateContainerRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.CreateContainerResponse.SerializeToString, ), 'StartContainer': grpc.unary_unary_rpc_method_handler( servicer.StartContainer, request_deserializer=zun_dot_criapi_dot_api__pb2.StartContainerRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.StartContainerResponse.SerializeToString, ), 'StopContainer': grpc.unary_unary_rpc_method_handler( servicer.StopContainer, request_deserializer=zun_dot_criapi_dot_api__pb2.StopContainerRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.StopContainerResponse.SerializeToString, ), 'RemoveContainer': grpc.unary_unary_rpc_method_handler( servicer.RemoveContainer, request_deserializer=zun_dot_criapi_dot_api__pb2.RemoveContainerRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.RemoveContainerResponse.SerializeToString, ), 'ListContainers': grpc.unary_unary_rpc_method_handler( servicer.ListContainers, request_deserializer=zun_dot_criapi_dot_api__pb2.ListContainersRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.ListContainersResponse.SerializeToString, ), 'ContainerStatus': grpc.unary_unary_rpc_method_handler( servicer.ContainerStatus, request_deserializer=zun_dot_criapi_dot_api__pb2.ContainerStatusRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.ContainerStatusResponse.SerializeToString, ), 'UpdateContainerResources': grpc.unary_unary_rpc_method_handler( servicer.UpdateContainerResources, request_deserializer=zun_dot_criapi_dot_api__pb2.UpdateContainerResourcesRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.UpdateContainerResourcesResponse.SerializeToString, ), 'ReopenContainerLog': grpc.unary_unary_rpc_method_handler( servicer.ReopenContainerLog, request_deserializer=zun_dot_criapi_dot_api__pb2.ReopenContainerLogRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.ReopenContainerLogResponse.SerializeToString, ), 'ExecSync': grpc.unary_unary_rpc_method_handler( servicer.ExecSync, request_deserializer=zun_dot_criapi_dot_api__pb2.ExecSyncRequest.FromString, 
response_serializer=zun_dot_criapi_dot_api__pb2.ExecSyncResponse.SerializeToString, ), 'Exec': grpc.unary_unary_rpc_method_handler( servicer.Exec, request_deserializer=zun_dot_criapi_dot_api__pb2.ExecRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.ExecResponse.SerializeToString, ), 'Attach': grpc.unary_unary_rpc_method_handler( servicer.Attach, request_deserializer=zun_dot_criapi_dot_api__pb2.AttachRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.AttachResponse.SerializeToString, ), 'PortForward': grpc.unary_unary_rpc_method_handler( servicer.PortForward, request_deserializer=zun_dot_criapi_dot_api__pb2.PortForwardRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.PortForwardResponse.SerializeToString, ), 'ContainerStats': grpc.unary_unary_rpc_method_handler( servicer.ContainerStats, request_deserializer=zun_dot_criapi_dot_api__pb2.ContainerStatsRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.ContainerStatsResponse.SerializeToString, ), 'ListContainerStats': grpc.unary_unary_rpc_method_handler( servicer.ListContainerStats, request_deserializer=zun_dot_criapi_dot_api__pb2.ListContainerStatsRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.ListContainerStatsResponse.SerializeToString, ), 'UpdateRuntimeConfig': grpc.unary_unary_rpc_method_handler( servicer.UpdateRuntimeConfig, request_deserializer=zun_dot_criapi_dot_api__pb2.UpdateRuntimeConfigRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.UpdateRuntimeConfigResponse.SerializeToString, ), 'Status': grpc.unary_unary_rpc_method_handler( servicer.Status, request_deserializer=zun_dot_criapi_dot_api__pb2.StatusRequest.FromString, response_serializer=zun_dot_criapi_dot_api__pb2.StatusResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'runtime.v1alpha2.RuntimeService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) class ImageServiceStub(object): def __init__(self, channel): self.ListImages = channel.unary_unary( '/runtime.v1alpha2.ImageService/ListImages', request_serializer=zun_dot_criapi_dot_api__pb2.ListImagesRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ListImagesResponse.FromString, ) self.ImageStatus = channel.unary_unary( '/runtime.v1alpha2.ImageService/ImageStatus', request_serializer=zun_dot_criapi_dot_api__pb2.ImageStatusRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ImageStatusResponse.FromString, ) self.PullImage = channel.unary_unary( '/runtime.v1alpha2.ImageService/PullImage', request_serializer=zun_dot_criapi_dot_api__pb2.PullImageRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.PullImageResponse.FromString, ) self.RemoveImage = channel.unary_unary( '/runtime.v1alpha2.ImageService/RemoveImage', request_serializer=zun_dot_criapi_dot_api__pb2.RemoveImageRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.RemoveImageResponse.FromString, ) self.ImageFsInfo = channel.unary_unary( '/runtime.v1alpha2.ImageService/ImageFsInfo', request_serializer=zun_dot_criapi_dot_api__pb2.ImageFsInfoRequest.SerializeToString, response_deserializer=zun_dot_criapi_dot_api__pb2.ImageFsInfoResponse.FromString, ) class ImageServiceServicer(object): def ListImages(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ImageStatus(self, request, context): 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PullImage(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
Apache License 2.0
googlecloudplatform/gcs-oauth2-boto-plugin
gcs_oauth2_boto_plugin/test_oauth2_client.py
OAuth2AccountClientTest._RunGetAccessTokenTest
python
def _RunGetAccessTokenTest(self, expected_rapt=None):
    access_token_1 = 'abc123'
    self.assertFalse(self.client.fetched_token)
    token_1 = self.client.GetAccessToken()
    self.assertTrue(self.client.fetched_token)
    self.assertEqual(access_token_1, token_1.token)
    self.assertEqual(self.start_time + datetime.timedelta(minutes=60),
                     token_1.expiry)
    self.assertEqual(token_1.rapt_token, expected_rapt)
    self.client.Reset()
    self.mock_datetime.mock_now = (
        self.start_time + datetime.timedelta(minutes=55))
    token_2 = self.client.GetAccessToken()
    self.assertEqual(token_1, token_2)
    self.assertEqual(access_token_1, token_2.token)
    self.assertFalse(self.client.fetched_token)
    self.client.Reset()
    self.mock_datetime.mock_now = (
        self.start_time + datetime.timedelta(minutes=55, seconds=1))
    self.client.datetime_strategy = self.mock_datetime
    token_3 = self.client.GetAccessToken()
    self.assertTrue(self.client.fetched_token)
    self.assertEqual(
        self.mock_datetime.mock_now + datetime.timedelta(minutes=60),
        token_3.expiry)
    self.assertEqual(token_3.rapt_token, expected_rapt)
Tests access token gets with self.client.
https://github.com/googlecloudplatform/gcs-oauth2-boto-plugin/blob/b749ea3424c26145277dc3c3987220891324023f/gcs_oauth2_boto_plugin/test_oauth2_client.py#L136-L175
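The caching contract these assertions pin down (a fetched token is reused until roughly five minutes before its expiry, then refetched) applies to the real user-account client as well. A minimal sketch with placeholder provider URLs and certificate path; the constructor arguments mirror the mock subclass in the module below, and the call would only succeed against a real OAuth2 provider:

    from gcs_oauth2_boto_plugin import oauth2_client

    client = oauth2_client.OAuth2UserAccountClient(
        'https://provider.example.com/oauth/provider?mode=token',   # token_uri
        'clid', 'clsecret', 'ref_token_abc123',                     # client id/secret, refresh token
        auth_uri='https://provider.example.com/oauth/provider?mode=authorize',
        ca_certs_file='/path/to/cacerts.txt')                       # placeholder path
    token = client.GetAccessToken()        # fetched and cached
    token_again = client.GetAccessToken()  # served from cache until ~5 minutes before expiry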
from __future__ import absolute_import import datetime import logging import os import stat import sys import unittest from freezegun import freeze_time from gcs_oauth2_boto_plugin import oauth2_client import httplib2 try: from unittest import mock except ImportError: import mock LOG = logging.getLogger('test_oauth2_client') ACCESS_TOKEN = 'abc123' RAPT_TOKEN = 'rapt123' TOKEN_URI = 'https://provider.example.com/oauth/provider?mode=token' AUTH_URI = 'https://provider.example.com/oauth/provider?mode=authorize' DEFAULT_CA_CERTS_FILE = os.path.abspath( os.path.join('gslib', 'data', 'cacerts.txt')) IS_WINDOWS = 'win32' in str(sys.platform).lower() class MockDateTime(object): def __init__(self): self.mock_now = None def utcnow(self): return self.mock_now class MockOAuth2ServiceAccountClient(oauth2_client.OAuth2ServiceAccountClient): def __init__(self, client_id, private_key, password, auth_uri, token_uri, datetime_strategy): super(MockOAuth2ServiceAccountClient, self).__init__( client_id, private_key, password, auth_uri=auth_uri, token_uri=token_uri, datetime_strategy=datetime_strategy, ca_certs_file=DEFAULT_CA_CERTS_FILE) self.Reset() def Reset(self): self.fetched_token = False def FetchAccessToken(self, rapt_token=None): self.fetched_token = True return oauth2_client.AccessToken( ACCESS_TOKEN, GetExpiry(self.datetime_strategy, 3600), datetime_strategy=self.datetime_strategy, rapt_token=None) class MockOAuth2UserAccountClient(oauth2_client.OAuth2UserAccountClient): def __init__(self, token_uri, client_id, client_secret, refresh_token, auth_uri, datetime_strategy): super(MockOAuth2UserAccountClient, self).__init__( token_uri, client_id, client_secret, refresh_token, auth_uri=auth_uri, datetime_strategy=datetime_strategy, ca_certs_file=DEFAULT_CA_CERTS_FILE) self.Reset() def Reset(self): self.fetched_token = False def FetchAccessToken(self, rapt_token=None): self.fetched_token = True return oauth2_client.AccessToken( ACCESS_TOKEN, GetExpiry(self.datetime_strategy, 3600), datetime_strategy=self.datetime_strategy, rapt_token=RAPT_TOKEN if rapt_token is None else rapt_token) def GetExpiry(datetime_strategy, length_in_seconds): token_expiry = (datetime_strategy.utcnow() + datetime.timedelta(seconds=length_in_seconds)) return token_expiry def CreateMockUserAccountClient(mock_datetime): return MockOAuth2UserAccountClient( TOKEN_URI, 'clid', 'clsecret', 'ref_token_abc123', AUTH_URI, mock_datetime) def CreateMockServiceAccountClient(mock_datetime): return MockOAuth2ServiceAccountClient( 'clid', 'private_key', 'password', AUTH_URI, TOKEN_URI, mock_datetime) class OAuth2AccountClientTest(unittest.TestCase): def setUp(self): self.tempdirs = [] self.mock_datetime = MockDateTime() self.start_time = datetime.datetime(2011, 3, 1, 11, 25, 13, 300826) self.mock_datetime.mock_now = self.start_time def testGetAccessTokenUserAccount(self): self.client = CreateMockUserAccountClient(self.mock_datetime) self._RunGetAccessTokenTest(expected_rapt=RAPT_TOKEN) def testGetAccessTokenServiceAccount(self): self.client = CreateMockServiceAccountClient(self.mock_datetime) self._RunGetAccessTokenTest()
Apache License 2.0
ibm/clai
clai/server/orchestration/patterns/max_orchestrator/max_orchestrator.py
MaxOrchestrator.choose_action
python
def choose_action(self, command: State, agent_names: List[str],
                  candidate_actions: Optional[List[Union[Action, List[Action]]]],
                  force_response: bool, pre_post_state: str) -> Optional[Action]:
    if not candidate_actions:
        return None

    confs = [self.__calculate_confidence__(action) for action in candidate_actions]
    idx_maxconf = np.argmax(confs)
    max_conf = confs[idx_maxconf]
    selected_candidate = candidate_actions[idx_maxconf]

    if force_response:
        return selected_candidate

    if max_conf >= self.threshold:
        return selected_candidate

    return None
Choose an action for CLAI to respond with
https://github.com/ibm/clai/blob/3215e3676b4a0857a56a1e126a052f089be5ff03/clai/server/orchestration/patterns/max_orchestrator/max_orchestrator.py#L37-L57
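The selection rule is easy to restate in isolation: take the highest-confidence candidate and return it only if it clears the configured threshold, unless a response is forced. A standalone illustration with made-up confidences and commands:

    import numpy as np

    def pick(confs, candidates, threshold, force_response=False):
        # Mirror of the argmax-plus-threshold logic in choose_action above.
        idx = int(np.argmax(confs))
        if force_response or confs[idx] >= threshold:
            return candidates[idx]
        return None

    print(pick([0.2, 0.7], ['ls', 'ls -la'], threshold=0.5))  # -> 'ls -la'
    print(pick([0.2, 0.3], ['ls', 'ls -la'], threshold=0.5))  # -> None, best score below threshold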
from typing import Optional, List, Union
from pathlib import Path
import os
import json

import numpy as np

from clai.server.orchestration.orchestrator import Orchestrator
from clai.server.command_message import State, Action


class MaxOrchestrator(Orchestrator):

    def __init__(self):
        super(MaxOrchestrator, self).__init__()
        self._config_path = os.path.join(
            Path(__file__).parent.absolute(), 'config.json'
        )
        self.__read_config__()

    def __read_config__(self):
        with open(self._config_path, 'r') as fileobj:
            config = json.load(fileobj)
            self.threshold = config['threshold']
MIT License
shiyuechengineer/meraki-dashboard
meraki/aio/api/switch.py
AsyncSwitch.updateDeviceSwitchRoutingInterface
python
def updateDeviceSwitchRoutingInterface(self, serial: str, interfaceId: str, **kwargs):
    kwargs.update(locals())

    if 'multicastRouting' in kwargs:
        options = ['disabled', 'enabled', 'IGMP snooping querier']
        assert kwargs['multicastRouting'] in options, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {options}'''

    metadata = {
        'tags': ['switch', 'configure', 'routing', 'interfaces'],
        'operation': 'updateDeviceSwitchRoutingInterface'
    }
    resource = f'/devices/{serial}/switch/routing/interfaces/{interfaceId}'

    body_params = ['name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId', 'ospfSettings', ]
    payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}

    return self._session.put(metadata, resource, payload)
**Update a layer 3 interface for a switch**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-routing-interface

- serial (string): (required)
- interfaceId (string): (required)
- name (string): A friendly name or description for the interface or VLAN.
- subnet (string): The network that this routed interface is on, in CIDR notation (ex. 10.1.1.0/24).
- interfaceIp (string): The IP address this switch will use for layer 3 routing on this VLAN or subnet. This cannot be the same as the switch's management IP.
- multicastRouting (string): Enable multicast support if, multicast routing between VLANs is required. Options are, 'disabled', 'enabled' or 'IGMP snooping querier'.
- vlanId (integer): The VLAN this routed interface is on. VLAN must be between 1 and 4094.
- ospfSettings (object): The OSPF routing settings of the interface.
https://github.com/shiyuechengineer/meraki-dashboard/blob/f00442acf762a94e7e446f80a2485d120e7090d5/meraki/aio/api/switch.py#L228-L258
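A hypothetical call site, assuming this fork keeps the upstream meraki library's AsyncDashboardAPI entry point with its switch section; the serial, interface id, and addressing values are placeholders:

    import asyncio
    import meraki.aio

    async def main():
        async with meraki.aio.AsyncDashboardAPI(api_key='REDACTED') as dashboard:
            await dashboard.switch.updateDeviceSwitchRoutingInterface(
                serial='Q2XX-XXXX-XXXX',
                interfaceId='1234',
                name='Uplink VLAN',
                interfaceIp='192.168.10.2',
                vlanId=10,
                multicastRouting='disabled',  # must be one of the options checked above
            )

    asyncio.run(main())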
class AsyncSwitch: def __init__(self, session): super().__init__() self._session = session def getDeviceSwitchPorts(self, serial: str): metadata = { 'tags': ['switch', 'configure', 'ports'], 'operation': 'getDeviceSwitchPorts' } resource = f'/devices/{serial}/switch/ports' return self._session.get(metadata, resource) def cycleDeviceSwitchPorts(self, serial: str, ports: list): kwargs = locals() metadata = { 'tags': ['switch', 'liveTools', 'ports'], 'operation': 'cycleDeviceSwitchPorts' } resource = f'/devices/{serial}/switch/ports/cycle' body_params = ['ports', ] payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params} return self._session.post(metadata, resource, payload) def getDeviceSwitchPortsStatuses(self, serial: str, **kwargs): kwargs.update(locals()) metadata = { 'tags': ['switch', 'monitor', 'ports', 'statuses'], 'operation': 'getDeviceSwitchPortsStatuses' } resource = f'/devices/{serial}/switch/ports/statuses' query_params = ['t0', 'timespan', ] params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params} return self._session.get(metadata, resource, params) def getDeviceSwitchPortsStatusesPackets(self, serial: str, **kwargs): kwargs.update(locals()) metadata = { 'tags': ['switch', 'monitor', 'ports', 'statuses', 'packets'], 'operation': 'getDeviceSwitchPortsStatusesPackets' } resource = f'/devices/{serial}/switch/ports/statuses/packets' query_params = ['t0', 'timespan', ] params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params} return self._session.get(metadata, resource, params) def getDeviceSwitchPort(self, serial: str, portId: str): metadata = { 'tags': ['switch', 'configure', 'ports'], 'operation': 'getDeviceSwitchPort' } resource = f'/devices/{serial}/switch/ports/{portId}' return self._session.get(metadata, resource) def updateDeviceSwitchPort(self, serial: str, portId: str, **kwargs): kwargs.update(locals()) if 'type' in kwargs: options = ['trunk', 'access'] assert kwargs['type'] in options, f'''"type" cannot be "{kwargs['type']}", & must be set to one of: {options}''' if 'stpGuard' in kwargs: options = ['disabled', 'root guard', 'bpdu guard', 'loop guard'] assert kwargs['stpGuard'] in options, f'''"stpGuard" cannot be "{kwargs['stpGuard']}", & must be set to one of: {options}''' if 'udld' in kwargs: options = ['Alert only', 'Enforce'] assert kwargs['udld'] in options, f'''"udld" cannot be "{kwargs['udld']}", & must be set to one of: {options}''' if 'accessPolicyType' in kwargs: options = ['Open', 'Custom access policy', 'MAC allow list', 'Sticky MAC allow list'] assert kwargs['accessPolicyType'] in options, f'''"accessPolicyType" cannot be "{kwargs['accessPolicyType']}", & must be set to one of: {options}''' metadata = { 'tags': ['switch', 'configure', 'ports'], 'operation': 'updateDeviceSwitchPort' } resource = f'/devices/{serial}/switch/ports/{portId}' body_params = ['name', 'tags', 'enabled', 'type', 'vlan', 'voiceVlan', 'allowedVlans', 'poeEnabled', 'isolationEnabled', 'rstpEnabled', 'stpGuard', 'linkNegotiation', 'portScheduleId', 'udld', 'accessPolicyType', 'accessPolicyNumber', 'macAllowList', 'stickyMacAllowList', 'stickyMacAllowListLimit', 'stormControlEnabled', 'flexibleStackingEnabled', ] payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params} return self._session.put(metadata, resource, payload) def getDeviceSwitchRoutingInterfaces(self, serial: str): metadata = { 'tags': ['switch', 'configure', 'routing', 'interfaces'], 'operation': 'getDeviceSwitchRoutingInterfaces' } 
resource = f'/devices/{serial}/switch/routing/interfaces' return self._session.get(metadata, resource) def createDeviceSwitchRoutingInterface(self, serial: str, name: str, interfaceIp: str, vlanId: int, **kwargs): kwargs.update(locals()) if 'multicastRouting' in kwargs: options = ['disabled', 'enabled', 'IGMP snooping querier'] assert kwargs['multicastRouting'] in options, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {options}''' metadata = { 'tags': ['switch', 'configure', 'routing', 'interfaces'], 'operation': 'createDeviceSwitchRoutingInterface' } resource = f'/devices/{serial}/switch/routing/interfaces' body_params = ['name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId', 'defaultGateway', 'ospfSettings', ] payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params} return self._session.post(metadata, resource, payload) def getDeviceSwitchRoutingInterface(self, serial: str, interfaceId: str): metadata = { 'tags': ['switch', 'configure', 'routing', 'interfaces'], 'operation': 'getDeviceSwitchRoutingInterface' } resource = f'/devices/{serial}/switch/routing/interfaces/{interfaceId}' return self._session.get(metadata, resource)
MIT License
thesis/chain-bitcoin-python
chain_bitcoin/endpoints.py
get_block_by_height
python
def get_block_by_height(height, config=default_config, **kw):
    return _get_block(str(height), config, **kw)
https://chain.com/docs/v1/curl/#bitcoin-block

Arguments:
    height: integer
        A block height.

Return: ``Block``
https://github.com/thesis/chain-bitcoin-python/blob/a242a538c6905467d2342183e8d20558c492dcd2/chain_bitcoin/endpoints.py#L304-L315
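A hypothetical call; the api_key_id keyword is inferred from the require_api_key_id check in the module below, and the key value is a placeholder:

    from chain_bitcoin import endpoints

    block = endpoints.get_block_by_height(300000, api_key_id='YOUR_KEY_ID')
    print(block)  # a Block model parsed from the API response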
from __future__ import absolute_import __all__ = [] import functools import json from . import urls from .config import * from .exceptions import * from .map_list import * from .models import * base_url = 'https://api.chain.com' make_url = functools.partial(urls.make_url, base_url) json_header = {'content-type': 'application/json'} def json_args(x): return dict(data=json.dumps(x), headers=json_header) def parse_response_data(response): x = response.json() if 'error' in x: raise ChainError(x['error']) return x def endpoint(f): __all__.append(f.__name__) return f @endpoint def get_address(address, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url(['v1', config.block_chain, 'addresses', address]) response_data = parse_response_data(requests.get(url)) return Address.from_dict(response_data) @endpoint def get_addresses(addresses, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url( ['v1', config.block_chain, 'addresses', ','.join(addresses)] ) response_data = parse_response_data(requests.get(url)) return map_list(Address.from_dict, response_data) @endpoint def get_address_transactions(address, limit=None, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url( ['v1', config.block_chain, 'addresses', address, 'transactions'], [('limit', limit)] ) response_data = parse_response_data(requests.get(url)) return map_list(Transaction.from_dict, response_data) @endpoint def get_addresses_transactions(addresses, limit=None, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url( ['v1', config.block_chain, 'addresses', ','.join(addresses), 'transactions'], [('limit', limit)] ) response_data = parse_response_data(requests.get(url)) return map_list(Transaction.from_dict, response_data) @endpoint def get_address_unspents(address, limit=None, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url( ['v1', config.block_chain, 'addresses', address, 'unspents'], [('limit', limit)] ) response_data = parse_response_data(requests.get(url)) return map_list(Output.from_dict, response_data) @endpoint def get_addresses_unspents(addresses, limit=None, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url( ['v1', config.block_chain, 'addresses', ','.join(addresses), 'unspents'], [('limit', limit)] ) response_data = parse_response_data(requests.get(url)) return map_list(Output.from_dict, response_data) @endpoint def get_address_op_returns(address, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url( ['v1', config.block_chain, 'addresses', address, 'op-returns'] ) response_data = parse_response_data(requests.get(url)) return map_list(OpReturn.from_dict, response_data) @endpoint def get_transaction(transaction_hash, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url(['v1', config.block_chain, 'transactions', transaction_hash]) response_data = parse_response_data(requests.get(url)) return Transaction.from_dict(response_data) @endpoint def 
get_transaction_op_return(transaction_hash, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url(['v1', config.block_chain, 'transactions', transaction_hash, 'op-return']) response_data = parse_response_data(requests.get(url)) return OpReturn.from_dict(response_data) @endpoint def send_transaction(hex, config=default_config, **kw): config = config.replace(**kw) require_api_key_secret(config) requests = config_to_requests(config) url = make_url(['v1', config.block_chain, 'transactions']) request_data = {'hex': hex} response = requests.put(url, **json_args(request_data)) response_data = parse_response_data(response) return SendTransactionResult.from_dict(response_data) def _get_block(x, config=default_config, **kw): config = config.replace(**kw) require_api_key_id(config) requests = config_to_requests(config) url = make_url(['v1', config.block_chain, 'blocks', x]) response_data = parse_response_data(requests.get(url)) return Block.from_dict(response_data) @endpoint def get_block_by_hash(block_hash, config=default_config, **kw): return _get_block(block_hash, config, **kw) @endpoint
MIT License
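All of the Chain endpoint wrappers in the context above follow one pattern: merge keyword overrides into the config, build the URL, issue the HTTP call through the configured requests session, and map the parsed JSON onto a model class. A minimal usage sketch of `get_transaction_op_return` under that pattern — the transaction hash is a placeholder, and the `api_key_id` keyword is inferred from `require_api_key_id` rather than confirmed against the library's documentation:

from chain_bitcoin.endpoints import get_transaction_op_return

# Placeholder transaction hash and assumed credential keyword; both are
# illustrative only and not taken from the record above.
op_return = get_transaction_op_return(
    "0123456789abcdef...",           # transaction hash (placeholder)
    api_key_id="YOUR-CHAIN-KEY-ID",  # merged into the config via config.replace(**kw)
)
print(op_return)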
mlinderm/npsv
paper/variant_stats.py
make_argument_parser
python
def make_argument_parser():
    parser = argparse.ArgumentParser(
        __file__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Compute summary statistics from Truvari HG002 tp-call.vcf",
    )
    parser.add_argument(
        "--sample",
        help="Sample name",
        type=str,
        default="HG002",
    )

    subparsers = parser.add_subparsers(dest="command", help="Sub-command help")

    parser_gq = subparsers.add_parser("gq", help="Concordance by GQ bucket")
    parser_vntr = subparsers.add_parser("vntr", help="Error enrichment by VNTR label")
    parser_len = subparsers.add_parser("len", help="Concordance by SVLEN")
    parser_list = subparsers.add_parser("list", help="List of all variants and metadata")
    parser_list = subparsers.add_parser("vntr-conc", help="Concordance by VNTR label")

    parser.add_argument("vcf", metavar="TP-VCF", type=str, help="Truvari 'tp-call.vcf' file")

    return parser
Construct argument parser
https://github.com/mlinderm/npsv/blob/02a17f7c889e345a3d2af67ec26f0492c308cf73/paper/variant_stats.py#L10-L35
import argparse, collections, sys

import numpy as np
import pandas as pd
import vcf
import pybedtools.bedtool as bed

from npsv.variant import variant_descriptor
MIT License
jwlodek/py_cui
py_cui/grid.py
Grid.get_cell_dimensions
python
def get_cell_dimensions(self):
    return self._row_height, self._column_width
Gets size in characters of single (row, column) cell location

Returns
-------
row_height : int
    height of row in characters
column_width : int
    width of column in characters
https://github.com/jwlodek/py_cui/blob/e5f86515bb12929073ec62d27f061092d8040c23/py_cui/grid.py#L99-L110
import py_cui


class Grid:

    def __init__(self, num_rows, num_columns, height, width, logger):
        self._num_rows = num_rows
        self._num_columns = num_columns
        self._height = height
        self._width = width
        self._offset_x = self._width % self._num_columns - 1
        self._offset_y = self._height % self._num_rows - 1
        self._row_height = int(self._height / self._num_rows)
        self._column_width = int(self._width / self._num_columns)
        self._logger = logger

    def get_dimensions(self):
        return self._num_rows, self._num_columns

    def get_dimensions_absolute(self):
        return self._height, self._width

    def get_offsets(self):
        return self._offset_x, self._offset_y
BSD 3-Clause New or Revised License
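Since `get_cell_dimensions` just reports the integer-division results computed in `Grid.__init__`, a small sketch of how the numbers fall out — the logger argument is a stand-in; py_cui normally supplies its own logger object:

import logging

logger = logging.getLogger("grid_demo")  # stand-in for py_cui's internal logger

# A 3x3 grid laid out inside a 30-row by 120-column drawing area.
grid = Grid(num_rows=3, num_columns=3, height=30, width=120, logger=logger)

row_height, column_width = grid.get_cell_dimensions()
print(row_height, column_width)  # 10 40, i.e. 30 // 3 and 120 // 3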
nitely/python-react-v8
react/react.py
React.as_json
python
def as_json(self):
    return self.to_json(self.opts)
Serialize :py:attr:`.opts` into a json.\
This is used internally and can\
be overridden to provide a faster\
json serializer.

:return: Opts in json format
:rtype: str
https://github.com/nitely/python-react-v8/blob/a47ec7156d302b208753e0e8df028406bd04e92a/react/react.py#L80-L90
from __future__ import unicode_literals

import json

import six

from . import utils

__all__ = [
    'set_up', 'React']

_DEFINE_GLOBALS_SCRIPT = (
    'var global = global || this,'
    'console = console || {'
    ' warn: function(){},'
    ' error: function(){},'
    ' log: function(){},'
    ' info: function(){}};')


def set_up():
    utils.set_up()
    utils.run_script(_DEFINE_GLOBALS_SCRIPT)


class React:

    def __init__(self, opts):
        self.opts = opts

    def build_js_script(self):
        return ('global.RenderToString({})'
                .format(self.as_json()))

    def render(self):
        return utils.run_script(self.build_js_script())
MIT License
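The docstring for `as_json` points out that it exists mainly as an override hook for a faster serializer. A sketch of that override, assuming a drop-in encoder such as `ujson` is installed and that `opts` is the plain dict the caller supplies; the subclass name and the example opts are illustrative, not part of the library:

import ujson  # assumed to be available; any faster JSON encoder works the same way


class FastReact(React):

    def as_json(self):
        # Same contract as React.as_json: serialize self.opts to a JSON string
        # so build_js_script() can embed it in the RenderToString call.
        return ujson.dumps(self.opts)


component = FastReact({'component': 'MyComponent', 'props': {'title': 'hello'}})
script = component.build_js_script()  # 'global.RenderToString({...})'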
shsiders/oclubs
oclubs/blueprints/clubblueprint.py
adjust_status_all_free_join
python
def adjust_status_all_free_join(club_type):
    if club_type == 'all':
        clubs = Club.allclubs(active_only=False)
    elif club_type == '11-12':
        clubs = Club.allclubs(active_only=False, grade_limit=[11, 12])
    elif club_type == '9-10':
        clubs = Club.allclubs(active_only=False, grade_limit=[9, 10])
    else:
        abort(404)
    clubs = filter(lambda c: c.reactivate, clubs)
    for club in clubs:
        club.joinmode = ClubJoinMode.FREE_JOIN
    flash('All clubs are free to join now.', 'adjust_status')
    return redirect(url_for('.adjust_status', club_type=club_type))
Change all clubs' join mode to free join
https://github.com/shsiders/oclubs/blob/1003f5079fbea1415db1c70e3aabfc2e1afd4236/oclubs/blueprints/clubblueprint.py#L525-L539
from __future__ import absolute_import, unicode_literals, division from datetime import date from flask import ( Blueprint, render_template, url_for, request, redirect, flash, abort ) from flask_login import current_user, login_required, fresh_login_required from oclubs.objs import Activity, User, Club, Upload, FormattedText from oclubs.enums import UserType, ClubType, ClubJoinMode, ActivityTime from oclubs.shared import ( download_xlsx, get_callsign_decorator, special_access_required, render_email_template, Pagination, require_active_club, require_student_membership, require_membership, require_not_student, true_or_fail, form_is_valid, fail ) from oclubs.exceptions import UploadNotSupported, AlreadyExists, NoRow from oclubs.access import siteconfig clubblueprint = Blueprint('clubblueprint', __name__) @clubblueprint.route('/view/<clubfilter:club_filter>') def clublist(club_filter): num = 20 clubs = Club.randomclubs(num, **club_filter.to_kwargs()) info = {} for club in clubs: info[club.name] = club.activities()[0] if club.activities() else None return render_template('club/clublist.html.j2', is_list=True, clubs=clubs, info=info, club_filter=club_filter) @clubblueprint.route('/') def home_redirect(): return redirect(url_for('.clublist', club_filter='all')) @clubblueprint.route('/<club>/') @get_callsign_decorator(Club, 'club') def clubintro(club): free_join = (current_user.is_authenticated and (club.joinmode == ClubJoinMode.FREE_JOIN and current_user.type == UserType.STUDENT and current_user not in club.members)) see_email = (current_user.is_authenticated and (current_user.type == UserType.ADMIN or current_user == club.leader)) is_admin = (current_user.is_authenticated and (current_user.type == UserType.CLASSROOM_ADMIN or current_user.type == UserType.DIRECTOR or current_user.type == UserType.ADMIN)) invite_member = club.joinmode == ClubJoinMode.BY_INVITATION return render_template('club/clubintro.html.j2', free_join=free_join, see_email=see_email, is_admin=is_admin, invite_member=invite_member) @clubblueprint.route('/<club>/introduction/submit', methods=['POST']) @get_callsign_decorator(Club, 'club') @login_required @require_active_club def clubintro_submit(club): true_or_fail(current_user.type == UserType.STUDENT, 'You may not join clubs.', 'join') true_or_fail(club.joinmode == ClubJoinMode.FREE_JOIN, 'You may not join this club.', 'join') if form_is_valid(): try: club.add_member(current_user) except AlreadyExists: fail('You are already in %s.' % club.name, 'join') return redirect(url_for('.clubintro', club=club.callsign)) parameters = {'club': club, 'current_user': current_user} contents = render_email_template('joinclubs', parameters) club.leader.email_user('New Club Member - ' + club.name, contents) club.leader.notify_user('%s has joined %s.' 
% (current_user.nickname, club.name)) flash('You have successfully joined ' + club.name + '.', 'join') return redirect(url_for('.clubintro', club=club.callsign)) @clubblueprint.route('/<club>/new_leader') @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required @fresh_login_required def newleader(club): return render_template('club/newleader.html.j2') @clubblueprint.route('/<club>/new_leader/submit', methods=['POST']) @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required @fresh_login_required def newleader_submit(club): leader_old = club.leader members_obj = club.members leader_name = request.form['leader'] for member_obj in members_obj: if leader_name == member_obj.passportname: club.leader = member_obj break else: abort(500) for member in club.teacher_and_members: parameters = {'user': member, 'club': club, 'leader_old': leader_old} contents = render_email_template('newleader', parameters) member.email_user('New Leader - ' + club.name, contents) member.notify_user(club.leader.nickname + ' becomes the new leader of ' + club.name + '.') return redirect(url_for('.clubintro', club=club.callsign)) @clubblueprint.route('/<club>/members') @get_callsign_decorator(Club, 'club') @require_membership def memberinfo(club): has_access = (current_user == club.leader or current_user == club.teacher or current_user.type == UserType.ADMIN) return render_template('club/memberinfo.html.j2', has_access=has_access) @clubblueprint.route('/<club>/members/notify_members', methods=['POST']) @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required def memberinfo_notify_members(club): notify_contents = request.form['contents'] if not notify_contents: flash('Contents cannot be blank', 'notify_members') return redirect(url_for('.memberinfo', club=club.callsign)) for member in club.members: member.notify_user(notify_contents) parameters = {'member': member, 'club': club, 'notify_contents': notify_contents} contents = render_email_template('notifymembers', parameters) member.email_user('Notification - ' + club.name, contents) flash('Notification sent.', 'notify_members') return redirect(url_for('.memberinfo', club=club.callsign)) @clubblueprint.route('/<club>/members/download') @get_callsign_decorator(Club, 'club') @require_membership @special_access_required def memberinfo_download(club): info = [] info.append(('Nick Name', 'Class', 'Passport Name', 'Email')) info.extend([(member.nickname, member.grade_and_class, member.passportname, member.email) for member in club.members]) return download_xlsx('Member Info.xlsx', info) @clubblueprint.route('/<club>/edit_info') @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required def changeclubinfo(club): return render_template('club/changeclubinfo.html.j2') @clubblueprint.route('/<club>/edit_info/submit', methods=['GET', 'POST']) @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required def changeclubinfo_submit(club): intro = request.form['intro'] if len(intro) > 90: fail('Your one sentence description is too long.', 'clubinfo') return redirect(url_for('.changeclubinfo', club=club.callsign)) elif request.form['intro'] != '': club.intro = request.form['intro'] desc = request.form['description'].strip() if desc: club.description = FormattedText.handle(current_user, club, request.form['description']) if 'picture' in request.files: file = request.files['picture'] if file.filename != '': try: club.picture = Upload.handle(current_user, club, file) 
except UploadNotSupported: fail('Please upload the correct file type.', 'clubinfo') return redirect(url_for('.changeclubinfo', club=club.callsign)) teacher_email = request.form['email'] if teacher_email != club.teacher.studentid: club.teacher = User.find_teacher(teacher_email) location = request.form['location'] if location != club.location and location != '': club.location = location for member in club.teacher_and_members: parameters = {'user': member, 'club': club} contents = render_email_template('changeclubinfo', parameters) member.email_user('Change Club Info - ' + club.name, contents) member.notify_user(club.name + '\'s information has been changed.') return redirect(url_for('.clubintro', club=club.callsign)) @clubblueprint.route('/<club>/adjust_member') @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required @fresh_login_required def adjustmember(club): invite_member = club.joinmode == ClubJoinMode.BY_INVITATION return render_template('club/adjustmember.html.j2', invite_member=invite_member) @clubblueprint.route('/<club>/adjust_member/submit', methods=['POST']) @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required @fresh_login_required def adjustmember_submit(club): member = User(request.form['uid']) true_or_fail(current_user != member, 'You cannot expel yourself.', 'expelled') if form_is_valid(): club.remove_member(member) parameters = {'member': member, 'club': club} contents = render_email_template('adjustmember', parameters) member.email_user('Member Adjustment - ' + club.name, contents) member.notify_user('You have been moved out of ' + club.name + '.') flash(member.nickname + ' has been expelled.', 'expelled') return redirect(url_for('.adjustmember', club=club.callsign)) @clubblueprint.route('/<club>/invite_member/submit', methods=['POST']) @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required @fresh_login_required def invitemember(club): true_or_fail(club.joinmode == ClubJoinMode.BY_INVITATION, 'You cannot invite members when the join mode is not ' 'by invitation.', 'invite_member') if form_is_valid(): new_member = User.find_user(request.form['gradeclass'], request.form['gnumber']) if new_member is None: fail('Please input correct user info to invite.', 'invite_member') else: parameters = {'club': club, 'member': new_member} contents = render_email_template('invitemember', parameters) new_member.email_user('Invitation - ' + club.name, contents) if new_member in club.members: fail('%s is already in the club.' % new_member.nickname, 'invite_member') else: club.send_invitation(new_member) flash('The invitation has been sent to %s(%s).' 
% (new_member.gnumber_id, new_member.nickname), 'invite_member') return redirect(url_for('.adjustmember', club=club.callsign)) @clubblueprint.route('/<club>/activities/', defaults={'page': 1}) @clubblueprint.route('/<club>/activities/<int:page>') @get_callsign_decorator(Club, 'club') def clubactivities(club, page): act_num = 20 count, acts = club.activities(limit=((page-1)*act_num, act_num)) pagination = Pagination(page, act_num, count) return render_template('club/clubact.html.j2', acts=acts, pagination=pagination) @clubblueprint.route('/<club>/photos/') @get_callsign_decorator(Club, 'club') def clubphoto(club): uploads = club.allactphotos() return render_template('club/clubphoto.html.j2', uploads=uploads) @clubblueprint.route('/<club>/new_activity') @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required def newact(club): years = (lambda m: map(lambda n: m + n, range(2)))(date.today().year) return render_template('club/newact.html.j2', years=years) @clubblueprint.route('/<club>/new_activity/submit', methods=['POST']) @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required def newact_submit(club): try: a = Activity.new() a.name = request.form['name'] if not a.name: fail('Please enter the name of the new activity.', 'newact') return redirect(url_for('.newact', club=club.callsign)) a.club = club a.description = FormattedText.handle(current_user, club, request.form['description']) a.post = FormattedText(0) try: actdate = date(int(request.form['year']), int(request.form['month']), int(request.form['day'])) except ValueError: fail('Invalid date.', 'newact') return redirect(url_for('.newact', club=club.callsign)) if actdate < date.today(): fail('Please enter a date not eariler than today.', 'newact') return redirect(url_for('.newact', club=club.callsign)) a.date = actdate time = ActivityTime[request.form['act_type'].upper()] is_other_act = time in [ActivityTime.UNKNOWN, ActivityTime.OTHERS] a.time = time a.location = request.form['location'] time_type = request.form['time_type'] try: cas = int(request.form['cas']) except ValueError: fail('Invalid CAS hours.', 'newact') return redirect(url_for('.newact', club=club.callsign)) if cas < 0: fail('Invalid CAS hours.', 'newact') return redirect(url_for('.newact', club=club.callsign)) if time_type == 'hours': a.cas = cas else: a.cas = cas / 60 a.selections = [] a.reservation = None a.create() flash(a.name + ' has been successfully created.', 'newact') except ValueError: fail('Please input all information to create a new activity.', 'newact') else: for member in club.teacher_and_members: parameters = {'member': member, 'club': club, 'act': a, 'is_other_act': is_other_act} contents = render_email_template('newact', parameters) member.email_user(a.name + ' - ' + club.name, contents) member.notify_user(club.name + ' is going to host ' + a.name + ' on ' + actdate.strftime('%b-%d-%y') + '.') return redirect(url_for('.newact', club=club.callsign)) @clubblueprint.route('/<club>/hongmei_status') @get_callsign_decorator(Club, 'club') @special_access_required def hongmei_status(club): acts = club.activities([ActivityTime.HONGMEI], (False, True)) return render_template('club/hmstatus.html.j2', acts=acts) @clubblueprint.route('/<club>/hongmei_status/download') @get_callsign_decorator(Club, 'club') @special_access_required def hongmei_status_download(club): result = [] result.append(['Date', 'Members']) hongmei = club.activities([ActivityTime.HONGMEI], (False, True)) for each in hongmei: result_each = [] 
result_each.append(each.date.strftime('%b-%d-%y')) members = each.signup_list() members_result = '' for member in members: if member['consentform'] == 0: consentform = 'No' else: consentform = 'Yes' members_result += member['user'].nickname + ': ' + str(member['user'].phone) + ' (Consent From Handed? ' + consentform + ')\n' result_each.append(members_result) result.append(result_each) return download_xlsx('HongMei Status - ' + club.name + '.xlsx', result) @clubblueprint.route('/<club>/new_hongmei_schedule') @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required def newhm(club): acts = club.activities([ActivityTime.HONGMEI], (False, True)) years = (lambda m: map(lambda n: m + n, range(2)))(date.today().year) return render_template('club/newhm.html.j2', acts=acts, years=years) @clubblueprint.route('/<club>/new_hongmei_schedule/submit', methods=['POST']) @get_callsign_decorator(Club, 'club') @require_active_club @special_access_required def newhm_submit(club): contents = request.form['contents'] try: actdate = date(int(request.form['year']), int(request.form['month']), int(request.form['day'])) except ValueError: fail('Please input valid date to submit.', 'newhm') if contents == '' or actdate < date.today(): fail('Please input contents or correct date to submit.', 'newhm') if form_is_valid(): a = Activity.new() a.name = contents a.club = club a.description = FormattedText.emptytext() a.date = actdate a.time = ActivityTime.HONGMEI a.location = 'HongMei Elementary School' a.cas = 1 a.post = FormattedText.emptytext() a.selections = [] a.create() return redirect(url_for('.newhm', club=club.callsign)) @clubblueprint.route('/adjust_status/', defaults={'club_type': 'all'}) @clubblueprint.route('/adjust_status/<club_type>') @special_access_required @require_not_student def adjust_status(club_type): if club_type == 'all': clubs = Club.allclubs(active_only=False) elif club_type == '11-12': clubs = Club.allclubs(active_only=False, grade_limit=[11, 12]) elif club_type == '9-10': clubs = Club.allclubs(active_only=False, grade_limit=[9, 10]) else: abort(404) clubs = filter(lambda c: c.reactivate, clubs) return render_template('club/adjuststatus.html.j2', clubs=clubs, ClubJoinMode=ClubJoinMode, club_type=club_type) @clubblueprint.route('/adjust_status/submit', defaults={'club_type': 'all'}, methods=['POST']) @clubblueprint.route('/adjust_status/<club_type>/submit', methods=['POST']) @special_access_required @require_not_student def adjust_status_submit(club_type): ids = request.form.getlist('activeness') clubs = map(Club, ids) for club in clubs: if club.is_active: club.is_active = False else: club.is_active = True ids = request.form.getlist('joinmode') clubs = map(Club, ids) for club in clubs: if club.joinmode == ClubJoinMode.FREE_JOIN: club.joinmode = ClubJoinMode.BY_INVITATION elif club.joinmode == ClubJoinMode.BY_INVITATION: club.joinmode = ClubJoinMode.FREE_JOIN flash('The change in clubs\' status has been submitted.', 'adjust_status') return redirect(url_for('.adjust_status', club_type=club_type)) @clubblueprint.route('/adjust_status/all_free_join', defaults={'club_type': 'all'}, methods=['POST']) @clubblueprint.route('/adjust_status/<club_type>/all_free_join', methods=['POST']) @special_access_required @require_not_student
MIT License
facebookresearch/compilergym
examples/actor_critic.py
Policy.forward
python
def forward(self, x):
    x = F.relu(self.affine1(x))
    x = x.add(F.relu(self.affine2(x)))
    x = x.add(F.relu(self.affine3(x)))
    x = x.add(F.relu(self.affine4(x)))

    action_prob = F.softmax(self.action_head(x), dim=-1)

    state_values = self.value_head(x)

    return action_prob, state_values
Forward of both actor and critic
https://github.com/facebookresearch/compilergym/blob/00ae8c0d080da4d429f95398be1df01b5d6e7b71/examples/actor_critic.py#L165-L185
import random import statistics from collections import namedtuple from typing import List import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from absl import app, flags from torch.distributions import Categorical import compiler_gym.util.flags.episodes import compiler_gym.util.flags.learning_rate import compiler_gym.util.flags.seed from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags from compiler_gym.util.flags.env_from_flags import env_from_flags from compiler_gym.wrappers import ConstrainedCommandline, TimeLimit flags.DEFINE_list( "flags", [ "-break-crit-edges", "-early-cse-memssa", "-gvn-hoist", "-gvn", "-instcombine", "-instsimplify", "-jump-threading", "-loop-reduce", "-loop-rotate", "-loop-versioning", "-mem2reg", "-newgvn", "-reg2mem", "-simplifycfg", "-sroa", ], "List of optimizatins to explore.", ) flags.DEFINE_integer("episode_len", 5, "Number of transitions per episode.") flags.DEFINE_integer("hidden_size", 64, "Latent vector size.") flags.DEFINE_integer("log_interval", 100, "Episodes per log output.") flags.DEFINE_integer("iterations", 1, "Times to redo entire training.") flags.DEFINE_float("exploration", 0.0, "Rate to explore random transitions.") flags.DEFINE_float("mean_smoothing", 0.95, "Smoothing factor for mean normalization.") flags.DEFINE_float("std_smoothing", 0.4, "Smoothing factor for std dev normalization.") eps = np.finfo(np.float32).eps.item() SavedAction = namedtuple("SavedAction", ["log_prob", "value"]) FLAGS = flags.FLAGS class MovingExponentialAverage: def __init__(self, smoothing_factor): self.smoothing_factor = smoothing_factor self.value = None def next(self, entry): assert entry is not None if self.value is None: self.value = entry else: self.value = ( entry * (1 - self.smoothing_factor) + self.value * self.smoothing_factor ) return self.value class HistoryObservation(gym.ObservationWrapper): def __init__(self, env): super().__init__(env=env) self.observation_space = gym.spaces.Box( low=np.full(len(FLAGS.flags), 0, dtype=np.float32), high=np.full(len(FLAGS.flags), float("inf"), dtype=np.float32), dtype=np.float32, ) def reset(self, *args, **kwargs): self._steps_taken = 0 self._state = np.zeros( (FLAGS.episode_len - 1, self.action_space.n), dtype=np.int32 ) return super().reset(*args, **kwargs) def step(self, action: int): assert self._steps_taken < FLAGS.episode_len if self._steps_taken < FLAGS.episode_len - 1: self._state[self._steps_taken][action] = 1 self._steps_taken += 1 return super().step(action) def observation(self, observation): return self._state class Policy(nn.Module): def __init__(self): super().__init__() self.affine1 = nn.Linear( (FLAGS.episode_len - 1) * len(FLAGS.flags), FLAGS.hidden_size ) self.affine2 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size) self.affine3 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size) self.affine4 = nn.Linear(FLAGS.hidden_size, FLAGS.hidden_size) self.action_head = nn.Linear(FLAGS.hidden_size, len(FLAGS.flags)) self.value_head = nn.Linear(FLAGS.hidden_size, 1) self.saved_actions: List[SavedAction] = [] self.rewards: List[float] = [] self.moving_mean = MovingExponentialAverage(FLAGS.mean_smoothing) self.moving_std = MovingExponentialAverage(FLAGS.std_smoothing)
MIT License
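The `forward` method runs a shared four-layer trunk with residual-style additions and then splits into the two heads: a softmax over the flag actions and a scalar state value. A sketch of a single forward pass, assuming the module above has been imported so its absl flags are registered (parsing them with defaults is only needed when running outside `app.run`):

import torch
from absl import flags

flags.FLAGS(["actor_critic"])  # parse default flag values outside app.run()

policy = Policy()

# One flattened history observation: (episode_len - 1) * len(FLAGS.flags) inputs.
x = torch.zeros(1, (flags.FLAGS.episode_len - 1) * len(flags.FLAGS.flags))

action_prob, state_value = policy(x)
print(action_prob.shape)  # torch.Size([1, 15]) -- one probability per flag
print(state_value.shape)  # torch.Size([1, 1])  -- the critic's value estimate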
terrapower/armi
armi/cases/suite.py
CaseSuite.remove
python
def remove(self, case):
    self._cases.remove(case)
    case._caseSuite = None
Remove a case from a suite.
https://github.com/terrapower/armi/blob/b4fceeb5c3c7f2feeaa8c9ac05aa635e5f1a15a0/armi/cases/suite.py#L74-L77
import os
from typing import Optional, Sequence
import traceback

import tabulate

from armi import runLog
from armi import settings
from armi.cases import case as armicase
from armi.utils import directoryChangers


class CaseSuite:

    def __init__(self, cs):
        self._cases = list()
        self.cs = cs

    def add(self, case):
        existing = next((c for c in self if case == c), None)
        if existing is not None:
            raise ValueError(
                "CaseSuite already contains case with title `{}`\nFirst case: {}\n"
                "Second case: {}".format(case.title, existing, case)
            )
        self._cases.append(case)
        case._caseSuite = self
Apache License 2.0
aiven/myhoard
myhoard/controller.py
Controller._determine_unapplied_remote_binlogs
python
def _determine_unapplied_remote_binlogs(self, stream):
    missing_checked_key = f"{stream.stream_id}.missing_checked"
    if self.state["promote_details"].get(missing_checked_key) or self.state["force_promote"]:
        return

    already_processed_remote_indexes = set()
    for key in ["binlogs_to_fetch", "binlogs_to_apply", "binlogs_applying"]:
        for binlog in self.state["promote_details"].get(key, []):
            already_processed_remote_indexes.add(binlog["remote_index"])

    missing_binlogs = []
    missing_gtids = False
    with mysql_cursor(**self.mysql_client_params) as cursor:
        for binlog in stream.iterate_remote_binlogs(reverse=True):
            if binlog["remote_index"] in already_processed_remote_indexes:
                break
            if not binlog["gtid_ranges"]:
                missing_binlogs.insert(0, binlog)
            else:
                gtid_str = make_gtid_range_string(binlog["gtid_ranges"])
                cursor.execute("SELECT GTID_SUBSET(%s, @@GLOBAL.gtid_executed) AS executed", [gtid_str])
                executed = cursor.fetchone()["executed"]
                if executed:
                    break
                missing_binlogs.insert(0, binlog)
                missing_gtids = True

    binlogs_to_fetch = self.state["promote_details"].get("binlogs_to_fetch", [])
    if missing_gtids:
        site = self._get_site_for_stream_id(stream.stream_id)
        missing_binlogs = [{**binlog, "site": site} for binlog in missing_binlogs]
        binlogs_to_fetch = binlogs_to_fetch + missing_binlogs

    self.state_manager.update_state(
        promote_details={
            **self.state["promote_details"],
            missing_checked_key: True,
            "binlogs_to_fetch": binlogs_to_fetch,
        }
    )
Finds out whether the given stream contains any remote binlogs with GTIDs that have not yet been applied locally. Any such binlogs are stored in state so that they get downloaded and applied.
https://github.com/aiven/myhoard/blob/e47731cef4cf5ec54c17d471f97c0b30b5cc1684/myhoard/controller.py#L700-L745
import contextlib import datetime import enum import json import logging import math import os import re import threading import time import pymysql from pghoard.rohmu import get_transfer from pghoard.rohmu.compressor import DecompressSink from pghoard.rohmu.encryptor import DecryptSink from .backup_stream import BackupStream from .binlog_scanner import BinlogScanner from .errors import BadRequest from .restore_coordinator import RestoreCoordinator from .state_manager import StateManager from .util import ( are_gtids_in_executed_set, change_master_to, DEFAULT_MYSQL_TIMEOUT, ERR_TIMEOUT, make_gtid_range_string, mysql_cursor, parse_fs_metadata, relay_log_name, ) ERR_CANNOT_CONNECT = 2003 class Controller(threading.Thread): @enum.unique class Mode(str, enum.Enum): active = "active" idle = "idle" observe = "observe" promote = "promote" restore = "restore" BACKUP_REFRESH_INTERVAL_BASE = 120.0 BACKUP_REFRESH_ACTIVE_MULTIPLIER = 10.0 ITERATION_SLEEP = 1.0 def __init__( self, *, backup_settings, backup_sites, binlog_purge_settings, mysql_binlog_prefix, mysql_client_params, mysql_config_file_name, mysql_data_directory, mysql_relay_log_index_file, mysql_relay_log_prefix, restart_mysqld_callback, restore_max_binlog_bytes, server_id, state_dir, stats, temp_dir, ): super().__init__() self.backup_refresh_interval_base = self.BACKUP_REFRESH_INTERVAL_BASE self.backup_settings = backup_settings self.backup_sites = backup_sites self.backup_streams = [] self.backup_streams_initialized = False self.binlog_not_caught_log_counter = 0 self.binlog_purge_settings = binlog_purge_settings scanner_state_file = os.path.join(state_dir, "binlog_scanner_state.json") self.binlog_scanner = BinlogScanner( binlog_prefix=mysql_binlog_prefix, server_id=server_id, state_file=scanner_state_file, stats=stats, ) self.is_running = True self.iteration_sleep = self.ITERATION_SLEEP self.lock = threading.RLock() self.log = logging.getLogger(self.__class__.__name__) self.max_binlog_bytes = None self.mysql_client_params = mysql_client_params self.mysql_config_file_name = mysql_config_file_name self.mysql_data_directory = mysql_data_directory self.mysql_relay_log_index_file = mysql_relay_log_index_file self.mysql_relay_log_prefix = mysql_relay_log_prefix self.restart_mysqld_callback = restart_mysqld_callback self.restore_max_binlog_bytes = restore_max_binlog_bytes self.restore_coordinator = None self.seen_basebackup_infos = {} self.server_id = server_id self.site_transfers = {} self.state = { "backup_request": {}, "backups": [], "backups_fetched_at": 0, "binlogs_purged_at": 0, "errors": 0, "force_promote": False, "last_binlog_purge": time.time(), "last_binlog_rotation": time.time(), "last_could_have_purged": time.time(), "mode": self.Mode.idle, "owned_stream_ids": [], "promote_details": {}, "promote_on_restore_completion": False, "replication_state": {}, "restore_options": {}, "server_uuid": None, "uploaded_binlogs": [], } self.state_dir = state_dir state_file = os.path.join(state_dir, "myhoard_controller_state.json") self.state_manager = StateManager(lock=self.lock, state=self.state, state_file=state_file) self.stats = stats self.temp_dir = temp_dir self.wakeup_event = threading.Event() self._get_upload_backup_site() self._update_mode_tag() def is_log_backed_up(self, *, log_index): return all( not backup_stream.is_streaming_binlogs() or backup_stream.is_log_backed_up(log_index=log_index) for backup_stream in self.backup_streams ) def is_safe_to_reload(self): restore_coordinator = self.restore_coordinator if restore_coordinator and 
restore_coordinator.phase == RestoreCoordinator.Phase.restoring_basebackup: return False with self.lock: for stream in self.backup_streams: if stream.active_phase == BackupStream.ActivePhase.basebackup: return False return True def mark_backup_requested(self, *, backup_reason, normalized_backup_time=None): normalized_backup_time = normalized_backup_time or self._current_normalized_backup_timestamp() new_request = {"backup_reason": backup_reason, "normalized_backup_time": normalized_backup_time} with self.lock: old = self.state["backup_request"] if old: if ( old == new_request or normalized_backup_time < old["normalized_backup_time"] or ( normalized_backup_time == old["normalized_backup_time"] and old["backup_reason"] == BackupStream.BackupReason.scheduled ) ): return self.state_manager.update_state(backup_request=new_request) @property def mode(self): return self.state["mode"] def restore_backup(self, *, site, stream_id, target_time=None, target_time_approximate_ok=None): with self.lock: if self.mode != self.Mode.idle: raise ValueError(f"Current mode is {self.mode}, restore only allowed while in idle mode") for backup in list(self.state["backups"]): if backup["stream_id"] != stream_id or backup["site"] != site: continue if not backup["basebackup_info"]: raise ValueError(f"Backup {backup!r} cannot be restored") if target_time: if target_time < backup["basebackup_info"]["end_ts"]: raise ValueError(f"Requested target time {target_time} predates backup completion: {backup!r}") if backup["closed_at"] and target_time > backup["closed_at"]: raise ValueError(f"Requested target time {target_time} is after backup close: {backup!r}") break else: raise ValueError(f"Requested backup {stream_id!r} for site {site!r} not found") self.log.info( "Restoring backup stream %r, target time %r%s", stream_id, target_time, " (approximate time)" if target_time_approximate_ok else "" ) self.state_manager.update_state( mode=self.Mode.restore, restore_options={ "binlog_streams": [{ "site": site, "stream_id": stream_id, }], "pending_binlogs_state_file": self._get_restore_coordinator_pending_state_file_and_remove_old(), "state_file": self._get_restore_coordinator_state_file_and_remove_old(), "stream_id": stream_id, "site": site, "target_time": target_time, "target_time_approximate_ok": target_time_approximate_ok, } ) self._update_mode_tag() self.wakeup_event.set() def rotate_and_back_up_binlog(self): local_log_index = self._rotate_binlog() self.wakeup_event.set() return local_log_index def run(self): self.log.info("Controller running") while self.is_running: try: if self.mode == self.Mode.idle: self._handle_mode_idle() elif self.mode == self.Mode.restore: self._handle_mode_restore() elif self.mode == self.Mode.active: self._handle_mode_active() elif self.mode == self.Mode.observe: self._handle_mode_observe() elif self.mode == self.Mode.promote: self._handle_mode_promote() else: assert False, f"Invalid mode {self.mode}" self.wakeup_event.wait(self._get_iteration_sleep()) self.wakeup_event.clear() except Exception as ex: self.log.exception("Unexpected exception in mode %s", self.mode) self.stats.unexpected_exception(ex=ex, where="Controller.run") self.state_manager.increment_counter(name="errors") self.stats.increase("myhoard.generic_errors") time.sleep(self.iteration_sleep) self.is_running = False def stop(self): self.log.info("Stopping controller") self.is_running = False self.wakeup_event.set() with contextlib.suppress(Exception): self.join() if self.restore_coordinator: self.restore_coordinator.stop() for stream in 
self.backup_streams: stream.stop() self.log.info("Controller stopped") def switch_to_active_mode(self, *, force=False): with self.lock: if self.mode == self.Mode.promote and force and self.state["promote_details"].get("binlogs_applying"): self.state_manager.update_state(force_promote=True) return elif self.mode == self.Mode.restore: if not force: self._fail_if_restore_is_not_complete() else: if not self.restore_coordinator: raise ValueError("Cannot switch mode, current restoration state is indeterminate") self.restore_coordinator.force_completion() self.state_manager.update_state(force_promote=True, promote_on_restore_completion=True) return elif force: raise BadRequest("Can only force promotion while waiting for binlogs to be applied") elif self.mode in {self.Mode.active, self.Mode.promote}: self.log.info("Already in %s mode when switch to active mode was requested", self.mode) return elif self.mode == self.Mode.observe: self._fail_if_observe_to_active_switch_is_not_allowed() self.state_manager.update_state( backups_fetched_at=0, mode=self.Mode.promote, restore_options={}, ) self._update_mode_tag() self.wakeup_event.set() def switch_to_observe_mode(self): with self.lock: if self.mode == self.Mode.observe: self.log.info("Requested switch to observe mode but currently mode is already that") return elif self.mode in {self.Mode.active, self.Mode.promote}: raise ValueError(f"Switch from {self.mode} to observe mode is not allowed") elif self.mode == self.Mode.restore: self._fail_if_restore_is_not_complete() self._fail_if_not_read_only() self.state_manager.update_state( backups_fetched_at=0, mode=self.Mode.observe, restore_options={}, ) self._update_mode_tag() @classmethod def collect_binlogs_to_purge( cls, *, backup_streams, binlogs, exclude_uuid=None, log, mode, purge_settings, replication_state ): only_binlogs_without_gtids = None binlogs_to_purge = [] binlogs_to_maybe_purge = [] for binlog in binlogs: binlog_age = time.time() - binlog["processed_at"] min_age = purge_settings["min_binlog_age_before_purge"] if binlog_age < min_age: log.info( "Binlog %s was processed %s seconds ago and min age before purging is %s seconds, not purging", binlog["local_index"], math.ceil(binlog_age), min_age ) break if mode == cls.Mode.active: can_purge = all( stream.is_binlog_safe_to_delete(binlog, exclude_uuid=exclude_uuid) for stream in backup_streams ) if not can_purge: log.info("Binlog %s reported not safe to delete by some backup streams", binlog["local_index"]) elif purge_settings["purge_when_observe_no_streams"] and not backup_streams: log.info("No backup streams and purging is allowed, assuming purging %s is safe", binlog["local_index"]) can_purge = True else: at_least_one_safe_stream = False at_least_one_unsafe_stream = False for stream in backup_streams: if stream.state["basebackup_info"]: if stream.is_binlog_safe_to_delete( binlog, exclude_uuid=exclude_uuid ): at_least_one_safe_stream = True else: at_least_one_unsafe_stream = True can_purge = at_least_one_safe_stream and not at_least_one_unsafe_stream if can_purge: log.info( "Binlog %s is reported safe to delete by at least one stream and not as unsafe by any", binlog["local_index"] ) else: log.info( "Binlog %s either reported as unsafe to delete (%s) by some stream or not reported as safe to " "delete by any (%s)", binlog["local_index"], at_least_one_unsafe_stream, at_least_one_safe_stream ) if not can_purge: break if not replication_state: log.info("No replication state set, assuming purging binlog %s is safe", binlog["local_index"]) 
binlogs_to_purge.append(binlog) elif not binlog["gtid_ranges"]: if only_binlogs_without_gtids is None: only_binlogs_without_gtids = True if mode == cls.Mode.observe: binlogs_to_purge.append(binlog) else: binlogs_to_maybe_purge.append(binlog) else: only_binlogs_without_gtids = False for server_name, gtid_executed in replication_state.items(): if not are_gtids_in_executed_set(gtid_executed, binlog["gtid_ranges"], exclude_uuid=exclude_uuid): log.info( "Binlog %s not yet replicated to server %r, not purging", binlog["local_index"], server_name ) can_purge = False break if can_purge: log.info("Binlog %s has been replicated to all servers, purging", binlog["local_index"]) binlogs_to_purge.extend(binlogs_to_maybe_purge) binlogs_to_maybe_purge = [] binlogs_to_purge.append(binlog) else: break return binlogs_to_purge, only_binlogs_without_gtids @staticmethod def get_backup_list(backup_sites, *, seen_basebackup_infos=None, site_transfers=None): if seen_basebackup_infos is None: seen_basebackup_infos = {} if site_transfers is None: site_transfers = {} backups = [] for site_name, site_config in backup_sites.items(): file_storage = site_transfers.get(site_name) if file_storage is None: file_storage = get_transfer(site_config["object_storage"]) site_transfers[site_name] = file_storage streams = list(file_storage.list_prefixes(site_name)) for site_and_stream_id in streams: basebackup_compressed_size = None basebackup_info = None closed_info = None completed_info = None for info in file_storage.list_iter(site_and_stream_id): file_name = info["name"].rsplit("/", 1)[-1] if file_name == "basebackup.xbstream": basebackup_compressed_size = info["size"] elif file_name == "basebackup.json": basebackup_info = seen_basebackup_infos.get(site_and_stream_id) if basebackup_info is None: info_str, _ = file_storage.get_contents_to_string(info["name"]) basebackup_info = json.loads(info_str.decode("utf-8")) seen_basebackup_infos[site_and_stream_id] = basebackup_info elif file_name == "closed.json": closed_info = parse_fs_metadata(info["metadata"]) elif file_name == "completed.json": completed_info = parse_fs_metadata(info["metadata"]) if basebackup_info and basebackup_compressed_size: basebackup_info = dict(basebackup_info, compressed_size=basebackup_compressed_size) resumable = basebackup_info and basebackup_compressed_size completed = resumable and completed_info closed = completed and closed_info backups.append({ "basebackup_info": basebackup_info, "closed_at": closed_info["closed_at"] if closed else None, "completed_at": completed_info["completed_at"] if completed else None, "recovery_site": site_config.get("recovery_only", False), "stream_id": site_and_stream_id.rsplit("/", 1)[-1], "resumable": bool(resumable), "site": site_name, }) return backups def _apply_downloaded_remote_binlogs(self): to_apply = self.state["promote_details"].get("binlogs_to_apply") if self.state["promote_details"].get("binlogs_applying") or not to_apply: return expected_ranges = [] with mysql_cursor(**self.mysql_client_params) as cursor: cursor.execute("STOP SLAVE") cursor.execute("SHOW SLAVE STATUS") slave_status = cursor.fetchone() first_name = slave_status["Relay_Log_File"] if not first_name: first_name = "relay.000001" if not self.state["promote_details"].get("relay_index_updated"): first_index = int(first_name.split(".")[-1]) if ( first_index == 1 and not slave_status["Relay_Master_Log_File"] and not slave_status["Exec_Master_Log_Pos"] and not slave_status["Retrieved_Gtid_Set"] ): self.log.info( "Slave status is empty, assuming RESET SLAVE 
has been executed and writing relay index manually" ) with open(self.mysql_relay_log_index_file, "wb") as index_file: names = [self._relay_log_name(index=i + 1, full_path=False) for i in range(len(to_apply))] index_file.write(("\n".join(names) + "\n").encode("utf-8")) self.log.info("Wrote names: %s", names) else: for _ in to_apply: cursor.execute("FLUSH RELAY LOGS") self.state_manager.update_state( promote_details={ **self.state["promote_details"], "relay_index_updated": True, } ) for idx, binlog in enumerate(to_apply): if not self.state["promote_details"].get("relay_logs_renamed"): os.rename(binlog["local_prefetch_name"], self._relay_log_name(index=first_index + idx)) self.log.info( "Renamed %r to %r", binlog["local_prefetch_name"], self._relay_log_name(index=first_index + idx) ) expected_ranges.extend(binlog["gtid_ranges"]) if not self.state["promote_details"].get("relay_logs_renamed"): self.state_manager.update_state( promote_details={ **self.state["promote_details"], "relay_logs_renamed": True, } ) options = { "MASTER_AUTO_POSITION": 0, "MASTER_HOST": "dummy", "RELAY_LOG_FILE": first_name, "RELAY_LOG_POS": 4, } change_master_to(cursor=cursor, options=options) cursor.execute("START SLAVE SQL_THREAD") expected_file = self._relay_log_name(index=first_index + len(to_apply), full_path=False) expected_ranges = make_gtid_range_string(expected_ranges) self.log.info( "Started SQL thread, waiting for file %r and GTID range %r to be reached", expected_file, expected_ranges ) self.state_manager.update_state( promote_details={ **self.state["promote_details"], "binlogs_applying": to_apply, "binlogs_to_apply": [], "expected_file": expected_file, "expected_ranges": expected_ranges, } ) def _binlog_uploaded(self, *, local_index, remote_key, stream): with self.lock: binlog_info = { "exclude_stream_id": stream.stream_id, "local_index": local_index, "remote_key": remote_key, } self.state_manager.update_state(uploaded_binlogs=self.state["uploaded_binlogs"] + [binlog_info]) def _build_backup_stream(self, backup): stream_id = backup["stream_id"] backup_site = self.backup_sites[backup["site"]] return BackupStream( backup_reason=None, compression=backup_site.get("compression"), file_storage_setup_fn=lambda: get_transfer(backup_site["object_storage"]), file_uploaded_callback=self._binlog_uploaded, mode=BackupStream.Mode.observe, mysql_client_params=self.mysql_client_params, mysql_config_file_name=self.mysql_config_file_name, mysql_data_directory=self.mysql_data_directory, normalized_backup_time=None, rsa_public_key_pem=backup_site["encryption_keys"]["public"], remote_binlogs_state_file=self._remote_binlogs_state_file_from_stream_id(stream_id), server_id=self.server_id, state_file=self._state_file_from_stream_id(stream_id), site=backup["site"], stats=self.stats, stream_id=stream_id, temp_dir=self.temp_dir, ) def _delete_backup_stream_state(self, stream_id): state_file = self._state_file_from_stream_id(stream_id) if os.path.exists(state_file): os.remove(state_file) remote_binlogs_state_file = self._remote_binlogs_state_file_from_stream_id(stream_id) if os.path.exists(remote_binlogs_state_file): os.remove(remote_binlogs_state_file) def _cache_server_uuid_if_missing(self): if self.state["server_uuid"]: return with mysql_cursor(**self.mysql_client_params) as cursor: cursor.execute("SELECT @@GLOBAL.server_uuid AS server_uuid") server_uuid = cursor.fetchone()["server_uuid"] self.state_manager.update_state(server_uuid=server_uuid) def _check_binlog_apply_status(self): binlogs = 
self.state["promote_details"].get("binlogs_applying") if not binlogs: return expected_file = self.state["promote_details"].get("expected_file") expected_ranges = self.state["promote_details"].get("expected_ranges") with mysql_cursor(**self.mysql_client_params) as cursor: cursor.execute("SHOW SLAVE STATUS") slave_status = cursor.fetchone() current_file = slave_status["Relay_Log_File"] reached_target = True if current_file != expected_file: reached_target = False elif expected_ranges: cursor.execute("SELECT GTID_SUBSET(%s, @@GLOBAL.gtid_executed) AS executed", [expected_ranges]) if not cursor.fetchone()["executed"]: reached_target = False if not reached_target: if self.state["force_promote"]: self.log.warning("Promotion target state not reached but forced promotion requested") else: sql_thread_running = slave_status["Slave_SQL_Running"] if sql_thread_running != "Yes": self.log.warning("Expected SQL thread to be running state is %s, starting it", sql_thread_running) cursor.execute("START SLAVE SQL_THREAD") return else: self.log.info("Expected relay log (%r) and GTIDs reached (%r)", expected_file, expected_ranges) cursor.execute("STOP SLAVE") promote_details = { **self.state["promote_details"], "binlogs_applying": [], "expected_file": None, "expected_ranges": None, } if not reached_target and self.state["force_promote"]: promote_details["binlogs_to_apply"] = [] promote_details["binlogs_to_fetch"] = [] self.state_manager.update_state(promote_details=promote_details) def _create_new_backup_stream_if_requested_and_max_streams_not_exceeded(self): if len(self.backup_streams) >= 2: return with self.lock: request = self.state["backup_request"] if request: self._start_new_backup( backup_reason=request["backup_reason"], normalized_backup_time=request["normalized_backup_time"], ) def _create_restore_coordinator_if_missing(self): if self.restore_coordinator: return options = self.state["restore_options"] backup_site = self.backup_sites[options["site"]] storage_config = backup_site["object_storage"] self.log.info("Creating new restore coordinator") self.restore_coordinator = RestoreCoordinator( binlog_streams=options["binlog_streams"], file_storage_config=storage_config, max_binlog_bytes=self.restore_max_binlog_bytes, mysql_client_params=self.mysql_client_params, mysql_config_file_name=self.mysql_config_file_name, mysql_data_directory=self.mysql_data_directory, mysql_relay_log_index_file=self.mysql_relay_log_index_file, mysql_relay_log_prefix=self.mysql_relay_log_prefix, pending_binlogs_state_file=options["pending_binlogs_state_file"], restart_mysqld_callback=self.restart_mysqld_callback, rsa_private_key_pem=backup_site["encryption_keys"]["private"], site=options["site"], state_file=options["state_file"], stats=self.stats, stream_id=options["stream_id"], target_time=options["target_time"], target_time_approximate_ok=options["target_time_approximate_ok"], temp_dir=self.temp_dir, ) if not self.restore_coordinator.is_complete(): self.log.info("Starting restore coordinator") self.restore_coordinator.start() else: self.log.info("Newly created restore coordinator is already in completed state") def _current_normalized_backup_timestamp(self): now = datetime.datetime.now(datetime.timezone.utc) normalized = now backup_interval_minutes = self.backup_settings["backup_interval_minutes"] backup_hour = self.backup_settings["backup_hour"] backup_minute = self.backup_settings["backup_minute"] if normalized.hour < backup_hour or (normalized.hour == backup_hour and normalized.minute < backup_minute): normalized = 
normalized - datetime.timedelta(days=1) normalized = normalized.replace(hour=backup_hour, minute=backup_minute, second=0, microsecond=0) while normalized + datetime.timedelta(minutes=backup_interval_minutes) < now: normalized = normalized + datetime.timedelta(minutes=backup_interval_minutes) return normalized.isoformat()
Apache License 2.0
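The core of `_determine_unapplied_remote_binlogs` is the `GTID_SUBSET` check: a remote binlog is treated as already applied when every GTID range it carries is contained in the server's `@@GLOBAL.gtid_executed`. A standalone sketch of that query — the connection parameters and the GTID range are placeholders, where myhoard would use `mysql_client_params` and the ranges recorded for the binlog:

import pymysql

conn = pymysql.connect(
    host="127.0.0.1", user="root", password="",  # placeholders for mysql_client_params
    cursorclass=pymysql.cursors.DictCursor,
)
gtid_str = "b1f3c0a0-1111-2222-3333-444455556666:1-100"  # made-up GTID range string

with conn.cursor() as cursor:
    cursor.execute("SELECT GTID_SUBSET(%s, @@GLOBAL.gtid_executed) AS executed", [gtid_str])
    already_applied = bool(cursor.fetchone()["executed"])

print(already_applied)  # True when the range is already in gtid_executed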
tensorflow/quantum
tensorflow_quantum/python/layers/high_level/pqc.py
PQC.__init__
python
def __init__(
        self,
        model_circuit,
        operators,
        *,
        repetitions=None,
        backend='noiseless',
        differentiator=None,
        initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi),
        regularizer=None,
        constraint=None,
        **kwargs,
):
    super().__init__(**kwargs)

    if not isinstance(model_circuit, cirq.Circuit):
        raise TypeError("model_circuit must be a cirq.Circuit object."
                        " Given: {}".format(model_circuit))

    self._symbols_list = list(
        sorted(util.get_circuit_symbols(model_circuit)))
    self._symbols = tf.constant([str(x) for x in self._symbols_list])
    self._model_circuit = util.convert_to_tensor([model_circuit])
    if len(self._symbols_list) == 0:
        raise ValueError("model_circuit has no sympy.Symbols. Please "
                         "provide a circuit that contains symbols so "
                         "that their values can be trained.")

    if isinstance(operators, (cirq.PauliString, cirq.PauliSum)):
        operators = [operators]
    if not isinstance(operators, (list, np.ndarray, tuple)):
        raise TypeError("operators must be a cirq.PauliSum or "
                        "cirq.PauliString, or a list, tuple, "
                        "or np.array containing them. "
                        "Got {}.".format(type(operators)))
    if not all([
            isinstance(op, (cirq.PauliString, cirq.PauliSum))
            for op in operators
    ]):
        raise TypeError("Each element in operators to measure "
                        "must be a cirq.PauliString"
                        " or cirq.PauliSum")
    self._operators = util.convert_to_tensor([operators])

    self._analytic = False
    if repetitions is None:
        self._analytic = True
    if not self._analytic and not isinstance(repetitions, numbers.Integral):
        raise TypeError("repetitions must be a positive integer value."
                        " Given: ".format(repetitions))
    if not self._analytic and repetitions <= 0:
        raise ValueError("Repetitions must be greater than zero.")
    if not self._analytic:
        self._repetitions = tf.constant(
            [[repetitions for _ in range(len(operators))]],
            dtype=tf.dtypes.int32)

    if backend == 'noisy':
        raise ValueError("noisy backend value is not supported in "
                         "tfq.layers.PQC. Please use tfq.layers.NoisyPQC "
                         "instead.")

    not_default = backend != 'noiseless'
    not_default &= backend is not None
    if not isinstance(
            backend, cirq.Sampler) and repetitions is not None and not_default:
        raise TypeError("provided backend does not inherit cirq.Sampler "
                        "and repetitions!=None. Please provide a backend "
                        "that inherits cirq.Sampler or set "
                        "repetitions=None.")
    if not isinstance(backend, cirq.sim.simulator.SimulatesExpectationValues
                      ) and repetitions is None and not_default:
        raise TypeError("provided backend does not inherit "
                        "cirq.sim.simulator.SimulatesExpectationValues and "
                        "repetitions=None. Please provide a backend that "
                        "inherits "
                        "cirq.sim.simulator.SimulatesExpectationValues.")
    if self._analytic:
        self._executor = expectation.Expectation(
            backend=backend, differentiator=differentiator)
    else:
        self._executor = sampled_expectation.SampledExpectation(
            backend=backend, differentiator=differentiator)

    self._append_layer = elementary.AddCircuit()

    self.initializer = tf.keras.initializers.get(initializer)
    self.regularizer = tf.keras.regularizers.get(regularizer)
    self.constraint = tf.keras.constraints.get(constraint)

    self.parameters = self.add_weight('parameters',
                                      shape=self._symbols.shape,
                                      initializer=self.initializer,
                                      regularizer=self.regularizer,
                                      constraint=self.constraint,
                                      dtype=tf.float32,
                                      trainable=True)
Instantiate this layer.

Create a layer that will output expectation values of the given
operators when fed quantum data to its input layer. This layer will
accept one input tensor representing a quantum data source (these
circuits must not contain any symbols) and append the model_circuit to
them, execute them and then finally output the expectation values.

model_circuit: `cirq.Circuit` containing `sympy.Symbols` that will be
    used as the model which will be fed quantum data inputs.
operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum` objects
    used as observables at the end of the model circuit.
repetitions: Optional Python `int` indicating how many samples to use
    when estimating expectation values. If `None` analytic expectation
    calculation is used.
backend: Optional Backend to use to simulate states. Defaults to
    the noiseless TensorFlow simulator, however users may also
    specify a preconfigured cirq simulation object to use instead.
    If a cirq object is given it must inherit either
    `cirq.sim.simulator.SimulatesExpectationValues` if analytic
    expectations are desired or `cirq.Sampler` if sampled expectations
    are desired.
differentiator: Optional `tfq.differentiator` object to specify how
    gradients of `model_circuit` should be calculated.
initializer: Optional `tf.keras.initializer` object to specify how the
    symbols in `model_circuit` should be initialized when creating the
    managed variables.
regularizer: Optional `tf.keras.regularizer` object applied to the
    managed variables parameterizing `model_circuit`.
constraint: Optional `tf.keras.constraint` object applied to the
    managed variables parameterizing `model_circuit`.
https://github.com/tensorflow/quantum/blob/4c2f4ef966a801d8447593144614fd3e9decc3f0/tensorflow_quantum/python/layers/high_level/pqc.py#L133-L271
import numbers

import numpy as np
import tensorflow as tf
import cirq
import sympy

from tensorflow_quantum.python.layers.circuit_executors import expectation, sampled_expectation
from tensorflow_quantum.python.layers.circuit_construction import elementary
from tensorflow_quantum.python import util


class PQC(tf.keras.layers.Layer):
Apache License 2.0
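Following the docstring above, a minimal usage sketch of the layer: a one-qubit, one-symbol model circuit measured with a Pauli-Z observable, fed two symbol-free data circuits. The qubit position and symbol name are arbitrary choices for illustration:

import cirq
import sympy
import tensorflow_quantum as tfq

qubit = cirq.GridQubit(0, 0)
theta = sympy.Symbol('theta')
model_circuit = cirq.Circuit(cirq.ry(theta)(qubit))

# Analytic expectation (repetitions=None) of Z after the model circuit.
pqc_layer = tfq.layers.PQC(model_circuit, cirq.Z(qubit))

# Quantum data: circuits without symbols, converted to the tensor representation.
quantum_data = tfq.convert_to_tensor([
    cirq.Circuit(),               # |0>
    cirq.Circuit(cirq.X(qubit)),  # |1>
])

expectations = pqc_layer(quantum_data)  # shape (2, 1), one expectation value per input circuit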
wavefronthq/python-client
wavefront_api_client/models/response_container_facets_response_container.py
ResponseContainerFacetsResponseContainer.status
python
def status(self):
    return self._status
Gets the status of this ResponseContainerFacetsResponseContainer.  # noqa: E501

:return: The status of this ResponseContainerFacetsResponseContainer.  # noqa: E501
:rtype: ResponseStatus
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/models/response_container_facets_response_container.py#L81-L88
import pprint
import re

import six

from wavefront_api_client.configuration import Configuration


class ResponseContainerFacetsResponseContainer(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'response': 'FacetsResponseContainer',
        'status': 'ResponseStatus'
    }

    attribute_map = {
        'response': 'response',
        'status': 'status'
    }

    def __init__(self, response=None, status=None, _configuration=None):
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._response = None
        self._status = None
        self.discriminator = None

        if response is not None:
            self.response = response
        self.status = status

    @property
    def response(self):
        return self._response

    @response.setter
    def response(self, response):
        self._response = response

    @property
Apache License 2.0
aws/aws-dynamodb-encryption-python
examples/src/dynamodb_encryption_sdk_examples/wrapped_symmetric_encrypted_table.py
encrypt_item
python
def encrypt_item(table_name, aes_wrapping_key_bytes, hmac_signing_key_bytes):
    index_key = {"partition_attribute": "is this", "sort_attribute": 55}
    plaintext_item = {
        "example": "data",
        "some numbers": 99,
        "and some binary": Binary(b"\x00\x01\x02"),
        "leave me": "alone",
    }
    encrypted_attributes = set(plaintext_item.keys())
    encrypted_attributes.remove("leave me")
    unencrypted_attributes = set(index_key.keys())
    unencrypted_attributes.add("leave me")
    plaintext_item.update(index_key)

    table = boto3.resource("dynamodb").Table(table_name)

    wrapping_key = JceNameLocalDelegatedKey(
        key=aes_wrapping_key_bytes,
        algorithm="AES",
        key_type=EncryptionKeyType.SYMMETRIC,
        key_encoding=KeyEncodingType.RAW,
    )
    signing_key = JceNameLocalDelegatedKey(
        key=hmac_signing_key_bytes,
        algorithm="HmacSHA512",
        key_type=EncryptionKeyType.SYMMETRIC,
        key_encoding=KeyEncodingType.RAW,
    )

    wrapped_cmp = WrappedCryptographicMaterialsProvider(
        wrapping_key=wrapping_key, unwrapping_key=wrapping_key, signing_key=signing_key
    )

    actions = AttributeActions(
        default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={"leave me": CryptoAction.DO_NOTHING}
    )

    encrypted_table = EncryptedTable(table=table, materials_provider=wrapped_cmp, attribute_actions=actions)

    encrypted_table.put_item(Item=plaintext_item)

    encrypted_item = table.get_item(Key=index_key)["Item"]
    decrypted_item = encrypted_table.get_item(Key=index_key)["Item"]

    for name in encrypted_attributes:
        assert encrypted_item[name] != plaintext_item[name]
        assert decrypted_item[name] == plaintext_item[name]

    for name in unencrypted_attributes:
        assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]

    encrypted_table.delete_item(Key=index_key)
Demonstrate use of EncryptedTable to transparently encrypt an item.
https://github.com/aws/aws-dynamodb-encryption-python/blob/94244332a77a1929ee4c1cebc366787ca57e206d/examples/src/dynamodb_encryption_sdk_examples/wrapped_symmetric_encrypted_table.py#L24-L86
import boto3
from boto3.dynamodb.types import Binary

from dynamodb_encryption_sdk.delegated_keys.jce import JceNameLocalDelegatedKey
from dynamodb_encryption_sdk.encrypted.table import EncryptedTable
from dynamodb_encryption_sdk.identifiers import CryptoAction, EncryptionKeyType, KeyEncodingType
from dynamodb_encryption_sdk.material_providers.wrapped import WrappedCryptographicMaterialsProvider
from dynamodb_encryption_sdk.structures import AttributeActions
Apache License 2.0
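To show how the example function above might be driven, a hedged sketch follows; the table name is hypothetical, the keys are generated locally, and running it assumes AWS credentials plus an existing DynamoDB table keyed on partition_attribute (string) and sort_attribute (number), as the index_key inside encrypt_item implies.

# Hedged sketch, not part of the examples module. Requires AWS credentials and
# an existing table whose key schema matches the index_key used by encrypt_item.
import os

from dynamodb_encryption_sdk_examples.wrapped_symmetric_encrypted_table import encrypt_item

aes_wrapping_key = os.urandom(32)   # 256-bit AES wrapping key
hmac_signing_key = os.urandom(64)   # HmacSHA512 signing key
encrypt_item("my-hypothetical-table", aes_wrapping_key, hmac_signing_key)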
mathandy/svgpathtools
svgpathtools/path.py
Line.split
python
def split(self, t):
    pt = self.point(t)
    return Line(self.start, pt), Line(pt, self.end)
Returns two segments whose union is this segment and which join at self.point(t).
https://github.com/mathandy/svgpathtools/blob/09ce497a4f0772f8c414195446bf85f2f88c6588/svgpathtools/path.py#L786-L790
from __future__ import division, absolute_import, print_function import re try: from collections.abc import MutableSequence except ImportError: from collections import MutableSequence from warnings import warn from operator import itemgetter import numpy as np from itertools import tee from functools import reduce from numpy import sqrt, cos, sin, tan, arccos as acos, arcsin as asin, degrees, radians, log, pi, ceil from numpy import exp, sqrt as csqrt, angle as phase, isnan try: from scipy.integrate import quad _quad_available = True except: _quad_available = False from .bezier import (bezier_intersections, bezier_bounding_box, split_bezier, bezier_by_line_intersections, polynomial2bezier, bezier2polynomial) from .misctools import BugException from .polytools import rational_limit, polyroots, polyroots01, imag, real try: str = basestring except NameError: pass COMMANDS = set('MmZzLlHhVvCcSsQqTtAa') UPPERCASE = set('MZLHVCSQTA') COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])") FLOAT_RE = re.compile("[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?") LENGTH_MIN_DEPTH = 5 LENGTH_ERROR = 1e-12 USE_SCIPY_QUAD = True ILENGTH_MIN_DEPTH = 5 ILENGTH_ERROR = 1e-12 ILENGTH_S_TOL = 1e-12 ILENGTH_MAXITS = 10000 CLOSED_WARNING_ON = True _NotImplemented4ArcException = Exception("This method has not yet been implemented for Arc objects.") _is_smooth_from_warning = ("The name of this method is somewhat misleading (yet kept for " "compatibility with scripts created using svg.path 2.0). This method " "is meant only for d-string creation and should NOT be used to check " "for kinks. To check a segment for differentiability, use the " "joins_smoothly_with() method instead or the kinks() function (in " "smoothing.py).\nTo turn off this warning, set " "warning_on=False.") def bezier_segment(*bpoints): if len(bpoints) == 2: start, end = bpoints return Line(start, end) elif len(bpoints) == 4: start, control1, control2, end = bpoints return CubicBezier(start, control1, control2, end) elif len(bpoints) == 3: start, control, end = bpoints return QuadraticBezier(start, control, end) else: assert len(bpoints) in (2, 3, 4) def is_bezier_segment(seg): return (isinstance(seg, Line) or isinstance(seg, QuadraticBezier) or isinstance(seg, CubicBezier)) def is_path_segment(seg): return is_bezier_segment(seg) or isinstance(seg, Arc) def is_bezier_path(path): return isinstance(path, Path) and all(map(is_bezier_segment, path)) def concatpaths(list_of_paths): return Path(*[seg for path in list_of_paths for seg in path]) def bbox2path(xmin, xmax, ymin, ymax): b = Line(xmin + 1j*ymin, xmax + 1j*ymin) t = Line(xmin + 1j*ymax, xmax + 1j*ymax) r = Line(xmax + 1j*ymin, xmax + 1j*ymax) l = Line(xmin + 1j*ymin, xmin + 1j*ymax) return Path(b, r, t.reversed(), l.reversed()) def polyline(*points): return Path(*[Line(points[i], points[i+1]) for i in range(len(points) - 1)]) def polygon(*points): return Path(*[Line(points[i], points[(i + 1) % len(points)]) for i in range(len(points))]) def bpoints2bezier(bpoints): order = len(bpoints) - 1 if order == 3: return CubicBezier(*bpoints) elif order == 2: return QuadraticBezier(*bpoints) elif order == 1: return Line(*bpoints) else: assert len(bpoints) in {2, 3, 4} def poly2bez(poly, return_bpoints=False): bpoints = polynomial2bezier(poly) if return_bpoints: return bpoints else: return bpoints2bezier(bpoints) def bez2poly(bez, numpy_ordering=True, return_poly1d=False): if is_bezier_segment(bez): bez = bez.bpoints() return bezier2polynomial(bez, numpy_ordering=numpy_ordering, return_poly1d=return_poly1d) 
def transform_segments_together(path, transformation): transformed_segs = [transformation(seg) for seg in path] joint_was_continuous = [sa.end == sb.start for sa, sb in path.joints()] for i, (sa, sb) in enumerate(path.joints()): if sa.end == sb.start: transformed_segs[i].end = transformed_segs[(i + 1) % len(path)].start return Path(*transformed_segs) def rotate(curve, degs, origin=None): def transform(z): return exp(1j*radians(degs))*(z - origin) + origin if origin is None: if isinstance(curve, Arc): origin = curve.center else: origin = curve.point(0.5) if isinstance(curve, Path): transformation = lambda seg: rotate(seg, degs, origin=origin) return transform_segments_together(curve, transformation) elif is_bezier_segment(curve): return bpoints2bezier([transform(bpt) for bpt in curve.bpoints()]) elif isinstance(curve, Arc): new_start = transform(curve.start) new_end = transform(curve.end) new_rotation = curve.rotation + degs return Arc(new_start, radius=curve.radius, rotation=new_rotation, large_arc=curve.large_arc, sweep=curve.sweep, end=new_end) else: raise TypeError("Input `curve` should be a Path, Line, " "QuadraticBezier, CubicBezier, or Arc object.") def translate(curve, z0): if isinstance(curve, Path): transformation = lambda seg: translate(seg, z0) return transform_segments_together(curve, transformation) elif is_bezier_segment(curve): return bpoints2bezier([bpt + z0 for bpt in curve.bpoints()]) elif isinstance(curve, Arc): new_start = curve.start + z0 new_end = curve.end + z0 return Arc(new_start, radius=curve.radius, rotation=curve.rotation, large_arc=curve.large_arc, sweep=curve.sweep, end=new_end) else: raise TypeError("Input `curve` should be a Path, Line, " "QuadraticBezier, CubicBezier, or Arc object.") def scale(curve, sx, sy=None, origin=0j): if sy is None: isy = 1j*sx else: isy = 1j*sy def _scale(z): if sy is None: return sx*z return sx*z.real + isy*z.imag def scale_bezier(bez): p = [_scale(c) for c in bez2poly(bez)] p[-1] += origin - _scale(origin) return poly2bez(p) if isinstance(curve, Path): transformation = lambda seg: scale(seg, sx, sy, origin) return transform_segments_together(curve, transformation) elif is_bezier_segment(curve): return scale_bezier(curve) elif isinstance(curve, Arc): if sy is None or sy == sx: return Arc(start=sx*(curve.start - origin) + origin, radius=sx*curve.radius, rotation=curve.rotation, large_arc=curve.large_arc, sweep=curve.sweep, end=sx*(curve.end - origin) + origin) else: raise Exception("\nFor `Arc` objects, only scale transforms " "with sx==sy are implemented.\n") else: raise TypeError("Input `curve` should be a Path, Line, " "QuadraticBezier, CubicBezier, or Arc object.") def transform(curve, tf): def to_point(p): return np.array([[p.real], [p.imag], [1.0]]) def to_vector(z): return np.array([[z.real], [z.imag], [0.0]]) def to_complex(v): return v.item(0) + 1j * v.item(1) if isinstance(curve, Path): transformation = lambda seg: transform(seg, tf) return transform_segments_together(curve, transformation) elif is_bezier_segment(curve): return bpoints2bezier([to_complex(tf.dot(to_point(p))) for p in curve.bpoints()]) elif isinstance(curve, Arc): new_start = to_complex(tf.dot(to_point(curve.start))) new_end = to_complex(tf.dot(to_point(curve.end))) rx2 = curve.radius.real ** 2 ry2 = curve.radius.imag ** 2 Q = np.array([[1/rx2, 0], [0, 1/ry2]]) invT = np.linalg.inv(tf[:2,:2]) D = reduce(np.matmul, [invT.T, Q, invT]) eigvals, eigvecs = np.linalg.eig(D) rx = 1 / np.sqrt(eigvals[0]) ry = 1 / np.sqrt(eigvals[1]) new_radius = complex(rx, ry) 
xeigvec = eigvecs[:, 0] rot = np.degrees(np.arccos(xeigvec[0])) if new_radius.real == 0 or new_radius.imag == 0 : return Line(new_start, new_end) else : return Arc(new_start, radius=new_radius, rotation=curve.rotation + rot, large_arc=curve.large_arc, sweep=curve.sweep, end=new_end, autoscale_radius=False) else: raise TypeError("Input `curve` should be a Path, Line, " "QuadraticBezier, CubicBezier, or Arc object.") def bezier_unit_tangent(seg, t): dseg = seg.derivative(t) try: unit_tangent = dseg/abs(dseg) except (ZeroDivisionError, FloatingPointError): dseg_poly = seg.poly().deriv() dseg_abs_squared_poly = (real(dseg_poly) ** 2 + imag(dseg_poly) ** 2) try: unit_tangent = csqrt(rational_limit(dseg_poly**2, dseg_abs_squared_poly, t)) except ValueError: bef = seg.poly().deriv()(t - 1e-4) aft = seg.poly().deriv()(t + 1e-4) mes = ("Unit tangent appears to not be well-defined at " "t = {}, \n".format(t) + "seg.poly().deriv()(t - 1e-4) = {}\n".format(bef) + "seg.poly().deriv()(t + 1e-4) = {}".format(aft)) raise ValueError(mes) return unit_tangent def segment_curvature(self, t, use_inf=False): dz = self.derivative(t) ddz = self.derivative(t, n=2) dx, dy = dz.real, dz.imag ddx, ddy = ddz.real, ddz.imag old_np_seterr = np.seterr(invalid='raise') try: kappa = abs(dx*ddy - dy*ddx)/sqrt(dx*dx + dy*dy)**3 except (ZeroDivisionError, FloatingPointError): p = self.poly() dp = p.deriv() ddp = dp.deriv() dx, dy = real(dp), imag(dp) ddx, ddy = real(ddp), imag(ddp) f2 = (dx*ddy - dy*ddx)**2 g2 = (dx*dx + dy*dy)**3 lim2 = rational_limit(f2, g2, t) if lim2 < 0: return 0 kappa = sqrt(lim2) finally: np.seterr(**old_np_seterr) return kappa def bezier_radialrange(seg, origin, return_all_global_extrema=False): def _radius(tau): return abs(seg.point(tau) - origin) shifted_seg_poly = seg.poly() - origin r_squared = real(shifted_seg_poly) ** 2 + imag(shifted_seg_poly) ** 2 extremizers = [0, 1] + polyroots01(r_squared.deriv()) extrema = [(_radius(t), t) for t in extremizers] if return_all_global_extrema: raise NotImplementedError else: seg_global_min = min(extrema, key=itemgetter(0)) seg_global_max = max(extrema, key=itemgetter(0)) return seg_global_min, seg_global_max def closest_point_in_path(pt, path): return path.radialrange(pt)[0] def farthest_point_in_path(pt, path): return path.radialrange(pt)[1] def path_encloses_pt(pt, opt, path): assert path.isclosed() intersections = Path(Line(pt, opt)).intersect(path) if len(intersections) % 2: return True else: return False def segment_length(curve, start, end, start_point, end_point, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH, depth=0): mid = (start + end)/2 mid_point = curve.point(mid) length = abs(end_point - start_point) first_half = abs(mid_point - start_point) second_half = abs(end_point - mid_point) length2 = first_half + second_half if (length2 - length > error) or (depth < min_depth): depth += 1 return (segment_length(curve, start, mid, start_point, mid_point, error, min_depth, depth) + segment_length(curve, mid, end, mid_point, end_point, error, min_depth, depth)) return length2 def inv_arclength(curve, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS, error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH): curve_length = curve.length(error=error, min_depth=min_depth) assert curve_length > 0 if not 0 <= s <= curve_length: raise ValueError("s is not in interval [0, curve.length()].") if s == 0: return 0 if s == curve_length: return 1 if isinstance(curve, Path): seg_lengths = [seg.length(error=error, min_depth=min_depth) for seg in curve] lsum = 0 for k, len_k in 
enumerate(seg_lengths): if lsum <= s <= lsum + len_k: t = inv_arclength(curve[k], s - lsum, s_tol=s_tol, maxits=maxits, error=error, min_depth=min_depth) return curve.t2T(k, t) lsum += len_k return 1 elif isinstance(curve, Line): return s / curve.length(error=error, min_depth=min_depth) elif (isinstance(curve, QuadraticBezier) or isinstance(curve, CubicBezier) or isinstance(curve, Arc)): t_upper = 1 t_lower = 0 iteration = 0 while iteration < maxits: iteration += 1 t = (t_lower + t_upper)/2 s_t = curve.length(t1=t, error=error, min_depth=min_depth) if abs(s_t - s) < s_tol: return t elif s_t < s: t_lower = t else: t_upper = t if t_upper == t_lower: warn("t is as close as a float can be to the correct value, " "but |s(t) - s| = {} > s_tol".format(abs(s_t-s))) return t raise Exception("Maximum iterations reached with s(t) - s = {}." "".format(s_t - s)) else: raise TypeError("First argument must be a Line, QuadraticBezier, " "CubicBezier, Arc, or Path object.") def crop_bezier(seg, t0, t1): assert t0 < t1 if t0 == 0: cropped_seg = seg.split(t1)[0] elif t1 == 1: cropped_seg = seg.split(t0)[1] else: pt1 = seg.point(t1) trimmed_seg = crop_bezier(seg, t0, 1) t1_adj = trimmed_seg.radialrange(pt1)[0][1] cropped_seg = crop_bezier(trimmed_seg, 0, t1_adj) return cropped_seg class Line(object): def __init__(self, start, end): self.start = start self.end = end def __hash__(self): return hash((self.start, self.end)) def __repr__(self): return 'Line(start=%s, end=%s)' % (self.start, self.end) def __eq__(self, other): if not isinstance(other, Line): return NotImplemented return self.start == other.start and self.end == other.end def __ne__(self, other): if not isinstance(other, Line): return NotImplemented return not self == other def __getitem__(self, item): return self.bpoints()[item] def __len__(self): return 2 def joins_smoothly_with(self, previous, wrt_parameterization=False): if wrt_parameterization: return self.start == previous.end and np.isclose( self.derivative(0), previous.derivative(1)) else: return self.start == previous.end and np.isclose( self.unit_tangent(0), previous.unit_tangent(1)) def point(self, t): distance = self.end - self.start return self.start + distance*t def points(self, ts): return self.poly()(ts) def length(self, t0=0, t1=1, error=None, min_depth=None): return abs(self.end - self.start)*(t1-t0) def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS, error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH): return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error, min_depth=min_depth) def bpoints(self): return self.start, self.end def poly(self, return_coeffs=False): p = self.bpoints() coeffs = ([p[1] - p[0], p[0]]) if return_coeffs: return coeffs else: return np.poly1d(coeffs) def derivative(self, t=None, n=1): assert self.end != self.start if n == 1: return self.end - self.start elif n > 1: return 0 else: raise ValueError("n should be a positive integer.") def unit_tangent(self, t=None): assert self.end != self.start dseg = self.end - self.start return dseg/abs(dseg) def normal(self, t=None): return -1j*self.unit_tangent(t) def curvature(self, t): return 0 def reversed(self): return Line(self.end, self.start) def intersect(self, other_seg, tol=None): if isinstance(other_seg, Line): assert other_seg.end != other_seg.start and self.end != self.start assert self != other_seg a = (self.start.real, self.end.real) b = (self.start.imag, self.end.imag) c = (other_seg.start.real, other_seg.end.real) d = (other_seg.start.imag, other_seg.end.imag) denom = ((a[1] - 
a[0])*(d[0] - d[1]) - (b[1] - b[0])*(c[0] - c[1])) if np.isclose(denom, 0): return [] t1 = (c[0]*(b[0] - d[1]) - c[1]*(b[0] - d[0]) - a[0]*(d[0] - d[1]))/denom t2 = -(a[1]*(b[0] - d[0]) - a[0]*(b[1] - d[0]) - c[0]*(b[0] - b[1]))/denom if 0 <= t1 <= 1 and 0 <= t2 <= 1: return [(t1, t2)] return [] elif isinstance(other_seg, QuadraticBezier): t2t1s = bezier_by_line_intersections(other_seg, self) return [(t1, t2) for t2, t1 in t2t1s] elif isinstance(other_seg, CubicBezier): t2t1s = bezier_by_line_intersections(other_seg, self) return [(t1, t2) for t2, t1 in t2t1s] elif isinstance(other_seg, Arc): t2t1s = other_seg.intersect(self) return [(t1, t2) for t2, t1 in t2t1s] elif isinstance(other_seg, Path): raise TypeError( "other_seg must be a path segment, not a Path object, use " "Path.intersect().") else: raise TypeError("other_seg must be a path segment.") def bbox(self): xmin = min(self.start.real, self.end.real) xmax = max(self.start.real, self.end.real) ymin = min(self.start.imag, self.end.imag) ymax = max(self.start.imag, self.end.imag) return xmin, xmax, ymin, ymax def point_to_t(self, point): if np.isclose(point, self.start, rtol=0, atol=1e-6): return 0.0 elif np.isclose(point, self.end, rtol=0, atol=1e-6): return 1.0 p = self.poly() t = (point - p[0]) / p[1] if np.isclose(t.imag, 0) and (t.real >= 0.0) and (t.real <= 1.0): return t.real return None def cropped(self, t0, t1): return Line(self.point(t0), self.point(t1))
MIT License
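To make the split behaviour concrete, a minimal sketch assuming svgpathtools is installed: the two returned pieces join exactly at self.point(t) and together cover the original segment.

# Minimal sketch assuming svgpathtools is installed: split a line at t = 0.25.
from svgpathtools import Line

seg = Line(0 + 0j, 10 + 10j)
first, second = seg.split(0.25)

# The pieces meet at seg.point(0.25) == 2.5+2.5j and cover the whole segment.
assert first.start == seg.start
assert first.end == second.start == seg.point(0.25)
assert second.end == seg.end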
districtdatalabs/minke
minke/corpus.py
BaleenPickledCorpusReader.docs
python
def docs(self, fileids=None, categories=None):
    fileids = self._resolve(fileids, categories)

    for path, enc, fileid in self.abspaths(fileids, True, True):
        with open(path, 'rb') as f:
            yield pickle.load(f)
Returns the document loaded from a pickled object for every file in the corpus. Similar to the BaleenCorpusReader, this uses a generator to achieve memory-safe iteration.
https://github.com/districtdatalabs/minke/blob/e41e8583f1386dbad5249aad740343c984832f1e/minke/corpus.py#L297-L309
import os import bs4 import time import json import nltk import codecs import pickle import nltk.data from six import string_types from nltk.tokenize import WordPunctTokenizer from nltk.corpus.reader.api import CorpusReader from nltk.corpus.reader.api import CategorizedCorpusReader from readability.readability import Document as Paper from readability.readability import Unparseable DOC_PATTERN = r'(?!\.)[a-z_\s]+/[a-f0-9]+\.json' PKL_PATTERN = r'(?!\.)[a-z_\s]+/[a-f0-9]+\.pickle' CAT_PATTERN = r'([a-z_\s]+)/.*' class BaleenCorpusReader(CategorizedCorpusReader, CorpusReader): TAGS = [ 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'li' ] def __init__(self, root, fileids=DOC_PATTERN, tags=None, word_tokenizer=WordPunctTokenizer(), sent_tokenizer=nltk.data.LazyLoader( 'tokenizers/punkt/english.pickle'), encoding='utf8', **kwargs): if not any(key.startswith('cat_') for key in kwargs.keys()): kwargs['cat_pattern'] = CAT_PATTERN CategorizedCorpusReader.__init__(self, kwargs) CorpusReader.__init__(self, root, fileids, encoding) self._word_tokenizer = word_tokenizer self._sent_tokenizer = sent_tokenizer self._good_tags = tags or self.TAGS def feeds(self): data = self.open('feeds.json') return json.load(data) def _resolve(self, fileids, categories): if fileids is not None and categories is not None: raise ValueError("Specify fileids or categories, not both") if categories is not None: return self.fileids(categories) return fileids def docs(self, fileids=None, categories=None): fileids = self._resolve(fileids, categories) for path, enc, fileid in self.abspaths(fileids, True, True): with codecs.open(path, 'r', encoding=enc) as f: yield json.load(f) def fields(self, fields, fileids=None, categories=None): if isinstance(fields, string_types): fields = [fields,] if len(fields) == 1: for doc in self.docs(fileids, categories): if fields[0] in doc: yield doc[fields[0]] else: for doc in self.docs(fileids, categories): yield { key: doc.get(key, None) for key in fields } def html(self, fileids=None, categories=None, readability=True): html = self.fields('content', fileids, categories) if readability: for doc in html: try: yield Paper(doc).summary() except Unparseable as e: print("Could not parse HTML: {}".format(e)) else: for doc in html: yield doc def paras(self, fileids=None, categories=None): for html in self.html(fileids, categories): soup = bs4.BeautifulSoup(html, 'lxml') for element in soup.find_all(self._good_tags): yield element.text def sents(self, fileids=None, categories=None): for paragraph in self.paras(fileids, categories): for sentence in self._sent_tokenizer.tokenize(paragraph): yield sentence def words(self, fileids=None, categories=None): for sentence in self.sents(fileids, categories): for token in self._word_tokenizer.tokenize(sentence): yield token def sizes(self, fileids=None, categories=None): fileids = self._resolve(fileids, categories) for path, enc, fileid in self.abspaths(fileids, True, True): yield os.path.getsize(path) def describe(self, fileids=None, categories=None): counts = nltk.FreqDist() tokens = nltk.FreqDist() started = time.time() for para in self.paras(fileids, categories): counts['paras'] += 1 for sent in self._sent_tokenizer.tokenize(para): counts['sents'] += 1 for word in self._word_tokenizer.tokenize(sent): counts['words'] += 1 tokens[word] += 1 n_fileids = len(self._resolve(fileids, categories) or self.fileids()) n_topics = len(self.categories(self._resolve(fileids, categories))) return { 'files': n_fileids, 'topics': n_topics, 'paras': counts['paras'], 'sents': 
counts['sents'], 'words': counts['words'], 'vocab': len(tokens), 'lexdiv': float(counts['words']) / float(len(tokens)), 'ppdoc': float(counts['paras']) / float(n_fileids), 'sppar': float(counts['sents']) / float(counts['paras']), 'secs': time.time() - started, } def describes(self, fileids=None, categories=None): return ( "Baleen corpus contains {files} files in {topics} categories.\n" "Structured as:\n" " {paras} paragraphs ({ppdoc:0.3f} mean paragraphs per file)\n" " {sents} sentences ({sppar:0.3f} mean sentences per paragraph).\n" "Word count of {words} with a vocabulary of {vocab} " "({lexdiv:0.3f} lexical diversity).\n" "Corpus scan took {secs:0.3f} seconds." ).format(**self.describe(fileids, categories)) class BaleenPickledCorpusReader(BaleenCorpusReader): def __init__(self, root, fileids=PKL_PATTERN, **kwargs): if not any(key.startswith('cat_') for key in kwargs.keys()): kwargs['cat_pattern'] = CAT_PATTERN CategorizedCorpusReader.__init__(self, kwargs) CorpusReader.__init__(self, root, fileids)
MIT License
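A hedged sketch of iterating the pickled reader above; the corpus root and category name are hypothetical placeholders for a directory of category subfolders containing *.pickle files laid out to match PKL_PATTERN.

# Hedged sketch: corpus root and category are hypothetical placeholders.
from minke.corpus import BaleenPickledCorpusReader

corpus = BaleenPickledCorpusReader('/path/to/pickled/corpus')
for doc in corpus.docs(categories=['data_science']):
    print(type(doc))  # whatever object was pickled for that fileid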
syssi/xiaomi_raw
custom_components/xiaomi_miio_raw/sensor.py
async_setup_platform
python
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    if DATA_KEY not in hass.data:
        hass.data[DATA_KEY] = {}

    host = config.get(CONF_HOST)
    token = config.get(CONF_TOKEN)

    _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])

    try:
        miio_device = Device(host, token)
        device_info = miio_device.info()
        model = device_info.model
        _LOGGER.info(
            "%s %s %s detected",
            model,
            device_info.firmware_version,
            device_info.hardware_version,
        )

        device = XiaomiMiioGenericDevice(miio_device, config, device_info)
    except DeviceException:
        raise PlatformNotReady

    hass.data[DATA_KEY][host] = device
    async_add_devices([device], update_before_add=True)

    @asyncio.coroutine
    def async_service_handler(service):
        method = SERVICE_TO_METHOD.get(service.service)
        params = {
            key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
        }
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        if entity_ids:
            devices = [
                device
                for device in hass.data[DATA_KEY].values()
                if device.entity_id in entity_ids
            ]
        else:
            devices = hass.data[DATA_KEY].values()

        update_tasks = []
        for device in devices:
            yield from getattr(device, method["method"])(**params)
            update_tasks.append(device.async_update_ha_state(True))

        if update_tasks:
            yield from asyncio.wait(update_tasks, loop=hass.loop)

    for service in SERVICE_TO_METHOD:
        schema = SERVICE_TO_METHOD[service].get("schema", SERVICE_SCHEMA)
        hass.services.async_register(
            DOMAIN, service, async_service_handler, schema=schema
        )
Set up the sensor from config.
https://github.com/syssi/xiaomi_raw/blob/3f4db780aa056642cada2ff8d31e09ea35659954/custom_components/xiaomi_miio_raw/sensor.py#L85-L142
import asyncio import logging from ast import literal_eval from functools import partial import homeassistant.helpers.config_validation as cv import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_NAME, CONF_TOKEN from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.entity import Entity from miio import Device, DeviceException _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Xiaomi Miio Device" DATA_KEY = "sensor.xiaomi_miio_raw" DOMAIN = "xiaomi_miio_raw" CONF_SENSOR_PROPERTY = "sensor_property" CONF_SENSOR_UNIT = "sensor_unit" CONF_DEFAULT_PROPERTIES = "default_properties" CONF_DEFAULT_PROPERTIES_GETTER = "default_properties_getter" CONF_MAX_PROPERTIES = "max_properties" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_SENSOR_PROPERTY): cv.string, vol.Optional(CONF_SENSOR_UNIT): cv.string, vol.Optional(CONF_DEFAULT_PROPERTIES_GETTER, default="get_prop"): cv.string, vol.Optional(CONF_DEFAULT_PROPERTIES, default=["power"]): vol.All( cv.ensure_list, [cv.string] ), vol.Optional(CONF_MAX_PROPERTIES, default=15): cv.positive_int, } ) ATTR_MODEL = "model" ATTR_FIRMWARE_VERSION = "firmware_version" ATTR_HARDWARE_VERSION = "hardware_version" ATTR_PROPERTIES = "properties" ATTR_SENSOR_PROPERTY = "sensor_property" ATTR_METHOD = "method" ATTR_PARAMS = "params" CMD_GET_PROPERTIES = "get_properties" SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids}) SERVICE_SCHEMA_SET_PROPERTIES = SERVICE_SCHEMA.extend( { vol.Optional(ATTR_PROPERTIES, default=["power"]): vol.All( cv.ensure_list, [cv.string] ) } ) SERVICE_SCHEMA_COMMAND = SERVICE_SCHEMA.extend( { vol.Required(ATTR_METHOD): cv.string, vol.Optional(ATTR_PARAMS, default=[]): vol.All(cv.ensure_list), } ) SERVICE_CUSTOM_TURN_ON = "sensor_turn_on" SERVICE_CUSTOM_TURN_OFF = "sensor_turn_off" SERVICE_SET_PROPERTIES = "sensor_set_properties" SERVICE_COMMAND = "sensor_raw_command" SERVICE_TO_METHOD = { SERVICE_CUSTOM_TURN_ON: {"method": "async_turn_on"}, SERVICE_CUSTOM_TURN_OFF: {"method": "async_turn_off"}, SERVICE_SET_PROPERTIES: { "method": "async_set_properties", "schema": SERVICE_SCHEMA_SET_PROPERTIES, }, SERVICE_COMMAND: {"method": "async_command", "schema": SERVICE_SCHEMA_COMMAND}, } @asyncio.coroutine
Apache License 2.0
linkedin/luminol
src/luminol/correlator.py
Correlator.is_correlated
python
def is_correlated(self, threshold=0):
    return self.correlation_result if self.correlation_result.coefficient >= threshold else False
Compare with a threshold to determine whether two timeseries correlate with each other.

:return: a CorrelationResult object if the two time series correlate, otherwise False.
https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/correlator.py#L113-L118
from luminol import exceptions, utils
from luminol.algorithms.correlator_algorithms.all import correlator_algorithms
from luminol.anomaly_detector import AnomalyDetector
from luminol.constants import CORRELATOR_ALGORITHM
from luminol.modules.time_series import TimeSeries


class Correlator(object):

    def __init__(self, time_series_a, time_series_b, time_period=None,
                 use_anomaly_score=False, algorithm_name=None, algorithm_params=None):
        self.time_series_a = self._load(time_series_a)
        self.time_series_b = self._load(time_series_b)
        if use_anomaly_score:
            self.time_series_a = self._get_anomaly_scores(self.time_series_a)
            self.time_series_b = self._get_anomaly_scores(self.time_series_b)
        if time_period:
            start_p, end_p = time_period
            try:
                self.time_series_a = self.time_series_a.crop(start_p, end_p)
                self.time_series_b = self.time_series_b.crop(start_p, end_p)
            except ValueError:
                raise exceptions.NotEnoughDataPoints
        self._sanity_check()
        self.algorithm_params = {'time_series_a': self.time_series_a,
                                 'time_series_b': self.time_series_b}
        self._get_algorithm_and_params(algorithm_name, algorithm_params)
        self._correlate()

    def _get_anomaly_scores(self, time_series):
        return AnomalyDetector(time_series, score_only=True).get_all_scores()

    def _load(self, time_series):
        if isinstance(time_series, TimeSeries):
            return time_series
        if isinstance(time_series, dict):
            return TimeSeries(time_series)
        return TimeSeries(utils.read_csv(time_series))

    def _get_algorithm_and_params(self, algorithm_name, algorithm_params):
        algorithm_name = algorithm_name or CORRELATOR_ALGORITHM
        try:
            self.algorithm = correlator_algorithms[algorithm_name]
        except KeyError:
            raise exceptions.AlgorithmNotFound('luminol.Correlator: ' + str(algorithm_name) + ' not found.')
        if algorithm_params:
            if not isinstance(algorithm_params, dict):
                raise exceptions.InvalidDataFormat('luminol.Correlator: algorithm_params passed is not a dictionary.')
            else:
                self.algorithm_params = self.algorithm_params.copy()
                self.algorithm_params.update(algorithm_params)

    def _sanity_check(self):
        if len(self.time_series_a) < 2 or len(self.time_series_b) < 2:
            raise exceptions.NotEnoughDataPoints('luminol.Correlator: Too few data points!')

    def _correlate(self):
        a = self.algorithm(**self.algorithm_params)
        self.correlation_result = a.run()

    def get_correlation_result(self):
        return self.correlation_result
Apache License 2.0
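A small sketch of is_correlated in use, with made-up timestamps and values; as the constructor in the context shows, Correlator accepts plain dicts and converts them to TimeSeries internally.

# Sketch with made-up data: two roughly co-moving series.
from luminol.correlator import Correlator

ts_a = {0: 1.0, 1: 2.5, 2: 1.8, 3: 4.2, 4: 3.9}
ts_b = {0: 1.1, 1: 2.4, 2: 1.9, 3: 4.0, 4: 3.8}

result = Correlator(ts_a, ts_b).is_correlated(threshold=0.8)
if result:
    print(result.coefficient)  # CorrelationResult when at or above threshold
else:
    print("not correlated at this threshold")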
kuri65536/python-for-android
python-modules/twisted/twisted/words/protocols/irc.py
ServerSupportedFeatures.isupport_TOPICLEN
python
def isupport_TOPICLEN(self, params):
    return _intOrDefault(params[0])
Maximum length of a topic that may be set.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/words/protocols/irc.py#L962-L966
import errno, os, random, re, stat, struct, sys, time, types, traceback import string, socket import warnings import textwrap from os import path from twisted.internet import reactor, protocol from twisted.persisted import styles from twisted.protocols import basic from twisted.python import log, reflect, text NUL = chr(0) CR = chr(015) NL = chr(012) LF = NL SPC = chr(040) MAX_COMMAND_LENGTH = 512 CHANNEL_PREFIXES = '&#!+' class IRCBadMessage(Exception): pass class IRCPasswordMismatch(Exception): pass class IRCBadModes(ValueError): def parsemsg(s): prefix = '' trailing = [] if not s: raise IRCBadMessage("Empty line.") if s[0] == ':': prefix, s = s[1:].split(' ', 1) if s.find(' :') != -1: s, trailing = s.split(' :', 1) args = s.split() args.append(trailing) else: args = s.split() command = args.pop(0) return prefix, command, args def split(str, length=80): return [chunk for line in str.split('\n') for chunk in textwrap.wrap(line, length)] def _intOrDefault(value, default=None): if value: try: return int(value) except (TypeError, ValueError): pass return default class UnhandledCommand(RuntimeError): class _CommandDispatcherMixin(object): prefix = None def dispatch(self, commandName, *args): def _getMethodName(command): return '%s_%s' % (self.prefix, command) def _getMethod(name): return getattr(self, _getMethodName(name), None) method = _getMethod(commandName) if method is not None: return method(*args) method = _getMethod('unknown') if method is None: raise UnhandledCommand("No handler for %r could be found" % (_getMethodName(commandName),)) return method(commandName, *args) def parseModes(modes, params, paramModes=('', '')): if len(modes) == 0: raise IRCBadModes('Empty mode string') if modes[0] not in '+-': raise IRCBadModes('Malformed modes string: %r' % (modes,)) changes = ([], []) direction = None count = -1 for ch in modes: if ch in '+-': if count == 0: raise IRCBadModes('Empty mode sequence: %r' % (modes,)) direction = '+-'.index(ch) count = 0 else: param = None if ch in paramModes[direction]: try: param = params.pop(0) except IndexError: raise IRCBadModes('Not enough parameters: %r' % (ch,)) changes[direction].append((ch, param)) count += 1 if len(params) > 0: raise IRCBadModes('Too many parameters: %r %r' % (modes, params)) if count == 0: raise IRCBadModes('Empty mode sequence: %r' % (modes,)) return changes class IRC(protocol.Protocol): buffer = "" hostname = None encoding = None def connectionMade(self): self.channels = [] if self.hostname is None: self.hostname = socket.getfqdn() def sendLine(self, line): if self.encoding is not None: if isinstance(line, unicode): line = line.encode(self.encoding) self.transport.write("%s%s%s" % (line, CR, LF)) def sendMessage(self, command, *parameter_list, **prefix): if not command: raise ValueError, "IRC message requires a command." 
if ' ' in command or command[0] == ':': raise ValueError, "Somebody screwed up, 'cuz this doesn't" " look like a command to me: %s" % command line = string.join([command] + list(parameter_list)) if prefix.has_key('prefix'): line = ":%s %s" % (prefix['prefix'], line) self.sendLine(line) if len(parameter_list) > 15: log.msg("Message has %d parameters (RFC allows 15):\n%s" % (len(parameter_list), line)) def dataReceived(self, data): lines = (self.buffer + data).split(LF) self.buffer = lines.pop() for line in lines: if len(line) <= 2: continue if line[-1] == CR: line = line[:-1] prefix, command, params = parsemsg(line) command = command.upper() self.handleCommand(command, prefix, params) def handleCommand(self, command, prefix, params): method = getattr(self, "irc_%s" % command, None) try: if method is not None: method(prefix, params) else: self.irc_unknown(prefix, command, params) except: log.deferr() def irc_unknown(self, prefix, command, params): raise NotImplementedError(command, prefix, params) def privmsg(self, sender, recip, message): self.sendLine(":%s PRIVMSG %s :%s" % (sender, recip, lowQuote(message))) def notice(self, sender, recip, message): self.sendLine(":%s NOTICE %s :%s" % (sender, recip, message)) def action(self, sender, recip, message): self.sendLine(":%s ACTION %s :%s" % (sender, recip, message)) def topic(self, user, channel, topic, author=None): if author is None: if topic is None: self.sendLine(':%s %s %s %s :%s' % ( self.hostname, RPL_NOTOPIC, user, channel, 'No topic is set.')) else: self.sendLine(":%s %s %s %s :%s" % ( self.hostname, RPL_TOPIC, user, channel, lowQuote(topic))) else: self.sendLine(":%s TOPIC %s :%s" % (author, channel, lowQuote(topic))) def topicAuthor(self, user, channel, author, date): self.sendLine(':%s %d %s %s %s %d' % ( self.hostname, 333, user, channel, author, date)) def names(self, user, channel, names): prefixLength = len(channel) + len(user) + 10 namesLength = 512 - prefixLength L = [] count = 0 for n in names: if count + len(n) + 1 > namesLength: self.sendLine(":%s %s %s = %s :%s" % ( self.hostname, RPL_NAMREPLY, user, channel, ' '.join(L))) L = [n] count = len(n) else: L.append(n) count += len(n) + 1 if L: self.sendLine(":%s %s %s = %s :%s" % ( self.hostname, RPL_NAMREPLY, user, channel, ' '.join(L))) self.sendLine(":%s %s %s %s :End of /NAMES list" % ( self.hostname, RPL_ENDOFNAMES, user, channel)) def who(self, user, channel, memberInfo): for info in memberInfo: (username, hostmask, server, nickname, flag, hops, realName) = info assert flag in ("H", "G") self.sendLine(":%s %s %s %s %s %s %s %s %s :%d %s" % ( self.hostname, RPL_WHOREPLY, user, channel, username, hostmask, server, nickname, flag, hops, realName)) self.sendLine(":%s %s %s %s :End of /WHO list." % ( self.hostname, RPL_ENDOFWHO, user, channel)) def whois(self, user, nick, username, hostname, realName, server, serverInfo, oper, idle, signOn, channels): self.sendLine(":%s %s %s %s %s %s * :%s" % ( self.hostname, RPL_WHOISUSER, user, nick, username, hostname, realName)) self.sendLine(":%s %s %s %s %s :%s" % ( self.hostname, RPL_WHOISSERVER, user, nick, server, serverInfo)) if oper: self.sendLine(":%s %s %s %s :is an IRC operator" % ( self.hostname, RPL_WHOISOPERATOR, user, nick)) self.sendLine(":%s %s %s %s %d %d :seconds idle, signon time" % ( self.hostname, RPL_WHOISIDLE, user, nick, idle, signOn)) self.sendLine(":%s %s %s %s :%s" % ( self.hostname, RPL_WHOISCHANNELS, user, nick, ' '.join(channels))) self.sendLine(":%s %s %s %s :End of WHOIS list." 
% ( self.hostname, RPL_ENDOFWHOIS, user, nick)) def join(self, who, where): self.sendLine(":%s JOIN %s" % (who, where)) def part(self, who, where, reason=None): if reason: self.sendLine(":%s PART %s :%s" % (who, where, reason)) else: self.sendLine(":%s PART %s" % (who, where)) def channelMode(self, user, channel, mode, *args): self.sendLine(":%s %s %s %s %s %s" % ( self.hostname, RPL_CHANNELMODEIS, user, channel, mode, ' '.join(args))) class ServerSupportedFeatures(_CommandDispatcherMixin): prefix = 'isupport' def __init__(self): self._features = { 'CHANNELLEN': 200, 'CHANTYPES': tuple('#&'), 'MODES': 3, 'NICKLEN': 9, 'PREFIX': self._parsePrefixParam('(ovh)@+%'), 'CHANMODES': self._parseChanModesParam(['b', '', 'lk'])} def _splitParamArgs(cls, params, valueProcessor=None): if valueProcessor is None: valueProcessor = lambda x: x def _parse(): for param in params: if ':' not in param: param += ':' a, b = param.split(':', 1) yield a, valueProcessor(b) return list(_parse()) _splitParamArgs = classmethod(_splitParamArgs) def _unescapeParamValue(cls, value): def _unescape(): parts = value.split('\\x') yield parts.pop(0) for s in parts: octet, rest = s[:2], s[2:] try: octet = int(octet, 16) except ValueError: raise ValueError('Invalid hex octet: %r' % (octet,)) yield chr(octet) + rest if '\\x' not in value: return value return ''.join(_unescape()) _unescapeParamValue = classmethod(_unescapeParamValue) def _splitParam(cls, param): if '=' not in param: param += '=' key, value = param.split('=', 1) return key, map(cls._unescapeParamValue, value.split(',')) _splitParam = classmethod(_splitParam) def _parsePrefixParam(cls, prefix): if not prefix: return None if prefix[0] != '(' and ')' not in prefix: raise ValueError('Malformed PREFIX parameter') modes, symbols = prefix.split(')', 1) symbols = zip(symbols, xrange(len(symbols))) modes = modes[1:] return dict(zip(modes, symbols)) _parsePrefixParam = classmethod(_parsePrefixParam) def _parseChanModesParam(self, params): names = ('addressModes', 'param', 'setParam', 'noParam') if len(params) > len(names): raise ValueError( 'Expecting a maximum of %d channel mode parameters, got %d' % ( len(names), len(params))) items = map(lambda key, value: (key, value or ''), names, params) return dict(items) _parseChanModesParam = classmethod(_parseChanModesParam) def getFeature(self, feature, default=None): return self._features.get(feature, default) def hasFeature(self, feature): return self.getFeature(feature) is not None def parse(self, params): for param in params: key, value = self._splitParam(param) if key.startswith('-'): self._features.pop(key[1:], None) else: self._features[key] = self.dispatch(key, value) def isupport_unknown(self, command, params): return tuple(params) def isupport_CHANLIMIT(self, params): return self._splitParamArgs(params, _intOrDefault) def isupport_CHANMODES(self, params): try: return self._parseChanModesParam(params) except ValueError: return self.getFeature('CHANMODES') def isupport_CHANNELLEN(self, params): return _intOrDefault(params[0], self.getFeature('CHANNELLEN')) def isupport_CHANTYPES(self, params): return tuple(params[0]) def isupport_EXCEPTS(self, params): return params[0] or 'e' def isupport_IDCHAN(self, params): return self._splitParamArgs(params) def isupport_INVEX(self, params): return params[0] or 'I' def isupport_KICKLEN(self, params): return _intOrDefault(params[0]) def isupport_MAXLIST(self, params): return self._splitParamArgs(params, _intOrDefault) def isupport_MODES(self, params): return _intOrDefault(params[0]) 
def isupport_NETWORK(self, params): return params[0] def isupport_NICKLEN(self, params): return _intOrDefault(params[0], self.getFeature('NICKLEN')) def isupport_PREFIX(self, params): try: return self._parsePrefixParam(params[0]) except ValueError: return self.getFeature('PREFIX') def isupport_SAFELIST(self, params): return True def isupport_STATUSMSG(self, params): return params[0] def isupport_TARGMAX(self, params): return dict(self._splitParamArgs(params, _intOrDefault))
Apache License 2.0
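Since isupport_TOPICLEN is normally reached through ServerSupportedFeatures.parse, a short sketch of that round trip using the classes shown in the context above:

# Sketch: feed a raw ISUPPORT parameter through the dispatcher shown above.
from twisted.words.protocols.irc import ServerSupportedFeatures

features = ServerSupportedFeatures()
features.parse(['TOPICLEN=120'])
print(features.getFeature('TOPICLEN'))  # 120, via _intOrDefault

features.parse(['TOPICLEN='])           # an empty value falls back to the default
print(features.getFeature('TOPICLEN'))  # None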