Dataset columns:

    repository_name       string, 7 to 107 chars
    function_path         string, 4 to 190 chars
    function_identifier   string, 1 to 236 chars
    language              string, 1 distinct value
    function              string, 9 to 647k chars
    docstring             string, 5 to 488k chars
    function_url          string, 71 to 285 chars
    context               string, 0 to 2.51M chars
    license               string, 5 distinct values
gabstopper/smc-python
smc/core/route.py
Routing.ospf_areas
python
def ospf_areas(self): return gateway_by_type(self, 'ospfv2_area')
OSPFv2 areas applied to a routing node. This can be called from the engine, interface or network level. Return is a tuple of (interface, network, ospfv2_area). This simplifies viewing and removing OSPF areas from the routing table::

    >>> for ospf in engine.routing.ospf_areas:
    ...   ospf
    ...
    (Routing(name=Interface 0,level=interface,type=physical_interface),
     Routing(name=network-1.1.1.0/24,level=network,type=network),
     Routing(name=area10,level=gateway,type=ospfv2_area))

.. seealso:: :meth:`~bgp_peerings` and :meth:`~netlinks` for obtaining other routing element types

:rtype: tuple(Routing)
https://github.com/gabstopper/smc-python/blob/54386c8a710727cc1acf69334a57b155d2f5408c/smc/core/route.py#L370-L389
import collections from smc.base.model import SubElement, Element, ElementCache from smc.base.util import element_resolver from smc.api.exceptions import InterfaceNotFound, ModificationAborted from smc.base.structs import SerializedIterable def flush_parent_cache(node): if node._parent is None: node._del_cache() return node._del_cache() flush_parent_cache(node._parent) class RoutingTree(SubElement): def __init__(self, data=None, **meta): super(RoutingTree, self).__init__(**meta) if data is not None: self.data = ElementCache(data) def __iter__(self): for node in self.data[self.typeof]: data = ElementCache(node) yield(self.__class__( href=data.get_link('self'), type=self.__class__.__name__, data=node, parent=self)) @property def name(self): return self.data.get('name') @property def nicid(self): return self.data.get('nic_id') @property def dynamic_nicid(self): return self.data.get('dynamic_nicid') @property def ip(self): return self.data.get('ip') @property def level(self): return self.data.get('level') @property def related_element_type(self): if 'related_element_type' in self.data: return self.data.get('related_element_type') return None if self.dynamic_nicid or (self.nicid and '.' in self.nicid) else Element.from_href(self.data.get('href')).typeof def as_tree(self, level=0): ret = '--' * level + repr(self) + '\n' for routing_node in self: ret += routing_node.as_tree(level+1) return ret def get(self, interface_id): for interface in self: if interface.nicid == str(interface_id) or interface.dynamic_nicid == str(interface_id): return interface raise InterfaceNotFound('Specified interface {} does not exist on ' 'this engine.'.format(interface_id)) def delete(self): super(RoutingTree, self).delete() flush_parent_cache(self._parent) def update(self): super(RoutingTree, self).update() flush_parent_cache(self._parent) def all(self): return [node for node in self] def __str__(self): return '{}(name={},level={},type={})'.format( self.__class__.__name__, self.name, self.level, self.related_element_type) def __repr__(self): return str(self) class Routing(RoutingTree): typeof = 'routing_node' def __init__(self, data=None, **meta): self._parent = meta.pop('parent', None) super(Routing, self).__init__(data, **meta) @property def routing_node_element(self): return from_meta(self) @property def bgp_peerings(self): return gateway_by_type(self, 'bgp_peering') @property def netlinks(self): return gateway_by_type(self, 'netlink') @property
Apache License 2.0
blacklight/platypush
platypush/plugins/camera/ir/mlx90640/__init__.py
CameraIrMlx90640Plugin.__init__
python
def __init__(self, rawrgb_path: Optional[str] = None, resolution: Tuple[int, int] = (32, 24),
             warmup_frames: Optional[int] = 5, **kwargs):
    super().__init__(device='mlx90640', resolution=resolution, warmup_frames=warmup_frames, **kwargs)

    if not rawrgb_path:
        rawrgb_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib', 'examples', 'rawrgb')
    rawrgb_path = os.path.abspath(os.path.expanduser(rawrgb_path))

    assert os.path.isfile(rawrgb_path), \
        'rawrgb executable not found. Please follow the documentation of this plugin to build it'

    self.rawrgb_path = rawrgb_path
    self._capture_proc = None
:param rawrgb_path: Specify it if the rawrgb executable compiled from https://github.com/pimoroni/mlx90640-library is in a folder other than `<directory of this file>/lib/examples`.
:param resolution: Device resolution (default: 32x24).
:param warmup_frames: Number of frames to be skipped on sensor initialization/warmup (default: 5).
:param kwargs: Extra parameters to be passed to :class:`platypush.plugins.camera.CameraPlugin`.
https://github.com/blacklight/platypush/blob/a5f1dc2638d7c6308325e0ca39dc7d5e262836aa/platypush/plugins/camera/ir/mlx90640/__init__.py#L35-L55
import os import subprocess from typing import Optional, Tuple from platypush.plugins import action from platypush.plugins.camera import CameraPlugin, Camera class CameraIrMlx90640Plugin(CameraPlugin):
MIT License
xiaoxiae/grafatko
examples/dfs.py
dfs
python
def dfs(graph: DrawableGraph):
    selected: Set[DrawableNode] = graph.get_selected_nodes()

    assert not graph.is_weighted(), "Graph mustn't be weighted."
    assert len(selected) != 0, "Some nodes must be selected."

    state: Dict[DrawableNode, State] = {}

    graph.set_default_animation_duration(300)

    for n in graph.get_nodes():
        state[n] = State.open if n in selected else State.unexplored
        graph.change_color(n, state[n].value, parallel=True)

    for node in selected:
        __dfs(node, graph, state)
A DFS implementation.
https://github.com/xiaoxiae/grafatko/blob/11f09c15567e34e37aee07d8356af25b679fb429/examples/dfs.py#L30-L49
from enum import Enum from grafatko import * class State(Enum): unexplored = Color.text() open = Color.red() closed = Color.background() current = Color.blue() def __dfs(node: DrawableNode, graph: DrawableGraph, state): for adjacent in node.get_adjacent_nodes(): if state[adjacent] is State.unexplored: state[adjacent] = State.open graph.change_color(adjacent, State.open.value) __dfs(adjacent, graph, state) graph.change_color(node, State.closed.value) state[node] = State.closed
MIT License
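The dfs example above drives grafatko's DrawableGraph and animates node colors. Purely for illustration, the following standalone sketch shows the same unexplored/open/closed bookkeeping over a plain adjacency dict, with no grafatko dependency (all names here are made up for the sketch):

from enum import Enum, auto

class State(Enum):
    UNEXPLORED = auto()
    OPEN = auto()
    CLOSED = auto()

def plain_dfs(adjacency: dict, start):
    # Track the exploration state of every node, starting from `start`.
    state = {n: State.UNEXPLORED for n in adjacency}
    state[start] = State.OPEN

    def visit(node):
        for neighbour in adjacency[node]:
            if state[neighbour] is State.UNEXPLORED:
                state[neighbour] = State.OPEN
                visit(neighbour)
        state[node] = State.CLOSED   # all descendants explored

    visit(start)
    return state

print(plain_dfs({"a": ["b", "c"], "b": ["c"], "c": []}, "a"))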
hazyresearch/butterfly
butterfly/permutation.py
Permutation.forward
python
def forward(self, input):
    prob = torch.sigmoid(self.logit)
    if self.share_logit:
        m = int(math.ceil(math.log2(self.size)))
        prob = prob.unsqueeze(0).expand(m - 1, 3)
    return permutation_mult(prob, input, increasing_stride=self.increasing_stride)
Parameters:
    input: (batch, size) if real or (batch, size, 2) if complex
Return:
    output: (batch, size) if real or (batch, size, 2) if complex
https://github.com/hazyresearch/butterfly/blob/7217b5d93bc78e1229fed3761bcc70d943f604b7/butterfly/permutation.py#L28-L39
import math import torch from torch import nn from .permutation_multiply import permutation_mult, permutation_mult_single class Permutation(nn.Module): def __init__(self, size, share_logit=False, increasing_stride=False): super().__init__() self.size = size m = int(math.ceil(math.log2(size))) assert size == 1 << m, "size must be a power of 2" self.share_logit = share_logit self.increasing_stride = increasing_stride self.logit = nn.Parameter(torch.randn(3)) if share_logit else nn.Parameter(torch.randn(m - 1, 3))
Apache License 2.0
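A hypothetical usage sketch for the Permutation module above, assuming the butterfly package and its compiled permutation_mult extension are importable; the input and output shapes follow the docstring:

import torch
from butterfly.permutation import Permutation

perm = Permutation(size=8, share_logit=True)   # size must be a power of 2
x = torch.randn(4, 8)                          # (batch, size), real-valued input
out = perm(x)                                  # calls forward(); same shape as x
print(out.shape)                               # torch.Size([4, 8])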
docusign/code-examples-python
app/admin/examples/eg005_audit_users/views.py
audit_users
python
def audit_users():
    args = Eg005Controller.get_args()
    try:
        results = Eg005Controller.worker(args)
        current_app.logger.info(f"""Auditing users""")
    except ApiException as err:
        return process_error(err)
    return render_template(
        "example_done.html",
        title="Audit users",
        h1="Audit users",
        message="Results from eSignUserManagement:getUserProfiles method:",
        json=json.dumps(json.dumps(results, default=str))
    )
1. Get required arguments
2. Call the worker method
3. Render the response
https://github.com/docusign/code-examples-python/blob/1e6ca12f6304d01e573a138e103028c23155196a/app/admin/examples/eg005_audit_users/views.py#L19-L41
import json from os import path from docusign_admin.client.api_exception import ApiException from flask import Blueprint, render_template, current_app from app.docusign import authenticate from app.error_handlers import process_error from .controller import Eg005Controller from ....ds_config import DS_CONFIG eg = "eg005" eg005 = Blueprint(eg, __name__) @eg005.route("/eg005", methods=["POST"]) @authenticate(eg=eg)
MIT License
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/extensions_v1beta1_api.py
ExtensionsV1beta1Api.delete_namespaced_ingress
python
def delete_namespaced_ingress(self, name, namespace, **kwargs):
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_ingress_with_http_info(name, namespace, **kwargs)
delete_namespaced_ingress  # noqa: E501

delete an Ingress  # noqa: E501

This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.delete_namespaced_ingress(name, namespace, async_req=True)
>>> result = thread.get()

:param async_req bool: execute request asynchronously
:param str name: name of the Ingress (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return: V1Status
         If the method is called asynchronously, returns the request thread.
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/api/extensions_v1beta1_api.py#L351-L381
from __future__ import absolute_import import re import six from kubernetes_asyncio.client.api_client import ApiClient from kubernetes_asyncio.client.exceptions import ( ApiTypeError, ApiValueError ) class ExtensionsV1beta1Api(object): def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_ingress(self, namespace, body, **kwargs): kwargs['_return_http_data_only'] = True return self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs) def create_namespaced_ingress_with_http_info(self, namespace, body, **kwargs): local_var_params = locals() all_params = [ 'namespace', 'body', 'pretty', 'dry_run', 'field_manager' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_ingress" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_ingress`") if self.api_client.client_side_validation and ('body' not in local_var_params or local_var_params['body'] is None): raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_ingress`") collection_formats = {} path_params = {} if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: query_params.append(('dryRun', local_var_params['dry_run'])) if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: query_params.append(('fieldManager', local_var_params['field_manager'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ExtensionsV1beta1Ingress', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_ingress(self, namespace, **kwargs): kwargs['_return_http_data_only'] = True return self.delete_collection_namespaced_ingress_with_http_info(namespace, **kwargs) def delete_collection_namespaced_ingress_with_http_info(self, namespace, **kwargs): local_var_params = locals() all_params = [ 'namespace', 'pretty', '_continue', 'dry_run', 'field_selector', 'grace_period_seconds', 'label_selector', 'limit', 'orphan_dependents', 'propagation_policy', 'resource_version', 'timeout_seconds', 'body' ] all_params.extend( [ 
'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_ingress" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_ingress`") collection_formats = {} path_params = {} if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if '_continue' in local_var_params and local_var_params['_continue'] is not None: query_params.append(('continue', local_var_params['_continue'])) if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: query_params.append(('dryRun', local_var_params['dry_run'])) if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: query_params.append(('fieldSelector', local_var_params['field_selector'])) if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: query_params.append(('labelSelector', local_var_params['label_selector'])) if 'limit' in local_var_params and local_var_params['limit'] is not None: query_params.append(('limit', local_var_params['limit'])) if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: query_params.append(('resourceVersion', local_var_params['resource_version'])) if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
Apache License 2.0
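A hedged usage sketch for the call above: in kubernetes_asyncio the generated API methods return awaitables, so they are awaited inside an event loop. This assumes a cluster and client old enough to still serve the extensions/v1beta1 Ingress API; the resource names are illustrative.

import asyncio
from kubernetes_asyncio import config
from kubernetes_asyncio.client import ApiClient
from kubernetes_asyncio.client.api.extensions_v1beta1_api import ExtensionsV1beta1Api

async def main():
    await config.load_kube_config()          # load credentials from ~/.kube/config
    async with ApiClient() as api_client:
        api = ExtensionsV1beta1Api(api_client)
        status = await api.delete_namespaced_ingress(
            name="demo-ingress", namespace="default")
        print(status)                        # V1Status on success

asyncio.run(main())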
helios-protocol/py-helios-node
helios/utils/datastructures.py
TaskQueue.__len__
python
def __len__(self) -> int: return len(self._tasks)
How many tasks are queued for completion
https://github.com/helios-protocol/py-helios-node/blob/691b378938f0a36bf8774dc1ee4e4370b6cf7c63/helios/utils/datastructures.py#L274-L276
from asyncio import ( AbstractEventLoop, Lock, PriorityQueue, Queue, QueueFull, ) from collections import defaultdict from enum import Enum from functools import total_ordering from itertools import count from typing import ( Any, Callable, Dict, Generator, Generic, Iterable, Set, Tuple, Type, TypeVar, ) from eth_utils import ( ValidationError, to_tuple, ) from eth_utils.toolz import ( concat, identity, ) from helios.utils.queues import ( queue_get_batch, queue_get_nowait, ) TFunc = TypeVar('TFunc') TPrerequisite = TypeVar('TPrerequisite', bound=Enum) TTask = TypeVar('TTask') TTaskID = TypeVar('TTaskID') class StaticMethod(Generic[TFunc]): def __get__(self, oself: Any, owner: Any) -> TFunc: return self._func def __set__(self, oself: Any, value: TFunc) -> None: self._func = value @total_ordering class SortableTask(Generic[TTask]): _order_fn: StaticMethod[Callable[[TTask], Any]] = None @classmethod def orderable_by_func(cls, order_fn: Callable[[TTask], Any]) -> 'Type[SortableTask[TTask]]': return type('PredefinedSortableTask', (cls, ), dict(_order_fn=staticmethod(order_fn))) def __init__(self, task: TTask) -> None: if self._order_fn is None: raise ValidationError("Must create this class with orderable_by_func before init") self._task = task _comparable_val = self._order_fn(task) try: self_equal = _comparable_val == _comparable_val self_lt = _comparable_val < _comparable_val self_gt = _comparable_val > _comparable_val if not self_equal or self_lt or self_gt: raise ValidationError( "The orderable function provided a comparable value that does not compare" f"validly to itself: equal to self? {self_equal}, less than self? {self_lt}, " f"greater than self? {self_gt}" ) except TypeError as exc: raise ValidationError( f"The provided order_fn {self._order_fn!r} did not return a sortable " f"value from {task!r}" ) from exc self._comparable_val = _comparable_val @property def original(self) -> TTask: return self._task def __eq__(self, other: Any) -> bool: if not isinstance(other, SortableTask): return False else: return self._comparable_val == other._comparable_val def __lt__(self, other: Any) -> bool: if not isinstance(other, SortableTask): return False else: return self._comparable_val < other._comparable_val class TaskQueue(Generic[TTask]): _task_wrapper: Type[SortableTask[TTask]] _in_progress: Dict[int, Tuple[TTask, ...]] _open_queue: 'PriorityQueue[SortableTask[TTask]]' _tasks: Set[TTask] def __init__( self, maxsize: int = 0, order_fn: Callable[[TTask], Any] = identity, *, loop: AbstractEventLoop = None) -> None: self._maxsize = maxsize self._full_lock = Lock(loop=loop) self._open_queue = PriorityQueue(maxsize, loop=loop) self._task_wrapper = SortableTask.orderable_by_func(order_fn) self._id_generator = count() self._tasks = set() self._in_progress = {} async def add(self, tasks: Tuple[TTask, ...]) -> None: if not isinstance(tasks, tuple): raise ValidationError(f"must pass a tuple of tasks to add(), but got {tasks!r}") already_pending = self._tasks.intersection(tasks) if already_pending: raise ValidationError( f"Duplicate tasks detected: {already_pending!r} are already present in the queue" ) remaining = tuple(sorted(map(self._task_wrapper, tasks))) while remaining: num_tasks = len(self._tasks) if self._maxsize <= 0: open_slots = len(remaining) elif num_tasks < self._maxsize: open_slots = self._maxsize - num_tasks else: await self._full_lock.acquire() num_tasks = len(self._tasks) open_slots = self._maxsize - num_tasks queueing, remaining = remaining[:open_slots], remaining[open_slots:] for task 
in queueing: try: self._open_queue.put_nowait(task) except QueueFull as exc: task_idx = queueing.index(task) qsize = self._open_queue.qsize() raise QueueFull( f'TaskQueue unsuccessful in adding task {task.original!r} ', f'because qsize={qsize}, ' f'num_tasks={num_tasks}, maxsize={self._maxsize}, open_slots={open_slots}, ' f'num queueing={len(queueing)}, len(_tasks)={len(self._tasks)}, task_idx=' f'{task_idx}, queuing={queueing}, original msg: {exc}', ) original_queued = tuple(task.original for task in queueing) self._tasks.update(original_queued) if self._full_lock.locked() and len(self._tasks) < self._maxsize: self._full_lock.release() def get_nowait(self, max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]: if self._open_queue.empty(): raise QueueFull("No tasks are available to get") else: ranked_tasks = queue_get_nowait(self._open_queue, max_results) pending_tasks = tuple(task.original for task in ranked_tasks) next_id = next(self._id_generator) self._in_progress[next_id] = pending_tasks return (next_id, pending_tasks) async def get(self, max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]: ranked_tasks = await queue_get_batch(self._open_queue, max_results) pending_tasks = tuple(task.original for task in ranked_tasks) next_id = next(self._id_generator) self._in_progress[next_id] = pending_tasks return (next_id, pending_tasks) def complete(self, batch_id: int, completed: Tuple[TTask, ...]) -> None: if batch_id not in self._in_progress: raise ValidationError(f"batch id {batch_id} not recognized, with tasks {completed!r}") attempted = self._in_progress.pop(batch_id) unrecognized_tasks = set(completed).difference(attempted) if unrecognized_tasks: self._in_progress[batch_id] = attempted raise ValidationError( f"cannot complete tasks {unrecognized_tasks!r} in this batch, only {attempted!r}" ) incomplete = set(attempted).difference(completed) for task in incomplete: self._open_queue.put_nowait(self._task_wrapper(task)) self._tasks.difference_update(completed) if self._full_lock.locked() and len(self._tasks) < self._maxsize: self._full_lock.release() def num_in_progress(self) -> int: return len(self._tasks) - self._open_queue.qsize()
MIT License
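An illustrative sketch of the TaskQueue API visible above (add, get, complete, __len__), assuming helios.utils.datastructures is importable from a py-helios-node checkout on a Python version the project supports:

import asyncio
from helios.utils.datastructures import TaskQueue

async def main():
    queue = TaskQueue(maxsize=10)
    await queue.add((3, 1, 2))            # tasks must be passed as a tuple
    print(len(queue))                     # 3 tasks queued for completion

    batch_id, tasks = await queue.get(max_results=2)
    queue.complete(batch_id, tasks)       # mark the fetched batch as done
    print(len(queue))                     # 1 task remaining

asyncio.run(main())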
tobgu/pyrsistent
pyrsistent/_pvector.py
v
python
def v(*elements): return pvector(elements)
Create a new persistent vector containing all parameters to this function.

>>> v1 = v(1, 2, 3)
>>> v1
pvector([1, 2, 3])
https://github.com/tobgu/pyrsistent/blob/cb5d8c1173bf59fac513cd5a29a7ef99ee756558/pyrsistent/_pvector.py#L703-L711
from abc import abstractmethod, ABCMeta from collections.abc import Sequence, Hashable from numbers import Integral import operator from pyrsistent._transformations import transform def _bitcount(val): return bin(val).count("1") BRANCH_FACTOR = 32 BIT_MASK = BRANCH_FACTOR - 1 SHIFT = _bitcount(BIT_MASK) def compare_pvector(v, other, operator): return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other) def _index_or_slice(index, stop): if stop is None: return index return slice(index, stop) class PythonPVector(object): __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__') def __new__(cls, count, shift, root, tail): self = super(PythonPVector, cls).__new__(cls) self._count = count self._shift = shift self._root = root self._tail = tail self._tail_offset = self._count - len(self._tail) return self def __len__(self): return self._count def __getitem__(self, index): if isinstance(index, slice): if index.start is None and index.stop is None and index.step is None: return self return _EMPTY_PVECTOR.extend(self.tolist()[index]) if index < 0: index += self._count return PythonPVector._node_for(self, index)[index & BIT_MASK] def __add__(self, other): return self.extend(other) def __repr__(self): return 'pvector({0})'.format(str(self.tolist())) def __str__(self): return self.__repr__() def __iter__(self): return iter(self.tolist()) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq) def __gt__(self, other): return compare_pvector(self, other, operator.gt) def __lt__(self, other): return compare_pvector(self, other, operator.lt) def __ge__(self, other): return compare_pvector(self, other, operator.ge) def __le__(self, other): return compare_pvector(self, other, operator.le) def __mul__(self, times): if times <= 0 or self is _EMPTY_PVECTOR: return _EMPTY_PVECTOR if times == 1: return self return _EMPTY_PVECTOR.extend(times * self.tolist()) __rmul__ = __mul__ def _fill_list(self, node, shift, the_list): if shift: shift -= SHIFT for n in node: self._fill_list(n, shift, the_list) else: the_list.extend(node) def tolist(self): the_list = [] self._fill_list(self._root, self._shift, the_list) the_list.extend(self._tail) return the_list def _totuple(self): return tuple(self.tolist()) def __hash__(self): return hash(self._totuple()) def transform(self, *transformations): return transform(self, transformations) def __reduce__(self): return pvector, (self.tolist(),) def mset(self, *args): if len(args) % 2: raise TypeError("mset expected an even number of arguments") evolver = self.evolver() for i in range(0, len(args), 2): evolver[args[i]] = args[i+1] return evolver.persistent() class Evolver(object): __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes', '_extra_tail', '_cached_leafs', '_orig_pvector') def __init__(self, v): self._reset(v) def __getitem__(self, index): if not isinstance(index, Integral): raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) if index < 0: index += self._count + len(self._extra_tail) if self._count <= index < self._count + len(self._extra_tail): return self._extra_tail[index - self._count] return PythonPVector._node_for(self, index)[index & BIT_MASK] def _reset(self, v): self._count = v._count self._shift = v._shift self._root = v._root self._tail = v._tail self._tail_offset = v._tail_offset self._dirty_nodes = {} 
self._cached_leafs = {} self._extra_tail = [] self._orig_pvector = v def append(self, element): self._extra_tail.append(element) return self def extend(self, iterable): self._extra_tail.extend(iterable) return self def set(self, index, val): self[index] = val return self def __setitem__(self, index, val): if not isinstance(index, Integral): raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) if index < 0: index += self._count + len(self._extra_tail) if 0 <= index < self._count: node = self._cached_leafs.get(index >> SHIFT) if node: node[index & BIT_MASK] = val elif index >= self._tail_offset: if id(self._tail) not in self._dirty_nodes: self._tail = list(self._tail) self._dirty_nodes[id(self._tail)] = True self._cached_leafs[index >> SHIFT] = self._tail self._tail[index & BIT_MASK] = val else: self._root = self._do_set(self._shift, self._root, index, val) elif self._count <= index < self._count + len(self._extra_tail): self._extra_tail[index - self._count] = val elif index == self._count + len(self._extra_tail): self._extra_tail.append(val) else: raise IndexError("Index out of range: %s" % (index,)) def _do_set(self, level, node, i, val): if id(node) in self._dirty_nodes: ret = node else: ret = list(node) self._dirty_nodes[id(ret)] = True if level == 0: ret[i & BIT_MASK] = val self._cached_leafs[i >> SHIFT] = ret else: sub_index = (i >> level) & BIT_MASK ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val) return ret def delete(self, index): del self[index] return self def __delitem__(self, key): if self._orig_pvector: l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist() l.extend(self._extra_tail) self._reset(_EMPTY_PVECTOR) self._extra_tail = l del self._extra_tail[key] def persistent(self): result = self._orig_pvector if self.is_dirty(): result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail) self._reset(result) return result def __len__(self): return self._count + len(self._extra_tail) def is_dirty(self): return bool(self._dirty_nodes or self._extra_tail) def evolver(self): return PythonPVector.Evolver(self) def set(self, i, val): if not isinstance(i, Integral): raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__) if i < 0: i += self._count if 0 <= i < self._count: if i >= self._tail_offset: new_tail = list(self._tail) new_tail[i & BIT_MASK] = val return PythonPVector(self._count, self._shift, self._root, new_tail) return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail) if i == self._count: return self.append(val) raise IndexError("Index out of range: %s" % (i,)) def _do_set(self, level, node, i, val): ret = list(node) if level == 0: ret[i & BIT_MASK] = val else: sub_index = (i >> level) & BIT_MASK ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val) return ret @staticmethod def _node_for(pvector_like, i): if 0 <= i < pvector_like._count: if i >= pvector_like._tail_offset: return pvector_like._tail node = pvector_like._root for level in range(pvector_like._shift, 0, -SHIFT): node = node[(i >> level) & BIT_MASK] return node raise IndexError("Index out of range: %s" % (i,)) def _create_new_root(self): new_shift = self._shift if (self._count >> SHIFT) > (1 << self._shift): new_root = [self._root, self._new_path(self._shift, self._tail)] new_shift += SHIFT else: new_root = self._push_tail(self._shift, self._root, self._tail) return new_root, new_shift def append(self, val): if 
len(self._tail) < BRANCH_FACTOR: new_tail = list(self._tail) new_tail.append(val) return PythonPVector(self._count + 1, self._shift, self._root, new_tail) new_root, new_shift = self._create_new_root() return PythonPVector(self._count + 1, new_shift, new_root, [val]) def _new_path(self, level, node): if level == 0: return node return [self._new_path(level - SHIFT, node)] def _mutating_insert_tail(self): self._root, self._shift = self._create_new_root() self._tail = [] def _mutating_fill_tail(self, offset, sequence): max_delta_len = BRANCH_FACTOR - len(self._tail) delta = sequence[offset:offset + max_delta_len] self._tail.extend(delta) delta_len = len(delta) self._count += delta_len return offset + delta_len def _mutating_extend(self, sequence): offset = 0 sequence_len = len(sequence) while offset < sequence_len: offset = self._mutating_fill_tail(offset, sequence) if len(self._tail) == BRANCH_FACTOR: self._mutating_insert_tail() self._tail_offset = self._count - len(self._tail) def extend(self, obj): l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj) if l: new_vector = self.append(l[0]) new_vector._mutating_extend(l[1:]) return new_vector return self def _push_tail(self, level, parent, tail_node): ret = list(parent) if level == SHIFT: ret.append(tail_node) return ret sub_index = ((self._count - 1) >> level) & BIT_MASK if len(parent) > sub_index: ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node) return ret ret.append(self._new_path(level - SHIFT, tail_node)) return ret def index(self, value, *args, **kwargs): return self.tolist().index(value, *args, **kwargs) def count(self, value): return self.tolist().count(value) def delete(self, index, stop=None): l = self.tolist() del l[_index_or_slice(index, stop)] return _EMPTY_PVECTOR.extend(l) def remove(self, value): l = self.tolist() l.remove(value) return _EMPTY_PVECTOR.extend(l) class PVector(metaclass=ABCMeta): @abstractmethod def __len__(self): @abstractmethod def __getitem__(self, index): @abstractmethod def __add__(self, other): @abstractmethod def __mul__(self, times): @abstractmethod def __hash__(self): @abstractmethod def evolver(self): @abstractmethod def mset(self, *args): @abstractmethod def set(self, i, val): @abstractmethod def append(self, val): @abstractmethod def extend(self, obj): @abstractmethod def index(self, value, *args, **kwargs): @abstractmethod def count(self, value): @abstractmethod def transform(self, *transformations): @abstractmethod def delete(self, index, stop=None): @abstractmethod def remove(self, value): _EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], []) PVector.register(PythonPVector) Sequence.register(PVector) Hashable.register(PVector) def python_pvector(iterable=()): return _EMPTY_PVECTOR.extend(iterable) try: import os if os.environ.get('PYRSISTENT_NO_C_EXTENSION'): pvector = python_pvector else: from pvectorc import pvector PVector.register(type(pvector())) except ImportError: pvector = python_pvector
MIT License
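A quick usage sketch for v() and the surrounding persistent-vector API; this is pyrsistent's public, documented behaviour:

from pyrsistent import v, pvector

v1 = v(1, 2, 3)            # same as pvector([1, 2, 3])
v2 = v1.append(4)          # returns a new vector; v1 is unchanged
v3 = v2.set(0, 99)

print(v1)                  # pvector([1, 2, 3])
print(v2)                  # pvector([1, 2, 3, 4])
print(v3)                  # pvector([99, 2, 3, 4])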
gregorch/ipet
ipet/concepts/Editable.py
Editable.equals
python
def equals(self, other):
    if other is None:
        return False
    elif other.__class__ != self.__class__:
        return False
    return self.attributesToStringDict() == other.attributesToStringDict()
returns True if other is of the same class and has the same attributes as self
https://github.com/gregorch/ipet/blob/e4135ff936d3aa447a960d854f9c51554e5ba7dc/ipet/concepts/Editable.py#L56-L65
import collections class Editable: editabletypes = [float, str, int, bool] def getEditableAttributes(self): return [elem for elem in dir(self) if not elem.startswith('__') and not isinstance(getattr(self, elem), collections.Callable) and type(getattr(self, elem)) in self.editabletypes] def editAttribute(self, attributename, newvalue): if not hasattr(self, attributename): raise AttributeError("Editable has no attribute named %s" % (attributename)) try: setter_method = getattr(self, 'set_' + attributename) setter_method(newvalue) except AttributeError: setattr(self, attributename, newvalue) def checkAttributes(self): return True def attributesToDict(self): return {elem:getattr(self, elem) for elem in self.getEditableAttributes()} def attributesToStringDict(self): return {elem:str(getattr(self, elem)) for elem in self.getEditableAttributes()} def getRequiredOptionsByAttribute(self, attr): return None def getAttrDocumentation(self, attr): for line in self.__init__.__doc__.splitlines(): if line.strip().startswith(attr): return line[line.index(":") + 1:] return None
MIT License
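A minimal sketch of the equals contract above: two objects compare equal only when they share a class and their editable attributes stringify identically. It assumes an ipet installation on a Python version it supports; the Limits class is made up for the example.

from ipet.concepts.Editable import Editable

class Limits(Editable):
    def __init__(self, timelimit: float = 3600.0, gap: float = 0.0):
        self.timelimit = timelimit
        self.gap = gap

a = Limits()
b = Limits()
c = Limits(timelimit=7200.0)

print(a.equals(b))     # True: same class, same attribute values
print(a.equals(c))     # False: timelimit differs
print(a.equals(None))  # False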
s-gupta/visual-concepts
sg_utils.py
load_variables
python
def load_variables(pickle_file_name):
    if os.path.exists(pickle_file_name):
        with open(pickle_file_name, 'rb') as f:
            d = cPickle.load(f)
        return d
    else:
        raise Exception('{:s} does not exist.'.format(pickle_file_name))
d = load_variables(pickle_file_name) Output: d is a dictionary of variables stored in the pickle file.
https://github.com/s-gupta/visual-concepts/blob/0e223639df399dc973f8c7005ee68c228afc9784/sg_utils.py#L42-L53
import numpy as np import cPickle import heapq import os from IPython.core.debugger import Tracer import scipy.io as scio import time def tic_toc_print(interval, string): global tic_toc_print_time_old if 'tic_toc_print_time_old' not in globals(): tic_toc_print_time_old = time.time() print string else: new_time = time.time() if new_time - tic_toc_print_time_old > interval: tic_toc_print_time_old = new_time; print string def mkdir_if_missing(output_dir): if not os.path.exists(output_dir): os.makedirs(output_dir) def save_variables(pickle_file_name, var, info, overwrite = False): if os.path.exists(pickle_file_name) and overwrite == False: raise Exception('{:s} exists and over write is false.'.format(pickle_file_name)) assert(type(var) == list); assert(type(info) == list); d = {} for i in xrange(len(var)): d[info[i]] = var[i] with open(pickle_file_name, 'wb') as f: cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL)
BSD 2-Clause Simplified License
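The snippet above is Python 2 (cPickle, print statements, xrange). Purely for illustration, a Python 3 equivalent of the save_variables/load_variables pair could look like this:

import os
import pickle
import tempfile

def save_variables(pickle_file_name, var, info, overwrite=False):
    """Store the variables in `var` under the names in `info` as a pickled dict."""
    if os.path.exists(pickle_file_name) and not overwrite:
        raise IOError('{:s} exists and overwrite is False.'.format(pickle_file_name))
    assert isinstance(var, list) and isinstance(info, list)
    with open(pickle_file_name, 'wb') as f:
        pickle.dump(dict(zip(info, var)), f, pickle.HIGHEST_PROTOCOL)

def load_variables(pickle_file_name):
    """Return the dictionary of variables stored in the pickle file."""
    if not os.path.exists(pickle_file_name):
        raise IOError('{:s} does not exist.'.format(pickle_file_name))
    with open(pickle_file_name, 'rb') as f:
        return pickle.load(f)

path = os.path.join(tempfile.gettempdir(), 'vars.pkl')
save_variables(path, [[1, 2, 3], 'hello'], ['numbers', 'greeting'], overwrite=True)
print(load_variables(path))   # {'numbers': [1, 2, 3], 'greeting': 'hello'}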
3dlg-hcvc/plan2scene
code/src/plan2scene/texture_prop/trainer/texture_prop_trainer.py
TexturePropTrainer._setup_crit
python
def _setup_crit(self): return get_crit(self.conf, self.system_conf.train)
Set up the loss function. :return: Loss function.
https://github.com/3dlg-hcvc/plan2scene/blob/cc3481f503fc096d1a50ea4fbcc668b2a3b75fb5/code/src/plan2scene/texture_prop/trainer/texture_prop_trainer.py#L100-L105
import logging from torch_geometric.data import DataLoader import os from config_parser import Config from plan2scene.common.house_parser import load_houses_with_embeddings, load_houses_with_textures, parse_houses from plan2scene.common.image_description import ImageSource from plan2scene.common.trainer.epoch_summary import EpochSummary from plan2scene.common.trainer.save_reason import SaveReason from plan2scene.crop_select.util import fill_textures from plan2scene.evaluation.evaluator import evaluate from plan2scene.evaluation.matchers import PairedMatcher, UnpairedMatcher from plan2scene.evaluation.metric_impl.substance_classifier.classifier import SubstanceClassifier from plan2scene.evaluation.metrics import FreqHistL1, HSLHistL1, ClassificationError, TileabilityMean from plan2scene.texture_gen.predictor import TextureGenPredictor from plan2scene.texture_gen.utils.io import load_conf_eval from plan2scene.texture_prop.houses_dataset import HouseDataset from plan2scene.texture_prop.trainer.abstract_trainer import AbstractTrainer from plan2scene.texture_prop.trainer.metric_description import MetricDescription, MetricResult from plan2scene.config_manager import ConfigManager import multiprocessing import os.path as osp import torch from plan2scene.texture_prop.utils import get_graph_generator, get_network, get_crit, get_optim, update_embeddings class TexturePropTrainer(AbstractTrainer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._epoch_counter = multiprocessing.Value("i", 0) self._combined_emb_dim = None self._tg_predictor = None def _setup_datasets(self) -> None: train_graph_generator = get_graph_generator(self.conf, self.system_conf.train_graph_generator, include_target=True) val_graph_generator = get_graph_generator(self.conf, self.system_conf.val_graph_generator, include_target=True) nt_graph_generator = get_graph_generator(self.conf, self.system_conf.val_graph_generator, include_target=False) self._train_dataset = HouseDataset(load_houses_with_embeddings(self.conf, data_split="train", drop_fraction="0.0", embeddings_path=osp.join(self.conf.data_paths.train_texture_prop_train_data, "surface_texture_embeddings")), graph_generator=train_graph_generator, epoch_counter=self._epoch_counter) self._train_dataloader = DataLoader(self._train_dataset, batch_size=self.system_conf.train.bs, shuffle=self.system_conf.train.shuffle_trainset, num_workers=self.num_workers) self._val_dataset = HouseDataset(load_houses_with_embeddings(self.conf, data_split="val", drop_fraction="0.0", embeddings_path=osp.join(self.conf.data_paths.train_texture_prop_val_data, "surface_texture_embeddings")), graph_generator=val_graph_generator) self._val_dataloader = DataLoader(self._val_dataset, batch_size=self.system_conf.train.bs) self._val_nt_dataset = HouseDataset(load_houses_with_embeddings(self.conf, data_split="val", drop_fraction="0.0", embeddings_path=osp.join(self.conf.data_paths.train_texture_prop_val_data, "surface_texture_embeddings")), graph_generator=nt_graph_generator) self._val_nt_dataloader = DataLoader(self._val_nt_dataset, batch_size=self.system_conf.train.bs) def _setup_extra(self) -> None: self._tg_predictor = TextureGenPredictor(conf=load_conf_eval(config_path=self.conf.texture_gen.texture_synth_conf), rgb_median_emb=self.conf.texture_gen.rgb_median_emb) self._tg_predictor.load_checkpoint(checkpoint_path=self.conf.texture_gen.checkpoint_path) self._combined_emb_dim = self.conf.texture_gen.combined_emb_dim if self.system_conf.graph_generator.include_enable_in_target: 
self._combined_emb_dim += 1 def _setup_metrics(self) -> list: return [ MetricDescription("color", PairedMatcher(HSLHistL1())), MetricDescription("subs", PairedMatcher(ClassificationError(SubstanceClassifier(classifier_conf=self.conf.metrics.substance_classifier)))), MetricDescription("freq", PairedMatcher(FreqHistL1())), MetricDescription("tile", UnpairedMatcher(TileabilityMean(metric_param=self.conf.metrics.tileability_mean_metric))), ] def _setup_network(self): return get_network(conf=self.conf, network_arch=self.system_conf.network_arch)
MIT License
programa-stic/barf-project
barf/arch/arm/arm.py
ArmInstruction.orig_instr
python
def orig_instr(self): return self._orig_instr
Get instruction string before parsing.
https://github.com/programa-stic/barf-project/blob/9547ef843b8eb021c2c32c140e36173c0b4eafa3/barf/arch/arm/arm.py#L411-L413
from __future__ import absolute_import from past.builtins import long from barf.arch import ARCH_ARM_MODE_ARM from barf.arch import ARCH_ARM_MODE_THUMB from barf.arch import ArchitectureInformation from barf.arch import AssemblyInstruction arm_alias_reg_map = { "a1": "r0", "a2": "r1", "a3": "r2", "a4": "r3", "v1": "r4", "v2": "r5", "v3": "r6", "v4": "r7", "v5": "r8", "v6": "r9", "v7": "r10", "v8": "r11", "sb": "r9", "sl": "r10", "fp": "r11", "ip": "r12", "sp": "r13", "lr": "r14", "pc": "r15", } ARM_COND_CODE_EQ = 0 ARM_COND_CODE_NE = 1 ARM_COND_CODE_CS = 2 ARM_COND_CODE_CC = 3 ARM_COND_CODE_MI = 4 ARM_COND_CODE_PL = 5 ARM_COND_CODE_VS = 6 ARM_COND_CODE_VC = 7 ARM_COND_CODE_HI = 8 ARM_COND_CODE_LS = 9 ARM_COND_CODE_GE = 10 ARM_COND_CODE_LT = 11 ARM_COND_CODE_GT = 12 ARM_COND_CODE_LE = 13 ARM_COND_CODE_AL = 14 ARM_COND_CODE_HS = 15 ARM_COND_CODE_LO = 16 cc_mapper = { "eq": ARM_COND_CODE_EQ, "ne": ARM_COND_CODE_NE, "cs": ARM_COND_CODE_CS, "hs": ARM_COND_CODE_HS, "cc": ARM_COND_CODE_CC, "lo": ARM_COND_CODE_LO, "mi": ARM_COND_CODE_MI, "pl": ARM_COND_CODE_PL, "vs": ARM_COND_CODE_VS, "vc": ARM_COND_CODE_VC, "hi": ARM_COND_CODE_HI, "ls": ARM_COND_CODE_LS, "ge": ARM_COND_CODE_GE, "lt": ARM_COND_CODE_LT, "gt": ARM_COND_CODE_GT, "le": ARM_COND_CODE_LE, "al": ARM_COND_CODE_AL, } cc_inverse_mapper = {v: k for k, v in cc_mapper.items()} ARM_LDM_STM_IA = 0 ARM_LDM_STM_IB = 1 ARM_LDM_STM_DA = 2 ARM_LDM_STM_DB = 3 ARM_LDM_STM_FD = 4 ARM_LDM_STM_FA = 5 ARM_LDM_STM_ED = 6 ARM_LDM_STM_EA = 7 ldm_stm_am_mapper = { "ia": ARM_LDM_STM_IA, "ib": ARM_LDM_STM_IB, "da": ARM_LDM_STM_DA, "db": ARM_LDM_STM_DB, "fd": ARM_LDM_STM_FD, "fa": ARM_LDM_STM_FA, "ed": ARM_LDM_STM_ED, "ea": ARM_LDM_STM_EA, } ldm_stm_am_inverse_mapper = {v: k for k, v in ldm_stm_am_mapper.items()} ldm_stack_am_to_non_stack_am = { ARM_LDM_STM_FA: ARM_LDM_STM_DA, ARM_LDM_STM_FD: ARM_LDM_STM_IA, ARM_LDM_STM_EA: ARM_LDM_STM_DB, ARM_LDM_STM_ED: ARM_LDM_STM_IB, } stm_stack_am_to_non_stack_am = { ARM_LDM_STM_ED: ARM_LDM_STM_DA, ARM_LDM_STM_EA: ARM_LDM_STM_IA, ARM_LDM_STM_FD: ARM_LDM_STM_DB, ARM_LDM_STM_FA: ARM_LDM_STM_IB, } ARM_MEMORY_INDEX_OFFSET = 0 ARM_MEMORY_INDEX_PRE = 1 ARM_MEMORY_INDEX_POST = 2 class ArmArchitectureInformation(ArchitectureInformation): regs_32 = [ ("r0", 32), ("r1", 32), ("r2", 32), ("r3", 32), ("r4", 32), ("r5", 32), ("r6", 32), ("r7", 32), ("r8", 32), ("r9", 32), ("r10", 32), ("r11", 32), ("r12", 32), ("r13", 32), ("r14", 32), ("r15", 32), ("apsr", 32), ] regs_32_alias = [ ("sp", 32), ("lr", 32), ("pc", 32), ("fp", 32), ] regs_flags = [ ("nf", 1), ("zf", 1), ("cf", 1), ("vf", 1), ] def __init__(self, architecture_mode): super(ArmArchitectureInformation, self).__init__() self._arch_mode = architecture_mode self._registers_all = [] self._registers_gp_all = [] self._registers_gp_base = [] self._registers_flags = [] self._registers_size = {} self._alias_mapper = {} self._load_registers() self._load_alias_mapper() @property def architecture_mode(self): return self._arch_mode @property def architecture_size(self): arch_size_map = { ARCH_ARM_MODE_ARM: 32, ARCH_ARM_MODE_THUMB: 32, } return arch_size_map[self._arch_mode] @property def operand_size(self): operand_size_map = { ARCH_ARM_MODE_ARM: 32, ARCH_ARM_MODE_THUMB: 32, } return operand_size_map[self._arch_mode] @property def address_size(self): address_size_map = { ARCH_ARM_MODE_ARM: 32, ARCH_ARM_MODE_THUMB: 32, } return address_size_map[self._arch_mode] @property def registers_all(self): return self._registers_all @property def registers_gp_all(self): return self._registers_gp_all 
@property def registers_gp_base(self): return self._registers_gp_base @property def registers_flags(self): return self._registers_flags @property def registers_size(self): return self._registers_size @property def alias_mapper(self): return self._alias_mapper @property def max_instruction_size(self): instruction_size_map = { ARCH_ARM_MODE_ARM: 4, ARCH_ARM_MODE_THUMB: 4, } return instruction_size_map[self._arch_mode] def instr_is_ret(self, instruction): is_ret = False if instruction.mnemonic == "pop" and ("pc" in str(instruction.operands[1]) or "r15" in str(instruction.operands[1])): is_ret = True if instruction.mnemonic == "ldr" and ("pc" in str(instruction.operands[0]) or "r15" in str(instruction.operands[0])): is_ret = True return is_ret def instr_is_call(self, instruction): return instruction.mnemonic == "bl" def instr_is_halt(self, instruction): return False def instr_is_branch(self, instruction): branch_instr = [ "bal", "bcs", "beq", "bge", "bgt", "bhs", "ble", "blo", "bls", "blt", "bne", "bpl", "bx", "b", "bhi", ] return instruction.mnemonic_full in branch_instr def instr_is_branch_cond(self, instruction): branch_instr = [ "bcs", "beq", "bge", "bgt", "bhi", "bhs", "ble", "blo", "bls", "blt", "bne", "bpl", ] return instruction.mnemonic_full in branch_instr def instr_is_syscall(self, instruction): raise NotImplementedError() def stack_pointer_register(self): return "r13" def instr_pointer_register(self): return "r15" def flags_register(self): return "apsr" def flags_default_value(self): return 0x0 def _load_alias_mapper(self): alias_mapper = { "fp": ("r11", 0), "sp": ("r13", 0), "lr": ("r14", 0), "pc": ("r15", 0), } flags_reg = "apsr" flags_mapper = { "nf": (flags_reg, 31), "zf": (flags_reg, 30), "cf": (flags_reg, 29), "vf": (flags_reg, 28), } alias_mapper.update(flags_mapper) self._alias_mapper = alias_mapper def _load_registers(self): registers_all = self.regs_flags + self.regs_32 + self.regs_32_alias registers_gp_all = self.regs_32 + self.regs_32_alias registers_gp_base = self.regs_32 for name, size in registers_all: self._registers_all.append(name) self._registers_size[name] = size for name, size in registers_gp_all: self._registers_gp_all.append(name) self._registers_size[name] = size for name, size in registers_gp_base: self._registers_gp_base.append(name) self._registers_size[name] = size self._registers_flags = [name for name, _ in self.regs_flags] def registers(self): return [] class ArmInstruction(AssemblyInstruction): __slots__ = [ '_orig_instr', '_mnemonic', '_operands', '_bytes', '_size', '_address', '_arch_mode', '_condition_code', '_update_flags', '_ldm_stm_addr_mode', ] def __init__(self, orig_instr, mnemonic, operands, arch_mode): super(ArmInstruction, self).__init__() self._orig_instr = orig_instr self._mnemonic = mnemonic self._operands = operands self._bytes = "" self._size = 4 self._address = None self._arch_mode = arch_mode self._condition_code = None self._update_flags = False self._ldm_stm_addr_mode = None @property
BSD 2-Clause Simplified License
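For illustration only, an ArmInstruction can be constructed directly to show the orig_instr property; in BARF these objects normally come out of the ARM parser, and the operand list is left empty here just for the sketch.

from barf.arch import ARCH_ARM_MODE_ARM
from barf.arch.arm.arm import ArmInstruction

instr = ArmInstruction("add r0, r1, r2", "add", [], ARCH_ARM_MODE_ARM)
print(instr.orig_instr)   # add r0, r1, r2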
alibaba/easytransfer
easytransfer/preprocessors/comprehension_preprocessor.py
MultiTurnComprehensionPreprocessor.convert_example_to_features
python
def convert_example_to_features(self, example): paragraph_text = convert_to_unicode(example[self.context_col_name]) question_id_list = convert_to_unicode(example[self.query_col_name]).split("||") questions = list(zip(question_id_list[::2], question_id_list[1::2])) answer_starts_list = convert_to_unicode(example[self.answer_col_name]).split("||") answers = list(zip(answer_starts_list[::2], [int(t) for t in answer_starts_list[1::2]])) if len(answers) != len(questions): assert len(questions) == len(answers) + 1, "Need put same number of history " "questions and answer." answers.append(("", -1)) is_training = False else: is_training = True doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) qas = [] for i, (question, answer) in enumerate(zip(questions, answers)): metadata = {'turn': i + 1, 'history_turns': [], 'tok_history_answer_markers': [], 'history_turns_text': []} end_index = i question_with_histories = '' start_index = 0 history_answer_marker = [] for history_turn, (each_answer, each_question) in enumerate( zip(answers[start_index: end_index], questions[start_index: end_index])): each_marker = [each_answer[1], each_answer[1] + len(each_answer[0]), each_answer[0]] history_answer_marker.append(each_marker) metadata['history_turns'].append(history_turn + start_index + 1) metadata['history_turns_text'].append((each_question[0], each_answer[0])) question_with_histories += question[0] qas.append({'id': question[1], 'question': question_with_histories, 'answers': [{'answer_start': answer[1], 'text': answer[0]}], 'history_answer_marker': history_answer_marker, 'metadata': metadata}) examples = list() for qa in qas: qas_id = qa["id"] question_text = qa["question"] if len(qa["answers"]) != 1: raise ValueError( "For training, each question should have exactly 1 answer.") answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if is_training and actual_text.find(cleaned_answer_text) == -1: tf.logging.warning("Could not find answer: '%s' vs. 
'%s'", actual_text, cleaned_answer_text) continue tok_history_answer_marker = [0] * len(doc_tokens) for marker_index, marker in enumerate(qa['history_answer_marker']): each_tok_history_answer_marker = [0] * len(doc_tokens) history_orig_answer_text = marker[2] history_answer_offset = marker[0] history_answer_length = len(history_orig_answer_text) history_start_position = char_to_word_offset[history_answer_offset] history_end_position = char_to_word_offset[history_answer_offset + history_answer_length - 1] history_actual_text = " ".join(doc_tokens[history_start_position:(history_end_position + 1)]) history_cleaned_answer_text = " ".join(whitespace_tokenize(history_orig_answer_text)) if history_actual_text.find(history_cleaned_answer_text) != -1: tok_history_answer_marker = tok_history_answer_marker[: history_start_position] + [1] * (history_end_position - history_start_position + 1) + tok_history_answer_marker[history_end_position + 1:] each_tok_history_answer_marker = each_tok_history_answer_marker[: history_start_position] + [1] * (history_end_position - history_start_position + 1) + each_tok_history_answer_marker[history_end_position + 1:] assert len(tok_history_answer_marker) == len(doc_tokens) assert len(each_tok_history_answer_marker) == len(doc_tokens) qa['metadata']['tok_history_answer_markers'].append(each_tok_history_answer_marker) else: tf.logging.warning("Could not find history answer: '%s' vs. '%s'", history_actual_text, history_cleaned_answer_text) example = CQAExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, history_answer_marker=tok_history_answer_marker, metadata=qa['metadata']) examples.append(example) features = [] for (example_index, example) in enumerate(examples): variations = self.convert_examples_to_example_variations([example], self.max_considered_history_turns) for example in variations: metadata = example.metadata query_tokens = self.config.tokenizer.tokenize(example.question_text) if len(query_tokens) > self.max_query_length: query_tokens = query_tokens[0:self.max_query_length] history_answer_marker = example.history_answer_marker tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] all_history_answer_marker = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = self.config.tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) all_history_answer_marker.append(history_answer_marker[i]) tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, self.config.tokenizer, example.orig_answer_text) max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3 _DocSpan = collections.namedtuple( "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, self.doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): marker = [] tokens = 
[] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") marker.append(0) segment_ids.append(0) for token in query_tokens: tokens.append(token) marker.append(0) segment_ids.append(0) tokens.append("[SEP]") marker.append(0) segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) marker.append(all_history_answer_marker[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") marker.append(0) segment_ids.append(1) input_ids = self.config.tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) while len(input_ids) < self.max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) marker.append(0) assert len(input_ids) == self.max_seq_length assert len(input_mask) == self.max_seq_length assert len(segment_ids) == self.max_seq_length assert len(marker) == self.max_seq_length if is_training: doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 if (example.start_position < doc_start or example.end_position < doc_start or example.start_position > doc_end or example.end_position > doc_end): continue doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset else: start_position = -1 end_position = -1 features.append( CQAInputFeatures( unique_id=str(uuid.uuid4()), example_index=example_index, doc_span_index=doc_span_index, doc_tokens=doc_tokens, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, history_answer_marker=marker, metadata=metadata, qas_id=example.qas_id)) return features
Convert single example to multiple input features

Args:
    items (`dict`): inputs from the reader

Returns:
    features (`list`): list of `CQAInputFeatures`
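The flattened function above builds overlapping document spans before emitting features; as a reading aid, here is a minimal standalone sketch of that sliding-window step (the toy token list, window size and stride are assumptions for illustration, not values from the source):

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(all_doc_tokens, max_tokens_for_doc, doc_stride):
    # Same loop as in convert_example_to_features: emit windows of at most
    # max_tokens_for_doc tokens, advancing by doc_stride until the end.
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
        length = min(len(all_doc_tokens) - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == len(all_doc_tokens):
            break
        start_offset += min(length, doc_stride)
    return doc_spans

print(make_doc_spans(list(range(10)), max_tokens_for_doc=4, doc_stride=2))
# [DocSpan(start=0, length=4), DocSpan(start=2, length=4),
#  DocSpan(start=4, length=4), DocSpan(start=6, length=4)]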
https://github.com/alibaba/easytransfer/blob/6909238c45b5708968f955b7d971a79b25434597/easytransfer/preprocessors/comprehension_preprocessor.py#L623-L885
import collections from copy import deepcopy import os import six import uuid import numpy as np import tensorflow as tf from .preprocessor import Preprocessor, PreprocessorConfig from .tokenization import convert_to_unicode, printable_text def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index class ComprehensionPreprocessorConfig(PreprocessorConfig): def __init__(self, **kwargs): super(ComprehensionPreprocessorConfig, self).__init__(**kwargs) self.input_schema = kwargs.get("input_schema") self.sequence_length = kwargs.get("sequence_length") self.first_sequence = kwargs.get("first_sequence") self.second_sequence = kwargs.get("second_sequence") self.label_name = kwargs.get("label_name") self.label_enumerate_values = kwargs.get("label_enumerate_values") class Example(object): def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=False): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (printable_text(self.qas_id)) s += ", question_text: %s" % ( printable_text(self.question_text)) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.start_position: s += ", end_position: %d" % (self.end_position) if self.start_position: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): def __init__(self, unique_id, qas_id, example_index, doc_span_index, doc_tokens, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.qas_id = qas_id self.example_index = example_index self.doc_span_index = doc_span_index self.doc_tokens = doc_tokens self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible class ComprehensionPreprocessor(Preprocessor): config_class = 
ComprehensionPreprocessorConfig def __init__(self, config, thread_num=1, **kwargs): super(ComprehensionPreprocessor, self).__init__(config, thread_num=thread_num, **kwargs) self.config = config self.max_seq_length = config.sequence_length self.context_col_name = config.first_sequence self.max_query_length = int(config.max_query_length) self.doc_stride = int(config.doc_stride) if hasattr(config, "doc_stride") else 128 self.query_col_name = config.second_sequence self.answer_col_name = config.label_name self.input_tensor_names = [] if "/" in config.pretrain_model_name_or_path: dirname = os.path.dirname(config.pretrain_model_name_or_path) self.language = dirname.split("-")[-1] else: self.language = config.pretrain_model_name_or_path.split("-")[-1] input_schema = config.input_schema self.input_tensor_names = [] for schema in input_schema.split(","): name = schema.split(":")[0] self.input_tensor_names.append(name) self.example_count = 0 def convert_example_to_features(self, items): paragraph_text = convert_to_unicode(items[self.context_col_name]) question_id_list = convert_to_unicode(items[self.query_col_name]).split("||") questions = list(zip(question_id_list[::2], question_id_list[1::2])) if self.answer_col_name in self.input_tensor_names: answer_starts_list = convert_to_unicode(items[self.answer_col_name]).split("||") answers = list(zip(answer_starts_list[::2], [int(t) for t in answer_starts_list[1::2]])) is_training = True else: answers = list() is_training = False if self.mode.startswith("predict"): is_training = False doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) examples = list() for idx, (question_text, qas_id), in enumerate(questions): start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: orig_answer_text, answer_offset = answers[idx] is_impossible = (answer_offset == -1) if not is_impossible: answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] actual_text = " ".join( doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: tf.logging.warning("Could not find answer: '%s' vs. 
'%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = Example( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) features = list() for (example_index, example) in enumerate(examples): query_tokens = self.config.tokenizer.tokenize(example.question_text) if len(query_tokens) > self.max_query_length: query_tokens = query_tokens[0:self.max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = self.config.tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, self.config.tokenizer, example.orig_answer_text) max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3 _DocSpan = collections.namedtuple( "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, self.doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = self.config.tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) while len(input_ids) < self.max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == self.max_seq_length assert len(input_mask) == self.max_seq_length assert len(segment_ids) == self.max_seq_length start_position = None end_position = None if is_training and not example.is_impossible: doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 unique_id = 
str(uuid.uuid4()) if self.example_count < 20: tf.logging.info("*** Example ***") tf.logging.info("unique_id: %s" % (unique_id)) tf.logging.info("example_index: %s" % (example_index)) tf.logging.info("doc_span_index: %s" % (doc_span_index)) tf.logging.info("tokens: %s" % " ".join( [printable_text(x) for x in tokens])) tf.logging.info("token_to_orig_map: %s" % " ".join( ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) tf.logging.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) ])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and example.is_impossible: tf.logging.info("impossible example") if is_training and not example.is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) tf.logging.info("start_position: %d" % (start_position)) tf.logging.info("end_position: %d" % (end_position)) tf.logging.info( "answer: %s" % (printable_text(answer_text))) self.example_count += 1 feature = InputFeatures( unique_id=unique_id, qas_id=example.qas_id, example_index=example_index, doc_span_index=doc_span_index, doc_tokens=doc_tokens, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible) features.append(feature) return features def call(self, inputs): items = [] for name in self.input_tensor_names: items.append(inputs[name]) return items def process(self, inputs): if isinstance(inputs, dict): inputs = [inputs] all_feature_list = [] for idx, example in enumerate(inputs): feature_list = self.convert_example_to_features(example) for feature in feature_list: for key, val in example.items(): setattr(feature, key, val) all_feature_list.extend(feature_list) ret = dict() for key in all_feature_list[0].__dict__.keys(): ret[key] = list() for features in all_feature_list: ret[key].append(getattr(features, key)) for key, val in ret.items(): ret[key] = np.array(val) return ret class CQAExample(object): def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, history_answer_marker=None, metadata=None): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.history_answer_marker = history_answer_marker self.metadata = metadata class CQAInputFeatures(object): def __init__(self, qas_id, unique_id, example_index, doc_span_index, tokens, doc_tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, start_position=None, end_position=None, history_answer_marker=None, metadata=None): self.qas_id = qas_id self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.doc_tokens = doc_tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.start_position = start_position self.end_position = end_position self.history_answer_marker = history_answer_marker self.metadata = metadata class 
MultiTurnComprehensionPreprocessor(Preprocessor): config_class = ComprehensionPreprocessorConfig def __init__(self, config, **kwargs): super(MultiTurnComprehensionPreprocessor, self).__init__(config, **kwargs) self.config = config self.doc_stride = int(config.doc_stride) if hasattr(config, "doc_stride") else 128 self.max_seq_length = int(config.sequence_length) if hasattr(config, "sequence_length") else 384 self.max_query_length = int(config.max_query_length) if hasattr(config, "max_query_length") else 64 self.max_considered_history_turns = int(config.max_considered_history_turns) if hasattr(config, "max_considered_history_turns") else 11 self.context_col_name = config.first_sequence self.query_col_name = config.second_sequence self.answer_col_name = config.label_name if "/" in config.pretrain_model_name_or_path: dirname = os.path.dirname(config.pretrain_model_name_or_path) self.language = dirname.split("-")[-1] else: self.language = config.pretrain_model_name_or_path.split("-")[-1] self.input_tensor_names = [] input_schema = config.input_schema for schema in input_schema.split(","): name = schema.split(":")[0] self.input_tensor_names.append(name) @staticmethod def convert_examples_to_example_variations(examples, max_considered_history_turns): new_examples = [] for example in examples: if len(example.metadata['tok_history_answer_markers']) == 0: example.metadata['history_turns'] = [] new_examples.append(example) else: for history_turn, marker, history_turn_text in zip( example.metadata['history_turns'][- max_considered_history_turns:], example.metadata['tok_history_answer_markers'][- max_considered_history_turns:], example.metadata['history_turns_text'][- max_considered_history_turns:]): each_new_example = deepcopy(example) each_new_example.history_answer_marker = marker each_new_example.metadata['history_turns'] = [history_turn] each_new_example.metadata['tok_history_answer_markers'] = [marker] each_new_example.metadata['history_turns_text'] = [history_turn_text] new_examples.append(each_new_example) return new_examples
Apache License 2.0
mjbrusso/audioplayer
audioplayer/abstractaudioplayer.py
AbstractAudioPlayer._dopause
python
def _dopause(self):
    pass
Platform dependent code
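For orientation, a hedged sketch of what a concrete backend supplying this hook could look like; NullAudioPlayer is hypothetical, does nothing, and the real abstract interface may require more hooks than are visible in the context below:

class NullAudioPlayer(AbstractAudioPlayer):
    # Hypothetical do-nothing backend, only to show where the platform
    # dependent hooks plug in.
    def __init__(self, filename):
        super().__init__(filename)

    def _load_player(self):
        return object()   # any non-None value counts as a loaded player

    def _do_setvolume(self, value):
        pass

    def _doplay(self, loop=False, block=False):
        pass

    def _dopause(self):
        pass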
https://github.com/mjbrusso/audioplayer/blob/1681eb9673f8cc8b26bbe646fda601c5caf3a27a/audioplayer/abstractaudioplayer.py#L117-L121
from abc import ABC, abstractmethod import os import sys class AudioPlayerError(Exception): pass class AbstractAudioPlayer(ABC): @abstractmethod def __init__(self, filename): self._player = None self._filename = filename self._volume = 100 if not os.path.sep in filename: self._fullfilename = os.path.join( os.getcwd(), filename) else: self._fullfilename = os.path.abspath(filename) if not os.path.exists(self._fullfilename): raise FileNotFoundError( 'File does not exist: "{}"'.format(self._fullfilename)) def __del__(self): if not self._player is None: self.close() @property def filename(self): return self._filename @property def fullfilename(self): return self._fullfilename @property def volume(self): return self._volume @volume.setter def volume(self, value): self._volume = max(min(value, 100), 0) if not self._player is None: self._do_setvolume(value) @abstractmethod def _do_setvolume(self, value): pass @abstractmethod def _load_player(self): pass def load_player(self): player = self._load_player() if player is None: raise AudioPlayerError( 'Error loading player for file "{}"'.format(self._fullfilename)) return player @abstractmethod def _doplay(self, loop=False, block=False): pass def play(self, loop=False, block=False): if self._player is None: self._player = self.load_player() self._do_setvolume(self._volume) self._doplay(loop, block) @abstractmethod
MIT License
google/encrypted-bigquery-client
src/query_lib.py
_WhereClause.Rewrite
python
def Rewrite(self):
    if not self._argument:
        return ''
    necessary_attributes = [
        'as_clause',
        'schema',
        'nsquare',
        'master_key',
        'table_id',
    ]
    self._CheckNecessaryAttributes(necessary_attributes)
    if not isinstance(self.as_clause, _AsClause):
        raise ValueError('Invalid as clause.')
    rewritten_argument = [copy(self._argument)]
    rewritten_argument = _RewritePostfixExpressions(
        rewritten_argument, self.as_clause.GetOriginalArgument(),
        self.schema, self.nsquare)[0]
    rewritten_argument = interpreter.RewriteSelectionCriteria(
        rewritten_argument, self.schema, self.master_key, self.table_id)
    return 'WHERE %s' % rewritten_argument
Rewrites where argument to send to BigQuery server.

Returns:
    Rewritten where clause.

Raises:
    ValueError: Invalid clause type or necessary argument not given.
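Since the clause's collaborators are injected as keyword arguments by the base _Clause class (see the context below), a minimal hedged sketch of constructing one inside this module might look like the following; the empty argument short-circuits Rewrite(), so no real schema or key material is needed for the illustration:

# Runnable within the module above: _Clause stores the argument and turns the
# extra keyword arguments into attributes, which is exactly what Rewrite()'s
# _CheckNecessaryAttributes() later looks for.
clause = _WhereClause([], as_clause=_AsClause({}), schema={}, nsquare=None,
                      master_key='key', table_id='t')
print clause.Rewrite()   # empty argument -> returns ''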
https://github.com/google/encrypted-bigquery-client/blob/ff5ec7cd27d4c305cd039639d058a3be47c12604/src/query_lib.py#L354-L381
from copy import copy import hashlib import uuid import bigquery_client import common_util as util import ebq_crypto as ecrypto import query_interpreter as interpreter class QueryManifest(object): HASH_PREFIX = 'HP' RECORDS_WRITTEN = 'recordsWritten' def __init__(self, uuid_cls=None, hash_cls=None): self.manifest = { 'columns': {}, 'column_aliases': {}, 'statistics': { self.RECORDS_WRITTEN: None, }, } self.uuid = str(uuid_cls()) self.base_hasher = hash_cls(self.uuid) @classmethod def Generate(cls, unused_schema=None): qm = cls(uuid_cls=uuid.uuid4, hash_cls=hashlib.sha256) return qm def _SetRawColumnAlias(self, column_name, column_alias): self.manifest['column_aliases'][column_alias] = column_name self.manifest['columns'][column_name] = column_alias def _GetRawColumnName(self, column_alias): return self.manifest['column_aliases'].get(column_alias, None) def GenerateColumnAlias(self, column_name): hasher = self.base_hasher.copy() hasher.update(column_name) return '%s%s' % (self.HASH_PREFIX, hasher.hexdigest()) def GetColumnAliasForName(self, column_name, extras=None, generate=True): column_alias = self.manifest['columns'].get(column_name, None) if column_alias is None: if generate: column_alias = self.GenerateColumnAlias(column_name) if extras is None: self._SetRawColumnAlias(column_name, column_alias) else: for extra_column_name in extras: self._SetRawColumnAlias(extra_column_name, column_alias) self._SetRawColumnAlias(column_name, column_alias) return column_alias def GetColumnNameForAlias(self, column_alias): return self._GetRawColumnName(column_alias) @property def statistics(self): return self.manifest['statistics'] def __str__(self): return '[%s:%s %s]' % (self.__class__.__name__, id(self), self.manifest) class _Clause(object): _argument = None def __init__(self, argument, argument_type=None, **extra_args): super(_Clause, self).__init__() if argument_type is not None: if not isinstance(argument, argument_type): raise ValueError('Invalid argument. Expected type %s.' % argument_type) self._argument = argument for key, value in extra_args.iteritems(): setattr(self, key, value) def Rewrite(self): return '' def GetOriginalArgument(self): return self._argument def _CheckNecessaryAttributes(self, attributes): for attribute in attributes: try: getattr(self, attribute) except AttributeError: raise ValueError('Need %s attribute to rewrite.' 
% attribute) class _AsClause(_Clause): def __init__(self, argument, **extra_args): super(_AsClause, self).__init__(argument, dict, **extra_args) def ConstructColumnNames(self, column_names): rewritten_column_names = [] for i in range(len(column_names)): single_column = {} if i in self._argument: single_column['name'] = self._argument[i] else: single_column['name'] = str(interpreter.ToInfix(copy(column_names[i]))) rewritten_column_names.append(single_column) return rewritten_column_names class _WithinClause(_Clause): def __init__(self, argument, **extra_args): super(_WithinClause, self).__init__(argument, dict, **extra_args) class _SelectClause(_Clause): _unencrypted_queries = None _encrypted_queries = None _aggregation_queries = None _table_expressions = None def __init__(self, argument, **extra_args): super(_SelectClause, self).__init__(argument, list, **extra_args) def Rewrite(self): if not self._argument: raise ValueError('Cannot have empty select clause.') necessary_attributes = [ 'as_clause', 'within_clause', 'schema', 'nsquare', ] self._CheckNecessaryAttributes(necessary_attributes) if not isinstance(self.as_clause, _AsClause): raise ValueError('Invalid as clause.') if not isinstance(self.within_clause, _WithinClause): raise ValueError('Invalid within clause.') manifest = getattr(self, 'manifest', None) temp_argument = copy(self._argument) self._table_expressions = ( _RewritePostfixExpressions(temp_argument, self.as_clause.GetOriginalArgument(), self.schema, self.nsquare)) self._unencrypted_queries = ( _ExtractUnencryptedQueries(self._table_expressions, self.within_clause.GetOriginalArgument())) self._aggregation_queries = ( _ExtractAggregationQueries(self._table_expressions, self.within_clause.GetOriginalArgument(), self.as_clause.GetOriginalArgument())) self._encrypted_queries = _ExtractFieldQueries( self._table_expressions, self.as_clause.GetOriginalArgument(), manifest) all_queries = copy(self._aggregation_queries) all_queries.extend(self._encrypted_queries) all_queries.extend(self._unencrypted_queries) return 'SELECT %s' % ', '.join(map(str, all_queries)) def GetAggregationQueries(self): if self._aggregation_queries is None: raise ValueError('Queries have yet to be retrieved. Rewrite query first.') return self._aggregation_queries def GetEncryptedQueries(self): if self._encrypted_queries is None: raise ValueError('Queries have yet to be retrieved. Rewrite query first.') return self._encrypted_queries def GetUnencryptedQueries(self): if self._unencrypted_queries is None: raise ValueError('Queries have yet to be retrieved. Rewrite query first.') return self._unencrypted_queries def GetTableExpressions(self): if self._table_expressions is None: raise ValueError('Queries have yet to be retrieved. 
Rewrite query first.') return self._table_expressions class _FromClause(_Clause): def __init__(self, argument, **extra_args): super(_FromClause, self).__init__(argument, list, **extra_args) def Rewrite(self): if not self._argument: return '' return 'FROM %s' % ', '.join(self._argument) class _JoinClause(_Clause): def __init__(self, argument, **extra_args): super(_JoinClause, self).__init__(argument, list, **extra_args) def Rewrite(self): if not self._argument: return '' joins = [''] for one_join in self._argument: join_expr = copy(one_join)[1:] join_expr = interpreter.RewriteSelectionCriteria( join_expr, self.schema, self.master_key, self.table_id) join_clause = '%s ON %s' % (one_join[0], join_expr) joins.append(join_clause) return ' JOIN '.join(joins)[1:] class _HavingClause(_Clause): def __init__(self, argument, **extra_args): super(_HavingClause, self).__init__(argument, list, **extra_args) def Rewrite(self): if not self._argument: return '' necessary_attributes = [ 'as_clause', 'schema', 'nsquare', 'master_key', 'table_id', ] self._CheckNecessaryAttributes(necessary_attributes) if not isinstance(self.as_clause, _AsClause): raise ValueError('Invalid as clause.') rewritten_argument = [copy(self._argument)] rewritten_argument = _RewritePostfixExpressions( rewritten_argument, self.as_clause.GetOriginalArgument(), self.schema, self.nsquare)[0] for token in rewritten_argument: if not isinstance(token, util.AggregationQueryToken): continue if token.startswith(util.PAILLIER_SUM_PREFIX): raise bigquery_client.BigqueryInvalidQueryError( 'Cannot include SUM/AVG on homomorphic encryption in HAVING ' 'clause.', None, None, None) elif token.startswith(util.GROUP_CONCAT_PREFIX): field = token.split(util.GROUP_CONCAT_PREFIX)[1][:-1] if util.IsEncrypted(field): raise bigquery_client.BigqueryInvalidQueryError( 'Cannot include GROUP_CONCAT on encrypted field in HAVING ' 'clause.', None, None, None) rewritten_argument = interpreter.RewriteSelectionCriteria( rewritten_argument, self.schema, self.master_key, self.table_id) return 'HAVING %s' % rewritten_argument class _WhereClause(_Clause): def __init__(self, argument, **extra_args): super(_WhereClause, self).__init__(argument, list, **extra_args)
Apache License 2.0
blacktear23/py-servicebus
servicebus/pika/heartbeat.py
HeartbeatChecker._has_received_data
python
def _has_received_data(self):
    return not self._bytes_received == self.bytes_received_on_connection
Returns True if the connection has received data.

:rtype: bool
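The check compares a byte-counter snapshot against the connection's live counter; a toy illustration of that idea (not pika itself, the names are made up) is:

class FakeConnection(object):
    bytes_received = 0

conn = FakeConnection()
snapshot = conn.bytes_received    # what HeartbeatChecker keeps in _bytes_received
conn.bytes_received += 512        # some traffic arrives before the next interval
has_received_data = not snapshot == conn.bytes_received
print(has_received_data)          # True -> the interval was not idle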
https://github.com/blacktear23/py-servicebus/blob/c3d6ccf0b2abf131ca1060d89f3c0d4ab08481e4/servicebus/pika/heartbeat.py#L124-L130
import logging from servicebus.pika import frame LOGGER = logging.getLogger(__name__) class HeartbeatChecker(object): MAX_IDLE_COUNT = 2 _CONNECTION_FORCED = 320 _STALE_CONNECTION = "Too Many Missed Heartbeats, No reply in %i seconds" def __init__(self, connection, interval, idle_count=MAX_IDLE_COUNT): self._connection = connection self._interval = interval self._max_idle_count = idle_count self._bytes_received = 0 self._bytes_sent = 0 self._heartbeat_frames_received = 0 self._heartbeat_frames_sent = 0 self._idle_byte_intervals = 0 self._timer = None self._setup_timer() @property def active(self): return self._connection.heartbeat is self @property def bytes_received_on_connection(self): return self._connection.bytes_received @property def connection_is_idle(self): return self._idle_byte_intervals >= self._max_idle_count def received(self): LOGGER.debug('Received heartbeat frame') self._heartbeat_frames_received += 1 def send_and_check(self): LOGGER.debug('Received %i heartbeat frames, sent %i', self._heartbeat_frames_received, self._heartbeat_frames_sent) if self.connection_is_idle: return self._close_connection() if not self._has_received_data: self._idle_byte_intervals += 1 else: self._idle_byte_intervals = 0 self._update_counters() self._send_heartbeat_frame() self._start_timer() def stop(self): if self._timer: LOGGER.debug('Removing timeout for next heartbeat interval') self._connection.remove_timeout(self._timer) self._timer = None def _close_connection(self): LOGGER.debug('Connection is idle, %i stale byte intervals', self._idle_byte_intervals) duration = self._max_idle_count * self._interval text = HeartbeatChecker._STALE_CONNECTION % duration self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text) self._connection._adapter_disconnect() self._connection._on_disconnect(HeartbeatChecker._CONNECTION_FORCED, text) @property
BSD 3-Clause New or Revised License
spyder-ide/qtsass
qtsass/cli.py
create_parser
python
def create_parser():
    parser = argparse.ArgumentParser(
        prog='QtSASS',
        description='Compile a Qt compliant CSS file from a SASS stylesheet.',
    )
    parser.add_argument(
        'input',
        type=str,
        help='The SASS stylesheet file.',
    )
    parser.add_argument(
        '-o',
        '--output',
        type=str,
        help='The path of the generated Qt compliant CSS file.',
    )
    parser.add_argument(
        '-w',
        '--watch',
        action='store_true',
        help='If set, recompile when the source file changes.',
    )
    parser.add_argument(
        '-d',
        '--debug',
        action='store_true',
        help='Set the logging level to DEBUG.',
    )
    return parser
Create qtsass's cli parser.
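A short usage sketch of the parser defined above (the file names are placeholders):

parser = create_parser()
args = parser.parse_args(['style.scss', '-o', 'style.css', '--watch'])
print(args.input, args.output, args.watch, args.debug)
# style.scss style.css True False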
https://github.com/spyder-ide/qtsass/blob/06f15194239ba430d5a9a144c1cc7c6b03e585a3/qtsass/cli.py#L38-L67
from __future__ import absolute_import, print_function import argparse import logging import os import sys import time from qtsass.api import ( compile, compile_dirname, compile_filename, enable_logging, watch, ) _log = logging.getLogger(__name__)
MIT License
cgatoxford/cgatpipelines
CGATPipelines/Pipeline/Execution.py
joinStatements
python
def joinStatements(statements, infile):
    prefix = getTempFilename()
    pattern = "%s_%%i" % prefix

    result = []
    for x, statement in enumerate(statements):
        if x == 0:
            s = re.sub("@IN@", infile, statement)
        else:
            s = re.sub("@IN@", pattern % x, statement)

        s = re.sub("@OUT@", pattern % (x + 1), s).strip()

        if s.endswith(";"):
            s = s[:-1]
        result.append(s)

    assert prefix != ""
    result.append("rm -f %s*" % prefix)

    result = "; checkpoint ; ".join(result)
    return result
join a chain of statements into a single statement.

Each statement contains an @IN@ or a @OUT@ placeholder or both.
These will be replaced by the names of successive temporary files.

In the first statement, @IN@ is replaced with `infile`. The last
statement should move @IN@ to outfile.

Arguments
---------
statements : list
    A list of command line statements.
infile : string
    Filename of the first data set.

Returns
-------
statement : string
    A single command line statement.
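A hedged usage sketch of the chaining described above, assuming the Pipeline module's environment (for getTempFilename) is available; the commands and file names are invented for illustration:

statements = [
    "gunzip < @IN@ > @OUT@",     # @IN@ -> data.txt.gz, @OUT@ -> <tmp>_1
    "sort @IN@ > sorted.txt",    # @IN@ -> <tmp>_1
]
statement = joinStatements(statements, "data.txt.gz")
# roughly: "gunzip < data.txt.gz > <tmp>_1; checkpoint ;
#           sort <tmp>_1 > sorted.txt; checkpoint ; rm -f <tmp>*"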
https://github.com/cgatoxford/cgatpipelines/blob/a34d460b5fc64984f6da0acb18aee43c5e02d5fc/CGATPipelines/Pipeline/Execution.py#L215-L259
import importlib import os import pickle import pipes import re import subprocess import sys import CGAT.Experiment as E import CGAT.IOTools as IOTools from CGAT.IOTools import snip as snip from CGATPipelines.Pipeline.Utils import getCallerLocals from CGATPipelines.Pipeline.Parameters import substituteParameters from CGATPipelines.Pipeline.Files import getTempFilename from CGATPipelines.Pipeline.Cluster import * try: import drmaa HAS_DRMAA = True except (ImportError, RuntimeError): HAS_DRMAA = False PARAMS = {} GLOBAL_SESSION = None def _pickle_args(args, kwargs): use_args = ["to_cluster", "logfile", "job_options", "job_queue", "job_threads", "job_memory"] submit_args = {} for arg in use_args: if arg in kwargs: submit_args[arg] = kwargs[arg] del kwargs[arg] args_file = getTempFilename(shared=True) pickle.dump([args, kwargs], open(args_file, "wb")) return (submit_args, args_file) def startSession(): global GLOBAL_SESSION GLOBAL_SESSION = drmaa.Session() GLOBAL_SESSION.initialize() return GLOBAL_SESSION def closeSession(): if GLOBAL_SESSION is not None: GLOBAL_SESSION.exit() def shellquote(statement): _quote_pos = re.compile('(?=[^-0-9a-zA-Z_./\n])') if statement: return _quote_pos.sub('\\\\', statement).replace('\n', "'\n'") else: return "''" def execute(statement, **kwargs): if not kwargs: kwargs = getCallerLocals() kwargs = dict(list(PARAMS.items()) + list(kwargs.items())) E.info("running %s" % (statement % kwargs)) if "cwd" not in kwargs: cwd = PARAMS["workingdir"] else: cwd = kwargs["cwd"] statement = " ".join(re.sub("\t+", " ", statement).split("\n")).strip() if statement.endswith(";"): statement = statement[:-1] os.environ.update({'BASH_ENV': os.path.join(os.environ['HOME'],'.bashrc')}) process = subprocess.Popen(statement % kwargs, cwd=cwd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy()) stdout, stderr = process.communicate() if process.returncode != 0: raise OSError( "Child was terminated by signal %i: \n" "The stderr was: \n%s\n%s\n" % (-process.returncode, stderr, statement)) return stdout, stderr def buildStatement(**kwargs): if "statement" not in kwargs: raise ValueError("'statement' not defined") local_params = substituteParameters(**kwargs) try: statement = kwargs.get("statement") % local_params except KeyError as msg: raise KeyError( "Error when creating command: could not " "find %s in dictionaries" % msg) except ValueError as msg: raise ValueError("Error when creating command: %s, statement = %s" % ( msg, kwargs.get("statement"))) statement = " ".join(re.sub("\t+", " ", statement).split("\n")).strip() if statement.endswith(";"): statement = statement[:-1] return statement
MIT License
demisto/demisto-py
demisto_client/demisto_api/models/system_agent.py
SystemAgent.servercontext
python
def servercontext(self):
    return self._servercontext
Gets the servercontext of this SystemAgent.  # noqa: E501

:return: The servercontext of this SystemAgent.  # noqa: E501
:rtype: list[int]
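A minimal round trip with the generated model class shown in the context below (the values are arbitrary):

agent = SystemAgent(servercontext=[1, 2, 3])
print(agent.servercontext)   # [1, 2, 3]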
https://github.com/demisto/demisto-py/blob/95d29e07693d27c133f7fe6ef9da13e4b6dbf542/demisto_client/demisto_api/models/system_agent.py#L51-L58
import pprint import re import six class SystemAgent(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'servercontext': 'list[int]' } attribute_map = { 'servercontext': 'servercontext' } def __init__(self, servercontext=None): self._servercontext = None self.discriminator = None if servercontext is not None: self.servercontext = servercontext @property
Apache License 2.0
awused/cynaoko
naoko/lib/database.py
NaokoDB.getPlaylist
python
def getPlaylist(self, name, columns, blockedFlags=0b11, blockedSites=[]):
    _tables = {'videos': set(['type', 'id', 'duration_ms', 'title'])}
    legal_cols = set.union(_tables['videos'])

    if not columns:
        columns = legal_cols

    if not set(columns) <= legal_cols:
        raise ProgrammingError("Argument columns: %s not a subset of video "
                               "columns %s" % (columns, _tables['videos']))

    col_repl = {'id': 'v.id', 'type': 'v.type'}
    sel_cols = []
    for col in columns:
        sel_col = col
        if col in col_repl:
            sel_col = col_repl[col]
        sel_cols.append(sel_col)

    sel_list = ', '.join(sel_cols)
    binds = (name,)

    sel_cls = ("SELECT " + sel_list + " FROM playlists p INNER JOIN videos v "
               "ON p.id = v.id AND p.type = v.type ")
    where_cls = " WHERE p.name = ? "
    order_cls = " ORDER BY p.idx ASC"

    if isinstance(blockedFlags, (int, long)):
        where_cls += " AND v.flags & ? = 0 "
        binds += (blockedFlags,)

    if isinstance(blockedSites, (list, tuple)):
        sites_cls = " AND v.type NOT IN ("
        flg = False
        for b in blockedSites:
            if isinstance(b, (str, unicode)) and len(b) == 2:
                if flg:
                    sites_cls += ","
                sites_cls += "?"
                binds += (b,)
                flg = True
        if flg:
            where_cls += sites_cls + ") "

    sql = sel_cls + where_cls + order_cls
    self.logger.debug("Generated SQL %s" % (sql))

    with self.execute(sql, binds) as cur:
        return cur.fetchall()
Retrieves an ordered playlist.

name must be the name of a stored playlist.

columns must be an iterable specifying which video columns to retrieve.
By default all columns will be retrieved. See naoko.sql for database schema.

blockedFlags and blockedSites filter out flagged videos and videos from
the given two-letter site codes.

The statement executed against the database will roughly be

SELECT <columns> FROM playlists p
    INNER JOIN videos v ON p.id = v.id AND p.type = v.type
    WHERE p.name = ? [AND <flag/site filters>]
    ORDER BY p.idx ASC
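A hedged usage sketch; the database file, playlist name and blocked-site code are placeholders, and the module is Python 2:

with NaokoDB("naoko.db") as db:
    rows = db.getPlaylist("favourites", ["type", "id", "title"],
                          blockedFlags=0b11, blockedSites=["vm"])
    for vid_type, vid_id, title in rows:
        print vid_type, vid_id, title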
https://github.com/awused/cynaoko/blob/23e3f287814535e80268a0fa8dfb6d415bb4a9a2/naoko/lib/database.py#L505-L573
import sqlite3 import logging import time try: from settings import LOG_LEVEL except: print "Defaulting to LOG_LEVEL debug [%s]" % (__name__) LOG_LEVEL = logging.DEBUG ProgrammingError = sqlite3.ProgrammingError DatabaseError = sqlite3.DatabaseError def dbopen(fn): def dbopen_func(self, *args, **kwargs): if self._state == "open": return fn(self, *args, **kwargs) elif self._state == "closed": raise DatabaseError("Cannot perform operations on closed database") else: raise DatabaseError("Database must be open to perform operations") return dbopen_func class NaokoCursor(sqlite3.Cursor): _id = 0 def __init__(self, *args, **kwargs): self.logger = logging.getLogger('naokocursor') self.id = NaokoCursor._id NaokoCursor._id += 1 sqlite3.Cursor.__init__(self, *args, **kwargs) def __enter__(self): return self def __str__(self): return "NaokoCursor #%d" % self.id def __exit__(self, exc_type, exc_value, traceback): self.close() if not self.logger: return if exc_type and exc_value: self.logger.error("%s closed %s: %s" % (self, exc_type, exc_value)) else: self.logger.debug("%s closed" % self) class NaokoDB(object): _dbinfo_sql = "SELECT name FROM sqlite_master WHERE type='table'" _version_sql = "SELECT value FROM metadata WHERE key='dbversion'" _required_tables = set(["video_stats", "videos", "user_count", "bans", "chat"]) _foreign_keys = False def __enter__(self): return self def __init__(self, database): self.logger = logging.getLogger("database") self.logger.setLevel(LOG_LEVEL) self.db_file = database self.con = sqlite3.connect(database, timeout=60) self._state = "open" self.initdb() tables = self._getTables() if not self._required_tables <= tables: raise ValueError("Database '%s' is non-empty but " "does not provide required tables %s" % (database, self._required_tables - tables)) def __exit__(self, exc_type, exc_val, exc_tb): self._state = "closed" if self.con: self.con.close() if exc_type and exc_val: self.logger.error("Database '%s' closed due to %s: %s" % (self.db_file, exc_type, exc_val)) else: self.logger.debug("Database '%s' closed" % self.db_file) def _getTables(self): with self.execute(self._dbinfo_sql) as cur: return set([table[0] for table in cur.fetchall()]) def _getVersion(self): tables = self._getTables() if 'metadata' in tables: try: with self.execute(self._version_sql) as cur: version = cur.fetchone()[0] self.logger.debug("Database version is %s" % version) return int(version) except TypeError as e: self.logger.debug(e) self.logger.debug("Database version is 3 (empty metadata table)") self.executeDML("INSERT INTO metadata(key, value) VALUES ('dbversion', '3')") self.commit() return 3 elif tables : self.logger.debug("Database version is 1 (no metadata table)") return 1 def _update(self): version = self._getVersion() if version < 2: stmts = ["CREATE TABLE IF NOT EXISTS videos(type TEXT, id TEXT, duration_ms INTEGER, title TEXT, primary key(type, id))", "CREATE TABLE IF NOT EXISTS video_stats(type TEXT, id TEXT, uname TEXT, FOREIGN KEY(type, id) REFERENCES video(type, id))", "CREATE INDEX IF NOT EXISTS video_stats_idx ON video_stats(type, id)", "CREATE TABLE IF NOT EXISTS bans(reason TEXT, auth INTEGER, uname TEXT, timestamp INTEGER, mod TEXT)", "CREATE TABLE IF NOT EXISTS user_count(timestamp INTEGER, count INTEGER, primary key(timestamp, count))", "CREATE TABLE IF NOT EXISTS chat(timestamp INTEGER, username TEXT, userid TEXT, msg TEXT, protocol TEXT, channel TEXT, flags TEXT)", "CREATE INDEX IF NOT EXISTS chat_ts ON chat(timestamp)", "CREATE INDEX IF NOT EXISTS chat_user ON 
chat(username)", "ALTER TABLE videos ADD COLUMN flags INTEGER DEFAULT 0 NOT NULL", "CREATE TABLE metadata(key TEXT, value TEXT, PRIMARY KEY(key))", "INSERT INTO metadata(key, value) VALUES ('dbversion', '2')"] for stmt in stmts: self.executeDML(stmt) self.commit() if version < 3: stmts = ["UPDATE chat SET timestamp = timestamp * 1000", "UPDATE metadata SET value = '3' WHERE key = 'dbversion'"] for stmt in stmts: self.executeDML(stmt) self.commit() if version < 4: stmts = ["UPDATE user_count SET timestamp = count, count = timestamp WHERE timestamp < 1000", "UPDATE metadata SET value = '4' WHERE key = 'dbversion'"] for stmt in stmts: self.executeDML(stmt) self.commit() if version < 5: stmts = ["CREATE TABLE IF NOT EXISTS playlistmeta(name TEXT, uname TEXT, length INTEGER, timestamp INTEGER, PRIMARY KEY(name))", "CREATE TABLE IF NOT EXISTS playlists(name TEXT, idx INTEGER, type TEXT, id TEXT, PRIMARY KEY(name, idx), FOREIGN KEY(name) " + "REFERENCES playlistmeta(name), FOREIGN KEY(type, id) REFERENCES videos(type, id))", "CREATE TABLE IF NOT EXISTS video_stats2(type TEXT, id TEXT, uname TEXT, FOREIGN KEY(type, id) REFERENCES videos(type, id))", "INSERT INTO video_stats2(type, id, uname) SELECT type, id, uname FROM video_stats", "DROP INDEX video_stats_idx", "ALTER TABLE video_stats RENAME TO video_stats_backup", "ALTER TABLE video_stats2 RENAME TO video_stats", "CREATE INDEX IF NOT EXISTS video_stats_idx ON video_stats(type, id)", "UPDATE metadata SET value = '5' WHERE key = 'dbversion'"] for stmt in stmts: self.executeDML(stmt) self.commit() if version < 6: stmts = ["DROP TABLE video_stats_backup", "UPDATE videos SET flags = flags & ~1 WHERE type = 'yt'", "UPDATE metadata SET value = '6' WHERE key = 'dbversion'"] for stmt in stmts: self.executeDML(stmt) self.commit() self._foreign_keys = True @dbopen def initdb(self): self._update() assert self._getVersion() >= 6 @dbopen def cursor(self): cur = self.con.cursor(NaokoCursor) if self._foreign_keys: cur.execute("PRAGMA foreign_keys = ON"); return cur @dbopen def execute(self, stmt, *args): cur = self.cursor() cur.execute(stmt, *args) return cur @dbopen def executeDML(self, stmt, *args): with self.execute(stmt, *args): pass @dbopen def commit(self): self.con.commit() @dbopen def executescript(self, script): cur = self.cursor() cur.executescript(script) return cur @dbopen def fetch(self, stmt, *args): with self.execute(stmt, *args) as cur: return cur.fetchall() def close(self): self.__exit__(None, None, None) def getVideos(self, num=None, columns=None, orderby=None, duration_s=None, title=None, user=None, blockedFlags=0b11, blockedSites = []): _tables = {'videos' : set(['type', 'id', 'duration_ms', 'title']), 'video_stats' : set(['type', 'id', 'uname'])} legal_cols = set.union(_tables['videos'], _tables['video_stats']) if not columns: columns = legal_cols if not set(columns) <= legal_cols: raise ProgrammingError("Argument columns: %s not a subset of video " "columns %s" % (columns, _tables['videos'])) col_repl = {'id' : 'v.id', 'type' : 'v.type'} sel_cols = [] for col in columns: sel_col = col if col in col_repl: sel_col = col_repl[col] sel_cols.append(sel_col) binds = () sel_list = ', '.join(sel_cols) sel_cls = 'SELECT DISTINCT %s' % (sel_list) from_cls = ' FROM video_stats vs, videos v ' where_cls = ' WHERE vs.type = v.type AND vs.id = v.id ' if isinstance(duration_s, (int, long)): where_cls += " AND v.duration_ms <= ? " binds += (duration_s*1000,) if isinstance(title, (str, unicode)): where_cls += " AND v.title like ? 
COLLATE NOCASE " binds += ("%%%s%%" % (title),) if isinstance(user, (str, unicode)): where_cls += " AND vs.uname like ? COLLATE NOCASE " binds += (user,) if isinstance(blockedFlags, (int, long)): where_cls += " AND v.flags & ? = 0 " binds += (blockedFlags,) if isinstance(blockedSites, (list, tuple)): sites_cls = " AND v.type NOT IN (" flg = False for b in blockedSites: if isinstance(b, (str, unicode)) and len(b) == 2: if flg: sites_cls += "," sites_cls += "?" binds += (b,) flg = True if flg: where_cls += sites_cls + ") " sql = sel_cls + from_cls + where_cls def matchOrderBy(this, other): valid = this == other if not valid: valid = (len(this) == 2) and (len(other) == 2) for i in range(len(this)): valid = valid and (this[i].lower() == other[1].lower()) return valid valid = this and other and (this[0].lower() != other[0].lower()) if valid and (len(this) == 2) and this[1] and other[1]: return valid and (this[1].lower() == other[1].lower()) else: return valid and (this[1] == other[1]) if orderby is None: pass elif matchOrderBy(orderby, ('id', 'ASC')): sql += ' ORDER BY v.id ASC' elif matchOrderBy(orderby, ('id', 'DESC')): sql += ' ORDER BY v.id DESC' elif matchOrderBy(orderby, ('RANDOM()',)): sql += ' ORDER BY RANDOM()' else: raise ProgrammingError("Invalid orderby %s" % (orderby)) if isinstance(num, (int, long)): sql += ' LIMIT ?' binds += (num,) elif num != None: raise ProgrammingError("Invalid num %s" % (num)) self.logger.debug("Generated SQL %s" % (sql)) with self.execute(sql, binds) as cur: return cur.fetchall() def insertChat(self, msg, username, userid=None, timestamp=None, protocol='ST', channel=None, flags=None): if userid is None: userid = username if timestamp is None: timestamp = int(time.time() * 1000) chat = (timestamp, username, userid, msg, protocol, channel, flags) with self.cursor() as cur: self.logger.debug("Inserting chat message %s" % (chat,)) cur.execute("INSERT INTO chat VALUES(?, ?, ?, ?, ?, ?, ?)", chat) self.commit() def getQuote(self, nick, excludes=[], protocol=None): select_cls = "SELECT username, msg, timestamp, protocol FROM chat " where_cls = " WHERE msg NOT LIKE '/me%%' AND msg NOT LIKE '$%%' " limit_cls = " ORDER BY RANDOM() LIMIT 1" binds = () if protocol: where_cls += " AND protocol = ? " binds = (protocol,) if nick: where_cls += " AND username = ? COLLATE NOCASE " binds += (nick,) else: for e in excludes: where_cls += " AND (username != ? or protocol != ?) " binds += e sql = select_cls + where_cls + limit_cls rows = self.fetch(sql, binds) if rows: return rows[0] else: return None def flagVideo(self, site, vid, flags): self.logger.debug("Flagging %s:%s with flags %s", site, vid, bin(flags)) self.executeDML("UPDATE videos SET flags=(flags | ?) WHERE type = ? AND id = ?", (flags, site, vid)) self.commit() def unflagVideo(self, site, vid, flags): self.executeDML("UPDATE videos SET flags=(flags & ?) WHERE type = ? 
AND id = ?", (~flags, site, vid)) self.commit() def insertVideo(self, site, vid, title, dur, nick): self.logger.debug("Inserting %s into videos", (site, vid, int(dur * 1000), title, 0)) self.logger.debug("Inserting %s into video_stats", (site, vid, nick)) self.executeDML("INSERT OR IGNORE INTO videos VALUES(?, ?, ?, ?, ?)", (site, vid, int(dur * 1000), title, 0)) self.executeDML("INSERT INTO video_stats VALUES(?, ?, ?)", (site, vid, nick)) self.commit() self.unflagVideo(site, vid, 1) def insertPlaylist(self, name, vids, nick): self.deletePlaylist(name) timestamp = int(time.time() * 1000) self.logger.debug("Inserting %s into playlistmeta", (name, nick, len(vids), timestamp)) self.executeDML("INSERT INTO playlistmeta VALUES(?, ?, ?, ?)", (name, nick, len(vids), timestamp)) for idx, (site, id) in enumerate(vids): try: self.executeDML("INSERT INTO playlists VALUES(?, ?, ?, ?)", (name, idx, site, id)) except Exception as e: if str(e) != "foreign key constraint failed": raise e self.executeDML("UPDATE playlistmeta SET length = (SELECT COUNT(*) FROM playlists WHERE playlists.name = playlistmeta.name) where name = ?", (name,)) self.commit()
BSD 2-Clause Simplified License
ethereum/trinity
trinity/_utils/chains.py
get_data_dir_for_network_id
python
def get_data_dir_for_network_id(network_id: int, trinity_root_dir: Path) -> Path:
    try:
        return get_local_data_dir(
            PRECONFIGURED_NETWORKS[network_id].data_dir_name,
            trinity_root_dir
        )
    except KeyError:
        raise KeyError(f"Unknown network id: `{network_id}`")
Returns the data directory for the chain associated with the given network id. If the network id is unknown, raises a KeyError.
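A short usage sketch; it assumes network id 1 is one of the PRECONFIGURED_NETWORKS (mainnet) and uses a placeholder root directory:

from pathlib import Path

root = Path.home() / '.local' / 'share' / 'trinity'
print(get_data_dir_for_network_id(1, root))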
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/trinity/_utils/chains.py#L50-L60
import argparse import os import tempfile from pathlib import Path from typing import ( cast, Any, Dict, Iterable, Optional, Tuple, Union, ) from mypy_extensions import ( TypedDict, ) from eth_utils import ( decode_hex, ) from eth_keys import keys from eth_keys.datatypes import PrivateKey from p2p.constants import DEFAULT_MAX_PEERS from p2p.kademlia import Node as KademliaNode from trinity.constants import ( SYNC_LIGHT, ) from trinity.network_configurations import ( PRECONFIGURED_NETWORKS, ) def get_local_data_dir(chain_name: str, trinity_root_dir: Path) -> Path: try: return Path(os.environ['TRINITY_DATA_DIR']) except KeyError: return trinity_root_dir / chain_name
MIT License
ukbaz/python-bluezero
bluezero/microbit.py
Microbit.set_pin
python
def set_pin(self, pin_number, pin_input, pin_analogue):
    pin_bit = tools.int_to_uint32(2 ** pin_number)
    current_io_setting = self._pin_config
    current_ad_setting = self._pin_ad_config

    if pin_input:
        new_setting = tools.bitwise_or_2lists(pin_bit, current_io_setting)
        self._pin_config = new_setting
    else:
        pin_mask = tools.bitwise_xor_2lists(pin_bit, [0xff, 0xff, 0xff, 0xff])
        new_setting = tools.bitwise_and_2lists(pin_mask, current_io_setting)
        self._pin_config = new_setting

    if pin_analogue:
        new_setting = tools.bitwise_or_2lists(pin_bit, current_ad_setting)
        self._pin_ad_config = new_setting
    else:
        pin_mask = tools.bitwise_xor_2lists(pin_bit, [0xff, 0xff, 0xff, 0xff])
        new_setting = tools.bitwise_and_2lists(pin_mask, current_ad_setting)
        self._pin_ad_config = new_setting
Set the direction (input/output) and type (digital/analogue) of a given
micro:bit pin.

:param pin_number: Pin number of the microbit
:param pin_input: False for output, True for input
:param pin_analogue: False for digital, True for analogue
:return:
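A hedged usage sketch; the Bluetooth address is a placeholder and the calls assume a paired micro:bit exposing the IO Pin service:

ubit = Microbit(device_addr='E1:4B:6C:22:56:F0')
ubit.connect()
ubit.set_pin(0, True, True)      # pin 0: input, analogue
ubit.set_pin(1, False, False)    # pin 1: output, digital
ubit.subscribe_pins(lambda pin, value: print(pin, value))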
https://github.com/ukbaz/python-bluezero/blob/6f46b6cb7102831c1427f12e65b727fc6b54774a/bluezero/microbit.py#L442-L472
from time import sleep from bluezero import central from bluezero import tools from bluezero import constants ACCEL_SRV = 'E95D0753-251D-470A-A062-FA1922DFA9A8' ACCEL_DATA = 'E95DCA4B-251D-470A-A062-FA1922DFA9A8' ACCEL_PERIOD = 'E95DFB24-251D-470A-A062-FA1922DFA9A8' BTN_SRV = 'E95D9882-251D-470A-A062-FA1922DFA9A8' BTN_A_STATE = 'E95DDA90-251D-470A-A062-FA1922DFA9A8' BTN_B_STATE = 'E95DDA91-251D-470A-A062-FA1922DFA9A8' LED_SRV = 'E95DD91D-251D-470A-A062-FA1922DFA9A8' LED_STATE = 'E95D7B77-251D-470A-A062-FA1922DFA9A8' LED_TEXT = 'E95D93EE-251D-470A-A062-FA1922DFA9A8' LED_SCROLL = 'E95D0D2D-251D-470A-A062-FA1922DFA9A8' MAGNETO_SRV = 'E95DF2D8-251D-470A-A062-FA1922DFA9A8' MAGNETO_DATA = 'E95DFB11-251D-470A-A062-FA1922DFA9A8' MAGNETO_PERIOD = 'E95D386C-251D-470A-A062-FA1922DFA9A8' MAGNETO_BEARING = 'E95D9715-251D-470A-A062-FA1922DFA9A8' MAGNETO_CALIBRATE = 'E95DB358-251D-470A-A062-FA1922DFA9A8' IO_PIN_SRV = 'E95D127B-251D-470A-A062-FA1922DFA9A8' IO_PIN_DATA = 'E95D8D00-251D-470A-A062-FA1922DFA9A8' IO_AD_CONFIG = 'E95D5899-251D-470A-A062-FA1922DFA9A8' IO_PIN_CONFIG = 'E95DB9FE-251D-470A-A062-FA1922DFA9A8' IO_PIN_PWM = 'E95DD822-251D-470A-A062-FA1922DFA9A8' TEMP_SRV = 'E95D6100-251D-470A-A062-FA1922DFA9A8' TEMP_DATA = 'E95D9250-251D-470A-A062-FA1922DFA9A8' TEMP_PERIOD = 'E95D1B25-251D-470A-A062-FA1922DFA9A8' UART_SRV = '6E400001-B5A3-F393-E0A9-E50E24DCCA9E' UART_TX = '6E400002-B5A3-F393-E0A9-E50E24DCCA9E' UART_RX = '6E400003-B5A3-F393-E0A9-E50E24DCCA9E' MICROBIT = {ACCEL_SRV: [ACCEL_DATA, ACCEL_PERIOD], BTN_SRV: [BTN_A_STATE, BTN_B_STATE], LED_SRV: [LED_STATE, LED_TEXT, LED_SCROLL], MAGNETO_SRV: [MAGNETO_DATA, MAGNETO_PERIOD, MAGNETO_BEARING, MAGNETO_CALIBRATE], IO_PIN_SRV: [IO_PIN_DATA, IO_AD_CONFIG, IO_PIN_CONFIG, IO_PIN_PWM], TEMP_SRV: [TEMP_DATA, TEMP_PERIOD], UART_SRV: [UART_TX, UART_RX]} SERVICE_NAMES = {ACCEL_SRV: 'Accelerometer Service', BTN_SRV: 'Button Service', LED_SRV: 'LED Service', MAGNETO_SRV: 'Magnetometer Service', IO_PIN_SRV: 'IO Pin Service', TEMP_SRV: 'Temperature Service', UART_SRV: 'Nordic Semiconductor UART service', } logger = tools.create_module_logger(__name__) class Microbit: def __init__(self, device_addr, adapter_addr=None, **kwargs): legacy_params = ['accelerometer_service', 'button_service', 'led_service', 'magnetometer_service', 'pin_service', 'temperature_service', 'uart_service'] for kwarg in kwargs: if kwarg in legacy_params: logger.warning('The parameter %s has been deprecated. 
There ' 'is no longer a requirement to specify which ' 'services the micro:bit has.\nYou will get an ' 'Error in the log if you access a ' 'characteristic that does not exist on ' 'micro:bit') self.ubit = central.Central(adapter_addr=adapter_addr, device_addr=device_addr) self.user_pin_callback = None self.user_btn_a_callback = None self.user_btn_b_callback = None self.user_calibrate_cb = None self.uart_tx_cb = None self._accel_data = self.ubit.add_characteristic(ACCEL_SRV, ACCEL_DATA) self._accel_period = self.ubit.add_characteristic(ACCEL_SRV, ACCEL_PERIOD) self._btn_a_state = self.ubit.add_characteristic(BTN_SRV, BTN_A_STATE) self._btn_b_state = self.ubit.add_characteristic(BTN_SRV, BTN_B_STATE) self._led_state = self.ubit.add_characteristic(LED_SRV, LED_STATE) self._led_text = self.ubit.add_characteristic(LED_SRV, LED_TEXT) self._led_scroll = self.ubit.add_characteristic(LED_SRV, LED_SCROLL) self._magneto_data = self.ubit.add_characteristic( MAGNETO_SRV, MAGNETO_DATA) self._magneto_period = self.ubit.add_characteristic( MAGNETO_SRV, MAGNETO_PERIOD) self._magneto_bearing = self.ubit.add_characteristic( MAGNETO_SRV, MAGNETO_BEARING) self._magneto_calibrate = self.ubit.add_characteristic( MAGNETO_SRV, MAGNETO_CALIBRATE) self._io_pin_data = self.ubit.add_characteristic(IO_PIN_SRV, IO_PIN_DATA) self._io_ad_config = self.ubit.add_characteristic(IO_PIN_SRV, IO_AD_CONFIG) self._io_pin_config = self.ubit.add_characteristic(IO_PIN_SRV, IO_PIN_CONFIG) self._io_pin_pwm = self.ubit.add_characteristic(IO_PIN_SRV, IO_PIN_PWM) self._temp_data = self.ubit.add_characteristic(TEMP_SRV, TEMP_DATA) self._temp_period = self.ubit.add_characteristic(TEMP_SRV, TEMP_PERIOD) self._uart_tx = self.ubit.add_characteristic(UART_SRV, UART_TX) self._uart_rx = self.ubit.add_characteristic(UART_SRV, UART_RX) @staticmethod def available(adapter_address=None): for ubit in central.Central.available(adapter_address): if ubit.name and 'micro:bit' in ubit.name: yield Microbit(device_addr=ubit.address, adapter_addr=ubit.adapter) def services_available(self): named_services = [] for service in self.ubit.services_available: if service.upper() in SERVICE_NAMES: named_services.append(SERVICE_NAMES[service.upper()]) return named_services @property def name(self): return self.ubit.rmt_device.name @property def address(self): return self.ubit.rmt_device.address @property def connected(self): return self.ubit.connected def connect(self): self.ubit.connect() def disconnect(self): self.ubit.disconnect() @property def scroll_delay(self): return int.from_bytes(self._led_scroll.value, byteorder='little', signed=False) @scroll_delay.setter def scroll_delay(self, delay=None): if delay < 0: delay = 0 elif delay > 2**16: delay = 2**16 self._led_scroll.value = tools.int_to_uint16(delay) @property def text(self): pass @text.setter def text(self, words): data = [] text = '' if len(words) > 20: text = words[:19] else: text = words for letter in text: data.append(ord(letter)) self._led_text.value = data def _write_pixels(self, data): self._led_state.value = data def clear_display(self): self._write_pixels([0x00, 0x00, 0x00, 0x00, 0x00]) @property def pixels(self): rows = self._led_state.value return [int(i) for i in rows] @pixels.setter def pixels(self, rows): self._write_pixels([rows[0], rows[1], rows[2], rows[3], rows[4]]) @property def temperature(self): tmp_val = self._temp_data.value return int.from_bytes(tmp_val, byteorder='little', signed=True) @property def button_a(self): btn_val = self._btn_a_state.value return int.from_bytes(btn_val, 
byteorder='little', signed=False) @property def button_b(self): btn_val = self._btn_b_state.value return int.from_bytes(btn_val, byteorder='little', signed=False) def _decode_btn_a(self, *button_values): if 'Value' in button_values[1]: self.user_btn_a_callback(int(button_values[1]['Value'][0])) def _decode_btn_b(self, *button_values): if 'Value' in button_values[1]: self.user_btn_b_callback(int(button_values[1]['Value'][0])) def subscribe_button_a(self, user_callback): self.user_btn_a_callback = user_callback self._btn_a_state.add_characteristic_cb(self._decode_btn_a) self._btn_a_state.start_notify() def subscribe_button_b(self, user_callback): self.user_btn_b_callback = user_callback self._btn_b_state.add_characteristic_cb(self._decode_btn_b) self._btn_b_state.start_notify() def _decode_pins(self, *pin_values): if pin_values[0] != 'org.bluez.GattCharacteristic1': return if 'Value' in pin_values[1]: self.user_pin_callback(int(pin_values[1]['Value'][0]), int(pin_values[1]['Value'][1])) def subscribe_pins(self, user_callback): self.user_pin_callback = user_callback self._io_pin_data.add_characteristic_cb(self._decode_pins) self._io_pin_data.start_notify() @property def accelerometer(self): accel_bytes = self._accel_data.value return tools.bytes_to_xyz(accel_bytes) @property def magnetometer(self): mag_bytes = self._magneto_data.value return tools.bytes_to_xyz(mag_bytes) @property def bearing(self): mag_bear_val = self._magneto_bearing.value return int.from_bytes(mag_bear_val, byteorder='little', signed=False) def calibrate(self): self._magneto_calibrate.value = 0x01 def subscribe_calibrate(self, user_callback): self.user_calibrate_cb = user_callback self._magneto_calibrate.add_characteristic_cb(self._magneto_cal_cb) self._magneto_calibrate.start_notify() def _magneto_cal_cb(self, iface, changed_props, invalidated_props): if iface != 'org.bluez.GattCharacteristic1': return if 'Value' in changed_props: self.user_calibrate_cb(int(changed_props['Value'][0]))
MIT License
openstack/senlin
senlin/profiles/os/nova/server.py
ServerProfile._create_ports_from_properties
python
def _create_ports_from_properties(self, obj, networks, action_type):
    internal_ports = obj.data.get('internal_ports', [])
    if not networks:
        return []

    for net_spec in networks:
        net = self._validate_network(obj, net_spec, action_type)
        port, ex = self._get_port(obj, net)
        if ex:
            d_ex = self._delete_ports(obj, internal_ports)
            if d_ex:
                raise d_ex
            else:
                raise ex
        port_attrs = {
            'id': port.id,
            'network_id': port.network_id,
            'security_group_ids': port.security_group_ids,
            'fixed_ips': port.fixed_ips
        }
        if self.PORT not in net:
            port_attrs.update({'remove': True})
        if 'floating_ip_id' in net or self.FLOATING_NETWORK in net:
            fip, ex = self._get_floating_ip(obj, net, port_attrs['id'])
            if ex:
                d_ex = self._delete_ports(obj, internal_ports)
                if d_ex:
                    raise d_ex
                else:
                    raise ex
            port_attrs['floating'] = {
                'id': fip.id,
                'floating_ip_address': fip.floating_ip_address,
                'floating_network_id': fip.floating_network_id,
            }
            if self.FLOATING_NETWORK in net:
                port_attrs['floating'].update({'remove': True})
        internal_ports.append(port_attrs)

    if internal_ports:
        try:
            node_data = obj.data
            node_data.update(internal_ports=internal_ports)
            node_obj.Node.update(self.context, obj.id, {'data': node_data})
        except exc.ResourceNotFound:
            self._rollback_ports(obj, internal_ports)
            raise
    return internal_ports
Create or find ports based on the networks property.

:param obj: The node object.
:param networks: The networks property used for the node.
:param action_type: Either 'create' or 'update'.
:returns: A list of the created ports' attributes.
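The method above needs a live Neutron endpoint, so the sketch below only restates its create-then-roll-back pattern with made-up stand-ins (fake_create_port, the created list); none of these names exist in senlin.

created = []

def fake_create_port(net_spec):
    # pretend the third port hits a quota error so the rollback path runs
    if len(created) == 2:
        raise RuntimeError("port quota exceeded")
    return {"id": "port-%d" % len(created), "network_id": net_spec["network"]}

def create_ports(net_specs):
    try:
        for spec in net_specs:
            created.append(fake_create_port(spec))
    except RuntimeError:
        while created:          # roll back everything created so far
            created.pop()       # stands in for port_delete()
        raise
    return created

try:
    create_ports([{"network": "net-a"}, {"network": "net-b"}, {"network": "net-c"}])
except RuntimeError as err:
    print("creation failed and was rolled back:", err)
print("remaining ports:", created)   # -> []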
https://github.com/openstack/senlin/blob/390779ca1e08f819683e79993696f945f1c0393e/senlin/profiles/os/nova/server.py#L743-L800
import base64 import copy from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from senlin.common import constraints from senlin.common import consts from senlin.common import context from senlin.common import exception as exc from senlin.common.i18n import _ from senlin.common import schema from senlin.objects import node as node_obj from senlin.profiles import base LOG = logging.getLogger(__name__) class ServerProfile(base.Profile): VERSIONS = { '1.0': [ {'status': consts.SUPPORTED, 'since': '2016.04'} ] } KEYS = ( CONTEXT, ADMIN_PASS, AUTO_DISK_CONFIG, AVAILABILITY_ZONE, BLOCK_DEVICE_MAPPING_V2, CONFIG_DRIVE, FLAVOR, IMAGE, KEY_NAME, METADATA, NAME, NETWORKS, PERSONALITY, SECURITY_GROUPS, USER_DATA, SCHEDULER_HINTS, ) = ( 'context', 'admin_pass', 'auto_disk_config', 'availability_zone', 'block_device_mapping_v2', 'config_drive', 'flavor', 'image', 'key_name', 'metadata', 'name', 'networks', 'personality', 'security_groups', 'user_data', 'scheduler_hints', ) BDM2_KEYS = ( BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, BDM2_DELETE_ON_TERMINATION, ) = ( 'uuid', 'source_type', 'destination_type', 'disk_bus', 'device_name', 'volume_size', 'guest_format', 'boot_index', 'device_type', 'delete_on_termination', ) NETWORK_KEYS = ( PORT, VNIC_TYPE, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, FLOATING_NETWORK, FLOATING_IP, ) = ( 'port', 'vnic_type', 'fixed_ip', 'network', 'security_groups', 'floating_network', 'floating_ip', ) PERSONALITY_KEYS = ( PATH, CONTENTS, ) = ( 'path', 'contents', ) SCHEDULER_HINTS_KEYS = ( GROUP, ) = ( 'group', ) properties_schema = { CONTEXT: schema.Map( _('Customized security context for operating servers.'), ), ADMIN_PASS: schema.String( _('Password for the administrator account.'), ), AUTO_DISK_CONFIG: schema.Boolean( _('Whether the disk partition is done automatically.'), default=True, ), AVAILABILITY_ZONE: schema.String( _('Name of availability zone for running the server.'), ), BLOCK_DEVICE_MAPPING_V2: schema.List( _('A list specifying the properties of block devices to be used ' 'for this server.'), schema=schema.Map( _('A map specifying the properties of a block device to be ' 'used by the server.'), schema={ BDM2_UUID: schema.String( _('ID of the source image, snapshot or volume'), ), BDM2_SOURCE_TYPE: schema.String( _("Volume source type, must be one of 'image', " "'snapshot', 'volume' or 'blank'"), required=True, ), BDM2_DESTINATION_TYPE: schema.String( _("Volume destination type, must be 'volume' or " "'local'"), required=True, ), BDM2_DISK_BUS: schema.String( _('Bus of the device.'), ), BDM2_DEVICE_NAME: schema.String( _('Name of the device(e.g. vda, xda, ....).'), ), BDM2_VOLUME_SIZE: schema.Integer( _('Size of the block device in MB(for swap) and ' 'in GB(for other formats)'), required=True, ), BDM2_GUEST_FORMAT: schema.String( _('Specifies the disk file system format(e.g. swap, ' 'ephemeral, ...).'), ), BDM2_BOOT_INDEX: schema.Integer( _('Define the boot order of the device'), ), BDM2_DEVICE_TYPE: schema.String( _('Type of the device(e.g. 
disk, cdrom, ...).'), ), BDM2_DELETE_ON_TERMINATION: schema.Boolean( _('Whether to delete the volume when the server ' 'stops.'), ), } ), ), CONFIG_DRIVE: schema.Boolean( _('Whether config drive should be enabled for the server.'), ), FLAVOR: schema.String( _('ID of flavor used for the server.'), required=True, updatable=True, ), IMAGE: schema.String( _('ID of image to be used for the new server.'), updatable=True, ), KEY_NAME: schema.String( _('Name of Nova keypair to be injected to server.'), ), METADATA: schema.Map( _('A collection of key/value pairs to be associated with the ' 'server created. Both key and value must be <=255 chars.'), updatable=True, ), NAME: schema.String( _('Name of the server. When omitted, the node name will be used.'), updatable=True, ), NETWORKS: schema.List( _('List of networks for the server.'), schema=schema.Map( _('A map specifying the properties of a network for uses.'), schema={ NETWORK: schema.String( _('Name or ID of network to create a port on.'), ), PORT: schema.String( _('Port ID to be used by the network.'), ), VNIC_TYPE: schema.String( _('Define vnic_type to be used by port'), ), FIXED_IP: schema.String( _('Fixed IP to be used by the network.'), ), PORT_SECURITY_GROUPS: schema.List( _('A list of security groups to be attached to ' 'this port.'), schema=schema.String( _('Name of a security group'), required=True, ), ), FLOATING_NETWORK: schema.String( _('The network on which to create a floating IP'), ), FLOATING_IP: schema.String( _('The floating IP address to be associated with ' 'this port.'), ), }, ), updatable=True, ), PERSONALITY: schema.List( _('List of files to be injected into the server, where each.'), schema=schema.Map( _('A map specifying the path & contents for an injected ' 'file.'), schema={ PATH: schema.String( _('In-instance path for the file to be injected.'), required=True, ), CONTENTS: schema.String( _('Contents of the file to be injected.'), required=True, ), }, ), ), SCHEDULER_HINTS: schema.Map( _('A collection of key/value pairs to be associated with the ' 'Scheduler hints. 
Both key and value must be <=255 chars.'), ), SECURITY_GROUPS: schema.List( _('List of security groups.'), schema=schema.String( _('Name of a security group'), required=True, ), ), USER_DATA: schema.String( _('User data to be exposed by the metadata server.'), ), } OP_NAMES = ( OP_REBOOT, OP_REBUILD, OP_CHANGE_PASSWORD, OP_PAUSE, OP_UNPAUSE, OP_SUSPEND, OP_RESUME, OP_LOCK, OP_UNLOCK, OP_START, OP_STOP, OP_RESCUE, OP_UNRESCUE, OP_EVACUATE, OP_MIGRATE, ) = ( 'reboot', 'rebuild', 'change_password', 'pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock', 'start', 'stop', 'rescue', 'unrescue', 'evacuate', 'migrate', ) ADMIN_PASSWORD = 'admin_pass' RESCUE_IMAGE = 'image_ref' EVACUATE_OPTIONS = ( EVACUATE_HOST, EVACUATE_FORCE ) = ( 'host', 'force' ) OPERATIONS = { OP_REBOOT: schema.Operation( _("Reboot the nova server."), schema={ consts.REBOOT_TYPE: schema.StringParam( _("Type of reboot which can be 'SOFT' or 'HARD'."), default=consts.REBOOT_SOFT, constraints=[ constraints.AllowedValues(consts.REBOOT_TYPES), ] ) } ), OP_REBUILD: schema.Operation( _("Rebuild the server using current image and admin password."), ), OP_CHANGE_PASSWORD: schema.Operation( _("Change the administrator password."), schema={ ADMIN_PASSWORD: schema.StringParam( _("New password for the administrator.") ) } ), OP_PAUSE: schema.Operation( _("Pause the server from running."), ), OP_UNPAUSE: schema.Operation( _("Unpause the server to running state."), ), OP_SUSPEND: schema.Operation( _("Suspend the running of the server."), ), OP_RESUME: schema.Operation( _("Resume the running of the server."), ), OP_LOCK: schema.Operation( _("Lock the server."), ), OP_UNLOCK: schema.Operation( _("Unlock the server."), ), OP_START: schema.Operation( _("Start the server."), ), OP_STOP: schema.Operation( _("Stop the server."), ), OP_RESCUE: schema.Operation( _("Rescue the server."), schema={ RESCUE_IMAGE: schema.StringParam( _("A string referencing the image to use."), ), } ), OP_UNRESCUE: schema.Operation( _("Unrescue the server."), ), OP_EVACUATE: schema.Operation( _("Evacuate the server to a different host."), schema={ EVACUATE_HOST: schema.StringParam( _("The target host to evacuate the server."), ), EVACUATE_FORCE: schema.StringParam( _("Whether the evacuation should be a forced one.") ) } ) } def __init__(self, type_name, name, **kwargs): super(ServerProfile, self).__init__(type_name, name, **kwargs) self.server_id = None self.stop_timeout = cfg.CONF.default_nova_timeout def _validate_az(self, obj, az_name, reason=None): try: res = self.compute(obj).validate_azs([az_name]) except exc.InternalError as ex: if reason == 'create': raise exc.EResourceCreation(type='server', message=str(ex)) else: raise if not res: msg = _("The specified %(key)s '%(value)s' could not be found" ) % {'key': self.AVAILABILITY_ZONE, 'value': az_name} if reason == 'create': raise exc.EResourceCreation(type='server', message=msg) else: raise exc.InvalidSpec(message=msg) return az_name def _validate_flavor(self, obj, name_or_id, reason=None): flavor = None msg = '' try: flavor = self.compute(obj).flavor_find(name_or_id, False) except exc.InternalError as ex: msg = str(ex) if reason is None: if ex.code == 404: msg = _("The specified %(k)s '%(v)s' could not be found." 
) % {'k': self.FLAVOR, 'v': name_or_id} raise exc.InvalidSpec(message=msg) else: raise if flavor is not None: if not flavor.is_disabled: return flavor msg = _("The specified %(k)s '%(v)s' is disabled" ) % {'k': self.FLAVOR, 'v': name_or_id} if reason == 'create': raise exc.EResourceCreation(type='server', message=msg) elif reason == 'update': raise exc.EResourceUpdate(type='server', id=obj.physical_id, message=msg) else: raise exc.InvalidSpec(message=msg) def _validate_image(self, obj, name_or_id, reason=None): try: return self.glance(obj).image_find(name_or_id, False) except exc.InternalError as ex: if reason == 'create': raise exc.EResourceCreation(type='server', message=str(ex)) elif reason == 'update': raise exc.EResourceUpdate(type='server', id=obj.physical_id, message=str(ex)) elif ex.code == 404: msg = _("The specified %(k)s '%(v)s' could not be found." ) % {'k': self.IMAGE, 'v': name_or_id} raise exc.InvalidSpec(message=msg) else: raise def _validate_keypair(self, obj, name_or_id, reason=None): try: return self.compute(obj).keypair_find(name_or_id, False) except exc.InternalError as ex: if reason == 'create': raise exc.EResourceCreation(type='server', message=str(ex)) elif reason == 'update': raise exc.EResourceUpdate(type='server', id=obj.physical_id, message=str(ex)) elif ex.code == 404: msg = _("The specified %(k)s '%(v)s' could not be found." ) % {'k': self.KEY_NAME, 'v': name_or_id} raise exc.InvalidSpec(message=msg) else: raise def _validate_volume(self, obj, name_or_id, reason=None): try: volume = self.block_storage(obj).volume_get(name_or_id) if volume.status == 'available': return volume msg = _("The volume %(k)s should be in 'available' status " "but is in '%(v)s' status." ) % {'k': name_or_id, 'v': volume.status} raise exc.InvalidSpec(message=msg) except exc.InternalError as ex: if reason == 'create': raise exc.EResourceCreation(type='server', message=str(ex)) elif ex.code == 404: msg = _("The specified volume '%(k)s' could not be found." 
) % {'k': name_or_id} raise exc.InvalidSpec(message=msg) else: raise def do_validate(self, obj): az_name = self.properties[self.AVAILABILITY_ZONE] if az_name is not None: self._validate_az(obj, az_name) flavor = self.properties[self.FLAVOR] self._validate_flavor(obj, flavor) image = self.properties[self.IMAGE] if image is not None: self._validate_image(obj, image) keypair = self.properties[self.KEY_NAME] if keypair is not None: self._validate_keypair(obj, keypair) networks = self.properties[self.NETWORKS] for net in networks: self._validate_network(obj, net) return True def _resolve_bdm(self, obj, bdm, reason=None): for bd in bdm: for key in self.BDM2_KEYS: if bd[key] is None: del bd[key] if 'uuid' in bd and 'source_type' in bd: if bd['source_type'] == 'image': self._validate_image(obj, bd['uuid'], reason) elif bd['source_type'] == 'volume': self._validate_volume(obj, bd['uuid'], reason) return bdm def _check_security_groups(self, nc, net_spec, result): sgs = net_spec.get(self.PORT_SECURITY_GROUPS) if not sgs: return res = [] try: for sg in sgs: try: sg_obj = nc.security_group_find( sg, project_id=self.project) except exc.InternalError: sg_obj = nc.security_group_find(sg) res.append(sg_obj.id) except exc.InternalError as ex: return str(ex) result[self.PORT_SECURITY_GROUPS] = res return def _check_network(self, nc, net, result): if net is None: return try: net_obj = nc.network_get(net) if net_obj is None: return _("The specified network %s could not be found.") % net result[self.NETWORK] = net_obj.id except exc.InternalError as ex: return str(ex) def _check_port(self, nc, port, result): if port is None: return try: port_obj = nc.port_find(port) if port_obj.status != 'DOWN': return _("The status of the port %(p)s must be DOWN" ) % {'p': port} result[self.PORT] = port_obj.id return except exc.InternalError as ex: return str(ex) def _check_floating_ip(self, nc, net_spec, result): net = net_spec.get(self.FLOATING_NETWORK) if net: try: net_obj = nc.network_get(net) if net_obj is None: return _("The floating network %s could not be found." 
) % net result[self.FLOATING_NETWORK] = net_obj.id except exc.InternalError as ex: return str(ex) flt_ip = net_spec.get(self.FLOATING_IP) if not flt_ip: return try: fip = nc.floatingip_find(flt_ip) if fip: if fip.status == 'ACTIVE': return _('the floating IP %s has been used.') % flt_ip result['floating_ip_id'] = fip.id if not net: return _('Must specify a network to create floating IP') result[self.FLOATING_IP] = flt_ip return except exc.InternalError as ex: return str(ex) def _validate_network(self, obj, net_spec, reason=None): def _verify(error): if error is None: return if reason == 'create': raise exc.EResourceCreation(type='server', message=error) elif reason == 'update': raise exc.EResourceUpdate(type='server', id=obj.physical_id, message=error) else: raise exc.InvalidSpec(message=error) nc = self.network(obj) result = {} net = net_spec.get(self.NETWORK) error = self._check_network(nc, net, result) _verify(error) port = net_spec.get(self.PORT) error = self._check_port(nc, port, result) _verify(error) if port is None and net is None: _verify(_("One of '%(p)s' and '%(n)s' must be provided" ) % {'p': self.PORT, 'n': self.NETWORK}) fixed_ip = net_spec.get(self.FIXED_IP) if fixed_ip: if port is not None: _verify(_("The '%(p)s' property and the '%(fip)s' property " "cannot be specified at the same time" ) % {'p': self.PORT, 'fip': self.FIXED_IP}) result[self.FIXED_IP] = fixed_ip vnic_type = net_spec.get(self.VNIC_TYPE, None) if vnic_type is not None: if vnic_type not in ['normal', 'direct', 'macvtap']: _verify(_("vnic_type: '%(v)s' is not supported." "(supported types are: normal, direct, macvtap)" ) % {'v': vnic_type}) result[self.VNIC_TYPE] = vnic_type error = self._check_security_groups(nc, net_spec, result) _verify(error) error = self._check_floating_ip(nc, net_spec, result) _verify(error) return result def _get_port(self, obj, net_spec): port_id = net_spec.get(self.PORT, None) if port_id: try: port = self.network(obj).port_find(port_id) return port, None except exc.InternalError as ex: return None, ex port_attr = { 'network_id': net_spec.get(self.NETWORK), } fixed_ip = net_spec.get(self.FIXED_IP, None) if fixed_ip: port_attr['fixed_ips'] = [fixed_ip] security_groups = net_spec.get(self.PORT_SECURITY_GROUPS, []) if security_groups: port_attr['security_groups'] = security_groups vnic_type = net_spec.get(self.VNIC_TYPE, None) if vnic_type: port_attr['binding_vnic_type'] = vnic_type try: port = self.network(obj).port_create(**port_attr) LOG.debug('Network port_attr : %s', port) return port, None except exc.InternalError as ex: return None, ex def _delete_ports(self, obj, ports): pp = copy.deepcopy(ports) for port in pp: if port.get('remove', False): try: if port.get('floating', None) and port[ 'floating'].get('remove', False): self.network(obj).floatingip_delete( port['floating']['id']) self.network(obj).port_delete(port['id']) except exc.InternalError as ex: return ex ports.remove(port) node_data = obj.data node_data['internal_ports'] = ports node_obj.Node.update(self.context, obj.id, {'data': node_data}) def _get_floating_ip(self, obj, fip_spec, port_id): floating_ip_id = fip_spec.get('floating_ip_id', None) if floating_ip_id: try: fip = self.network(obj).floatingip_find(floating_ip_id) if fip.port_id is None: attr = {'port_id': port_id} fip = self.network(obj).floatingip_update(fip, **attr) return fip, None except exc.InternalError as ex: return None, ex net_id = fip_spec.get(self.FLOATING_NETWORK) fip_addr = fip_spec.get(self.FLOATING_IP) attr = { 'port_id': port_id, 
'floating_network_id': net_id, } if fip_addr: attr.update({'floating_ip_address': fip_addr}) try: fip = self.network(obj).floatingip_create(**attr) return fip, None except exc.InternalError as ex: return None, ex
Apache License 2.0
pyqode/pyqode.core
pyqode/core/widgets/splittable_tab_widget.py
BaseTabWidget.close_others
python
def close_others(self):
    current_widget = self.widget(self.tab_under_menu())
    if self._try_close_dirty_tabs(exept=current_widget):
        i = 0
        while self.count() > 1:
            widget = self.widget(i)
            if widget != current_widget:
                self.remove_tab(i)
            else:
                i = 1
Closes every editor tab except the current one.
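The loop above removes tabs by index while keeping one; the same bookkeeping can be sanity-checked on a plain list, with names chosen purely for illustration.

tabs = ["a.py", "b.py", "KEEP", "c.py", "d.py"]
current = "KEEP"

i = 0
while len(tabs) > 1:
    if tabs[i] != current:
        tabs.pop(i)        # stands in for self.remove_tab(i)
    else:
        i = 1              # the kept tab is now at 0; remove from index 1 onward
print(tabs)                # -> ['KEEP']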
https://github.com/pyqode/pyqode.core/blob/0ffabebe4f0397d53429024f6f44db3fe97b0828/pyqode/core/widgets/splittable_tab_widget.py#L173-L185
import inspect import logging import mimetypes import os import io import uuid import weakref from pyqode.qt import QtCore, QtWidgets, QtGui from pyqode.core.api import utils from pyqode.core.dialogs import DlgUnsavedFiles from pyqode.core._forms import popup_open_files_ui from .tab_bar import TabBar from .code_edits import GenericCodeEdit, TextCodeEdit from pyqode.core._forms import pyqode_core_rc assert pyqode_core_rc def _logger(): return logging.getLogger(__name__) class DraggableTabBar(TabBar): tab_move_request = QtCore.Signal(QtWidgets.QWidget, int) def __init__(self, parent): super(DraggableTabBar, self).__init__(parent) self._pos = QtCore.QPoint() self.setAcceptDrops(True) self.setMouseTracking(True) self.setElideMode(QtCore.Qt.ElideNone) def mousePressEvent(self, event): if event.button() == QtCore.Qt.LeftButton: self._pos = event.pos() super(DraggableTabBar, self).mousePressEvent(event) def widget_under_mouse(self, event): index = self.tabAt(event.pos()) tab = self.parent().widget(index) return tab def mouseMoveEvent(self, event): tab = self.widget_under_mouse(event) if tab is not None: tooltip = tab.toolTip() if not tooltip: try: tooltip = tab.file.path except AttributeError: pass self.setToolTip(tooltip) if (event.pos() - self._pos).manhattanLength() < QtWidgets.QApplication.startDragDistance(): return if not event.buttons() & QtCore.Qt.LeftButton: return drag = QtGui.QDrag(self) data = QtCore.QMimeData() data.tab = tab data.widget = self data.setData("action", b"tab-reordering") drag.setMimeData(data) drag.setPixmap(self.tabIcon(self.tabAt(event.pos())).pixmap(32, 32)) drag.exec_() def dragEnterEvent(self, event): m = event.mimeData() formats = m.formats() if "action" in formats and m.data("action") == "tab-reordering": event.acceptProposedAction() def dropEvent(self, event): m = event.mimeData() index = self.tabAt(event.pos()) if m.tab != self.parent().widget(index): self.tab_move_request.emit(m.tab, index) event.acceptProposedAction() class BaseTabWidget(QtWidgets.QTabWidget): last_tab_closed = QtCore.Signal() tab_closed = QtCore.Signal(QtWidgets.QWidget) split_requested = QtCore.Signal(QtWidgets.QWidget, int) tab_detached = QtCore.Signal(QtWidgets.QWidget, QtWidgets.QWidget) _detached_window_class = None def __init__(self, parent, tab_bar_shortcuts=True): super(BaseTabWidget, self).__init__(parent) self._current = None self.currentChanged.connect(self._on_current_changed) self.tabCloseRequested.connect(self._on_tab_close_requested) tab_bar = DraggableTabBar(self) tab_bar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) tab_bar.customContextMenuRequested.connect(self._show_tab_context_menu) tab_bar.tab_move_request.connect(self._on_tab_move_request) self.setTabBar(tab_bar) self.setAcceptDrops(True) self.setUsesScrollButtons(True) self._tab_bar_shortcuts = tab_bar_shortcuts self.context_actions = [] self.a_close = None self.a_close_all = None self._menu_pos = None self._create_tab_bar_menu() self.detached_tabs = [] def tab_under_menu(self): if self._menu_pos: return self.tabBar().tabAt(self._menu_pos) else: return self.currentIndex() def close(self): self.tabCloseRequested.emit(self.tab_under_menu())
MIT License
cloudboltsoftware/cloudbolt-forge
blueprints/gcp_storage/create.py
create_bucket
python
def create_bucket(
    wrapper: GCPResource, project_id: str, bucket_name: str, storage_type: str
) -> dict:
    body = {"name": bucket_name, "storageClass": storage_type}
    buckets_resource = wrapper.buckets()
    insert_request = buckets_resource.insert(project=project_id, body=body)
    created_bucket = insert_request.execute()
    return created_bucket
Create a bucket. Many other aspects of the bucket can be specified - see the API docs for details:
https://googleapis.github.io/google-api-python-client/docs/dyn/storage_v1.buckets.html#insert
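Running the function for real requires GCP credentials, so this dry-run exercises the same buckets().insert(...).execute() chain against a unittest.mock stand-in. It assumes create_bucket as defined above is available in the same scope; the project and bucket names are placeholders.

from unittest import mock

wrapper = mock.Mock()
wrapper.buckets.return_value.insert.return_value.execute.return_value = {
    "name": "example-bucket", "storageClass": "STANDARD"
}

bucket = create_bucket(wrapper, "my-project", "example-bucket", "STANDARD")
wrapper.buckets.return_value.insert.assert_called_once_with(
    project="my-project",
    body={"name": "example-bucket", "storageClass": "STANDARD"},
)
print(bucket["storageClass"])   # -> STANDARD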
https://github.com/cloudboltsoftware/cloudbolt-forge/blob/b6fd9523560ff16e5812398370508d297eee8d12/blueprints/gcp_storage/create.py#L111-L122
from __future__ import unicode_literals import json from logging import error from typing import Optional from common.methods import set_progress from google.oauth2.credentials import Credentials from googleapiclient.discovery import Resource as GCPResource, build from infrastructure.models import CustomField, Environment from resources.models import Resource from resourcehandlers.gcp.models import GCPHandler STORAGE_TYPE = "{{ storage_type }}" BUCKET_NAME = "{{ bucket_name }}" ENVIRONMENT_ID = "{{ env_id }}" def generate_options_for_env_id(server=None, **kwargs): gcp_environments = Environment.objects.filter( resource_handler__resource_technology__slug="gcp" ) options = [(env.id, env.name) for env in gcp_environments] if not options: raise RuntimeError( "No valid Environments on Google Compute Platform resource handlers in CloudBolt" ) return options def generate_options_for_storage_type(server=None, **kwargs): return [ ("MULTI_REGIONAL", "Multi-Regional"), ("REGIONAL", "Regional"), ("NEARLINE", "Nearline"), ("COLDLINE", "Coldline"), ("ARCHIVE", "Archive"), ("STANDARD", "Standard"), ("DURABLE_REDUCED_AVAILABILITY", "Durable Reduced Availability"), ] def create_custom_field_objects_if_missing(): CustomField.objects.get_or_create( name="gcp_rh_id", defaults={ "label": "GCP Resource Handler ID", "type": "STR", "description": "Used by the GCP Storage blueprint", }, ) CustomField.objects.get_or_create( name="bucket_name", defaults={ "label": "Google Storage bucket name", "type": "STR", "description": "Used by the GCP Storage blueprint", }, ) CustomField.objects.get_or_create( name="gcp_project_id", defaults={ "label": "GCP Storage bucket project id", "type": "STR", "description": "Used by the GCP Storage blueprint", }, ) def update_resource( resource: Resource, bucket_name: str, project_id: str, resource_handler: GCPHandler ): resource.name = bucket_name resource.bucket_name = bucket_name resource.gcp_project_id = project_id resource.gcp_rh_id = resource_handler.id resource.save() def create_storage_api_wrapper(handler: GCPHandler) -> Optional[GCPResource]: if not handler.gcp_api_credentials: set_progress(f"Handler {handler} is missing gcp api credentials.") return None credentials_dict = json.loads(handler.gcp_api_credentials) credentials = Credentials(**credentials_dict) set_progress(f"Connecting to GCP for handler: {handler}") storage_wrapper: GCPResource = build( "storage", "v1", credentials=credentials, cache_discovery=False ) set_progress("Connection established") return storage_wrapper
Apache License 2.0
lisa-lab/pylearn2
pylearn2/scripts/dbm/dbm_metrics.py
neg_sampling
python
def neg_sampling(W_list, b_list, nsamples, beta=1.0, pa_bias=None,
                 marginalize_odd=True, theano_rng=None):
    depth = len(b_list)
    new_nsamples = [nsamples[i] for i in xrange(depth)]
    _sample_even_odd(W_list, b_list, new_nsamples, beta, odd=marginalize_odd)
    _activation_even_odd(W_list, b_list, new_nsamples, beta,
                         odd=not marginalize_odd)
    new_nsamples[not marginalize_odd] += pa_bias * (1. - beta)
    for i in xrange(not marginalize_odd, depth, 2):
        new_nsamples[i] = T.nnet.sigmoid(new_nsamples[i])
        new_nsamples[i] = theano_rng.binomial(
            size=nsamples[i].get_value().shape,
            n=1, p=new_nsamples[i], dtype=floatX)
    return new_nsamples
Generate a sample from the intermediate distribution defined at inverse
temperature 'beta', starting from state 'nsamples'. See file docstring for
equation of p_k(h1).

Parameters
----------
W_list : array-like object of theano shared variables
    Weight matrices of the DBM. Its first element is ignored, since in the
    Pylearn2 framework a visible layer does not have a weight matrix.
b_list : array-like object of theano shared variables
    Biases of the DBM
nsamples : array-like object of theano shared variables
    Negative samples corresponding to the previous states
beta : theano.tensor.scalar
    Inverse temperature parameter
marginalize_odd : boolean
    Whether to marginalize odd layers
theano_rng : theano RandomStreams
    Random number generator

Returns
-------
new_nsamples : array-like object of symbolic matrices
    new_nsamples[i] contains new samples for i-th layer.
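The Theano code above is hard to run standalone today, so the following NumPy sketch only restates the idea it implements: one even/odd Gibbs sweep of a small binary DBM at inverse temperature beta. Layer sizes and weights are arbitrary toy values; this is an illustration of the sampling scheme, not the pylearn2 implementation.

import numpy as np

rng = np.random.RandomState(0)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

sizes = [6, 5, 4]                    # toy layer widths h0, h1, h2
W = [None] + [rng.randn(sizes[i - 1], sizes[i]) * 0.1 for i in range(1, len(sizes))]
b = [rng.randn(n) * 0.1 for n in sizes]
samples = [rng.binomial(1, 0.5, size=(2, n)).astype(float) for n in sizes]
beta = 0.7                           # inverse temperature

def layer_input(i):
    # total input to layer i from its neighbours and bias, scaled by beta
    total = beta * b[i]
    if i > 0:
        total = total + beta * samples[i - 1].dot(W[i])
    if i + 1 < len(sizes):
        total = total + beta * samples[i + 1].dot(W[i + 1].T)
    return total

for i in range(1, len(sizes), 2):    # resample odd layers given the even ones
    samples[i] = rng.binomial(1, sigmoid(layer_input(i))).astype(float)
for i in range(0, len(sizes), 2):    # then even layers given the odd ones
    samples[i] = rng.binomial(1, sigmoid(layer_input(i))).astype(float)
print([s.shape for s in samples])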
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/scripts/dbm/dbm_metrics.py#L103-L152
__authors__ = "Vincent Dumoulin"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Guillaume Desjargins", "Vincent Dumoulin"]
__license__ = "3-clause BSD"
__maintainer__ = "Vincent Dumoulin"

import argparse
import warnings
import numpy
import logging

from theano.compat.six.moves import xrange
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano import scan

import pylearn2
from pylearn2.compat import OrderedDict
from pylearn2.datasets.mnist import MNIST
from pylearn2.utils import serial
from pylearn2 import utils

floatX = theano.config.floatX
logging.basicConfig(level=logging.INFO)

rng = numpy.random.RandomState(9873242)
theano_rng = RandomStreams(rng.randint(2**30))

def _sample_even_odd(W_list, b_list, samples, beta, odd=True):
    for i in xrange(odd, len(samples), 2):
        samples[i] = sample_hi_given(samples, i, W_list, b_list, beta)

def _activation_even_odd(W_list, b_list, samples, beta, odd=True):
    for i in xrange(odd, len(samples), 2):
        samples[i] = hi_given(samples, i, W_list, b_list, beta,
                              apply_sigmoid=False)
BSD 3-Clause New or Revised License
packit/packit
packit/config/package_config.py
get_local_specfile_path
python
def get_local_specfile_path(dir: Path, exclude: List[str] = None) -> Optional[Path]:
    files = [path.relative_to(dir) for path in dir.glob("*.spec")] or [
        path.relative_to(dir) for path in dir.rglob("*.spec")
    ]
    if len(files) > 0:
        sexclude = set(exclude) if exclude else {"tests"}
        files = [f for f in files if f.parts[0] not in sexclude]
        logger.debug(f"Local spec files found: {files}. Taking: {files[0]}")
        return files[0]
    return None
Get the path (relative to dir) of the local spec file, if present. If the spec
is not found in dir directly, try to search for it recursively (rglob).

:param dir: directory to find the spec file in
:param exclude: don't include files found in these dirs (default "tests")
:return: path (relative to dir) of the first spec file found
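The same discovery logic can be exercised standalone with pathlib and a temporary directory; the sketch below mirrors the glob-then-rglob fallback and the first-level "tests" exclusion without importing packit, using made-up file names.

import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    root = Path(tmp)
    (root / "tests").mkdir()
    (root / "tests" / "data.spec").touch()
    (root / "subdir").mkdir()
    (root / "subdir" / "package.spec").touch()

    # prefer top-level *.spec files, fall back to a recursive search
    found = [p.relative_to(root) for p in root.glob("*.spec")] or [
        p.relative_to(root) for p in root.rglob("*.spec")
    ]
    # drop anything whose first path component is an excluded directory
    found = [f for f in found if f.parts[0] not in {"tests"}]
    print(found[0] if found else None)   # -> subdir/package.spec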
https://github.com/packit/packit/blob/a49247da721162c4e496e609f5ef4bfcccbb4a7f/packit/config/package_config.py#L380-L400
import json import logging from pathlib import Path from typing import Optional, List, Dict, Union, Set from ogr.abstract import GitProject from yaml import safe_load from packit.actions import ActionName from packit.config.common_package_config import CommonPackageConfig from packit.config.job_config import JobConfig, get_default_jobs, JobType from packit.config.notifications import NotificationsConfig from packit.config.sources import SourcesItem from packit.sync import SyncFilesItem from packit.constants import CONFIG_FILE_NAMES from packit.exceptions import PackitConfigException logger = logging.getLogger(__name__) class PackageConfig(CommonPackageConfig): def __init__( self, config_file_path: Optional[str] = None, specfile_path: Optional[str] = None, synced_files: Optional[List[SyncFilesItem]] = None, jobs: Optional[List[JobConfig]] = None, dist_git_namespace: str = None, upstream_project_url: str = None, upstream_package_name: str = None, downstream_project_url: str = None, downstream_package_name: str = None, dist_git_base_url: str = None, create_tarball_command: List[str] = None, current_version_command: List[str] = None, actions: Dict[ActionName, Union[str, List[str]]] = None, upstream_ref: Optional[str] = None, allowed_gpg_keys: Optional[List[str]] = None, create_pr: bool = True, sync_changelog: bool = False, spec_source_id: str = "Source0", upstream_tag_template: str = "{version}", archive_root_dir_template: str = "{upstream_pkg_name}-{version}", patch_generation_ignore_paths: List[str] = None, patch_generation_patch_id_digits: int = 4, notifications: Optional[NotificationsConfig] = None, copy_upstream_release_description: bool = False, sources: Optional[List[SourcesItem]] = None, ): super().__init__( config_file_path=config_file_path, specfile_path=specfile_path, synced_files=synced_files, dist_git_namespace=dist_git_namespace, upstream_project_url=upstream_project_url, upstream_package_name=upstream_package_name, downstream_project_url=downstream_project_url, downstream_package_name=downstream_package_name, dist_git_base_url=dist_git_base_url, create_tarball_command=create_tarball_command, current_version_command=current_version_command, actions=actions, upstream_ref=upstream_ref, allowed_gpg_keys=allowed_gpg_keys, create_pr=create_pr, sync_changelog=sync_changelog, spec_source_id=spec_source_id, upstream_tag_template=upstream_tag_template, archive_root_dir_template=archive_root_dir_template, patch_generation_ignore_paths=patch_generation_ignore_paths, patch_generation_patch_id_digits=patch_generation_patch_id_digits, notifications=notifications, copy_upstream_release_description=copy_upstream_release_description, sources=sources, ) self.jobs: List[JobConfig] = jobs or [] if current_version_command: logger.warning( "current_version_command is deprecated and will be removed. Please " "switch to the `get-current-version` action: https://packit.dev/docs/actions/" ) if create_tarball_command: logger.warning( "create_tarball_command is deprecated and will be removed. 
Please" "switch to the `create-archive` action: https://packit.dev/docs/actions/" ) def __repr__(self): return ( "PackageConfig(" f"config_file_path='{self.config_file_path}', " f"specfile_path='{self.specfile_path}', " f"synced_files='{self.synced_files}', " f"jobs='{self.jobs}', " f"dist_git_namespace='{self.dist_git_namespace}', " f"upstream_project_url='{self.upstream_project_url}', " f"upstream_package_name='{self.upstream_package_name}', " f"downstream_project_url='{self.downstream_project_url}', " f"downstream_package_name='{self.downstream_package_name}', " f"dist_git_base_url='{self.dist_git_base_url}', " f"create_tarball_command='{self.create_tarball_command}', " f"current_version_command='{self.current_version_command}', " f"actions='{self.actions}', " f"upstream_ref='{self.upstream_ref}', " f"allowed_gpg_keys='{self.allowed_gpg_keys}', " f"create_pr='{self.create_pr}', " f"sync_changelog='{self.sync_changelog}', " f"spec_source_id='{self.spec_source_id}', " f"upstream_tag_template='{self.upstream_tag_template}', " f"archive_root_dir_template={self.archive_root_dir_template}', " f"patch_generation_ignore_paths='{self.patch_generation_ignore_paths}', " f"patch_generation_patch_id_digits='{self.patch_generation_patch_id_digits}', " f"copy_upstream_release_description='{self.copy_upstream_release_description}'," f"sources='{self.sources}')" ) @classmethod def get_from_dict( cls, raw_dict: dict, config_file_path: str = None, repo_name: str = None, spec_file_path: str = None, ) -> "PackageConfig": from packit.schema import PackageConfigSchema if config_file_path and not raw_dict.get("config_file_path", None): raw_dict.update(config_file_path=config_file_path) if "jobs" not in raw_dict: raw_dict["jobs"] = get_default_jobs() if not raw_dict.get("specfile_path", None): if spec_file_path: raw_dict["specfile_path"] = spec_file_path if not raw_dict.get("upstream_package_name", None) and repo_name: raw_dict["upstream_package_name"] = repo_name if not raw_dict.get("downstream_package_name", None) and repo_name: raw_dict["downstream_package_name"] = repo_name package_config = PackageConfigSchema().load(raw_dict) if not package_config.specfile_path and not all( [ job.type == JobType.tests and job.metadata.skip_build for job in package_config.jobs ] ): raise PackitConfigException("Spec file was not found!") return package_config def get_copr_build_project_value(self) -> Optional[str]: projects_list = [ job.metadata.project for job in self.jobs if job.type == JobType.copr_build and job.metadata.project ] if not projects_list: return None if len(set(projects_list)) > 1: logger.warning( f"You have defined multiple copr projects to build in, we are going " f"to pick the first one: {projects_list[0]}, reorder the job definitions" f" if this is not the one you want." 
) return projects_list[0] def get_propose_downstream_dg_branches_value(self) -> Optional[Set]: for job in self.jobs: if job.type == JobType.propose_downstream: return job.metadata.dist_git_branches return set() def __eq__(self, other: object): if not isinstance(other, self.__class__): return NotImplemented logger.debug(f"our configuration:\n{self.__dict__}") logger.debug(f"the other configuration:\n{other.__dict__}") return ( self.specfile_path == other.specfile_path and self.synced_files == other.synced_files and self.jobs == other.jobs and self.dist_git_namespace == other.dist_git_namespace and self.upstream_project_url == other.upstream_project_url and self.upstream_package_name == other.upstream_package_name and self.downstream_project_url == other.downstream_project_url and self.downstream_package_name == other.downstream_package_name and self.dist_git_base_url == other.dist_git_base_url and self.current_version_command == other.current_version_command and self.create_tarball_command == other.create_tarball_command and self.actions == other.actions and self.allowed_gpg_keys == other.allowed_gpg_keys and self.create_pr == other.create_pr and self.sync_changelog == other.sync_changelog and self.spec_source_id == other.spec_source_id and self.upstream_tag_template == other.upstream_tag_template and self.copy_upstream_release_description == other.copy_upstream_release_description and self.sources == other.sources ) def find_packit_yaml( *directory: Union[Path, str], try_local_dir_first: bool = False, try_local_dir_last: bool = False, ) -> Path: directories = [Path(config_dir) for config_dir in directory] cwd = Path.cwd() if try_local_dir_first and try_local_dir_last: logger.error( "Ambiguous usage of 'try_local_dir_first' and 'try_local_dir_last'." 
) if try_local_dir_first: if cwd in directories: directories.remove(cwd) directories.insert(0, cwd) if try_local_dir_last: if cwd in directories: directories.remove(cwd) directories.append(cwd) for config_dir in directories: for config_file_name in CONFIG_FILE_NAMES: config_file_name_full = config_dir / config_file_name if config_file_name_full.is_file(): logger.debug(f"Local package config found: {config_file_name_full}") return config_file_name_full raise PackitConfigException("No packit config found.") def load_packit_yaml(config_file_path: Path) -> Dict: try: return safe_load(config_file_path.read_text()) except Exception as ex: logger.error(f"Cannot load package config {config_file_path}.") raise PackitConfigException(f"Cannot load package config: {ex!r}.") def get_local_package_config( *directory: Union[Path, str], repo_name: Optional[str] = None, try_local_dir_first: bool = False, try_local_dir_last: bool = False, package_config_path: Optional[str] = None, ) -> PackageConfig: if package_config_path: config_file_name = Path(package_config_path) else: config_file_name = find_packit_yaml( *directory, try_local_dir_first=try_local_dir_first, try_local_dir_last=try_local_dir_last, ) loaded_config = load_packit_yaml(config_file_name) return parse_loaded_config( loaded_config=loaded_config, config_file_path=config_file_name.name, repo_name=repo_name, spec_file_path=str(get_local_specfile_path(config_file_name.parent)), ) def get_package_config_from_repo( project: GitProject, ref: str = None, spec_file_path: Optional[str] = None ) -> Optional[PackageConfig]: for config_file_name in CONFIG_FILE_NAMES: try: config_file_content = project.get_file_content( path=config_file_name, ref=ref ) except FileNotFoundError: pass else: logger.debug( f"Found a config file {config_file_name!r} " f"on ref {ref!r} " f"of the {project.full_repo_name!r} repository." ) break else: logger.warning( f"No config file ({CONFIG_FILE_NAMES}) found on ref {ref!r} " f"of the {project.full_repo_name!r} repository." ) return None try: loaded_config = safe_load(config_file_content) except Exception as ex: logger.error(f"Cannot load package config {config_file_name!r}. {ex}") raise PackitConfigException( f"Cannot load package config {config_file_name!r}. {ex}" ) if not spec_file_path: logger.warning(f"Spec file path is not specified in {config_file_name}.") spec_file_path = get_specfile_path_from_repo(project=project, ref=ref) return parse_loaded_config( loaded_config=loaded_config, config_file_path=config_file_name, repo_name=project.repo, spec_file_path=spec_file_path, ) def parse_loaded_config( loaded_config: dict, config_file_path: Optional[str] = None, repo_name: Optional[str] = None, spec_file_path: Optional[str] = None, ) -> PackageConfig: logger.debug(f"Package config:\n{json.dumps(loaded_config, indent=4)}") try: return PackageConfig.get_from_dict( raw_dict=loaded_config, config_file_path=config_file_path, repo_name=repo_name, spec_file_path=spec_file_path, ) except Exception as ex: logger.error(f"Cannot parse package config. {ex}.") raise PackitConfigException(f"Cannot parse package config: {ex!r}.")
MIT License
jameskeaveney/elecsus
elecsus/libs/numberDensityEqs.py
numDenK
python
def numDenK(T):
    if T < 336.8:
        p = 10.0**(4.961 - 4646.0/T)
    else:
        p = 10.0**(8.233 - 4693.0/T - 1.2403*log10(T))
    NumberDensity = 101325.0*p/(kB*T)
    return NumberDensity
Potassium number density
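A worked numeric example of the same formula, kept standalone (kB hard-coded rather than imported from FundamentalConstants), for potassium at 350 K on the T > 336.8 K branch of numDenK.

from math import log10

kB = 1.380649e-23          # Boltzmann constant, J/K

T = 350.0
p_atm = 10.0**(8.233 - 4693.0/T - 1.2403*log10(T))   # vapour pressure in atm
n = 101325.0 * p_atm / (kB * T)                       # ideal gas: N/V = P/(kB*T)
print(f"{n:.3e} atoms per m^3")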
https://github.com/jameskeaveney/elecsus/blob/10480218254270390fdb935a8bbf9ff95337dada/elecsus/libs/numberDensityEqs.py#L63-L70
from __future__ import (division, print_function, absolute_import)

from FundamentalConstants import kB
from numpy import log10

def CalcNumberDensity(T, atom):
    if atom in ['Rb85', 'Rb87', 'Rb']:
        return numDenRb(T)
    elif atom == 'Cs':
        return numDenCs(T)
    elif atom in ['K', 'K39', 'K40', 'K41']:
        return numDenK(T)
    elif atom == 'Na':
        return numDenNa(T)

def numDenRb(T):
    if T < 312.46:
        p = 10.0**(4.857 - 4215./T)
    else:
        p = 10.0**(8.316 - 4275./T - 1.3102*log10(T))
    NumberDensity = 101325.0*p/(kB*T)
    return NumberDensity
Apache License 2.0
facebookresearch/protein-ebm
mmcif_utils.py
rotate_dihedral
python
def rotate_dihedral(angles, par, child, pos, pos_exist, chis, chi_valid):
    angles = angles / 180 * np.pi
    chis = chis / 180 * np.pi
    pos_orig = pos
    pos = pos.copy()

    for i in range(4):
        p2 = pos[:, 4 + i]
        index = np.tile(4 + i, (pos.shape[0], 1)) + par[:, 4 + i : 5 + i]
        p1 = np.take_along_axis(pos, index[:, :, None], axis=1)[:, 0, :]
        rot_angle = chis[:, i] - angles[:, 4 + i]

        diff_vec = p2 - p1
        diff_vec_normalize = diff_vec / (
            np.linalg.norm(diff_vec, axis=1, keepdims=True) + 1e-10)

        rot_points = pos[:, 5 + i :].copy() - p1[:, None, :]
        par_points = (rot_points * diff_vec_normalize[:, None, :]).sum(
            axis=2, keepdims=True
        ) * diff_vec_normalize[:, None, :]
        perp_points = rot_points - par_points
        perp_points_norm = np.linalg.norm(perp_points, axis=2, keepdims=True) + 1e-10
        perp_points_normalize = perp_points / perp_points_norm
        a3 = np.cross(diff_vec_normalize[:, None, :], perp_points_normalize)

        rot_points = (
            perp_points * np.cos(rot_angle)[:, None, None]
            + np.sin(rot_angle)[:, None, None] * a3 * perp_points_norm
            + par_points
            + p1[:, None, :]
        )
        rot_points[np.isnan(rot_points)] = 10000

        first_term = rot_points * chi_valid[:, i : i + 1, None]
        second_term = pos[:, 5 + i :] * (1 - chi_valid[:, i : i + 1, None])
        pos[:, 5 + i :] = first_term + second_term

    return pos
Rotate a protein representation by a set of dihedral angles.

N represents the number of amino acids in the batch, 20 is the number of atoms.

angles: N x 20 set of angles to rotate each atom by
par: A N x 20 encoding of the relative offset of the parent of each atom.
    For example, the amino acid glycine would be represented as [-18 -1 -1 -1 0, ...]
child: A N x 20 encoding of the child of each atom. For example, the amino acid
    glycine would be represented as [1 1 18 0 0 0 ..]
pos_exist: A N x 20 mask encoding of which atoms are valid for each amino acid,
    so for example the amino acid glycine would be represented as [1 1 1 1 0 0 ...]
chis: A N x 20 representation of the existing chi angles
chi_valid: A N x 5 mask encoding which chi angles are valid, so for example
    glycine would be represented as [0 0 0 0 0]
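The per-chi update above is an axis-angle rotation performed by splitting each point into components parallel and perpendicular to the bond axis. The scalar NumPy sketch below shows that decomposition for a single point about the z-axis; all values are arbitrary and chosen only to make the result easy to check.

import numpy as np

p1 = np.array([0.0, 0.0, 0.0])
p2 = np.array([0.0, 0.0, 1.0])            # rotation axis runs from p1 to p2
point = np.array([1.0, 0.0, 0.5])
theta = np.pi / 2

axis = (p2 - p1) / np.linalg.norm(p2 - p1)
rel = point - p1
par = rel.dot(axis) * axis                # component along the axis (unchanged)
perp = rel - par                          # component that actually rotates
third = np.cross(axis, perp)              # completes the rotation basis
rotated = p1 + par + np.cos(theta) * perp + np.sin(theta) * third
print(rotated)                            # -> approx [0, 1, 0.5]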
https://github.com/facebookresearch/protein-ebm/blob/7e14adbe060031d3de185d4243b4b54fc8f46ae7/mmcif_utils.py#L453-L513
import collections import os import os.path as osp import pickle import random from itertools import product from multiprocessing import Pool import numpy as np import pandas as pd import gemmi from amino_acid_config import kvs, res_atoms, res_children, res_chis, res_parents from config import MMCIF_PATH, ROTAMER_LIBRARY_PATH from constants import atom_names, residue_names from math_utils import rotate_v1_v2, rotate_v1_v2_vec def parse_dense_format(node_embed): start = 0 pars = [] childs = [] pos = [] pos_exists = [] residues = [] chis_valid = [] while start < node_embed.shape[0]: idx = int(node_embed[start, 0]) residue = residue_names[idx] par = res_parents[residue].copy() child = res_children[residue].copy() n = len(par) pos_exist = [1] * n + [0] * (20 - n) par = par + [0] * (20 - n) child = child + [0] * (20 - len(child)) pos_temp = np.concatenate( [node_embed[start : start + n, -3:], np.zeros((20 - n, 3))], axis=0 ) if start + n <= node_embed.shape[0]: pars.append(par) childs.append(child) pos.append(pos_temp) pos_exists.append(pos_exist) chis = res_chis[residue] chis_valid.append([1] * len(chis) + [0] * (20 - len(chis))) residues.append(residue.lower()) if not (node_embed[start : start + n, 0] == idx).all(): return None, None, None, None, None, None start = start + n if len(pos) < 2: return None, None, None, None, None, None pars, childs, pos, pos_exists, chis_valid = ( np.array(pars), np.array(childs), np.stack(pos, axis=0), np.array(pos_exists), np.array(chis_valid), ) pars[0, 0] = 0 childs[-1, 2] = 0 return pars, childs, pos, pos_exists, residues, chis_valid def reencode_dense_format(node_embed, pos_new, pos_exist): node_embed_new = node_embed.copy() pos_mask = pos_exist.astype(np.bool) elem_num = pos_mask.sum() node_embed_new[:elem_num, -3:] = pos_new[pos_mask] return node_embed_new def cif_to_embed(cif_file, ix=None, parse_skip=False): st = gemmi.read_structure(cif_file) results = [] skips = [] for model in st: for i, chain in enumerate(model): if (ix is not None) and (ix != i): continue atoms = [] node_embeddings = [] for j, residue in enumerate(chain): translation = [] if residue.name not in residue_names: if residue.name in ["DA", "DC", "DG", "DT"]: return None, None else: continue residue_counter = 0 namino_elements = len(res_parents[residue.name]) amino_atoms = res_atoms[residue.name] residue_atoms = [] residue_embed = [] node_embed = parse_residue_embed(residue) if len(node_embed) == 0: skips.append(j) node_embeddings.extend(node_embed) node_embeddings = np.array(node_embeddings) result = (node_embeddings,) results.append(result) if parse_skip: return st, results, skips else: return st, results def vis_cif(cif_path, im_path): import pymol from pymol import cmd cmd.load(cif_path, "mov") cmd.zoom() cmd.png(im_path, 300, 200) def compute_chi_angle_st(st, ix): angles = [] num = int(ix) chain_counter = 0 for model in st: for chain in model: if num != chain_counter: chain_counter += 1 continue else: for residue in chain: if residue.name in residue_names: chi_angles = compute_chi_angle_residue(residue) if chi_angles is not None: angles.append(chi_angles) return angles def compute_chi_angle_residue(residue): chi_angles_atoms = kvs[residue.name] angles = [] try: for chi_angles_atom in chi_angles_atoms: atoms = chi_angles_atom.split("-") pos = [] for atom in atoms: if atom == "CD": if "CD" not in residue: atom = residue["CD1"] else: atom = residue[atom] else: atom = residue[atom] pos.append((atom.pos.x, atom.pos.y, atom.pos.z)) pos = np.array(pos) diff_vec = pos[2] - pos[1] 
diff_vec_normalize = diff_vec / np.linalg.norm(diff_vec) diff_bot = pos[0] - pos[1] diff_top = pos[3] - pos[2] diff_bot = diff_bot - diff_bot.dot(diff_vec_normalize) * diff_vec_normalize diff_top = diff_top - diff_top.dot(diff_vec_normalize) * diff_vec_normalize diff_bot_normalize = diff_bot / np.linalg.norm(diff_bot) diff_top_normalize = diff_top / np.linalg.norm(diff_top) sin = (np.cross(diff_bot_normalize, diff_top_normalize) * diff_vec_normalize).sum( axis=1 ) cos = diff_bot_normalize.dot(diff_top_normalize) angle = np.arctan2(sin, cos) angles.append(angle) except Exception as e: return None return angles def parse_cif(path): base_folder, f = osp.split(path) base_name, *junk = f.split(".") st, infos = cif_to_embed(path) if infos is not None: for i, info in enumerate(infos): pickle_file = osp.join(base_folder, "{}.{}.p".format(base_name, i)) pickle.dump(info, open(pickle_file, "wb")) return None def script_parse_cif(): mmcif_path = osp.join(MMCIF_PATH, "mmCIF") files = [] dirs = os.listdir(mmcif_path) pool = Pool() for d in dirs: directory = osp.join(mmcif_path, d) d_files = os.listdir(directory) files_tmp = [osp.join(directory, d_file) for d_file in d_files if ".cif" in d_file] files.extend(files_tmp) pool.map(parse_cif, files) def clean_cif(): mmcif_path = osp.join(MMCIF_PATH, mmCIF) dirs = os.listdir(mmcif_path) for d in dirs: directory = osp.join(mmcif_path, d) d_files = os.listdir(directory) files_tmp = [osp.join(directory, d_file) for d_file in d_files if ".p" in d_file] for f in files_tmp: os.remove(f) def recorrect_name(name): if (name[-1]).isdigit() and name[-1] == "1": return name[:-1] elif not (name[-1].isdigit()): return name + "1" else: return name def _parse_residue(residue): atoms = res_atoms[residue.name] parents = res_parents[residue.name] children = res_children[residue.name] chis = res_chis[residue.name] pos, node_embeds = [], [] residue_counter = 0 for atom in atoms: if atom in residue: atom = residue[atom] elif recorrect_name(atom) in residue: atom = residue[recorrect_name(atom)] else: return None pos.append((atom.pos.x, atom.pos.y, atom.pos.z)) node_embeds.append( ( residue_names.index(residue.name), atom_names.index(atom.element.name), residue_counter, atom.pos.x, atom.pos.y, atom.pos.z, ) ) residue_counter = residue_counter + 1 exist = [1] * len(parents) + [0] * (20 - len(parents)) parents = parents + [0] * (20 - len(parents)) children = children + [0] * (20 - len(children)) pos_fill = np.zeros((20, 3)) pos_fill[: len(pos)] = pos chis = [1] * len(chis) + [0] * (5 - len(chis)) return parents, children, pos_fill, exist, chis, node_embeds def parse_residue(residue): ret = _parse_residue(residue, 0) if ret: parents, children, pos_fill, exist, chis, _, _ = ret return parents, children, pos_fill, exist, chis else: return None, None, None, None, None def parse_residue_embed(residue): ret = _parse_residue(residue) if ret: _, _, _, _, _, node_embeds = ret return node_embeds else: return [] def flatten(arr): return arr.reshape((-1, *arr.shape[2:])) def rotate_dihedral_fast(a, p, c, pos, pos_e, ch, chv, idx): pos = pos.copy() ai, pi, ci, pos_i, pos_ei, chi, chvi = ( a[idx - 1 : idx + 1], p[idx - 1 : idx + 1], c[idx - 1 : idx + 1], pos[idx - 1 : idx + 1], pos_e[idx - 1 : idx + 1], ch[idx - 1 : idx + 1], chv[idx - 1 : idx + 1], ) pnew = rotate_dihedral(ai, pi, ci, pos_i, pos_ei, chi, chvi) pos[idx - 1 : idx + 1] = pnew return pos
MIT License
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/generic_s3_input.py
GenericS3Input.ssl
python
def ssl(self):
    return self._ssl
Gets the ssl of this GenericS3Input.

Controls whether SSL is used or not.

:return: The ssl of this GenericS3Input.
:rtype: bool
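Assuming the bitmovin-api-sdk package is installed, reading the property back after construction might look like the following; the keyword names come from the constructor shown below, and the bucket, host and port values are placeholders.

from bitmovin_api_sdk.models.generic_s3_input import GenericS3Input

s3_input = GenericS3Input(
    bucket_name="my-bucket",
    host="s3.example.com",
    port=443,
    ssl=True,
)
print(s3_input.ssl)   # -> True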
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/generic_s3_input.py#L180-L189
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.input import Input from bitmovin_api_sdk.models.s3_signature_version import S3SignatureVersion import pprint import six class GenericS3Input(Input): @poscheck_model def __init__(self, id_=None, name=None, description=None, created_at=None, modified_at=None, custom_data=None, bucket_name=None, host=None, port=None, ssl=None, signature_version=None, access_key=None, secret_key=None): super(GenericS3Input, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data) self._bucket_name = None self._host = None self._port = None self._ssl = None self._signature_version = None self._access_key = None self._secret_key = None self.discriminator = None if bucket_name is not None: self.bucket_name = bucket_name if host is not None: self.host = host if port is not None: self.port = port if ssl is not None: self.ssl = ssl if signature_version is not None: self.signature_version = signature_version if access_key is not None: self.access_key = access_key if secret_key is not None: self.secret_key = secret_key @property def openapi_types(self): types = {} if hasattr(super(GenericS3Input, self), 'openapi_types'): types = getattr(super(GenericS3Input, self), 'openapi_types') types.update({ 'bucket_name': 'string_types', 'host': 'string_types', 'port': 'int', 'ssl': 'bool', 'signature_version': 'S3SignatureVersion', 'access_key': 'string_types', 'secret_key': 'string_types' }) return types @property def attribute_map(self): attributes = {} if hasattr(super(GenericS3Input, self), 'attribute_map'): attributes = getattr(super(GenericS3Input, self), 'attribute_map') attributes.update({ 'bucket_name': 'bucketName', 'host': 'host', 'port': 'port', 'ssl': 'ssl', 'signature_version': 'signatureVersion', 'access_key': 'accessKey', 'secret_key': 'secretKey' }) return attributes @property def bucket_name(self): return self._bucket_name @bucket_name.setter def bucket_name(self, bucket_name): if bucket_name is not None: if not isinstance(bucket_name, string_types): raise TypeError("Invalid type for `bucket_name`, type has to be `string_types`") self._bucket_name = bucket_name @property def host(self): return self._host @host.setter def host(self, host): if host is not None: if not isinstance(host, string_types): raise TypeError("Invalid type for `host`, type has to be `string_types`") self._host = host @property def port(self): return self._port @port.setter def port(self, port): if port is not None: if not isinstance(port, int): raise TypeError("Invalid type for `port`, type has to be `int`") self._port = port @property
MIT License
pmarkowsky/dash
app/assembly_store.py
AssemblyStore.DeleteRow
python
def DeleteRow(self, index):
    self.rows.pop(index)

    for i in xrange(0, len(self.rows)):
        self.rows[i].index = i

    self.UpdateOffsetsAndAddresses()
Delete a row in the assembly store.

Args:
    index: a positive integer index of the row data to delete.

Returns:
    N/A
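The delete-then-renumber bookkeeping can be shown in isolation with a throwaway Row class standing in for the real RowData/AssemblyStore types.

class Row:
    def __init__(self, index):
        self.index = index

rows = [Row(i) for i in range(5)]
rows.pop(2)                           # delete the row at index 2
for i, row in enumerate(rows):        # renumber everything that remains
    row.index = i
print([row.index for row in rows])    # -> [0, 1, 2, 3]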
https://github.com/pmarkowsky/dash/blob/1743f7c8da100c92f1a50d5209719debaa50b357/app/assembly_store.py#L352-L368
import binascii import cPickle import struct X86 = 'x86' X64 = 'x64' ARM = 'arm' ARM64 = 'arm64' MIPS = 'mips' class RowData(object): def __init__(self, offset, label, address, opcode, mnemonic, comment, index=0, in_use=False, stack_delta=0): self.offset = offset self.label = label self.address = address self.opcode = opcode self.mnemonic = mnemonic self.comment = comment self.index = index self.in_use = in_use self.error = False self.targets = [0] self.is_a_data_defintion_inst = False self.is_branch_or_call = False self.stack_delta = stack_delta def ToDict(self): error_st = 0 if self.error: error_st = 1 return {'offset': self.offset, 'label': self.label, 'address': self.DisplayAddress(), 'opcode': self.DisplayOpcode(), 'mnemonic': self.mnemonic, 'comment': self.comment, 'index': self.index, 'error': error_st, 'in_use': self.in_use, 'targets': self.targets, 'is_a_data_definition_inst': self.is_a_data_defintion_inst, 'is_a_branch_or_call': self.is_branch_or_call} def SetComment(self, comment): try: self.comment = comment.encode('ascii') except UnicodeDecodeError: pass def SetLabel(self, label): try: self.label = label.encode('ascii').upper() except UnicodeDecodeError: pass def SetAddress(self, address): try: if address.startswith('0x'): self.address = int(address, 16) else: self.address = int(address) except ValueError: pass def DisplayAddress(self): return hex(self.address).replace('L', '') def SetOpcode(self, hex_str): try: self.opcode = binascii.unhexlify(hex_str.replace(' ', '')) self.in_use = True except TypeError: self.in_use = False self.opcode = hex_str self.mnemonic = '<INVALID OPCODE SUPPLIED>' self.error = True def SetMnemonic(self, mnemonic): if mnemonic == '': self.opcodes = '' self.in_use = False return self.mnemonic = mnemonic normalized_mnemonic = mnemonic.lower().strip() if normalized_mnemonic.startswith('j') or normalized_mnemonic.startswith('call'): self.is_branch_or_call = True else: self.is_branch_or_call = False if normalized_mnemonic.split()[0] in ('db', 'dw', 'dd', 'dq'): self.is_a_data_defintion_inst = True else: self.is_a_data_defintion_inst = False new_mnemonic = self.mnemonic.split() self.mnemonic = '' self.mnemonic += new_mnemonic[0].upper() + ' ' + ''.join(new_mnemonic[1:]) self.in_use = True def DisplayOpcode(self): original_str = binascii.hexlify(self.opcode) hex_str = '' for i in xrange(len(original_str)): hex_str += original_str[i] if i % 2 == 1: hex_str += ' ' return hex_str.upper().strip() class AssemblyStoreError(Exception): pass class AssemblyStore(object): _instance = None def __new__(cls, *args, **kwargs): if not cls._instance: cls._instance = super(AssemblyStore, cls).__new__(cls, *args, **kwargs) return cls._instance def __init__(self): self.bits = 32 self.display_labels = True self.rows = [] self.filter_bytes = "" self.cfg = None self.labels = set([]) self.AddRows(20) def DeepCopyRow(self, index): if index < 0 or index >= len(self.rows): raise AssemblyStoreError("Invalid row index %s" % str(index)) row = self.rows[index] return cPickle.loads(cPickle.dumps(row, -1)) def SetBits(self, bits): if bits in (16, 32, 64): self.bits = bits return True else: return False def SetEndianess(self, little=True): self.little_endian = little def Reset(self): self.cfg = None self.rows = [] def CreateRowFromCapstoneInst(self, index, inst): mnemonic = "%s %s" % (inst.mnemonic.upper(), inst.op_str) row = RowData(0, '', inst.address, str(inst.bytes), mnemonic, '', index, in_use=True) self.InsertRowAt(index, row) self.UpdateOffsetsAndAddresses() def InsertDBRowAt(self, 
address, index, byte): mnemonic = "db 0x%02x" % ord(byte) row = RowData(0, '', address, byte, mnemonic, '', index, in_use=True) self.InsertRowAt(index, row) def InsertDBMultibyteRowAt(self, address, index, bytes_vals): mnemonic = "db " + ", ".join(map(lambda x: "0x%02x" % ord(x), byte_vals)) row = RowData(0, '', address, chr(byte), mnemonic, '', index, in_use=True) self.InsertRowAt(index, row) return len(byte_vals) def InsertDHRowAt(self, address, index, byte_vals, big_endian=False): if big_endian: val = struct.unpack(">H")[0] else: val = struct.unpack("<H", byte_vals)[0] mnemonic = "dh 0x%04x" % val row = RowData(0, '', address, byte_vals, mnemonic, '', index, in_use=True) self.InsertRowAt(index, row) def InsertDDRowAt(self, address, index, byte_vals, big_endian=False): if big_endian: val = struct.unpack(">I")[0] else: val = struct.unpack("<I", byte_vals)[0] mnemonic = "dd 0x%08x" % val row = RowData(0, '', address, byte_vals, mnemonic, '', index, in_use=True) self.InsertRowAt(index, row) def InsertRowAt(self, index, row): self.rows.insert(index, row) self.rows[index].index = index for i in xrange(index + 1, len(self.rows)): self.rows[i].index = i self.UpdateOffsetsAndAddresses() def AddRows(self, num_rows, starting_index=None): if not starting_index: starting_index = len(self.rows) for i in xrange(num_rows): self.rows.append(RowData(0, '', 0, '', '', '', starting_index)) starting_index += 1 def ContainsLabel(self, row_asm): for label in self.labels: if label in row_asm: return label return None def UpdateRow(self, i, new_row): self.rows[i] = new_row if new_row.label != '' and new_row.label not in self.labels: self.labels.add(new_row.label) self.UpdateOffsetsAndAddresses()
MIT License
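A minimal usage sketch for the RowData helper shown in the context above. It assumes the module is importable as app.assembly_store (inferred from the app/assembly_store.py path) and targets Python 2, since the file uses cPickle and xrange; the address and opcode values are made up.

from app.assembly_store import RowData   # import path assumed from app/assembly_store.py

row = RowData(offset=0, label='', address=0x401000, opcode='', mnemonic='', comment='')
row.SetOpcode('90 90')                    # hex text -> raw bytes; marks the row as in_use
assert row.DisplayOpcode() == '90 90'     # re-hexlified with a space between byte pairs
assert row.DisplayAddress() == '0x401000'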
lilianemomeni/kws-net
model/metric.py
APMeter.reset
python
def reset(self):
    self.scores = torch.FloatTensor(torch.FloatStorage())
    self.targets = torch.LongTensor(torch.LongStorage())
    self.weights = torch.FloatTensor(torch.FloatStorage())
    self.last_precision = None
Resets the meter with empty member variables
https://github.com/lilianemomeni/kws-net/blob/ee45d7beb4ac9c65f08a83c256a147f8a264ba69/model/metric.py#L226-L231
import math import torch import numpy as np from tqdm import tqdm def my_metric(output, target): with torch.no_grad(): pred = torch.argmax(output, dim=1) assert pred.shape[0] == len(target) correct = 0 correct += torch.sum(pred == target).item() return correct / len(target) def my_metric2(output, target, k=3): with torch.no_grad(): pred = torch.topk(output, k, dim=1)[1] assert pred.shape[0] == len(target) correct = 0 for i in range(k): correct += torch.sum(pred[:, i] == target).item() return correct / len(target) def calcEER(Tar,Non,TarKeep='all',NonKeep='all'): if TarKeep=='all': TarKeep = np.asarray([True for i in range(0,Tar.shape[0])]) if NonKeep=='all': NonKeep = np.asarray([True for i in range(0,Non.shape[0])]) Tar = Tar[TarKeep] Non = Non[NonKeep] Tar_2 = [i.item() for i in Tar] Non_2 = [i.item() for i in Non] Mt = np.mean(Tar_2) Mn = np.mean(Non_2) Ns = 500 E = np.zeros((Ns,2)) S = np.linspace(Mn,Mt,Ns) for s in range(0,Ns): E[s,0] = np.sum((Tar_2<S[s]).astype('float'))/Tar.shape[0] E[s,1] = np.sum((Non_2>S[s]).astype('float'))/Non.shape[0] I = np.argmin(np.abs(E[:,0] - E[:,1])) return np.mean(E[I,:]), E[I,0], E[I,1], np.exp(S[I]) def accuracy_orig(output, target, topk=(1,)): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def accuracy(output, target, topk=(1,)): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def PrecRec(output, target, topk=(1,)): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t().float() target = target.float() correct = pred.eq(target.view(1, -1).expand_as(pred)) pred = pred.view(-1) Pred = pred.clone() target = target.view(-1) PR = torch.FloatTensor(2).fill_(0) N = torch.sum(Pred.mul_(target)) sumPr = torch.sum(pred) PR[0] = N/sumPr if sumPr>0 else 1.0 sumTar = torch.sum(target) PR[1] = N/sumTar if sumTar>0 else 1.0 return PR def calcPrRankMultiInstances(TarRank, NonRank, PrIn): N = len(PrIn) PrRank = np.zeros((len(TarRank),N)) t = np.asarray(TarRank) n = np.asarray(NonRank) for k in range(0,t.shape[0]): V = np.concatenate((t,n),axis=0) I = np.concatenate((np.ones(t.shape[0]),np.zeros(n.shape[0])),axis=0) Is = I[V.argsort()[::-1]] for i in range(0,N): PrRank[k][i] = np.any(Is[:PrIn[i]]==1).astype('int32') if k==0: MeanRank = np.mean(np.argwhere(Is.astype('int32')).astype('float'))/I.shape[0] t = np.delete(t,np.argmax(t),0) return PrRank, MeanRank def calcMeanRank(TarRank, NonRank): MeanRank = np.zeros((len(TarRank),1)) t = np.asarray(TarRank) n = np.asarray(NonRank) V = np.concatenate((t,n),axis=0) I = np.concatenate((np.ones(t.shape[0]),np.zeros(n.shape[0])),axis=0) Is = I[V.argsort()[::-1]] m = 0.0 k = 0 for c in range(0,Is.shape[0]): if Is[c] == 1: MeanRank[k] = m/(n.shape[0]+1) k+=1 else: m+=1 if k>len(TarRank): break return np.mean(MeanRank),MeanRank def calcPrRank(TarRank, NonRank, PrIn): N = len(PrIn) PrRank = np.zeros((N,)) t = np.asarray(TarRank) n = np.asarray(NonRank) V = np.concatenate((t,n),axis=0) I = np.concatenate((np.ones(t.shape[0]),np.zeros(n.shape[0])),axis=0) Is = I[V.argsort()[::-1]] for 
i in range(0,N): PrRank[i] = np.any(Is[:PrIn[i]]==1).astype('int32') MeanRank = np.mean(np.argwhere(Is.astype('int32')).astype('float'))/I.shape[0] return PrRank, MeanRank class AverageMeter(object): def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) class Meter(object): def reset(self): pass def add(self, value): pass def value(self): pass class APMeter(Meter): def __init__(self): super().__init__() self.reset()
MIT License
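A hedged usage sketch for APMeter.reset() above. It assumes the repository root is on PYTHONPATH so that model.metric is importable, and that scores/targets are accumulated elsewhere before the meter is cleared.

import torch
from model.metric import APMeter   # import path assumed from model/metric.py

meter = APMeter()                  # __init__ calls reset(), so all buffers start empty
# ... accumulate scores/targets/weights into the meter during evaluation ...
meter.reset()                      # drop everything accumulated so far
assert meter.scores.numel() == 0 and meter.targets.numel() == 0
assert meter.last_precision is None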
rietveld-codereview/rietveld
codereview/decorators.py
issue_editor_required
python
def issue_editor_required(func):
    @login_required
    @issue_required
    def issue_editor_wrapper(request, *args, **kwds):
        if not request.issue.edit_allowed:
            return HttpTextResponse(
                'You do not have permission to edit this issue', status=403)
        return func(request, *args, **kwds)
    return issue_editor_wrapper
Decorator that processes the issue_id argument and insists the user has permission to edit it.
https://github.com/rietveld-codereview/rietveld/blob/82e415f6a291c58c714d3869c7a1de818546c7d5/codereview/decorators.py#L107-L119
import collections import functools import logging import mimetypes import urllib import json from . import models from .responses import HttpTextResponse, respond from google.appengine.api import users from django.conf import settings as django_settings from django.http import HttpResponse, HttpResponseRedirect STATUS_CODE = object() def access_control_allow_origin_star(func): def allow_origin_access_star_wrapper(request, *args, **kwds): response = func(request, *args, **kwds) response["Access-Control-Allow-Origin"] = "*" return response return allow_origin_access_star_wrapper def admin_required(func): def admin_wrapper(request, *args, **kwds): if request.user is None: return HttpResponseRedirect( users.create_login_url(request.get_full_path().encode('utf-8'))) if not request.user_is_admin: return HttpTextResponse( 'You must be admin in for this function', status=403) return func(request, *args, **kwds) return admin_wrapper def editor_required(func): @login_required def editor_wrapper(request, *args, **kwds): if not request.issue.edit_allowed: return HttpTextResponse('You do not own this issue', status=403) return func(request, *args, **kwds) return editor_wrapper def image_required(func): @patch_required def image_wrapper(request, image_type, *args, **kwds): content_key = None content = None if image_type == "0": content_key = request.patch.content_key elif image_type == "1": content_key = request.patch.patched_content_key if content_key: content = content_key.get() if not content or not content.data: return HttpResponseRedirect(django_settings.MEDIA_URL + "blank.jpg") request.mime_type = mimetypes.guess_type(request.patch.filename)[0] if not request.mime_type or not request.mime_type.startswith('image/'): return HttpResponseRedirect(django_settings.MEDIA_URL + "blank.jpg") request.content = content return func(request, *args, **kwds) return image_wrapper
Apache License 2.0
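A hedged sketch of wrapping a Django view with issue_editor_required. The view name and body are made up for illustration; only the decorator behaviour (a 403 HttpTextResponse when request.issue.edit_allowed is false) comes from the code above.

from codereview.decorators import issue_editor_required
from codereview.responses import HttpTextResponse

@issue_editor_required
def close_issue(request, *args, **kwds):
    # login_required and issue_required have already populated request.issue,
    # and the decorator has verified request.issue.edit_allowed
    request.issue.closed = True            # attribute assumed for illustration
    return HttpTextResponse('OK')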
nasa-jpl/osr-rover-code
ROS/osr/src/roboclaw_wrapper.py
RoboclawWrapper.setup_encoders
python
def setup_encoders(self):
    for motor_name, properties in self.roboclaw_mapping.iteritems():
        if "corner" in motor_name:
            enc_min, enc_max = self.read_encoder_limits(properties["address"], properties["channel"])
            self.encoder_limits[motor_name] = (enc_min, enc_max)
        else:
            self.encoder_limits[motor_name] = (None, None)
            self.rc.ResetEncoders(properties["address"])
Set up the encoders
https://github.com/nasa-jpl/osr-rover-code/blob/353c7310061a19196c0ae5d751dba99745f12689/ROS/osr/src/roboclaw_wrapper.py#L166-L174
import serial import math import rospy from roboclaw import Roboclaw from sensor_msgs.msg import JointState from osr_msgs.msg import CommandDrive, CommandCorner, Status class RoboclawWrapper(object): def __init__(self): rospy.loginfo( "Initializing motor controllers") self.rc = None self.err = [None] * 5 self.address = [] self.current_enc_vals = None self.corner_cmd_buffer = None self.drive_cmd_buffer = None self.roboclaw_mapping = rospy.get_param('~roboclaw_mapping') self.encoder_limits = {} self.establish_roboclaw_connections() self.stop_motors() self.setup_encoders() for address in self.address: self.rc.WriteNVM(address) for address in self.address: self.rc.ReadNVM(address) self.corner_max_vel = 1000 accel_max = 2**15-1 accel_rate = rospy.get_param('/corner_acceleration_factor', 0.8) self.corner_accel = int(accel_max * accel_rate) self.roboclaw_overflow = 2**15-1 accel_max = 2**15-1 accel_rate = rospy.get_param('/drive_acceleration_factor', 0.5) self.drive_accel = int(accel_max * accel_rate) self.velocity_timeout = rospy.Duration(rospy.get_param('/velocity_timeout', 2.0)) self.stop_motors() self.corner_cmd_sub = rospy.Subscriber("/cmd_corner", CommandCorner, self.corner_cmd_cb, queue_size=1) self.drive_cmd_sub = rospy.Subscriber("/cmd_drive", CommandDrive, self.drive_cmd_cb, queue_size=1) self.enc_pub = rospy.Publisher("/encoder", JointState, queue_size=1) self.status_pub = rospy.Publisher("/status", Status, queue_size=1) def run(self): rate = rospy.Rate(8) status = Status() time_last_cmd = rospy.Time.now() idle_ramp = False idle = False counter = 0 while not rospy.is_shutdown(): now = rospy.Time.now() if self.drive_cmd_buffer: drive_fcn = self.send_drive_buffer_velocity drive_fcn(self.drive_cmd_buffer) self.drive_cmd_buffer = None time_last_cmd = now idle_ramp = False idle = False if self.corner_cmd_buffer: self.send_corner_buffer(self.corner_cmd_buffer) self.corner_cmd_buffer = None time_last_cmd = now idle_ramp = False idle = False try: self.read_encoder_values() self.enc_pub.publish(self.current_enc_vals) except AssertionError as read_exc: rospy.logwarn( "Failed to read encoder values") if (counter >= 5): status.battery = self.read_battery() status.temp = self.read_temperatures() status.current = self.read_currents() status.error_status = self.read_errors() counter = 0 if not idle and (now - time_last_cmd > self.velocity_timeout): if not idle_ramp: rospy.loginfo( "Idling: ramping down velocity to zero") idle_ramp = True drive_cmd_buffer = CommandDrive() self.send_drive_buffer_velocity(drive_cmd_buffer) else: rospy.loginfo( "Idling: full stopping motors") self.stop_motors() idle = True time_last_cmd = now self.status_pub.publish(status) counter += 1 rate.sleep() def establish_roboclaw_connections(self): self.rc = Roboclaw(rospy.get_param('/motor_controller/device', "/dev/serial0"), rospy.get_param('/motor_controller/baud_rate', 115200)) self.rc.Open() address_raw = rospy.get_param('motor_controller/addresses') address_list = (address_raw.split(',')) self.address = [None]*len(address_list) for i in range(len(address_list)): self.address[i] = int(address_list[i]) all_connected = True for address in self.address: rospy.logdebug("Attempting to talk to motor controller '{}'".format(address)) version_response = self.rc.ReadVersion(address) connected = bool(version_response[0]) if not connected: rospy.logerr("Unable to connect to roboclaw at '{}'".format(address)) all_connected = False else: rospy.logdebug("Roboclaw version for address '{}': '{}'".format(address, version_response[1])) if 
all_connected: rospy.loginfo("Sucessfully connected to RoboClaw motor controllers") else: raise Exception("Unable to establish connection to one or more of the Roboclaw motor controllers")
Apache License 2.0
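A hedged sketch of the ~roboclaw_mapping ROS parameter that setup_encoders() iterates over. The motor names, addresses and channels below are made-up values; only the corner/drive distinction mirrors the code above.

roboclaw_mapping = {
    "corner_left_front": {"address": 131, "channel": "M1"},    # limits read and cached
    "corner_right_front": {"address": 131, "channel": "M2"},   # limits read and cached
    "drive_left_front": {"address": 128, "channel": "M1"},     # limits set to (None, None), encoder reset
    "drive_right_front": {"address": 128, "channel": "M2"},    # limits set to (None, None), encoder reset
}

Presumably the corner (steering) encoders need absolute limits to bound their travel, while drive encoders only count rotation and can simply be zeroed at startup.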
erigones/esdc-ce
api/mon/backends/zabbix/monitor.py
Zabbix._get_filtered_alerts
python
def _get_filtered_alerts(self, vms=None, nodes=None, **kwargs):
    if vms is None and nodes is None:
        host_ids = None
    else:
        vm_host_ids = (ExternalZabbix.get_cached_hostid(vm) for vm in vms or ())
        node_host_ids = (InternalZabbix.get_cached_hostid(node) for node in nodes or ())
        host_ids = [hostid for hostid in vm_host_ids if hostid] + [hostid for hostid in node_host_ids if hostid]

    return self.ezx.show_alerts(hostids=host_ids, **kwargs)
This is a generator function
https://github.com/erigones/esdc-ce/blob/f83a62d0d430e3c8f9aac23d958583b0efce4312/api/mon/backends/zabbix/monitor.py#L564-L573
from logging import INFO, WARNING, ERROR, DEBUG, getLogger from api.decorators import catch_exception from api.mon.backends.abstract import AbstractMonitoringBackend, LOG from api.mon.backends.zabbix.containers import (ZabbixHostGroupContainer, ZabbixUserContainer, ZabbixUserGroupContainer, ZabbixActionContainer, ZabbixTemplateContainer) from api.mon.backends.zabbix.base import ZabbixBase from api.mon.backends.zabbix.internal import InternalZabbix from api.mon.backends.zabbix.external import ExternalZabbix from gui.models import User, AdminPermission from vms.models import DefaultDc logger = getLogger(__name__) __ZABBIX__ = {} def get_zabbix(dc, **kwargs): global __ZABBIX__ if dc.id in __ZABBIX__: return __ZABBIX__[dc.id] zx = Zabbix(dc, **kwargs) if zx.connected: __ZABBIX__[dc.id] = zx return zx def del_zabbix(dc): global __ZABBIX__ if dc.id in __ZABBIX__: del __ZABBIX__[dc.id] return True return False class Zabbix(AbstractMonitoringBackend): zbx = ZabbixBase def __init__(self, dc, **kwargs): super(Zabbix, self).__init__(dc, **kwargs) if dc.is_default(): default_dc = dc reuse_zapi = True else: default_dc = DefaultDc() dc_settings, dc1_settings = dc.settings, default_dc.settings reuse_zapi = (dc_settings.MON_ZABBIX_SERVER == dc1_settings.MON_ZABBIX_SERVER and dc_settings.MON_ZABBIX_USERNAME == dc1_settings.MON_ZABBIX_USERNAME and dc_settings.MON_ZABBIX_PASSWORD == dc1_settings.MON_ZABBIX_PASSWORD) self.izx = InternalZabbix(default_dc, **kwargs) self._connections = {self.izx.zapi} if reuse_zapi: kwargs['zapi'] = self.izx.zapi self.ezx = ExternalZabbix(dc, **kwargs) if not reuse_zapi: self._connections.add(self.ezx.zapi) def __hash__(self): return hash((self.izx, self.ezx)) @property def enabled(self): return self.izx.enabled and self.ezx.enabled @property def connected(self): return self.izx.connected and self.ezx.connected def is_default_dc_connection_reused(self): return len(self._connections) == 1 def reset_cache(self): self.izx.reset_cache() self.ezx.reset_cache() @catch_exception def task_log_success(self, task_id, obj=None, msg='', detail='', **kwargs): from api.task.utils import task_log_success if obj is None: obj = self.server_class(self.dc) task_log_success(task_id, msg, obj=obj, detail=detail, **kwargs) @catch_exception def task_log_error(self, task_id, obj=None, msg='', detail='', **kwargs): from api.task.utils import task_log_error if obj is None: obj = self.server_class(self.dc) task_log_error(task_id, msg, obj=obj, detail=detail, **kwargs) @classmethod @catch_exception def vm_send_alert(cls, vm, msg, priority=ZabbixBase.HIGH, **kwargs): dc = vm.dc dcs = dc.settings if not (dcs.MON_ZABBIX_ENABLED and vm.is_zabbix_sync_active()): logger.warning('Not sending alert for VM %s, because it has monitoring disabled', vm) return if vm.is_notcreated(): logger.warning('Not sending alert for VM %s, because it is not created', vm) return izx = get_zabbix(dc).izx return izx.send_alert(izx.host_id(vm), msg, priority=priority, **kwargs) @classmethod @catch_exception def node_send_alert(cls, node, msg, priority=ZabbixBase.HIGH, **kwargs): dc = DefaultDc() dcs = dc.settings if not (dcs.MON_ZABBIX_ENABLED and dcs.MON_ZABBIX_NODE_SYNC): logger.warning('Not sending alert for Node %s, because global node monitoring disabled', node) return if node.is_online(): logger.warning('Not sending alert for Node %s, because it is not online', node) return izx = get_zabbix(dc).izx return izx.send_alert(izx.host_id(node), msg, priority=priority, **kwargs) def vm_sla(self, vm_node_history): return 
self.izx.vm_get_sla(vm_node_history) def vm_history(self, vm_host_id, items, zhistory, since, until, **kwargs): return self.izx.get_history((vm_host_id,), items, zhistory, since, until, **kwargs) def vms_history(self, vm_host_ids, items, zhistory, since, until, **kwargs): return self.izx.get_history(vm_host_ids, items, zhistory, since, until, **kwargs) @staticmethod def _vm_disable_sync(zx, vm, log=None): log = log or zx.log hostid = zx.host_info(vm).get('hostid', None) if hostid: log(INFO, 'Zabbix synchronization switched to disabled for VM %s', vm) log(WARNING, 'Deleting Zabbix host ID "%s" for VM %s', hostid, vm) if zx.delete_host(hostid, log=log): log(INFO, 'Deleted Zabbix host ID "%s"', hostid) zx.save_host_info(vm, host={}, log=log) return True else: log(ERROR, 'Could not delete Zabbix host ID "%s"', hostid) return False else: log(INFO, 'Zabbix synchronization disabled for VM %s', vm) return None @staticmethod def _vm_create_host(zx, vm, log=None): log = log or zx.log log(WARNING, 'VM %s is not defined in Zabbix. Creating...', vm) hostid = zx.create_vm_host(vm, log=log) if hostid: log(INFO, 'Created new Zabbix host ID "%s" for VM %s', hostid, vm) zx.save_host_info(vm, log=log) return True else: log(ERROR, 'Could not create new Zabbix host for VM %s', vm) return False @staticmethod def _vm_update_host(zx, vm, host, log=None): log = log or zx.log hostid = host['hostid'] log(DEBUG, 'VM %s already defined in Zabbix as host ID "%s"', vm, hostid) params = zx.diff_vm_host(vm, host, log=log) if params: log(WARNING, 'Zabbix host ID "%s" configuration differs from current VM %s configuration', hostid, vm) log(INFO, 'Updating Zabbix host ID "%s" according to VM %s with following parameters: %s', hostid, vm, params) if zx.update_host(hostid, log=log, **params): log(INFO, 'Updated Zabbix host ID "%s"', hostid) zx.save_host_info(vm, log=log) else: log(ERROR, 'Could not update Zabbix host ID "%s"', hostid) return False else: log(INFO, 'Zabbix host ID "%s" configuration is synchronized with current VM %s configuration', hostid, vm) return True return True @staticmethod def _vm_disable_host(zx, vm, log=None): log = log or zx.log hostid = zx.get_hostid(vm, log=log) if not hostid: log(ERROR, 'Zabbix host for VM %s does not exist!', vm) return False log(WARNING, 'Setting Zabbix host ID "%s" status to unmonitored for VM %s', hostid, vm) if zx.update_host(hostid, log=log, status=zx.HOST_UNMONITORED): log(INFO, 'Updated Zabbix host ID "%s" status to unmonitored', hostid) zx.save_host_info(vm, log=log) return True else: log(ERROR, 'Could not update Zabbix host ID "%s" status to unmonitored', hostid) return False @staticmethod def _vm_delete_host(zx, vm, log=None): log = log or zx.log vm_uuid = zx.host_id(vm) host = zx.get_host(vm_uuid, log=log) if not host: log(WARNING, 'Zabbix host for VM %s does not exist!', vm_uuid) return False hostid = host['hostid'] log(WARNING, 'Deleting Zabbix host ID "%s" for VM %s', hostid, vm_uuid) if zx.delete_host(hostid, log=log): log(INFO, 'Deleted Zabbix host ID "%s"', hostid) return True else: log(ERROR, 'Could not delete Zabbix host ID "%s"', hostid) return False def is_vm_host_created(self, vm): return vm.is_zabbix_sync_active() and self.izx.has_host_info(vm) def vm_sync(self, vm, force_update=False, task_log=LOG): dc_settings = vm.dc.settings result = [] for zx_sync, vm_sync, zx in ((vm.is_zabbix_sync_active(), dc_settings._MON_ZABBIX_VM_SYNC, self.izx), (vm.is_external_zabbix_sync_active(), dc_settings.MON_ZABBIX_VM_SYNC, self.ezx)): log = zx.get_log_fun(task_log) if 
not zx.enabled: log(INFO, 'Monitoring is disabled') result.append(None) continue if zx_sync: if force_update and zx.has_host_info(vm): host = zx.host_info(vm) else: host = zx.get_host(zx.host_id(vm), log=log) if host: result.append(self._vm_update_host(zx, vm, host, log=log)) elif force_update: log(WARNING, 'Could not update zabbix host for VM %s, because it is not defined in Zabbix', vm) result.append(False) else: if vm_sync: result.append(self._vm_create_host(zx, vm, log=log)) else: log(INFO, 'Zabbix synchronization disabled for VM %s in DC %s', vm, vm.dc) result.append(None) else: result.append(self._vm_disable_sync(zx, vm, log=log)) return result def vm_disable(self, vm, task_log=LOG): result = [] izx_log = self.izx.get_log_fun(task_log) ezx_log = self.ezx.get_log_fun(task_log) if self.izx.enabled: if vm.is_zabbix_sync_active(): result.append(self._vm_disable_host(self.izx, vm, log=izx_log)) else: izx_log(INFO, 'Internal zabbix synchronization disabled for VM %s', vm) result.append(None) else: izx_log(INFO, 'Monitoring is disabled') result.append(None) if self.ezx.enabled: if vm.is_external_zabbix_sync_active(): result.append(self._vm_disable_host(self.ezx, vm, log=ezx_log)) else: ezx_log(INFO, 'External zabbix synchronization disabled for VM %s', vm) result.append(None) else: ezx_log(INFO, 'Monitoring is disabled') result.append(None) return result def vm_delete(self, vm, internal=True, external=True, task_log=LOG): result = [] izx_log = self.izx.get_log_fun(task_log) ezx_log = self.ezx.get_log_fun(task_log) if self.izx.enabled: if internal: result.append(self._vm_delete_host(self.izx, vm, log=izx_log)) else: izx_log(INFO, 'Internal zabbix synchronization disabled for VM %s', vm.uuid) result.append(None) else: izx_log(INFO, 'Monitoring is disabled') result.append(None) if self.ezx.enabled: if external: result.append(self._vm_delete_host(self.ezx, vm, log=ezx_log)) else: ezx_log(INFO, 'External zabbix synchronization disabled for VM %s', vm.uuid) result.append(None) else: ezx_log(INFO, 'Monitoring is disabled') result.append(None) return result def node_sla(self, node_hostname, since, until): return self.izx.node_get_sla(node_hostname, since, until) def node_sync(self, node, task_log=LOG): zx = self.izx log = zx.get_log_fun(task_log) host = zx.get_host(zx.host_id(node), log=log) if not zx.enabled: log(INFO, 'Monitoring is disabled') return None if not host: log(WARNING, 'Node %s is not defined in Zabbix. 
Creating...', node) hostid = zx.create_node_host(node, log=log) if hostid: log(INFO, 'Created new Zabbix host ID "%s" for Node %s', hostid, node) zx.save_host_info(node, log=log) its = zx.create_node_service(node) if its: log(INFO, 'Create new Zabbix IT Service ID "%s" for Node %s', its, node) else: log(ERROR, 'Could not create new Zabbix IT Services for Node %s', node) return True else: log(ERROR, 'Could not create new Zabbix host for Node %s', node) return False hostid = host['hostid'] log(DEBUG, 'Node %s already defined in Zabbix as host ID "%s"', node, hostid) params = zx.diff_node_host(node, host, log=log) if params: log(WARNING, 'Zabbix host ID "%s" configuration differs from current Node %s configuration', hostid, node) log(INFO, 'Updating Zabbix host ID "%s" according to Node %s with following parameters: %s', hostid, node, params) old_hostname = host['name'] if zx.update_host(hostid, log=log, **params): log(INFO, 'Updated Zabbix host ID "%s"', hostid) zx.save_host_info(node, log=log) result = True else: log(ERROR, 'Could not update Zabbix host ID "%s"', hostid) result = False if 'name' in params: its = zx.update_node_service(old_hostname, name=params['name']) log(WARNING, 'Node %s hostname changed - updated Zabbix IT Service ID "%s"', node, its) return result log(INFO, 'Zabbix host ID "%s" configuration is synchronized with current Node %s configuration', hostid, node) return True def node_status_sync(self, node, task_log=LOG): zx = self.izx log = zx.get_log_fun(task_log) hostid = zx.get_hostid(node, log=log) if not zx.enabled: log(INFO, 'Monitoring is disabled') return None if not hostid: log(ERROR, 'Zabbix host for Node %s does not exist!', node) return False status = zx.node_host_status(node) status_display = node.get_status_display() log(WARNING, 'Setting Zabbix host ID "%s" status to %s for Node %s', hostid, status_display, node) if zx.update_host(hostid, log=log, status=status): log(INFO, 'Updated Zabbix host ID "%s" status to %s', hostid, status_display) zx.save_host_info(node, log=log) return True else: log(ERROR, 'Could not update Zabbix host ID "%s" status to %s', hostid, status_display) return False def node_delete(self, node, task_log=LOG): zx = self.izx log = zx.get_log_fun(task_log) node_uuid = zx.host_id(node) host = zx.get_host(node_uuid, log=log) if not zx.enabled: log(INFO, 'Monitoring is disabled') return None if not host: log(WARNING, 'Zabbix host for Node %s does not exist!', node_uuid) return False hostid = host['hostid'] name = host['name'] log(WARNING, 'Deleting Zabbix IT Service with name "%s" for Node %s', name, node_uuid) its = zx.delete_node_service(name) if its: log(INFO, 'Deleted Zabbix IT Service ID "%s" for Node %s', its, node_uuid) else: log(ERROR, 'Could not delete Zabbix IT Service with name "%s"', name) log(WARNING, 'Deleting Zabbix host ID "%s" for Node %s', hostid, node_uuid) if zx.delete_host(hostid, log=log): log(INFO, 'Deleted Zabbix host ID "%s"', hostid) result = True else: log(ERROR, 'Could not delete Zabbix host ID "%s"', hostid) result = False zx.reset_cache() return result def node_history(self, node_id, items, zhistory, since, until, items_search=None): return self.izx.get_history((node_id,), items, zhistory, since, until, items_search=items_search) def template_list(self, full=False, extended=False): if full or extended: display_attr = 'as_mgmt_data' else: display_attr = 'name' return [getattr(ztc, display_attr) for ztc in ZabbixTemplateContainer.all(self.ezx.zapi)] def hostgroup_list(self, dc_bound=True, full=False, 
extended=False): if full or extended: display_attr = 'as_mgmt_data' else: display_attr = 'name_without_dc_prefix' return [getattr(zgc, display_attr) for zgc in ZabbixHostGroupContainer.all(self.ezx.zapi, self.dc.name, include_global=True, count_hosts=True, dc_bound=dc_bound)] def hostgroup_detail(self, name, dc_bound=True): if dc_bound: dc_name = self.dc.name else: dc_name = None zbx_name = ZabbixHostGroupContainer.hostgroup_name_factory(dc_name, name) zgc = ZabbixHostGroupContainer.from_zabbix_name(self.ezx.zapi, zbx_name, dc_bound=dc_bound) return zgc.as_mgmt_data def hostgroup_create(self, name, dc_bound=True): if dc_bound: dc_name = self.dc.name else: dc_name = None zbx_name = ZabbixHostGroupContainer.hostgroup_name_factory(dc_name, name) zgc = ZabbixHostGroupContainer.from_mgmt_data(self.ezx.zapi, zbx_name, dc_bound=dc_bound) zgc.create() return zgc.as_mgmt_data def hostgroup_delete(self, name, dc_bound=True): if dc_bound: dc_name = self.dc.name else: dc_name = None zbx_name = ZabbixHostGroupContainer.hostgroup_name_factory(dc_name, name) zgc = ZabbixHostGroupContainer.from_zabbix_name(self.ezx.zapi, zbx_name, dc_bound=dc_bound) zgc.delete() return None
Apache License 2.0
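A hedged sketch of calling the alert filter through the cached wrapper. It assumes a working Zabbix connection and a Django context, and uses only objects visible in the module above.

from api.mon.backends.zabbix.monitor import get_zabbix
from vms.models import DefaultDc

zx = get_zabbix(DefaultDc())            # per-datacenter wrapper, cached after the first call
# vms=None and nodes=None means no host filter; extra kwargs are passed to ezx.show_alerts()
alerts = zx._get_filtered_alerts(vms=None, nodes=None)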
ceph/teuthology
teuthology/repo_utils.py
is_fresh
python
def is_fresh(path):
    if not os.path.exists(path):
        return False
    elif time.time() - os.stat(path).st_mtime < FRESHNESS_INTERVAL:
        return True
    return False
Has this file been modified in the last FRESHNESS_INTERVAL seconds? Returns False if the file does not exist
https://github.com/ceph/teuthology/blob/b35344e81ac507b6cad7c1cae575ce08b9c766f2/teuthology/repo_utils.py#L28-L38
import logging import os import re import shutil import subprocess import time from teuthology import misc from teuthology.util.flock import FileLock from teuthology.config import config from teuthology.contextutil import MaxWhileTries, safe_while from teuthology.exceptions import BootstrapError, BranchNotFoundError, CommitNotFoundError, GitError log = logging.getLogger(__name__) FRESHNESS_INTERVAL = 60 def touch_file(path): out = subprocess.check_output(('touch', path)) if out: log.info(out)
MIT License
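A hedged sketch of the freshness check in use, together with touch_file() from the same module; the marker path is made up.

from teuthology.repo_utils import is_fresh, touch_file

marker = '/tmp/repo.last_fetch'          # made-up marker file
if not is_fresh(marker):
    # ... do the expensive refresh (e.g. a git fetch) here ...
    touch_file(marker)                   # stamp it so calls within the next 60 seconds skip the refresh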
redditsysadmin/scripts
gaehelper/dnslib/dns.py
DNSRecord.question
python
def question(cls,qname,qtype="A",qclass="IN"):
    return DNSRecord(q=DNSQuestion(qname,getattr(QTYPE,qtype),
                                   getattr(CLASS,qclass)))
Shortcut to create question

>>> q = DNSRecord.question("www.google.com")
>>> print(q)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;www.google.com. IN A

>>> q = DNSRecord.question("www.google.com","NS")
>>> print(q)
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: rd; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;www.google.com. IN NS
https://github.com/redditsysadmin/scripts/blob/70e4b96fc6940f3d0073ec4afb4d851fb83e2b57/gaehelper/dnslib/dns.py#L118-L137
from __future__ import print_function import base64,binascii,collections,copy,os.path,random,socket, string,struct,textwrap,time from itertools import chain try: from itertools import zip_longest except ImportError: from itertools import izip_longest as zip_longest from dnslib.bit import get_bits,set_bits from dnslib.bimap import Bimap,BimapError from dnslib.buffer import Buffer,BufferError from dnslib.label import DNSLabel,DNSLabelError,DNSBuffer from dnslib.lex import WordLexer from dnslib.ranges import B,H,I,IP4,IP6,ntuple_range class DNSError(Exception): pass QTYPE = Bimap('QTYPE', {1:'A', 2:'NS', 5:'CNAME', 6:'SOA', 12:'PTR', 15:'MX', 16:'TXT', 17:'RP', 18:'AFSDB', 24:'SIG', 25:'KEY', 28:'AAAA', 29:'LOC', 33:'SRV', 35:'NAPTR', 36:'KX', 37:'CERT', 39:'DNAME', 41:'OPT', 42:'APL', 43:'DS', 44:'SSHFP', 45:'IPSECKEY', 46:'RRSIG', 47:'NSEC', 48:'DNSKEY', 49:'DHCID', 50:'NSEC3', 51:'NSEC3PARAM', 52:'TLSA', 55:'HIP', 99:'SPF', 249:'TKEY', 250:'TSIG', 251:'IXFR', 252:'AXFR', 255:'ANY', 257:'TYPE257', 32768:'TA', 32769:'DLV'}, DNSError) CLASS = Bimap('CLASS', {1:'IN', 2:'CS', 3:'CH', 4:'Hesiod', 254:'None', 255:'*'}, DNSError) QR = Bimap('QR', {0:'QUERY', 1:'RESPONSE'}, DNSError) RCODE = Bimap('RCODE', {0:'NOERROR', 1:'FORMERR', 2:'SERVFAIL', 3:'NXDOMAIN', 4:'NOTIMP', 5:'REFUSED', 6:'YXDOMAIN', 7:'YXRRSET', 8:'NXRRSET', 9:'NOTAUTH', 10:'NOTZONE'}, DNSError) OPCODE = Bimap('OPCODE',{0:'QUERY', 1:'IQUERY', 2:'STATUS', 5:'UPDATE'}, DNSError) def label(label,origin=None): if label.endswith("."): return DNSLabel(label) else: return (origin if isinstance(origin,DNSLabel) else DNSLabel(origin)).add(label) class DNSRecord(object): @classmethod def parse(cls,packet): buffer = DNSBuffer(packet) try: header = DNSHeader.parse(buffer) questions = [] rr = [] auth = [] ar = [] for i in range(header.q): questions.append(DNSQuestion.parse(buffer)) for i in range(header.a): rr.append(RR.parse(buffer)) for i in range(header.auth): auth.append(RR.parse(buffer)) for i in range(header.ar): ar.append(RR.parse(buffer)) return cls(header,questions,rr,auth=auth,ar=ar) except DNSError: raise except (BufferError,BimapError) as e: raise DNSError("Error unpacking DNSRecord [offset=%d]: %s" % ( buffer.offset,e)) @classmethod
MIT License
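A hedged sketch of building questions with non-default types and classes. The import path assumes the vendored layout gaehelper/dnslib/dns.py; the type and class names are looked up by getattr on the QTYPE/CLASS bimaps shown in the context above.

from dnslib.dns import DNSRecord          # import path assumed from the vendored package

q_a = DNSRecord.question("example.com")                  # defaults: qtype="A", qclass="IN"
q_mx = DNSRecord.question("example.com", "MX")
q_chaos = DNSRecord.question("version.bind", "TXT", "CH")
print(q_chaos)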
zzzeek/alembic
alembic/ddl/impl.py
DefaultImpl.autogen_column_reflect
python
def autogen_column_reflect(self, inspector, table, column_info):
A hook that is attached to the 'column_reflect' event for when a Table is reflected from the database during the autogenerate process. Dialects can elect to modify the information gathered here.
https://github.com/zzzeek/alembic/blob/9b01e5fa7178333f2e78ee0fc1322112307b51dd/alembic/ddl/impl.py#L597-L604
from collections import namedtuple import re from typing import Any from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import Sequence from typing import Set from typing import Tuple from typing import Type from typing import TYPE_CHECKING from typing import Union from sqlalchemy import cast from sqlalchemy import schema from sqlalchemy import text from . import base from .. import util from ..util import sqla_compat from ..util.compat import string_types from ..util.compat import text_type if TYPE_CHECKING: from io import StringIO from typing import Literal from sqlalchemy.engine import Connection from sqlalchemy.engine import Dialect from sqlalchemy.engine.cursor import CursorResult from sqlalchemy.engine.cursor import LegacyCursorResult from sqlalchemy.engine.reflection import Inspector from sqlalchemy.sql.dml import Update from sqlalchemy.sql.elements import ClauseElement from sqlalchemy.sql.elements import ColumnElement from sqlalchemy.sql.elements import quoted_name from sqlalchemy.sql.elements import TextClause from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.schema import ForeignKeyConstraint from sqlalchemy.sql.schema import Index from sqlalchemy.sql.schema import Table from sqlalchemy.sql.schema import UniqueConstraint from sqlalchemy.sql.selectable import TableClause from sqlalchemy.sql.type_api import TypeEngine from .base import _ServerDefault from ..autogenerate.api import AutogenContext from ..operations.batch import ApplyBatchImpl from ..operations.batch import BatchOperationsImpl class ImplMeta(type): def __init__( cls, classname: str, bases: Tuple[Type["DefaultImpl"]], dict_: Dict[str, Any], ): newtype = type.__init__(cls, classname, bases, dict_) if "__dialect__" in dict_: _impls[dict_["__dialect__"]] = cls return newtype _impls: dict = {} Params = namedtuple("Params", ["token0", "tokens", "args", "kwargs"]) class DefaultImpl(metaclass=ImplMeta): __dialect__ = "default" transactional_ddl = False command_terminator = ";" type_synonyms: Tuple[Set[str], ...] = ({"NUMERIC", "DECIMAL"},) type_arg_extract: Sequence[str] = () identity_attrs_ignore: Tuple[str, ...] 
= ("on_null",) def __init__( self, dialect: "Dialect", connection: Optional["Connection"], as_sql: bool, transactional_ddl: Optional[bool], output_buffer: Optional["StringIO"], context_opts: Dict[str, Any], ) -> None: self.dialect = dialect self.connection = connection self.as_sql = as_sql self.literal_binds = context_opts.get("literal_binds", False) self.output_buffer = output_buffer self.memo: dict = {} self.context_opts = context_opts if transactional_ddl is not None: self.transactional_ddl = transactional_ddl if self.literal_binds: if not self.as_sql: raise util.CommandError( "Can't use literal_binds setting without as_sql mode" ) @classmethod def get_by_dialect(cls, dialect: "Dialect") -> Any: return _impls[dialect.name] def static_output(self, text: str) -> None: assert self.output_buffer is not None self.output_buffer.write(text_type(text + "\n\n")) self.output_buffer.flush() def requires_recreate_in_batch( self, batch_op: "BatchOperationsImpl" ) -> bool: return False def prep_table_for_batch( self, batch_impl: "ApplyBatchImpl", table: "Table" ) -> None: @property def bind(self) -> Optional["Connection"]: return self.connection def _exec( self, construct: Union["ClauseElement", str], execution_options: None = None, multiparams: Sequence[dict] = (), params: Dict[str, int] = util.immutabledict(), ) -> Optional[Union["LegacyCursorResult", "CursorResult"]]: if isinstance(construct, string_types): construct = text(construct) if self.as_sql: if multiparams or params: raise Exception("Execution arguments not allowed with as_sql") if self.literal_binds and not isinstance( construct, schema.DDLElement ): compile_kw = dict(compile_kwargs={"literal_binds": True}) else: compile_kw = {} self.static_output( text_type( construct.compile(dialect=self.dialect, **compile_kw) ) .replace("\t", " ") .strip() + self.command_terminator ) return None else: conn = self.connection assert conn is not None if execution_options: conn = conn.execution_options(**execution_options) if params: assert isinstance(multiparams, tuple) multiparams += (params,) return conn.execute(construct, multiparams) def execute( self, sql: Union["Update", "TextClause", str], execution_options: None = None, ) -> None: self._exec(sql, execution_options) def alter_column( self, table_name: str, column_name: str, nullable: Optional[bool] = None, server_default: Union["_ServerDefault", "Literal[False]"] = False, name: Optional[str] = None, type_: Optional["TypeEngine"] = None, schema: Optional[str] = None, autoincrement: Optional[bool] = None, comment: Optional[Union[str, "Literal[False]"]] = False, existing_comment: Optional[str] = None, existing_type: Optional["TypeEngine"] = None, existing_server_default: Optional["_ServerDefault"] = None, existing_nullable: Optional[bool] = None, existing_autoincrement: Optional[bool] = None, **kw: Any ) -> None: if autoincrement is not None or existing_autoincrement is not None: util.warn( "autoincrement and existing_autoincrement " "only make sense for MySQL", stacklevel=3, ) if nullable is not None: self._exec( base.ColumnNullable( table_name, column_name, nullable, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, ) ) if server_default is not False: kw = {} cls_: Type[ Union[ base.ComputedColumnDefault, base.IdentityColumnDefault, base.ColumnDefault, ] ] if sqla_compat._server_default_is_computed( server_default, existing_server_default ): cls_ = base.ComputedColumnDefault elif 
sqla_compat._server_default_is_identity( server_default, existing_server_default ): cls_ = base.IdentityColumnDefault kw["impl"] = self else: cls_ = base.ColumnDefault self._exec( cls_( table_name, column_name, server_default, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, **kw ) ) if type_ is not None: self._exec( base.ColumnType( table_name, column_name, type_, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, ) ) if comment is not False: self._exec( base.ColumnComment( table_name, column_name, comment, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, ) ) if name is not None: self._exec( base.ColumnName( table_name, column_name, name, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, ) ) def add_column( self, table_name: str, column: "Column", schema: Optional[Union[str, "quoted_name"]] = None, ) -> None: self._exec(base.AddColumn(table_name, column, schema=schema)) def drop_column( self, table_name: str, column: "Column", schema: Optional[str] = None, **kw ) -> None: self._exec(base.DropColumn(table_name, column, schema=schema)) def add_constraint(self, const: Any) -> None: if const._create_rule is None or const._create_rule(self): self._exec(schema.AddConstraint(const)) def drop_constraint(self, const: "Constraint") -> None: self._exec(schema.DropConstraint(const)) def rename_table( self, old_table_name: str, new_table_name: Union[str, "quoted_name"], schema: Optional[Union[str, "quoted_name"]] = None, ) -> None: self._exec( base.RenameTable(old_table_name, new_table_name, schema=schema) ) def create_table(self, table: "Table") -> None: table.dispatch.before_create( table, self.connection, checkfirst=False, _ddl_runner=self ) self._exec(schema.CreateTable(table)) table.dispatch.after_create( table, self.connection, checkfirst=False, _ddl_runner=self ) for index in table.indexes: self._exec(schema.CreateIndex(index)) with_comment = ( self.dialect.supports_comments and not self.dialect.inline_comments ) comment = table.comment if comment and with_comment: self.create_table_comment(table) for column in table.columns: comment = column.comment if comment and with_comment: self.create_column_comment(column) def drop_table(self, table: "Table") -> None: self._exec(schema.DropTable(table)) def create_index(self, index: "Index") -> None: self._exec(schema.CreateIndex(index)) def create_table_comment(self, table: "Table") -> None: self._exec(schema.SetTableComment(table)) def drop_table_comment(self, table: "Table") -> None: self._exec(schema.DropTableComment(table)) def create_column_comment(self, column: "ColumnElement") -> None: self._exec(schema.SetColumnComment(column)) def drop_index(self, index: "Index") -> None: self._exec(schema.DropIndex(index)) def bulk_insert( self, table: Union["TableClause", "Table"], rows: List[dict], multiinsert: bool = True, ) -> None: if not isinstance(rows, list): raise TypeError("List expected") elif rows and not isinstance(rows[0], dict): raise TypeError("List of dictionaries expected") if self.as_sql: for row in rows: self._exec( sqla_compat._insert_inline(table).values( **dict( ( k, sqla_compat._literal_bindparam( k, v, 
type_=table.c[k].type ) if not isinstance( v, sqla_compat._literal_bindparam ) else v, ) for k, v in row.items() ) ) ) else: if not hasattr(table, "_autoincrement_column"): table._autoincrement_column = None if rows: if multiinsert: self._exec( sqla_compat._insert_inline(table), multiparams=rows ) else: for row in rows: self._exec( sqla_compat._insert_inline(table).values(**row) ) def _tokenize_column_type(self, column: "Column") -> Params: definition = self.dialect.type_compiler.process(column.type).lower() tokens = re.findall(r"[\w\-_]+|\(.+?\)", definition) term_tokens = [] paren_term = None for token in tokens: if re.match(r"^\(.*\)$", token): paren_term = token else: term_tokens.append(token) params = Params(term_tokens[0], term_tokens[1:], [], {}) if paren_term: for term in re.findall("[^(),]+", paren_term): if "=" in term: key, val = term.split("=") params.kwargs[key.strip()] = val.strip() else: params.args.append(term.strip()) return params def _column_types_match( self, inspector_params: "Params", metadata_params: "Params" ) -> bool: if inspector_params.token0 == metadata_params.token0: return True synonyms = [{t.lower() for t in batch} for batch in self.type_synonyms] inspector_all_terms = " ".join( [inspector_params.token0] + inspector_params.tokens ) metadata_all_terms = " ".join( [metadata_params.token0] + metadata_params.tokens ) for batch in synonyms: if {inspector_all_terms, metadata_all_terms}.issubset(batch) or { inspector_params.token0, metadata_params.token0, }.issubset(batch): return True return False def _column_args_match( self, inspected_params: "Params", meta_params: "Params" ) -> bool: if ( len(meta_params.tokens) == len(inspected_params.tokens) and meta_params.tokens != inspected_params.tokens ): return False if ( len(meta_params.args) == len(inspected_params.args) and meta_params.args != inspected_params.args ): return False insp = " ".join(inspected_params.tokens).lower() meta = " ".join(meta_params.tokens).lower() for reg in self.type_arg_extract: mi = re.search(reg, insp) mm = re.search(reg, meta) if mi and mm and mi.group(1) != mm.group(1): return False return True def compare_type( self, inspector_column: "Column", metadata_column: "Column" ) -> bool: inspector_params = self._tokenize_column_type(inspector_column) metadata_params = self._tokenize_column_type(metadata_column) if not self._column_types_match(inspector_params, metadata_params): return True if not self._column_args_match(inspector_params, metadata_params): return True return False def compare_server_default( self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default, ): return rendered_inspector_default != rendered_metadata_default def correct_for_autogen_constraints( self, conn_uniques: Union[Set["UniqueConstraint"]], conn_indexes: Union[Set["Index"]], metadata_unique_constraints: Set["UniqueConstraint"], metadata_indexes: Set["Index"], ) -> None: pass def cast_for_batch_migrate(self, existing, existing_transfer, new_type): if existing.type._type_affinity is not new_type._type_affinity: existing_transfer["expr"] = cast( existing_transfer["expr"], new_type ) def render_ddl_sql_expr( self, expr: "ClauseElement", is_server_default: bool = False, **kw ) -> str: compile_kw = dict( compile_kwargs={"literal_binds": True, "include_table": False} ) return text_type(expr.compile(dialect=self.dialect, **compile_kw)) def _compat_autogen_column_reflect( self, inspector: "Inspector" ) -> Callable: return self.autogen_column_reflect def correct_for_autogen_foreignkeys( 
self, conn_fks: Set["ForeignKeyConstraint"], metadata_fks: Set["ForeignKeyConstraint"], ) -> None: pass
MIT License
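A hedged sketch of overriding the autogen_column_reflect() hook in a dialect impl. The dialect name and the specific tweak are made up; registration through the __dialect__ attribute follows the ImplMeta metaclass shown in the context above.

from alembic.ddl.impl import DefaultImpl

class MyDialectImpl(DefaultImpl):
    __dialect__ = "mydialect"            # made-up dialect name; ImplMeta registers the impl under it

    def autogen_column_reflect(self, inspector, table, column_info):
        # drop a reflected server default that we never want autogenerate to diff
        column_info.pop("default", None)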
andysalerno/reversi_ai
agents/q_learning_agent.py
QLearningAgent.reset
python
def reset(self):
    self.reset_learning()
Resets the agent to prepare it to play another game.
https://github.com/andysalerno/reversi_ai/blob/ef04b04c94dc71f233d1e8666b5a9db3501ef73a/agents/q_learning_agent.py#L124-L126
import random from agents import Agent from keras.layers import Dense from keras.models import Sequential, model_from_json from keras.optimizers import RMSprop, SGD from util import info, opponent, color_name, numpify, best_move_val MODEL_FILENAME = 'neural/q_model' WEIGHTS_FILENAME = 'neural/q_weights' HIDDEN_SIZE = 42 ALPHA = 1.0 BATCH_SIZE = 64 WIN_REWARD = 1 LOSE_REWARD = -1 optimizer = RMSprop() class QLearningAgent(Agent): def __init__(self, reversi, color, **kwargs): self.color = color self.reversi = reversi self.learning_enabled = kwargs.get('learning_enabled', False) self.model = self.get_model(kwargs.get('model_file', None)) self.minimax_enabled = kwargs.get('minimax', False) weights_num = kwargs.get('weights_num', '') self.load_weights(weights_num) self.epsilon = 0.0 if self.learning_enabled: self.epoch = 0 self.train_count = random.choice(range(BATCH_SIZE)) self.memory = None self.prev_move = None self.prev_state = None if kwargs.get('model_file', None) is None: self.save_model(self.model) def set_epsilon(self, val): self.epsilon = val if not self.learning_enabled: info('Warning -- set_epsilon() was called when learning was not enabled.') def set_memory(self, memory): self.memory = memory def set_epoch(self, epoch): self.epoch = epoch def get_action(self, state, legal_moves=None): if legal_moves is None: legal_moves = self.reversi.legal_moves(state) if not legal_moves: return None else: move = None if self.epsilon > random.random(): move = random.choice(legal_moves) else: move = self.policy(state, legal_moves) if self.learning_enabled: self.train(state, legal_moves) self.prev_move = move self.prev_state = state return move def minimax(self, state, depth=2, alpha=-float('inf'), beta=float('inf')): assert state[1] == self.color or state[1] == opponent[self.color] player_turn = True if state[1] == self.color else False legal = self.reversi.legal_moves(state) winner = self.reversi.winner(state) if not legal and winner is False: return self.minimax(self.reversi.next_state(state, None)) elif depth == 0 or winner is not False: if winner == self.color: return 9999999 elif winner == opponent[self.color]: return -9999999 else: q_vals = self.model.predict(numpify(state)) best_move, best_q = best_move_val(q_vals, legal) print('best_q: {}'.format(best_q)) return best_q if player_turn: val = -float('inf') for move in legal: new_state = self.reversi.next_state(state, move) val = max(val, self.minimax(new_state, depth - 1, alpha, beta)) alpha = max(alpha, val) if beta <= alpha: break return val else: val = float('inf') for move in legal: new_state = self.reversi.next_state(state, move) val = min(val, self.minimax(new_state, depth - 1, alpha, beta)) beta = min(beta, val) if beta <= alpha: break return val def observe_win(self, state): if self.learning_enabled: winner = self.reversi.winner(state) self.train(state, [], winner)
MIT License
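A hedged sketch of reusing one agent over several self-play games. Here reversi, color and final_state stand in for the game object, colour constant and finished-game state the surrounding code provides, and reset_learning() is assumed to clear the per-game learning state.

from agents.q_learning_agent import QLearningAgent   # import path assumed from agents/q_learning_agent.py

agent = QLearningAgent(reversi, color, learning_enabled=True)   # reversi/color assumed to exist
for _ in range(10):
    # ... play one full game, calling agent.get_action(state, legal_moves) each turn ...
    agent.observe_win(final_state)        # final_state: terminal state of the finished game
    agent.reset()                         # prepare the agent for the next game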
demisto/demisto-sdk
demisto_sdk/commands/find_dependencies/find_dependencies.py
update_pack_metadata_with_dependencies
python
def update_pack_metadata_with_dependencies(pack_folder_name: str, first_level_dependencies: dict) -> None:
    found_path_results = find_pack_path(pack_folder_name)

    if not found_path_results:
        print_error(f"{pack_folder_name} {constants.PACKS_PACK_META_FILE_NAME} was not found")
        sys.exit(1)

    pack_metadata_path = found_path_results[0]

    with open(pack_metadata_path, 'r+') as pack_metadata_file:
        pack_metadata = json.load(pack_metadata_file)
        pack_metadata = {} if not isinstance(pack_metadata, dict) else pack_metadata
        pack_metadata['dependencies'] = first_level_dependencies
        pack_metadata['displayedImages'] = list(first_level_dependencies.keys())

        pack_metadata_file.seek(0)
        json.dump(pack_metadata, pack_metadata_file, indent=4)
        pack_metadata_file.truncate()
Updates pack metadata with found parsed dependencies results.

Args:
    pack_folder_name (str): pack folder name.
    first_level_dependencies (dict): first level dependencies data.
https://github.com/demisto/demisto-sdk/blob/8d8767c2dfec77b67c35f4e1022e30ed2893e864/demisto_sdk/commands/find_dependencies/find_dependencies.py#L119-L144
import glob import json import os import sys from copy import deepcopy from distutils.version import LooseVersion from typing import Optional, Union import click import networkx as nx from requests import RequestException from demisto_sdk.commands.common import constants from demisto_sdk.commands.common.constants import GENERIC_COMMANDS_NAMES from demisto_sdk.commands.common.tools import (get_content_id_set, is_external_repository, print_error, print_warning) from demisto_sdk.commands.common.update_id_set import merge_id_sets from demisto_sdk.commands.create_id_set.create_id_set import IDSetCreator MINIMUM_DEPENDENCY_VERSION = LooseVersion('6.0.0') COMMON_TYPES_PACK = 'CommonTypes' def parse_for_pack_metadata(dependency_graph: nx.DiGraph, graph_root: str, verbose: bool = False, complete_data: bool = False, id_set_data=None) -> tuple: if id_set_data is None: id_set_data = {} first_level_dependencies = {} parsed_dependency_graph = [(k, v) for k, v in dependency_graph.nodes(data=True) if dependency_graph.has_edge(graph_root, k)] for dependency_id, additional_data in parsed_dependency_graph: pack_name = find_pack_display_name(dependency_id) if not complete_data: additional_data['display_name'] = pack_name else: dependency_data = id_set_data.get('Packs', {}).get(dependency_id) if dependency_data: additional_data['name'] = dependency_data['name'] additional_data['author'] = dependency_data['author'] additional_data['minVersion'] = dependency_data['current_version'] additional_data['certification'] = dependency_data['certification'] else: additional_data['display_name'] = pack_name first_level_dependencies[dependency_id] = additional_data all_level_dependencies = [n for n in dependency_graph.nodes if dependency_graph.in_degree(n) > 0] if verbose: click.secho(f'All level dependencies are: {all_level_dependencies}', fg='white') return first_level_dependencies, all_level_dependencies def find_pack_path(pack_folder_name: str) -> list: pack_metadata_path = os.path.join(constants.PACKS_DIR, pack_folder_name, constants.PACKS_PACK_META_FILE_NAME) found_path_results = glob.glob(pack_metadata_path) return found_path_results def find_pack_display_name(pack_folder_name: str) -> str: found_path_results = find_pack_path(pack_folder_name) if not found_path_results: return pack_folder_name pack_metadata_path = found_path_results[0] with open(pack_metadata_path, 'r') as pack_metadata_file: pack_metadata = json.load(pack_metadata_file) pack_display_name = pack_metadata.get('name') if pack_metadata.get('name') else pack_folder_name return pack_display_name
MIT License
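A hedged sketch of writing first-level dependencies into a pack's metadata. The pack name and dependency payload are made up; the function itself resolves the Packs/<pack>/pack_metadata.json path and rewrites it in place.

from demisto_sdk.commands.find_dependencies.find_dependencies import (
    update_pack_metadata_with_dependencies,
)

first_level = {
    "CommonTypes": {"mandatory": True, "display_name": "Common Types"},   # made-up payload
}
# Injects "dependencies" and "displayedImages" into Packs/MyPack/pack_metadata.json
update_pack_metadata_with_dependencies("MyPack", first_level)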
pyccel/psydac
psydac/fem/grid.py
FemAssemblyGrid.indices
python
def indices( self ):
    return self._indices
Global index of each element used in assembly process.
https://github.com/pyccel/psydac/blob/ddd3008a3f704814aa4e790853962243feae5d8a/psydac/fem/grid.py#L221-L224
import numpy as np from psydac.core.bsplines import elements_spans from psydac.core.bsplines import quadrature_grid from psydac.core.bsplines import basis_ders_on_quad_grid from psydac.core.bsplines import elevate_knots from psydac.utilities.quadratures import gauss_legendre __all__ = ['FemAssemblyGrid'] class FemAssemblyGrid: def __init__( self, space, start, end, *, quad_order=None, nderiv=1, parent_start=None, parent_end=None): T = space.knots degree = space.degree n = space.nbasis grid = space.breaks nc = space.ncells k = quad_order or degree pad = space.pads multiplicity = space.multiplicity u, w = gauss_legendre( k ) u = u[::-1] w = w[::-1] glob_points, glob_weights = quadrature_grid( grid, u, w ) glob_basis = basis_ders_on_quad_grid( T, degree, glob_points, nderiv, space.basis ) glob_spans = elements_spans( T, degree ) spans = [] basis = [] points = [] weights = [] indices = [] ne = 0 if pad==degree: current_glob_spans = glob_spans current_start = start current_end = end elif pad-degree == 1: multiplicity = space.parent_multiplicity elevated_T = elevate_knots(T, degree, space.periodic, multiplicity=multiplicity) current_start = parent_start or start current_end = parent_end or end current_glob_spans = elements_spans( elevated_T, pad ) else: raise NotImplementedError('TODO') if space.periodic: for k in range( nc ): gk = current_glob_spans[k] if start <= gk-n and gk-n-pad <= end: spans .append( glob_spans[k]-n ) basis .append( glob_basis [k] ) points .append( glob_points [k] ) weights.append( glob_weights[k] ) indices.append( k ) ne += 1 m = multiplicity if multiplicity>1 else 0 for k in range( nc ): gk = current_glob_spans[k] gs = glob_spans [k] if current_start-m <= gk and gk-pad <= current_end: if m>0 and pad-degree==1 and start>gs:continue spans .append( glob_spans [k] ) basis .append( glob_basis [k] ) points .append( glob_points [k] ) weights.append( glob_weights[k] ) indices.append( k ) ne += 1 self._num_elements = ne self._num_quad_pts = len( u ) self._spans = np.array( spans ) self._basis = np.array( basis ) self._points = np.array( points ) self._weights = np.array( weights ) self._indices = np.array( indices ) self._quad_rule_x = u self._quad_rule_w = w if space.periodic: local_element_start = self._spans.searchsorted( degree + start ) local_element_end = self._spans.searchsorted( degree + end ) else: if end+1 >= degree: local_element_start = self._spans.searchsorted( degree if start == 0 else 1 + start) local_element_end = self._spans.searchsorted( end if end == n-1 else 1 + end ) else: local_element_start = 1 local_element_end = 0 self._local_element_start = local_element_start self._local_element_end = local_element_end @property def num_elements( self ): return self._num_elements @property def num_quad_pts( self ): return self._num_quad_pts @property def spans( self ): return self._spans @property def basis( self ): return self._basis @property def points( self ): return self._points @property def weights( self ): return self._weights @property
MIT License
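A hedged sketch of walking the local assembly data exposed by the grid. Here space, start and end stand in for a 1D spline space and the owned basis range, which are constructed elsewhere and not shown in this file.

from psydac.fem.grid import FemAssemblyGrid

grid = FemAssemblyGrid(space, start, end)        # space/start/end assumed to be provided by the caller
for local_el, global_el in enumerate(grid.indices):
    span = grid.spans[local_el]                  # span of this element in the global knot vector
    points = grid.points[local_el]               # quadrature points for this element
    weights = grid.weights[local_el]             # matching quadrature weights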
zarad1993/dyc
dyc/methods.py
MethodBuilder._is_method
python
def _is_method(self, line):
    return line.strip().split(" ")[0] in self.config.get("keywords")
A predicate method that checks if a line is a method

Parameters
----------
str line: Text string of a line in a file
https://github.com/zarad1993/dyc/blob/5d4c34aedc463a0a85800ebcc3203e63c85f647a/dyc/methods.py#L221-L230
import sys import re import fileinput import copy import linecache import click from .utils import ( get_leading_whitespace, BlankFormatter, get_indent, add_start_end, is_one_line_method, ) from .base import Builder import os class MethodBuilder(Builder): already_printed_filepaths = [] def extract_and_set_information(self, filename, start, line, length): start_line = linecache.getline(filename, start) initial_line = line start_leading_space = get_leading_whitespace( start_line ) method_string = start_line if not is_one_line_method(start_line, self.config.get("keywords")): method_string = line linesBackwards = method_string.count("\n") - 1 start_leading_space = get_leading_whitespace( linecache.getline(filename, start - linesBackwards) ) line_within_scope = True lineno = start + 1 line = linecache.getline(filename, lineno) end_of_file = False end = None while line_within_scope and not end_of_file: current_leading_space = get_leading_whitespace(line) if len(current_leading_space) <= len(start_leading_space) and line.strip(): end = lineno - 1 break method_string += line lineno = lineno + 1 line = linecache.getline(filename, int(lineno)) end_of_file = True if lineno > length else False if not end: end = length linecache.clearcache() return MethodInterface( plain=method_string, name=self._get_name(initial_line), start=start, end=end, filename=filename, arguments=self.extract_arguments(initial_line.strip("\n")), config=self.config, leading_space=get_leading_whitespace(initial_line), placeholders=self.placeholders, ) def validate(self, result): if not result: return False name = result.name if name not in self.config.get( "ignore", [] ) and not self.is_first_line_documented(result): if ( self.filename not in self.already_printed_filepaths ): click.echo( "\n\nIn file {} :\n".format( click.style( os.path.join(*self.filename.split(os.sep)[-3:]), fg="red" ) ) ) self.already_printed_filepaths.append(self.filename) confirmed = ( True if self.placeholders else click.confirm( "Do you want to document method {}?".format( click.style(name, fg="green") ) ) ) if confirmed: return True return False def extract_arguments(self, line): args = ArgumentDetails(line, self.config.get("arguments", {})) args.extract() return args.sanitize() def is_first_line_documented(self, result): returned = False for x in range(result.start, result.end): line = linecache.getline(result.filename, x) if self.config.get("open") in line: returned = True break linecache.clearcache() return returned def prompts(self): for method_interface in self._method_interface_gen(): method_interface.prompt() if method_interface else None def apply(self): for method_interface in self._method_interface_gen(): if not method_interface: continue fileInput = fileinput.input(method_interface.filename, inplace=True) for line in fileInput: tmpLine = line if self._is_method(line) and ":" not in line: openedP = line.count("(") closedP = line.count(")") pos = 1 if openedP == closedP: continue else: while openedP != closedP: tmpLine += fileInput.readline() openedP = tmpLine.count("(") closedP = tmpLine.count(")") pos += 1 line = tmpLine if self._get_name(line) == method_interface.name: if self.config.get("within_scope"): sys.stdout.write(line + method_interface.result + "\n") else: sys.stdout.write(method_interface.result + "\n" + line) else: sys.stdout.write(line) def _method_interface_gen(self): if not self.details: yield None for filename, func_pack in self.details.items(): for method_interface in func_pack.values(): yield method_interface def 
_get_name(self, line): for keyword in self.config.get("keywords", []): clear_defs = re.sub("{} ".format(keyword), "", line.strip()) name = re.sub(r"\([^)]*\)\:", "", clear_defs).strip() if re.search(r"\(([\s\S]*)\)", name): try: name = re.match(r"^[^\(]+", name).group() except: pass if name: return name
MIT License
ukinti/garnet
src/_garnet/filters/state.py
_MetaStateGroup.from_iter
python
def from_iter( cls, iterable: "Union[_DummyGroupT, Iterable[_DummyGroupT]]", main_group_name: str = "AnonGroup", _depth: int = 0, /, ) -> "Type[Group]": namespace: Dict[str, Union[M, Type[Group]]] = {} for sog in iterable: if isinstance(sog, (str, int)): namespace[str(sog)] = M() elif isinstance(sog, Iterable): inner_group_name = ( f"{main_group_name}_{_depth+1}{len(namespace)}" ) namespace[inner_group_name] = cls.from_iter( sog, inner_group_name, _depth + 1 ) else: raise TypeError( f"Expected string or iterable of strings," f" not {type(sog).__name__}" ) return _MetaStateGroup( main_group_name, (Group,), namespace, )
Create "Group" class but from iterable of strings or ints. (can be nested iterable of possibly nested iterables) :param iterable: pass iterable of iterables or just strings, integers :param main_group_name: upper group name (main parent name) :param _depth: ignore this attribute unless you know what are you doing :return: new group class inheritor class Usage:: >>> from garnet.filters.group import Group >>> >>> S = Group.from_iter(("a", "b", "c", ("d", "e"))) >>> >>> assert len(S.all_children) == 1 >>> assert S.all_children[0].parent == S >>> assert len(S.all_state_objects) == len("abcde") >>> assert len(S.all_children[0].all_state_object) == len("de")
https://github.com/ukinti/garnet/blob/22eb21b512d143b7da285254fec71668f58a62f8/src/_garnet/filters/state.py#L317-L368
from typing import ( Any, Container, Dict, Iterable, Literal, Tuple, Type, Union, cast, ) from _garnet.events.filter import Filter from _garnet.vars import fsm _DummyGroupT = Iterable[Union[str, int, Any]] ENTRYPOINT_STATE = None ANY_STATE_EXCEPT_NONE = "*" class NoNext(Exception): class NoPrev(Exception): class NoTop(Exception): async def any_state_except_none_func(_): return await fsm.CageCtx.get().get_state() is not None async def no_state_but_none_func(_): return await fsm.CageCtx.get().get_state() is None _ilc_descriptor = type( "InstanceLessProperty", (), { "__slots__": ("value",), "__get__": lambda dptor, instance, owner: dptor.value, "__init__": lambda dptor, value: setattr(dptor, "value", value), }, ) class _MetaCurrentState(type): @classmethod def __eq__( mcs, _s: "Union[M, Container[M], Type[Group], Literal['*']]", ) -> Filter: if _s == ANY_STATE_EXCEPT_NONE: _f = any_state_except_none_func elif _s is None: _f = no_state_but_none_func elif isinstance(_s, type) and issubclass(_s, Group): async def _f(_): current_state = await fsm.CageCtx.get().get_state() for state in _s.all_state_objects: if state.name == current_state: fsm.MCtx.set(state) return True return False elif isinstance(_s, M): async def _f(_): current_state = await fsm.CageCtx.get().get_state() ok = current_state == _s.name if ok: fsm.MCtx.set(_s) return ok else: raise ValueError( "`state` parameter must be any of: " '`M` object, `Group` class, `None` singleton, "*" ' "(star notation for any state)" ) return Filter(_f, None) @classmethod def exact( mcs, state: "Union[M, Container[M], Type[Group], Literal['*']]", ) -> Filter: return State == state any: Filter = _ilc_descriptor( Filter(any_state_except_none_func, event_builder=None) ) entry: Filter = _ilc_descriptor( Filter(no_state_but_none_func, event_builder=None) ) class State(metaclass=_MetaCurrentState): class M: __slots__ = "_sh_name", "owner" def __set_name__(self, owner: "_MetaStateGroup", name: str) -> None: if type(owner) != _MetaStateGroup: raise ValueError("State should be declared only in `StateGroup`") self.owner = owner self._sh_name = name @property def name(self): return ".".join((self.owner.full_group_name, self._sh_name)) def __str__(self) -> str: return self.name @property def next(self) -> "M": _oss = self.owner.all_state_objects try: return _oss[_oss.index(self) + 1] except IndexError: raise NoNext from None @property def prev(self) -> "M": _oss = self.owner.all_state_objects try: return _oss[_oss.index(self) - 1] except IndexError: raise NoPrev from None @property def top(self) -> "M": try: return self.owner.all_state_objects[0] except IndexError: raise NoTop from None def is_reserved(name: str, /) -> bool: return name in frozenset( ( "parent", "children", "states", "first", "last", "all_state_names", "all_state_objects", "all_children", "_group_name", "full_group_name", ) ) class _MetaStateGroup(type): def __new__( mcs, name: str, bases: Tuple[Type[Any], ...], namespace: Dict[str, Any], ) -> "Type[Group]": cls = super(_MetaStateGroup, mcs).__new__(mcs, name, bases, namespace) states = [] children = [] cls._group_name = name dont_ignore_namespace_garbage = namespace.get( "__ignore_garbage__", False ) for name, prop in namespace.items(): if is_reserved(name): raise AttributeError( "{name!s} for {cls!r} cannot be set, since it's reserved" ) if isinstance(prop, M): states.append(prop) elif isinstance(prop, type) and issubclass(prop, Group): children.append(prop) prop.parent = cls elif dont_ignore_namespace_garbage: raise TypeError( f"{name!s} is 
type of {type(prop).__name__}, " f"but only {Group.__name__} classes " f"and {M.__name__} objects are allowed" ) cls.parent = None cls.children = tuple(children) cls.states = tuple(states) return cast("Type[Group]", cls) @property def full_group_name(cls) -> str: if cls.parent: return ".".join((cls.parent.full_group_name, cls._group_name)) return cls._group_name @property def all_children(cls) -> "Tuple[Type[Group], ...]": result = cls.children for child in cls.children: result += child.children return result @property def all_state_objects(cls) -> "Tuple[M, ...]": result = cls.states for child in cls.children: result += child.all_state_objects return result @property def all_state_names(cls) -> "Tuple[str, ...]": return tuple(state.name for state in cls.all_state_objects) def get_root(cls) -> "Type[Group]": if cls.parent is None: return cast(Type[Group], cls) return cls.parent.get_root() def __str__(self) -> str: return f"<StatesGroup '{self.full_group_name}'>" @property def last(cls) -> str: return cls.all_state_names[-1] @property def first(cls) -> str: return cls.all_state_names[0]
MIT License
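A minimal, dependency-free sketch of the recursive flattening that from_iter performs above: strings and ints become leaf states, nested iterables become child groups, and child names are derived from the parent name, depth and position. Plain dicts stand in for garnet's Group/M objects; every name here is illustrative, not garnet's API.

from collections.abc import Iterable

def group_from_iter(iterable, name="AnonGroup", depth=0):
    # Mirrors the recursion in _MetaStateGroup.from_iter using plain dicts.
    namespace = {}
    for item in iterable:
        if isinstance(item, (str, int)):
            namespace[str(item)] = "state"  # stand-in for an M() state object
        elif isinstance(item, Iterable):
            child_name = f"{name}_{depth + 1}{len(namespace)}"
            namespace[child_name] = group_from_iter(item, child_name, depth + 1)
        else:
            raise TypeError(f"Expected string or iterable of strings, not {type(item).__name__}")
    return namespace

print(group_from_iter(("a", "b", "c", ("d", "e"))))
# {'a': 'state', 'b': 'state', 'c': 'state', 'AnonGroup_13': {'d': 'state', 'e': 'state'}}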
haiwen/python-seafile
seafileapi/files.py
_SeafDirentBase.copyTo
python
def copyTo(self, dst_dir, dst_repo_id=None): if dst_repo_id is None: dst_repo_id = self.repo.id dirent_type = 'dir' if self.isdir else 'file' resp = self._copy_move_task('copy', dirent_type, dst_dir, dst_repo_id) return resp.status_code == 200
Copy this file/folder to another directory (optionally in a different repo).
https://github.com/haiwen/python-seafile/blob/49c538d45bd3228745b76af68c08a8723fafabe0/seafileapi/files.py#L74-L82
import io import os import posixpath import re from seafileapi.utils import querystr ZERO_OBJ_ID = '0000000000000000000000000000000000000000' class _SeafDirentBase(object): isdir = None def __init__(self, repo, path, object_id, size=0): self.client = repo.client self.repo = repo self.path = path self.id = object_id self.size = size @property def name(self): return posixpath.basename(self.path) def list_revisions(self): pass def delete(self): suffix = 'dir' if self.isdir else 'file' url = '/api2/repos/%s/%s/' % (self.repo.id, suffix) + querystr(p=self.path) resp = self.client.delete(url) return resp def rename(self, newname): suffix = 'dir' if self.isdir else 'file' url = '/api2/repos/%s/%s/' % (self.repo.id, suffix) + querystr(p=self.path, reloaddir='true') postdata = {'operation': 'rename', 'newname': newname} resp = self.client.post(url, data=postdata) succeeded = resp.status_code == 200 if succeeded: if self.isdir: new_dirent = self.repo.get_dir(os.path.join(os.path.dirname(self.path), newname)) else: new_dirent = self.repo.get_file(os.path.join(os.path.dirname(self.path), newname)) for key in list(self.__dict__.keys()): self.__dict__[key] = new_dirent.__dict__[key] return succeeded def _copy_move_task(self, operation, dirent_type, dst_dir, dst_repo_id=None): url = '/api/v2.1/copy-move-task/' src_repo_id = self.repo.id src_parent_dir = os.path.dirname(self.path) src_dirent_name = os.path.basename(self.path) dst_repo_id = dst_repo_id dst_parent_dir = dst_dir operation = operation dirent_type = dirent_type postdata = {'src_repo_id': src_repo_id, 'src_parent_dir': src_parent_dir, 'src_dirent_name': src_dirent_name, 'dst_repo_id': dst_repo_id, 'dst_parent_dir': dst_parent_dir, 'operation': operation, 'dirent_type': dirent_type} return self.client.post(url, data=postdata)
Apache License 2.0
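A hedged usage sketch for copyTo above, assuming the connection helpers from python-seafile's README (seafileapi.connect, client.repos.get_repo, repo.get_file); the server URL, credentials, repo id and paths are placeholders.

import seafileapi

client = seafileapi.connect("https://seafile.example.com", "user@example.com", "password")
repo = client.repos.get_repo("repo-id")            # source library
f = repo.get_file("/reports/2020.pdf")
f.copyTo("/archive")                               # copy within the same library
f.copyTo("/backups", dst_repo_id="other-repo-id")  # copy into a different library; returns True on HTTP 200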
gremlin/gremlin-python
gremlinapi/oauth.py
GremlinAPIOAUTH.get_bearer_token
python
def get_bearer_token( cls, company_name: str, access_token: str, https_client: Type[GremlinAPIHttpClient] = get_gremlin_httpclient(), ) -> str: body = { "companyName": company_name, "accessToken": access_token, "provider": "oauth", } payload: dict = cls._payload(**{"data": body}) endpoint = f"{GREMLIN_SSO_USER_AUTH}?getCompanySession=true" (resp, body) = https_client.api_call("POST", endpoint, **payload) assert resp.status_code == 200 bearer_token = resp.json()["header"] assert bearer_token != None return bearer_token
Retrieves a valid bearer token to access the authenticated portions of our API. Parameters ---------- company_name : str The company for which the bearer token is to be generated access_token : str The access token generated by get_access_token() Returns ------- bearer_token : str
https://github.com/gremlin/gremlin-python/blob/159d30f2222eeef9950973e34008904f1259d04c/gremlinapi/oauth.py#L218-L256
import logging import json from gremlinapi.util import experimental from gremlinapi.cli import register_cli_action from gremlinapi.config import GremlinAPIConfig from gremlinapi.exceptions import ( GremlinParameterError, GremlinAuthError, ProxyError, ClientError, HTTPTimeout, HTTPError, ) from gremlinapi.gremlinapi import GremlinAPI from gremlinapi.http_clients import ( get_gremlin_httpclient, GremlinAPIHttpClient, ) from typing import Union, Type, Any, Tuple from gremlinapi.util import ( GREMLIN_OAUTH_LOGIN, GREMLIN_OAUTH_COMPANIES_URI, GREMLIN_SSO_USER_AUTH, GREMLIN_OAUTH_CALLBACK, ) log = logging.getLogger("GremlinAPI.client") class GremlinAPIOAUTH(GremlinAPI): @classmethod def configure( cls, company_id: str = "", https_client: Type[GremlinAPIHttpClient] = get_gremlin_httpclient(), *args: tuple, **kwargs: dict, ) -> int: method: str = "POST" if not company_id: error_msg: str = f"Company ID Required" log.error(error_msg) raise GremlinParameterError(error_msg) endpoint: str = f"{GREMLIN_OAUTH_COMPANIES_URI}/{company_id}/oauth/settings" data: dict = { "authorizationUri": cls._error_if_not_param("authorizationUri", **kwargs), "tokenUri": cls._error_if_not_param("tokenUri", **kwargs), "userInfoUri": cls._error_if_not_param("userInfoUri", **kwargs), "clientId": cls._error_if_not_param("clientId", **kwargs), "clientSecret": cls._error_if_not_param("clientSecret", **kwargs), "scope": cls._warn_if_not_param("scope", **kwargs), } payload: dict = cls._payload(**{"headers": https_client.header(), "body": data}) (resp, body) = https_client.api_call(method, endpoint, **payload) return resp.status_code @classmethod def initiate_oauth( cls, company_name: str, https_client: Type[GremlinAPIHttpClient] = get_gremlin_httpclient(), ) -> Tuple[str, str]: endpoint: str = f"{GREMLIN_OAUTH_LOGIN}?companyName={company_name}" payload: dict = cls._payload(**{"headers": https_client.header()}) (resp, body) = https_client.api_call("GET", endpoint, **payload) assert resp.status_code == 307 state_cookie = resp.cookies["oauth_state"] oauth_provider_login_url = resp.headers["Location"] assert state_cookie != None assert oauth_provider_login_url != None return state_cookie, oauth_provider_login_url @classmethod def get_callback_url( cls, oauth_provider_login_url: str, data: dict, https_client: Type[GremlinAPIHttpClient] = get_gremlin_httpclient(), ) -> str: payload: dict = cls._payload(**{"headers": https_client.header(), "data": data}) (resp, body) = https_client.api_call( "POST", oauth_provider_login_url, **payload ) gremlin_callback_url = resp.headers["Location"] assert gremlin_callback_url != None return gremlin_callback_url @classmethod def get_access_token( cls, state_cookie: str, gremlin_callback_url: str, https_client: Type[GremlinAPIHttpClient] = get_gremlin_httpclient(), ) -> str: cookie = {"oauth_state": state_cookie} (resp, body) = https_client.api_call( "GET", gremlin_callback_url, **{"cookies": cookie} ) assert resp.status_code == 200 access_token = resp.json()["access_token"] assert access_token != None return access_token @classmethod
Apache License 2.0
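The complete OAuth chain around get_bearer_token, built only from the classmethods shown in this record's context; the company name, credentials and the shape of the provider login form passed as `data` are hypothetical and provider-specific.

from gremlinapi.oauth import GremlinAPIOAUTH

company = "ExampleCo"  # hypothetical company name
state_cookie, login_url = GremlinAPIOAUTH.initiate_oauth(company)
callback_url = GremlinAPIOAUTH.get_callback_url(
    login_url,
    data={"username": "user@example.com", "password": "secret"},  # form fields depend on the OAuth provider
)
access_token = GremlinAPIOAUTH.get_access_token(state_cookie, callback_url)
bearer_token = GremlinAPIOAUTH.get_bearer_token(company, access_token)
# bearer_token can now be attached to authenticated Gremlin API requests.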
vincent-lg/tsunami
src/primaires/salle/masques/etendue/__init__.py
Etendue.valider
python
def valider(self, personnage, dic_masques): Masque.valider(self, personnage, dic_masques) ident = self.a_interpreter try: etendue = type(self).importeur.salle.etendues[ident] except KeyError: raise ErreurValidation( "|err|L'identifiant '{}' n'est pas valide.|ff|".format(ident)) self.etendue = etendue self.ident = etendue.cle return True
Mask validation
https://github.com/vincent-lg/tsunami/blob/36b3b974f6eefbf15cd5d5f099fc14630e66570b/src/primaires/salle/masques/etendue/__init__.py#L68-L82
from primaires.interpreteur.masque.masque import Masque from primaires.interpreteur.masque.fonctions import * from primaires.interpreteur.masque.exceptions.erreur_validation import ErreurValidation class Etendue(Masque): nom = "etendue" nom_complet = "étendue d'eau" def init(self): self.identifiant = "" self.etendue = None def repartir(self, personnage, masques, commande): ident = liste_vers_chaine(commande) if not ident: raise ErreurValidation( "Précisez un identifiant d'étendue d'eau.") ident = ident.split(" ")[0].lower() self.a_interpreter = ident commande[:] = commande[len(ident):] masques.append(self) return True
BSD 3-Clause New or Revised License
virtuesecurity/aws-extender
BappModules/boto/iam/connection.py
IAMConnection.get_user
python
def get_user(self, user_name=None): params = {} if user_name: params['UserName'] = user_name return self.get_response('GetUser', params)
Retrieve information about the specified user. If the user_name is not specified, the user_name is determined implicitly based on the AWS Access Key ID used to sign the request. :type user_name: string :param user_name: The name of the user to retrieve. If not specified, defaults to the user making the request.
https://github.com/virtuesecurity/aws-extender/blob/3029dd26bd7bdf7f4148e1e92adf9f8c547cafbe/BappModules/boto/iam/connection.py#L373-L387
import boto import boto.jsonresponse from boto.compat import json, six from boto.resultset import ResultSet from boto.iam.summarymap import SummaryMap from boto.connection import AWSQueryConnection DEFAULT_POLICY_DOCUMENTS = { 'default': { 'Statement': [ { 'Principal': { 'Service': ['ec2.amazonaws.com'] }, 'Effect': 'Allow', 'Action': ['sts:AssumeRole'] } ] }, 'amazonaws.com.cn': { 'Statement': [ { 'Principal': { 'Service': ['ec2.amazonaws.com.cn'] }, 'Effect': 'Allow', 'Action': ['sts:AssumeRole'] } ] }, } ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default']) class IAMConnection(AWSQueryConnection): APIVersion = '2010-05-08' def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host='iam.amazonaws.com', debug=0, https_connection_factory=None, path='/', security_token=None, validate_certs=True, profile_name=None): super(IAMConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, host, debug, https_connection_factory, path, security_token, validate_certs=validate_certs, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] def get_response(self, action, params, path='/', parent=None, verb='POST', list_marker='Set'): if not parent: parent = self response = self.make_request(action, params, path, verb) body = response.read() boto.log.debug(body) if response.status == 200: if body: e = boto.jsonresponse.Element(list_marker=list_marker, pythonize_name=True) h = boto.jsonresponse.XmlHandler(e, parent) h.parse(body) return e else: return {} else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) def get_all_groups(self, path_prefix='/', marker=None, max_items=None): params = {} if path_prefix: params['PathPrefix'] = path_prefix if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListGroups', params, list_marker='Groups') def get_group(self, group_name, marker=None, max_items=None): params = {'GroupName': group_name} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('GetGroup', params, list_marker='Users') def create_group(self, group_name, path='/'): params = {'GroupName': group_name, 'Path': path} return self.get_response('CreateGroup', params) def delete_group(self, group_name): params = {'GroupName': group_name} return self.get_response('DeleteGroup', params) def update_group(self, group_name, new_group_name=None, new_path=None): params = {'GroupName': group_name} if new_group_name: params['NewGroupName'] = new_group_name if new_path: params['NewPath'] = new_path return self.get_response('UpdateGroup', params) def add_user_to_group(self, group_name, user_name): params = {'GroupName': group_name, 'UserName': user_name} return self.get_response('AddUserToGroup', params) def remove_user_from_group(self, group_name, user_name): params = {'GroupName': group_name, 'UserName': user_name} return self.get_response('RemoveUserFromGroup', params) def put_group_policy(self, group_name, policy_name, policy_json): params = {'GroupName': group_name, 'PolicyName': policy_name, 'PolicyDocument': policy_json} return self.get_response('PutGroupPolicy', params, verb='POST') def get_all_group_policies(self, group_name, marker=None, max_items=None): params = {'GroupName': 
group_name} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListGroupPolicies', params, list_marker='PolicyNames') def get_group_policy(self, group_name, policy_name): params = {'GroupName': group_name, 'PolicyName': policy_name} return self.get_response('GetGroupPolicy', params, verb='POST') def delete_group_policy(self, group_name, policy_name): params = {'GroupName': group_name, 'PolicyName': policy_name} return self.get_response('DeleteGroupPolicy', params, verb='POST') def get_all_users(self, path_prefix='/', marker=None, max_items=None): params = {'PathPrefix': path_prefix} if marker: params['Marker'] = marker if max_items: params['MaxItems'] = max_items return self.get_response('ListUsers', params, list_marker='Users') def create_user(self, user_name, path='/'): params = {'UserName': user_name, 'Path': path} return self.get_response('CreateUser', params) def delete_user(self, user_name): params = {'UserName': user_name} return self.get_response('DeleteUser', params)
MIT License
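A short usage sketch for get_user above with the legacy boto (boto2) IAM connection; credentials are assumed to come from the usual boto credential chain and the user name is illustrative.

import boto

iam = boto.connect_iam()                 # boto2 IAMConnection
me = iam.get_user()                      # implicit: the user whose access key signed the request
alice = iam.get_user(user_name="alice")  # explicit user
# Both calls return a dict-like jsonresponse.Element mirroring the pythonized GetUser XML response.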
candidco/confidential
tests/conftest.py
store_secret
python
def store_secret(): def wrapped(key, value): sm = boto3.client("secretsmanager", region_name="us-west-1") sm.create_secret(Name=key, SecretString=value) return wrapped
Stores a secret in SecretsManager (mocked)
https://github.com/candidco/confidential/blob/b4b00fbef60e75f18e0586f19f140db6ce995d24/tests/conftest.py#L22-L31
import json import tempfile from contextlib import contextmanager import boto3 import pytest from moto import mock_secretsmanager @pytest.yield_fixture(autouse=True) def mock_secrets_manager(): mock = mock_secretsmanager() mock.start() yield mock.stop() @pytest.fixture()
Apache License 2.0
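A sketch of how the store_secret fixture factory above is typically consumed in a test; the test name and the secret key/value are illustrative, and the autouse mock_secrets_manager fixture keeps everything inside moto's fake SecretsManager.

import boto3

def test_reads_back_stored_secret(store_secret):
    store_secret("prod/db_password", "hunter2")
    sm = boto3.client("secretsmanager", region_name="us-west-1")
    value = sm.get_secret_value(SecretId="prod/db_password")["SecretString"]
    assert value == "hunter2"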
ceteri/exelixi
src/ga.py
Individual.get_json_feature_set
python
def get_json_feature_set (self): return dumps(tuple(self._feature_set))
dump the feature set as a JSON string
https://github.com/ceteri/exelixi/blob/81bb97d3e99fe055e3816a5692b4dc29cdce6c94/src/ga.py#L403-L405
from hat_trie import Trie from collections import Counter from gevent import Greenlet from hashlib import sha224 from hashring import HashRing from json import dumps, loads from monoids import dictm from random import random, sample from service import UnitOfWork from string import ascii_lowercase from util import instantiate_class, post_distrib_rest import logging import sys class Population (UnitOfWork): def __init__ (self, uow_name, prefix, indiv_instance): super(Population, self).__init__(uow_name, prefix) logging.debug("INIT POPULATION") self.indiv_class = indiv_instance.__class__ self.total_indiv = 0 self.current_gen = 0 self._shard = {} self._trie = Trie(ascii_lowercase) def perform_task (self, payload): key = payload["key"] gen = payload["gen"] feature_set = payload["feature_set"] self.receive_reify(key, gen, feature_set) def orchestrate (self, framework): framework.send_ring_rest("pop/init", {}) framework.send_ring_rest("pop/gen", {}) while True: framework.phase_barrier() if self.current_gen == self.uow_factory.n_gen: break self.total_indiv = 0 hist = {} for shard_msg in framework.send_ring_rest("pop/hist", {}): logging.debug(shard_msg) payload = loads(shard_msg) self.total_indiv += payload["total_indiv"] hist = dictm.fold([hist, payload["hist"]]) hist_items = map(lambda x: (float(x[0]), x[1],), sorted(hist.items(), reverse=True)) if self.test_termination(self.current_gen, hist_items): break fitness_cutoff = self.get_fitness_cutoff(hist_items) framework.send_ring_rest("pop/next", { "current_gen": self.current_gen, "fitness_cutoff": fitness_cutoff }) self.current_gen += 1 results = [] for l in framework.send_ring_rest("pop/enum", { "fitness_cutoff": fitness_cutoff }): results.extend(loads(l)) results.sort(reverse=True) for x in results: print "\t".join(x) def handle_endpoints (self, worker, uri_path, env, start_response, body): if uri_path == '/pop/init': Greenlet(self.pop_init, worker, env, start_response, body).start() return True elif uri_path == '/pop/gen': Greenlet(self.pop_gen, worker, env, start_response, body).start() return True elif uri_path == '/pop/hist': Greenlet(self.pop_hist, worker, env, start_response, body).start() return True elif uri_path == '/pop/next': Greenlet(self.pop_next, worker, env, start_response, body).start() return True elif uri_path == '/pop/enum': Greenlet(self.pop_enum, worker, env, start_response, body).start() return True elif uri_path == '/pop/reify': Greenlet(self.pop_reify, worker, env, start_response, body).start() return True else: return False def pop_init (self, *args, **kwargs): worker = args[0] payload, start_response, body = worker.get_response_context(args[1:]) if worker.auth_request(payload, start_response, body): self.set_ring(worker.shard_id, worker.ring) worker.prep_task_queue() start_response('200 OK', [('Content-Type', 'text/plain')]) body.put("Bokay\r\n") body.put(StopIteration) def pop_gen (self, *args, **kwargs): worker = args[0] payload, start_response, body = worker.get_response_context(args[1:]) if worker.auth_request(payload, start_response, body): with worker.wrap_task_event(): start_response('200 OK', [('Content-Type', 'text/plain')]) body.put("Bokay\r\n") body.put(StopIteration) self.populate(0) def pop_hist (self, *args, **kwargs): worker = args[0] payload, start_response, body = worker.get_response_context(args[1:]) if worker.auth_request(payload, start_response, body): start_response('200 OK', [('Content-Type', 'application/json')]) body.put(dumps({ "total_indiv": self.total_indiv, "hist": self.get_part_hist() })) 
body.put("\r\n") body.put(StopIteration) def pop_next (self, *args, **kwargs): worker = args[0] payload, start_response, body = worker.get_response_context(args[1:]) if worker.auth_request(payload, start_response, body): with worker.wrap_task_event(): start_response('200 OK', [('Content-Type', 'text/plain')]) body.put("Bokay\r\n") body.put(StopIteration) current_gen = payload["current_gen"] fitness_cutoff = payload["fitness_cutoff"] self.next_generation(current_gen, fitness_cutoff) def pop_enum (self, *args, **kwargs): worker = args[0] payload, start_response, body = worker.get_response_context(args[1:]) if worker.auth_request(payload, start_response, body): fitness_cutoff = payload["fitness_cutoff"] start_response('200 OK', [('Content-Type', 'application/json')]) body.put(dumps(self.enum(fitness_cutoff))) body.put("\r\n") body.put(StopIteration) def pop_reify (self, *args, **kwargs): worker = args[0] payload, start_response, body = worker.get_response_context(args[1:]) if worker.auth_request(payload, start_response, body): worker.put_task_queue(payload) start_response('200 OK', [('Content-Type', 'text/plain')]) body.put("Bokay\r\n") body.put(StopIteration) def populate (self, current_gen): for _ in xrange(self.uow_factory.n_pop): indiv = self.indiv_class() indiv.populate(current_gen, self.uow_factory.generate_features()) self.reify(indiv) def reify (self, indiv): neighbor_shard_id = None shard_uri = None if self._hash_ring: neighbor_shard_id = self._hash_ring.get_node(indiv.key) if neighbor_shard_id != self._shard_id: shard_uri = self._shard_dict[neighbor_shard_id] if shard_uri: msg = { "key": indiv.key, "gen": indiv.gen, "feature_set": loads(indiv.get_json_feature_set()) } lines = post_distrib_rest(self.prefix, neighbor_shard_id, shard_uri, "pop/reify", msg) return False else: return self._reify_locally(indiv) def receive_reify (self, key, gen, feature_set): indiv = self.indiv_class() indiv.populate(gen, feature_set) self._reify_locally(indiv) def _reify_locally (self, indiv): if not (indiv.key in self._trie): self._trie[indiv.key] = 1 self.total_indiv += 1 indiv.get_fitness(self.uow_factory, force=True) self._shard[indiv.key] = indiv return True else: return False def evict (self, indiv): if indiv.key in self._shard: del self._shard[indiv.key] url = self._get_storage_path(indiv) def get_part_hist (self): l = [ round(indiv.get_fitness(self.uow_factory, force=False), self.uow_factory.hist_granularity) for indiv in self._shard.values() ] return dict(Counter(l)) def get_fitness_cutoff (self, hist_items): logging.debug("fit: %s", hist_items) n_indiv = sum([ count for bin, count in hist_items ]) part_sum = 0 break_next = False for bin, count in hist_items: if break_next: break part_sum += count percentile = part_sum / float(n_indiv) break_next = percentile >= self.uow_factory.selection_rate logging.debug("fit: percentile %f part_sum %d n_indiv %d bin %f", percentile, part_sum, n_indiv, bin) return bin def _get_storage_path (self, indiv): return self.prefix + "/" + indiv.key def _boost_diversity (self, current_gen, indiv): if self.uow_factory.mutation_rate > random(): indiv.mutate(self, current_gen, self.uow_factory) elif len(self._shard.values()) >= 3: self.evict(indiv) def _select_parents (self, current_gen, fitness_cutoff): partition = map(lambda x: (round(x.get_fitness(), self.uow_factory.hist_granularity) > fitness_cutoff, x), self._shard.values()) good_fit = map(lambda x: x[1], filter(lambda x: x[0], partition)) poor_fit = map(lambda x: x[1], filter(lambda x: not x[0], partition)) for 
indiv in poor_fit: self._boost_diversity(current_gen, indiv) return self._shard.values() def next_generation (self, current_gen, fitness_cutoff): parents = self._select_parents(current_gen, fitness_cutoff) for _ in xrange(self.uow_factory.n_pop - len(parents)): f, m = sample(parents, 2) success = f.breed(self, current_gen, m, self.uow_factory) new_count = 0 for _ in xrange(self.uow_factory.n_pop - len(self._shard.values())): indiv = self.indiv_class() indiv.populate(current_gen, self.uow_factory.generate_features()) self.reify(indiv) logging.info("gen\t%d\tshard\t%s\tsize\t%d\ttotal\t%d", current_gen, self._shard_id, len(self._shard.values()), self.total_indiv) def test_termination (self, current_gen, hist): return self.uow_factory.test_termination(current_gen, hist, self.total_indiv) def enum (self, fitness_cutoff): return [[ "indiv", "%0.4f" % indiv.get_fitness(), str(indiv.gen), indiv.get_json_feature_set() ] for indiv in filter(lambda x: x.get_fitness() >= fitness_cutoff, self._shard.values()) ] class Individual (object): def __init__ (self): self.gen = None self.key = None self._feature_set = None self._fitness = None def get_fitness (self, uow_factory=None, force=False): if uow_factory and uow_factory.use_force(force): self._fitness = uow_factory.get_fitness(self._feature_set) return self._fitness
Apache License 2.0
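A minimal sketch for get_json_feature_set above. Individual.populate() is not shown in this excerpt but is called elsewhere in ga.py as populate(gen, feature_set), so its signature here is an assumption; importing ga also pulls in the module's heavier dependencies (gevent, hat_trie).

from ga import Individual  # module lives at src/ga.py in this repo

indiv = Individual()
indiv.populate(0, ("a", "b", "c"))   # assumed signature: populate(gen, feature_set)
print(indiv.get_json_feature_set())  # '["a", "b", "c"]'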
shallowtoil/drol
pysot/tracker/classifier/libs/fourier.py
cfft2
python
def cfft2(a): return rfftshift2(torch.rfft(a, 2))
Do FFT and center the low frequency component. Always produces odd (full) output sizes.
https://github.com/shallowtoil/drol/blob/4aebe575394bc035e9924c8711c7d5d76bfef37a/pysot/tracker/classifier/libs/fourier.py#L20-L24
import torch import torch.nn.functional as F import pysot.tracker.classifier.libs.complex as complex from .tensorlist import tensor_operation, TensorList @tensor_operation def rfftshift2(a: torch.Tensor): h = a.shape[2] + 2 return torch.cat((a[:,:,(h-1)//2:,...], a[:,:,:h//2,...]), 2) @tensor_operation def irfftshift2(a: torch.Tensor): mid = int((a.shape[2]-1)/2) return torch.cat((a[:,:,mid:,...], a[:,:,:mid,...]), 2) @tensor_operation
Apache License 2.0
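A usage sketch for cfft2 above. It relies on torch.rfft, which only exists in older PyTorch releases (it was removed around 1.8), so treat this as a legacy-API example; the import path follows the file location in this record.

import torch
from pysot.tracker.classifier.libs.fourier import cfft2

a = torch.randn(1, 1, 8, 8)  # real-valued (batch, channel, H, W) input
af = cfft2(a)
# Half-spectrum along the last spatial axis, centered (odd/full size) along the other,
# plus a trailing dim of 2 for (real, imag): torch.Size([1, 1, 9, 5, 2])
print(af.shape)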
vojtamolda/autodrome
autodrome/policeman/definition.py
Definition.__sizeof__
python
def __sizeof__(self): from sys import getsizeof from inspect import getmro from itertools import chain counted = set() containers = { list: lambda lst: lst, tuple: lambda tpl: tpl, dict: lambda dct: chain(dct.keys(), dct.values()) } def sizeof(item: object) -> int: if id(item) in counted: return 0 counted.add(id(item)) size = getsizeof(item, getsizeof(1)) baseclass = getmro(type(item))[-2] if baseclass in containers: iterator = containers[baseclass] size += sum(map(sizeof, iterator(item))) return size return sum(map(sizeof, containers[dict](self)))
Recursively calculate size of the contained data
https://github.com/vojtamolda/autodrome/blob/3e3aa1198ac96f3c7453f9797b776239801434ae/autodrome/policeman/definition.py#L261-L285
import struct import unittest import warnings from pathlib import Path from multiprocessing import Pool from pyparsing import Word, Group, Suppress, Combine, Optional, QuotedString, Keyword, ZeroOrMore, CharsNotIn, ParseException, alphanums, nums, hexnums, delimitedList, cStyleComment, dblSlashComment, pythonStyleComment class DefinitionFile(dict): class Grammar: class Parse: @staticmethod def int(toks): toks[0] = int(toks[0]) return toks @staticmethod def float(toks): if toks[0].startswith('&'): binary = bytes.fromhex(toks[0][1:]) toks[0] = struct.unpack('>f', binary)[0] else: toks[0] = float(toks[0]) return toks @staticmethod def bool(toks): toks[0] = (toks[0] == 'true') return toks @staticmethod def reference(toks): toks[0] = DefinitionFile.Reference(toks[0]) return toks @staticmethod def tuple(toks): toks[0] = tuple(toks[0]) return toks @staticmethod def include(toks): pass identifier = Word(alphanums + '_') name = Optional(Suppress('"')) + Word(alphanums + '.' + '_') + Optional(Suppress('"')) intValue = Word(nums + '-', nums).setParseAction(Parse.int) int = identifier + Suppress(':') + intValue int.setParseAction(lambda toks: toks.insert(0, 'int')) binaryFloat = Word('&', hexnums) regularFloat = Word(nums + '-', nums + '.' + 'eE' + '-') floatValue = (regularFloat ^ binaryFloat).setParseAction(Parse.float) float = identifier + Suppress(':') + floatValue float.setParseAction(lambda toks: toks.insert(0, 'float')) boolValue = (Keyword('true') ^ Keyword('false')).setParseAction(Parse.bool) bool = identifier + Suppress(':') + boolValue bool.setParseAction(lambda toks: toks.insert(0, 'bool')) textValue = QuotedString('"', multiline=True) ^ identifier text = identifier + Suppress(':') + textValue text.setParseAction(lambda toks: toks.insert(0, 'text')) tupleValue = Group(Suppress('(') + delimitedList(intValue ^ floatValue, delim=',') + Suppress(')')) tupleValue.setParseAction(Parse.tuple) tuple = identifier + Suppress(':') + tupleValue tuple.setParseAction(lambda toks: toks.insert(0, 'tuple')) referenceValue = Word(alphanums + '.' 
+ '_').setParseAction(Parse.reference) reference = identifier + Suppress(':') + referenceValue reference.setParseAction(lambda toks: toks.insert(0, 'reference')) arrayValue = (intValue ^ floatValue ^ boolValue ^ textValue ^ tupleValue ^ referenceValue) array = Combine(identifier + Suppress('[' + Optional(intValue) + ']')) + Suppress(':') + arrayValue array.setParseAction(lambda toks: toks.insert(0, 'array')) label = Group(identifier + Suppress(':') + name) property = Group(int ^ float ^ bool ^ text ^ tuple ^ reference ^ array) include = Suppress(Keyword('@include')) + QuotedString('"').setParseAction(Parse.include) entry = label + Suppress('{') + ZeroOrMore(property ^ include) + Suppress('}') junk = ZeroOrMore(CharsNotIn(alphanums)) header = Suppress(junk + Optional(Keyword('SiiNunit') + '{')) footer = Suppress(Optional('}')) file = header + ZeroOrMore(Group(entry ^ include)) + footer file.ignore(cStyleComment) file.ignore(dblSlashComment) file.ignore(pythonStyleComment) @classmethod def tokenize(cls, string: str) -> list: return cls.file.parseString(string, parseAll=True).asList() class Reference(str): pass Constructors = { 'int': int, 'bool': lambda bln: bln, 'float': float, 'text': str, 'tuple': lambda tpl: tpl, 'reference': Reference, 'array': list, } def __init__(self, path: Path=None): super().__init__() self.path = path if path is None or path.suffixes == ['.custom', '.sii']: return with path.open('rt') as file: try: content = file.read() tokens = self.Grammar.tokenize(content) self.parse(tokens) except ParseException as exc: exc.msg = (f"{exc.msg}\n" f"File \"{path}\"\n" f"Entry \"{exc.line}\")") raise exc def __getattr__(self, item: object) -> object: return self[item] if item in self else None def __getstate__(self) -> dict: return self.__dict__ def __setstate__(self, dct: dict): self.__dict__.update(dct) def parse(self, tokens: list): def structuralize(iterator: iter) -> dict: structure = {} for kind, identifier, value in iterator: constructor = self.Constructors[kind] if kind == 'array': if identifier not in structure or not isinstance(structure[identifier], constructor): structure[identifier] = constructor() structure[identifier].append(value) else: if identifier in structure: message = (f"Duplicate value found during parsing:\n" f"File \"{self.path}\"\n" f"Value \"{group}:{name}::{identifier}\"") warnings.warn(message, SyntaxWarning) structure[identifier] = constructor(value) return structure for entry in tokens: iterator, container = iter(entry), self try: group, name = next(iterator) except Exception: continue for piece in name.split('.'): if piece not in container: container[piece] = {} supercontainer = container container = container[piece] supercontainer[piece] = structuralize(iterator) class Definition(dict): def __init__(self, directory: Path, recursive=False): super().__init__() siiFiles = directory.glob('**/*.sii' if recursive is True else '*.sii') siiFiles = sorted(siiFiles, key=lambda file: file.stat().st_size, reverse=True) with Pool() as pool: subDefinitions = pool.map(DefinitionFile, siiFiles) for subDefinition in subDefinitions: self.merge(subDefinition) self.resolve() def merge(self, another: DefinitionFile): recurse = [] def merge(this: dict, other: dict): for identifier, value in other.items(): if identifier in this: if isinstance(value, dict): recurse.append(identifier) merge(this[identifier], other[identifier]) recurse.pop() elif isinstance(value, list): this[identifier].extend(other[identifier]) else: message = ("Duplicate found during merging:\n" "File 
\"{path}\"\n" "Key \"{name}::{ident}\"") message = message.format(name=".".join(recurse), ident=identifier, path=another.path) warnings.warn(message, RuntimeWarning) this[identifier] = value else: this[identifier] = value merge(self, another) def resolve(self): recurse = [] iterators = { dict: lambda dct: dct.items(), list: lambda lst: enumerate(lst), Definition: lambda dfn: dfn.items(), } def resolve(container: object): for key, item in iterators[type(container)](container): if isinstance(item, DefinitionFile.Reference): try: resolved = self for piece in item.split('.'): resolved = resolved[piece] container[key] = resolved except KeyError: name, identifier = ".".join(recurse), key identifier = key message = (f"Unresolved reference\n" f"Key \"{name}::{identifier}\"\n" f"Reference \"{item}\"") warnings.warn(message, RuntimeWarning) elif type(item) in iterators: recurse.append(key) resolve(item) recurse.pop() resolve(self)
MIT License
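Illustrative only: __sizeof__ above is the hook that sys.getsizeof() calls, so measuring a parsed Definition looks like the sketch below; the directory of extracted .sii files is a placeholder.

import sys
from pathlib import Path
from autodrome.policeman.definition import Definition

definitions = Definition(Path("./def"), recursive=True)  # placeholder path to extracted .sii files
# sys.getsizeof() invokes Definition.__sizeof__ (plus a small GC header overhead).
print(sys.getsizeof(definitions))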
wbond/oscrypto
oscrypto/_mac/symmetric.py
_decrypt
python
def _decrypt(cipher, key, data, iv, padding): if not isinstance(key, byte_cls): raise TypeError(pretty_message( ''' key must be a byte string, not %s ''', type_name(key) )) if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) if cipher != Security.kSecAttrKeyTypeRC4 and not isinstance(iv, byte_cls): raise TypeError(pretty_message( ''' iv must be a byte string, not %s ''', type_name(iv) )) if cipher != Security.kSecAttrKeyTypeRC4 and not padding: raise ValueError('padding must be specified') cf_dict = None cf_key = None cf_data = None cf_iv = None sec_key = None sec_transform = None try: cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, cipher)]) cf_key = CFHelpers.cf_data_from_bytes(key) cf_data = CFHelpers.cf_data_from_bytes(data) error_pointer = new(CoreFoundation, 'CFErrorRef *') sec_key = Security.SecKeyCreateFromData(cf_dict, cf_key, error_pointer) handle_cf_error(error_pointer) sec_transform = Security.SecDecryptTransformCreate(sec_key, error_pointer) handle_cf_error(error_pointer) if cipher != Security.kSecAttrKeyTypeRC4: Security.SecTransformSetAttribute(sec_transform, Security.kSecModeCBCKey, null(), error_pointer) handle_cf_error(error_pointer) Security.SecTransformSetAttribute(sec_transform, Security.kSecPaddingKey, padding, error_pointer) handle_cf_error(error_pointer) cf_iv = CFHelpers.cf_data_from_bytes(iv) Security.SecTransformSetAttribute(sec_transform, Security.kSecIVKey, cf_iv, error_pointer) handle_cf_error(error_pointer) Security.SecTransformSetAttribute( sec_transform, Security.kSecTransformInputAttributeName, cf_data, error_pointer ) handle_cf_error(error_pointer) plaintext = Security.SecTransformExecute(sec_transform, error_pointer) handle_cf_error(error_pointer) return CFHelpers.cf_data_to_bytes(plaintext) finally: if cf_dict: CoreFoundation.CFRelease(cf_dict) if cf_key: CoreFoundation.CFRelease(cf_key) if cf_data: CoreFoundation.CFRelease(cf_data) if cf_iv: CoreFoundation.CFRelease(cf_iv) if sec_key: CoreFoundation.CFRelease(sec_key) if sec_transform: CoreFoundation.CFRelease(sec_transform)
Decrypts AES/RC4/RC2/3DES/DES ciphertext :param cipher: A kSecAttrKeyType* value that specifies the cipher to use :param key: The encryption key - a byte string 5-16 bytes long :param data: The ciphertext - a byte string :param iv: The initialization vector - a byte string - unused for RC4 :param padding: The padding mode to use, specified as a kSecPadding*Key value - unused for RC4 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the plaintext
https://github.com/wbond/oscrypto/blob/d40c62577706682a0f6da5616ad09964f1c9137d/oscrypto/_mac/symmetric.py#L647-L757
from __future__ import unicode_literals, division, absolute_import, print_function from .._errors import pretty_message from .._ffi import new, null from ._core_foundation import CoreFoundation, CFHelpers, handle_cf_error from ._security import Security from .util import rand_bytes from .._types import type_name, byte_cls __all__ = [ 'aes_cbc_no_padding_decrypt', 'aes_cbc_no_padding_encrypt', 'aes_cbc_pkcs7_decrypt', 'aes_cbc_pkcs7_encrypt', 'des_cbc_pkcs5_decrypt', 'des_cbc_pkcs5_encrypt', 'rc2_cbc_pkcs5_decrypt', 'rc2_cbc_pkcs5_encrypt', 'rc4_decrypt', 'rc4_encrypt', 'tripledes_cbc_pkcs5_decrypt', 'tripledes_cbc_pkcs5_encrypt', ] def aes_cbc_no_padding_encrypt(key, data, iv): if len(key) not in [16, 24, 32]: raise ValueError(pretty_message( ''' key must be either 16, 24 or 32 bytes (128, 192 or 256 bits) long - is %s ''', len(key) )) if not iv: iv = rand_bytes(16) elif len(iv) != 16: raise ValueError(pretty_message( ''' iv must be 16 bytes long - is %s ''', len(iv) )) if len(data) % 16 != 0: raise ValueError(pretty_message( ''' data must be a multiple of 16 bytes long - is %s ''', len(data) )) return (iv, _encrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingNoneKey)) def aes_cbc_no_padding_decrypt(key, data, iv): if len(key) not in [16, 24, 32]: raise ValueError(pretty_message( ''' key must be either 16, 24 or 32 bytes (128, 192 or 256 bits) long - is %s ''', len(key) )) if len(iv) != 16: raise ValueError(pretty_message( ''' iv must be 16 bytes long - is %s ''', len(iv) )) return _decrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingNoneKey) def aes_cbc_pkcs7_encrypt(key, data, iv): if len(key) not in [16, 24, 32]: raise ValueError(pretty_message( ''' key must be either 16, 24 or 32 bytes (128, 192 or 256 bits) long - is %s ''', len(key) )) if not iv: iv = rand_bytes(16) elif len(iv) != 16: raise ValueError(pretty_message( ''' iv must be 16 bytes long - is %s ''', len(iv) )) return (iv, _encrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingPKCS7Key)) def aes_cbc_pkcs7_decrypt(key, data, iv): if len(key) not in [16, 24, 32]: raise ValueError(pretty_message( ''' key must be either 16, 24 or 32 bytes (128, 192 or 256 bits) long - is %s ''', len(key) )) if len(iv) != 16: raise ValueError(pretty_message( ''' iv must be 16 bytes long - is %s ''', len(iv) )) return _decrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingPKCS7Key) def rc4_encrypt(key, data): if len(key) < 5 or len(key) > 16: raise ValueError(pretty_message( ''' key must be 5 to 16 bytes (40 to 128 bits) long - is %s ''', len(key) )) return _encrypt(Security.kSecAttrKeyTypeRC4, key, data, None, None) def rc4_decrypt(key, data): if len(key) < 5 or len(key) > 16: raise ValueError(pretty_message( ''' key must be 5 to 16 bytes (40 to 128 bits) long - is %s ''', len(key) )) return _decrypt(Security.kSecAttrKeyTypeRC4, key, data, None, None) def rc2_cbc_pkcs5_encrypt(key, data, iv): if len(key) < 5 or len(key) > 16: raise ValueError(pretty_message( ''' key must be 5 to 16 bytes (40 to 128 bits) long - is %s ''', len(key) )) if not iv: iv = rand_bytes(8) elif len(iv) != 8: raise ValueError(pretty_message( ''' iv must be 8 bytes long - is %s ''', len(iv) )) return (iv, _encrypt(Security.kSecAttrKeyTypeRC2, key, data, iv, Security.kSecPaddingPKCS5Key)) def rc2_cbc_pkcs5_decrypt(key, data, iv): if len(key) < 5 or len(key) > 16: raise ValueError(pretty_message( ''' key must be 5 to 16 bytes (40 to 128 bits) long - is %s ''', len(key) )) if len(iv) != 8: raise 
ValueError(pretty_message( ''' iv must be 8 bytes long - is %s ''', len(iv) )) return _decrypt(Security.kSecAttrKeyTypeRC2, key, data, iv, Security.kSecPaddingPKCS5Key) def tripledes_cbc_pkcs5_encrypt(key, data, iv): if len(key) != 16 and len(key) != 24: raise ValueError(pretty_message( ''' key must be 16 bytes (2 key) or 24 bytes (3 key) long - %s ''', len(key) )) if not iv: iv = rand_bytes(8) elif len(iv) != 8: raise ValueError(pretty_message( ''' iv must be 8 bytes long - %s ''', len(iv) )) if len(key) == 16: key = key + key[0:8] return (iv, _encrypt(Security.kSecAttrKeyType3DES, key, data, iv, Security.kSecPaddingPKCS5Key)) def tripledes_cbc_pkcs5_decrypt(key, data, iv): if len(key) != 16 and len(key) != 24: raise ValueError(pretty_message( ''' key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s ''', len(key) )) if len(iv) != 8: raise ValueError(pretty_message( ''' iv must be 8 bytes long - is %s ''', len(iv) )) if len(key) == 16: key = key + key[0:8] return _decrypt(Security.kSecAttrKeyType3DES, key, data, iv, Security.kSecPaddingPKCS5Key) def des_cbc_pkcs5_encrypt(key, data, iv): if len(key) != 8: raise ValueError(pretty_message( ''' key must be 8 bytes (56 bits + 8 parity bits) long - is %s ''', len(key) )) if not iv: iv = rand_bytes(8) elif len(iv) != 8: raise ValueError(pretty_message( ''' iv must be 8 bytes long - is %s ''', len(iv) )) return (iv, _encrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key)) def des_cbc_pkcs5_decrypt(key, data, iv): if len(key) != 8: raise ValueError(pretty_message( ''' key must be 8 bytes (56 bits + 8 parity bits) long - is %s ''', len(key) )) if len(iv) != 8: raise ValueError(pretty_message( ''' iv must be 8 bytes long - is %s ''', len(iv) )) return _decrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key) def _encrypt(cipher, key, data, iv, padding): if not isinstance(key, byte_cls): raise TypeError(pretty_message( ''' key must be a byte string, not %s ''', type_name(key) )) if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) if cipher != Security.kSecAttrKeyTypeRC4 and not isinstance(iv, byte_cls): raise TypeError(pretty_message( ''' iv must be a byte string, not %s ''', type_name(iv) )) if cipher != Security.kSecAttrKeyTypeRC4 and not padding: raise ValueError('padding must be specified') cf_dict = None cf_key = None cf_data = None cf_iv = None sec_key = None sec_transform = None try: cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, cipher)]) cf_key = CFHelpers.cf_data_from_bytes(key) cf_data = CFHelpers.cf_data_from_bytes(data) error_pointer = new(CoreFoundation, 'CFErrorRef *') sec_key = Security.SecKeyCreateFromData(cf_dict, cf_key, error_pointer) handle_cf_error(error_pointer) sec_transform = Security.SecEncryptTransformCreate(sec_key, error_pointer) handle_cf_error(error_pointer) if cipher != Security.kSecAttrKeyTypeRC4: Security.SecTransformSetAttribute(sec_transform, Security.kSecModeCBCKey, null(), error_pointer) handle_cf_error(error_pointer) Security.SecTransformSetAttribute(sec_transform, Security.kSecPaddingKey, padding, error_pointer) handle_cf_error(error_pointer) cf_iv = CFHelpers.cf_data_from_bytes(iv) Security.SecTransformSetAttribute(sec_transform, Security.kSecIVKey, cf_iv, error_pointer) handle_cf_error(error_pointer) Security.SecTransformSetAttribute( sec_transform, Security.kSecTransformInputAttributeName, cf_data, error_pointer ) handle_cf_error(error_pointer) 
ciphertext = Security.SecTransformExecute(sec_transform, error_pointer) handle_cf_error(error_pointer) return CFHelpers.cf_data_to_bytes(ciphertext) finally: if cf_dict: CoreFoundation.CFRelease(cf_dict) if cf_key: CoreFoundation.CFRelease(cf_key) if cf_data: CoreFoundation.CFRelease(cf_data) if cf_iv: CoreFoundation.CFRelease(cf_iv) if sec_key: CoreFoundation.CFRelease(sec_key) if sec_transform: CoreFoundation.CFRelease(sec_transform)
MIT License
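_decrypt above is the private macOS backend behind the module's public helpers, two of which appear in this record's context; a round-trip sketch through them follows. Importing from oscrypto._mac ties the example to macOS, and the stable public surface for the same helpers is oscrypto.symmetric.

import os
from oscrypto._mac.symmetric import aes_cbc_pkcs7_encrypt, aes_cbc_pkcs7_decrypt

key = os.urandom(32)                                                  # 256-bit AES key
iv, ciphertext = aes_cbc_pkcs7_encrypt(key, b"attack at dawn", None)  # None -> a random IV is generated
plaintext = aes_cbc_pkcs7_decrypt(key, ciphertext, iv)
assert plaintext == b"attack at dawn"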
dials/dials
algorithms/integration/report.py
IntegrationReport.__init__
python
def __init__(self, experiments, reflections): super().__init__() tables = reflections.split_by_experiment_id() assert len(tables) == len(experiments) report_list = [] for i, (expr, data) in enumerate(zip(experiments, tables)): report_list.append(generate_integration_report(expr, data)) table = Table() table.name = "integration.image.summary" table.title = "Summary vs image number" table.cols.append(("id", "ID")) table.cols.append(("image", "Image")) table.cols.append(("n_full", "# full")) table.cols.append(("n_part", "# part")) table.cols.append(("n_over", "# over")) table.cols.append(("n_ice", "# ice")) table.cols.append(("n_sum", "# sum")) table.cols.append(("n_prf", "# prf")) table.cols.append(("ibg", "Ibg")) table.cols.append(("ios_sum", "I/sigI\n (sum)")) table.cols.append(("ios_prf", "I/sigI\n (prf)")) table.cols.append(("cc_prf", "CC prf")) table.cols.append(("rmsd_xy", "RMSD XY")) for j, report in enumerate(report_list): report = report["image"] for i in range(len(report["bins"]) - 1): table.rows.append( [ "%d" % j, "%d" % (report["bins"][i] + 1), "%d" % report["n_full"][i], "%d" % report["n_partial"][i], "%d" % report["n_overload"][i], "%d" % report["n_ice"][i], "%d" % report["n_summed"][i], "%d" % report["n_fitted"][i], f"{report['mean_background'][i]:.2f}", f"{report['ios_sum'][i]:.2f}", f"{report['ios_prf'][i]:.2f}", f"{report['cc_prf'][i]:.2f}", f"{report['rmsd_xy'][i]:.2f}", ] ) self.add_table(table) table = Table() table.name = "integration.resolution.summary" table.title = "Summary vs resolution" table.cols.append(("id", "ID")) table.cols.append(("dmin", "d min")) table.cols.append(("n_full", "# full")) table.cols.append(("n_part", "# part")) table.cols.append(("n_over", "# over")) table.cols.append(("n_ice", "# ice")) table.cols.append(("n_sum", "# sum")) table.cols.append(("n_prf", "# prf")) table.cols.append(("ibg", "Ibg")) table.cols.append(("ios_sum", "I/sigI\n (sum)")) table.cols.append(("ios_prf", "I/sigI\n (prf)")) table.cols.append(("cc_prf", "CC prf")) table.cols.append(("rmsd_xy", "RMSD XY")) for j, report in enumerate(report_list): report = report["resolution"] for i in range(len(report["bins"]) - 1): table.rows.append( [ "%d" % j, f"{report['bins'][i]:.2f}", "%d" % report["n_full"][i], "%d" % report["n_partial"][i], "%d" % report["n_overload"][i], "%d" % report["n_ice"][i], "%d" % report["n_summed"][i], "%d" % report["n_fitted"][i], f"{report['mean_background'][i]:.2f}", f"{report['ios_sum'][i]:.2f}", f"{report['ios_prf'][i]:.2f}", f"{report['cc_prf'][i]:.2f}", f"{report['rmsd_xy'][i]:.2f}", ] ) self.add_table(table) for j, report in enumerate(report_list): report = report["summary"] summary = report["overall"] high = report["high"] low = report["low"] table = Table() table.name = "integration.overall.summary" table.title = "Summary for experiment %d" % j table.cols.append(("item", "Item")) table.cols.append(("overall", "Overall")) table.cols.append(("low", "Low")) table.cols.append(("high", "High")) desc_fmt_key = [ ("dmin", "%.2f", "dmin"), ("dmax", "%.2f", "dmax"), ("number fully recorded", "%d", "n_full"), ("number partially recorded", "%d", "n_partial"), ("number with invalid background pixels", "%d", "n_invalid_bg"), ("number with invalid foreground pixels", "%d", "n_invalid_fg"), ("number with overloaded pixels", "%d", "n_overload"), ("number in powder rings", "%d", "n_ice"), ("number processed with summation", "%d", "n_summed"), ("number processed with profile fitting", "%d", "n_fitted"), ("number failed in background modelling", "%d", 
"n_failed_background"), ("number failed in summation", "%d", "n_failed_summation"), ("number failed in profile fitting", "%d", "n_failed_fitting"), ("ibg", "%.2f", "mean_background"), ("i/sigi (summation)", "%.2f", "ios_sum"), ("i/sigi (profile fitting)", "%.2f", "ios_prf"), ("cc prf", "%.2f", "cc_prf"), ("cc_pearson sum/prf", "%.2f", "cc_pearson_sum_prf"), ("cc_spearman sum/prf", "%.2f", "cc_spearman_sum_prf"), ] for desc, fmt, key in desc_fmt_key: table.rows.append( [desc, fmt % summary[key], fmt % low[key], fmt % high[key]] ) self.add_table(table)
Create the integration report :param experiments: The experiment list :param reflections: The reflection table
https://github.com/dials/dials/blob/a2cb71bf410e179b92554bcce2e21388e1dc25d1/algorithms/integration/report.py#L298-L436
import collections from dials.array_family import flex from dials.array_family.flex import Binner from dials.util.report import Array, Report, Table def flex_ios(val, var): assert len(val) == len(var) result = flex.double(len(val), 0) indices = flex.size_t(range(len(val))).select(var > 0) val = val.select(indices) var = var.select(indices) assert var.all_gt(0) result.set_selected(indices, val / flex.sqrt(var)) return result def generate_integration_report(experiment, reflections, n_resolution_bins=20): from cctbx import crystal, miller from dials.algorithms.statistics import ( pearson_correlation_coefficient, spearman_correlation_coefficient, ) def overall_report(data): report = collections.OrderedDict() report["n"] = len(reflections) report["n_full"] = data["full"].count(True) report["n_partial"] = data["full"].count(False) report["n_overload"] = data["over"].count(True) report["n_ice"] = data["ice"].count(True) report["n_summed"] = data["sum"].count(True) report["n_fitted"] = data["prf"].count(True) report["n_integated"] = data["int"].count(True) report["n_invalid_bg"] = data["ninvbg"].count(True) report["n_invalid_fg"] = data["ninvfg"].count(True) report["n_failed_background"] = data["fbgd"].count(True) report["n_failed_summation"] = data["fsum"].count(True) report["n_failed_fitting"] = data["fprf"].count(True) try: report["mean_background"] = flex.mean( data["background.mean"].select(data["int"]) ) except Exception: report["mean_background"] = 0.0 try: report["ios_sum"] = flex.mean(data["intensity.sum.ios"].select(data["sum"])) except Exception: report["ios_sum"] = 0.0 try: report["ios_prf"] = flex.mean(data["intensity.prf.ios"].select(data["prf"])) except Exception: report["ios_prf"] = 0.0 try: report["cc_prf"] = flex.mean( data["profile.correlation"].select(data["prf"]) ) except Exception: report["cc_prf"] = 0.0 try: mask = data["sum"] & data["prf"] Isum = data["intensity.sum.value"].select(mask) Iprf = data["intensity.prf.value"].select(mask) report["cc_pearson_sum_prf"] = pearson_correlation_coefficient(Isum, Iprf) report["cc_spearman_sum_prf"] = spearman_correlation_coefficient(Isum, Iprf) except Exception: report["cc_pearson_sum_prf"] = 0.0 report["cc_spearman_sum_prf"] = 0.0 return report def binned_report(binner, index, data): indexer_all = binner.indexer(index) indexer_sum = binner.indexer(index.select(data["sum"])) indexer_prf = binner.indexer(index.select(data["prf"])) indexer_int = binner.indexer(index.select(data["int"])) report = collections.OrderedDict() report["bins"] = list(binner.bins()) report["n_full"] = list(indexer_all.sum(data["full"])) report["n_partial"] = list(indexer_all.sum(~data["full"])) report["n_overload"] = list(indexer_all.sum(data["over"])) report["n_ice"] = list(indexer_all.sum(data["ice"])) report["n_summed"] = list(indexer_all.sum(data["sum"])) report["n_fitted"] = list(indexer_all.sum(data["prf"])) report["n_integrated"] = list(indexer_all.sum(data["int"])) report["n_invalid_bg"] = list(indexer_all.sum(data["ninvbg"])) report["n_invalid_fg"] = list(indexer_all.sum(data["ninvfg"])) report["n_failed_background"] = list(indexer_all.sum(data["fbgd"])) report["n_failed_summation"] = list(indexer_all.sum(data["fsum"])) report["n_failed_fitting"] = list(indexer_all.sum(data["fprf"])) try: report["mean_background"] = list( indexer_int.mean(data["background.mean"].select(data["int"])) ) except Exception: report["mean_background"] = [0.0] * len(binner) try: report["ios_sum"] = list( indexer_sum.mean(data["intensity.sum.ios"].select(data["sum"])) ) except 
Exception: report["ios_sum"] = [0.0] * len(binner) try: report["ios_prf"] = list( indexer_prf.mean(data["intensity.prf.ios"].select(data["prf"])) ) except Exception: report["ios_prf"] = [0.0] * len(binner) try: report["cc_prf"] = list( indexer_prf.mean(data["profile.correlation"].select(data["prf"])) ) except Exception: report["cc_prf"] = [0.0] * len(binner) try: report["rmsd_xy"] = list( indexer_sum.mean(data["xyz.rmsd"].select(data["sum"])) ) except Exception: report["rmsd_xy"] = [0.0] * len(binner) return report def resolution_bins(experiment, hkl, nbins): cs = crystal.symmetry( space_group=experiment.crystal.get_space_group(), unit_cell=experiment.crystal.get_unit_cell(), ) ms = miller.set(cs, hkl) ms.setup_binner(n_bins=nbins) binner = ms.binner() brange = list(binner.range_used()) bins = [binner.bin_d_range(brange[0])[0]] for i in brange: bins.append(binner.bin_d_range(i)[1]) return flex.double(reversed(bins)) def select(data, indices): result = {key: value.select(indices) for key, value in data.items()} return result assert "miller_index" in reflections assert "d" in reflections assert "flags" in reflections assert "bbox" in reflections assert "xyzcal.px" in reflections assert "partiality" in reflections assert "intensity.sum.value" in reflections assert "intensity.sum.variance" in reflections flags = flex.reflection_table.flags data = {} for key in [ "miller_index", "xyzcal.px", "xyzobs.px.value", "d", "bbox", "background.mean", "partiality", "intensity.sum.value", "intensity.sum.variance", "intensity.prf.value", "intensity.prf.variance", "profile.correlation", ]: if key in reflections: data[key] = reflections[key] data["full"] = data["partiality"] > 0.997300203937 data["over"] = reflections.get_flags(flags.overloaded) data["ice"] = reflections.get_flags(flags.in_powder_ring) data["sum"] = reflections.get_flags(flags.integrated_sum) data["prf"] = reflections.get_flags(flags.integrated_prf) data["int"] = reflections.get_flags(flags.integrated, all=False) data["ninvbg"] = reflections.get_flags(flags.background_includes_bad_pixels) data["ninvfg"] = reflections.get_flags(flags.foreground_includes_bad_pixels) data["fbgd"] = reflections.get_flags(flags.failed_during_background_modelling) data["fsum"] = reflections.get_flags(flags.failed_during_summation) data["fprf"] = reflections.get_flags(flags.failed_during_profile_fitting) data["intensity.sum.ios"] = flex_ios( data["intensity.sum.value"], data["intensity.sum.variance"] ) try: data["intensity.prf.ios"] = flex_ios( data["intensity.prf.value"], data["intensity.prf.variance"] ) except Exception: pass try: xcal, ycal, zcal = data["xyzcal.px"].parts() xobs, yobs, zobs = data["xyzobs.px.value"].parts() data["xyz.rmsd"] = flex.sqrt(flex.pow2(xcal - xobs) + flex.pow2(ycal - yobs)) except Exception: pass resolution_binner = Binner( resolution_bins(experiment, data["miller_index"], n_resolution_bins) ) try: array_range = experiment.imageset.get_array_range() except Exception: array_range = (0, len(experiment.imageset)) frame_binner = Binner( flex.int(range(array_range[0], array_range[1] + 1)).as_double() ) overall = overall_report(data) hl_binner = resolution_binner.indexer(data["d"]) high_summary = overall_report(select(data, hl_binner.indices(0))) low_summary = overall_report(select(data, hl_binner.indices(n_resolution_bins - 1))) high_summary["dmin"] = resolution_binner.bins()[0] high_summary["dmax"] = resolution_binner.bins()[1] low_summary["dmin"] = resolution_binner.bins()[n_resolution_bins - 1] low_summary["dmax"] = 
resolution_binner.bins()[n_resolution_bins] overall["dmin"] = high_summary["dmin"] overall["dmax"] = low_summary["dmax"] summary = collections.OrderedDict( [("overall", overall), ("low", low_summary), ("high", high_summary)] ) resolution = binned_report(resolution_binner, data["d"], data) image = binned_report(frame_binner, data["xyzcal.px"].parts()[2], data) return collections.OrderedDict( [("summary", summary), ("resolution", resolution), ("image", image)] ) class IntegrationReport(Report):
BSD 3-Clause New or Revised License
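A small usage sketch for the flex_ios helper shown in the DIALS excerpt above. It assumes DIALS is installed and that flex_ios has been imported from the module the excerpt belongs to; the input numbers are purely illustrative. Entries with non-positive variance are left at zero instead of dividing by zero.

from dials.array_family import flex

# flex_ios is assumed to be in scope (defined in the excerpt above).
intensities = flex.double([10.0, 25.0, 7.5])
variances = flex.double([4.0, 0.0, 2.25])
print(list(flex_ios(intensities, variances)))  # [5.0, 0.0, 5.0] -- zero-variance entry stays 0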
andrewdarmawan/tncontract
tncontract/tensor.py
Tensor.index_dimension
python
def index_dimension(self, label):
    index = self.labels.index(label)
    return self.data.shape[index]
Will return the dimension of the first index with label=label
https://github.com/andrewdarmawan/tncontract/blob/a65b5663fe8ec24f2170cf6d2e27fe6a1882834d/tncontract/tensor.py#L531-L534
from __future__ import (absolute_import, division, print_function, unicode_literals) __all__ = ['Tensor', 'contract', 'distance', 'matrix_to_tensor', 'tensor_to_matrix', 'random_tensor', 'tensor_product', 'tensor_svd', 'truncated_svd', 'zeros_tensor'] import copy import warnings import numpy as np import scipy as sp from tncontract import label as lbl class Tensor(): def __init__(self, data, labels=None, base_label="i"): labels = [] if labels is None else labels self.data = np.array(data) if len(labels) == 0: self.assign_labels(base_label=base_label) else: self.labels = labels def __repr__(self): return "Tensor(data=%r, labels=%r)" % (self.data, self.labels) def __str__(self): array_str = str(self.data) lines = array_str.splitlines() if len(lines) > 20: lines = lines[:20] + ["...", "Printed output of large array was truncated.\nString " "representation of full data array returned by " "tensor.data.__str__()."] array_str = "\n".join(lines) lines = [] for i, label in enumerate(self.labels): lines.append(" " + str(i) + ". (dim=" + str(self.shape[i]) + ") " + str(label) + "\n") indices_str = "".join(lines) return ("Tensor object: \n" + "Data type: " + str(self.data.dtype) + "\n" "Number of indices: " + str(len(self.data.shape)) + "\n" "\nIndex labels:\n" + indices_str + "\nTensor data = \n" + array_str) def __eq__(self, other): if isinstance(other, Tensor): return (np.array_equal(self.data, other.data) and self.labels == other.labels) else: return False def __neq__(self, other): return not self.__eq__(other) def __mul__(self, other): try: out = self.copy() out.data = out.data * other return out except TypeError: raise TypeError("unsupported operand type(s) *: for '" + self.__class__.__name__ + "' and '" + other.__class__.__name__ + "'") def __rmul__(self, other): try: out = self.copy() out.data = other * out.data return out except TypeError: raise TypeError("unsupported operand type(s) *: for '" + self.__class__.__name__ + "' and '" + other.__class__.__name__ + "'") def __add__(self, other): try: a=self.copy() b=other.copy() a.consolidate_indices() b.consolidate_indices() return Tensor(a.data+b.data, labels=a.labels) except: raise TypeError("Can only add together tensors with the same"+ " indices: labels and dimensions of each index must match.") def __getitem__(self, *args): return ToContract(self, *args) def get_labels(self): return self._labels def set_labels(self, labels): if len(labels) == len(self.data.shape): self._labels = list(labels) else: raise ValueError("Labels do not match shape of data.") labels = property(get_labels, set_labels) def assign_labels(self, base_label="i"): self.labels = [base_label + str(i) for i in range(len(self.data.shape))] def replace_label(self, old_labels, new_labels): if not isinstance(old_labels, list): old_labels = [old_labels] if not isinstance(new_labels, list): new_labels = [new_labels] for i, label in enumerate(self.labels): if label in old_labels: self.labels[i] = new_labels[old_labels.index(label)] def prime_label(self, labels=None): if labels is None: labels = self.labels elif not isinstance(labels, list): labels = [labels] for i, label in enumerate(self.labels): for noprime in labels: if lbl.noprime_label(label) == noprime: self.labels[i] = lbl.prime_label(self.labels[i]) def unprime_label(self, labels=None): if labels is None: labels = self.labels elif not isinstance(labels, list): labels = [labels] for i, label in enumerate(self.labels): for noprime in labels: if lbl.noprime_label(label) == noprime: self.labels[i] = 
lbl.unprime_label(self.labels[i]) def fuse_indices(self, indices_to_fuse, new_label, preserve_relative_order=False): self.move_indices(indices_to_fuse, 0, preserve_relative_order=preserve_relative_order) total_dim = 1 for i, x in enumerate(self.labels): if x in indices_to_fuse: total_dim *= self.data.shape[i] last_idx=i new_labels = [new_label] + self.labels[last_idx+1:] new_shape = (total_dim,) + self.data.shape[last_idx+1:] self.data = np.reshape(self.data, new_shape) self.labels = new_labels def split_index(self, label, new_dims, new_labels): if len(new_dims) != len(new_labels): raise ValueError("Length of new_dims must equal length of " "new_labels") new_dims = tuple(new_dims) i = self.labels.index(label) new_shape = self.data.shape[:i] + new_dims + self.data.shape[i + 1:] new_labels = self.labels[:i] + new_labels + self.labels[i + 1:] self.data = np.reshape(self.data, new_shape) self.labels = new_labels def contract_internal(self, label1, label2, index1=0, index2=0): label1_indices = [i for i, x in enumerate(self.labels) if x == label1] label2_indices = [i for i, x in enumerate(self.labels) if x == label2] index_to_contract1 = label1_indices[index1] index_to_contract2 = label2_indices[index2] self.data = np.trace(self.data, axis1=index_to_contract1, axis2= index_to_contract2) self.labels = [label for j, label in enumerate(self.labels) if j not in [index_to_contract1, index_to_contract2]] trace = contract_internal tr = contract_internal def consolidate_indices(self, labels=[]): labels_unique = sorted(set(self.labels)) if len(labels) !=0: labels_unique=[x for x in labels_unique if x in labels] for p, label in enumerate(labels_unique): indices = [i for i, j in enumerate(self.labels) if j == label] for k, q in enumerate(indices): self.data = np.rollaxis(self.data, q, p + k) total_dim = self.data.shape[p] for r in range(1, len(indices)): total_dim = total_dim * self.data.shape[p + r] new_shape = (list(self.data.shape[0:p]) + [total_dim] + list(self.data.shape[p + len(indices):])) self.data = np.reshape(self.data, tuple(new_shape)) new_labels = [x for x in self.labels if x != label] new_labels.insert(p, label) self.labels = new_labels def sort_labels(self): self.consolidate_indices() def copy(self): return Tensor(data=self.data.copy(), labels=copy.copy(self.labels)) def move_index(self, label, position): index = self.labels.index(label) self.labels.pop(index) self.labels.insert(position, label) if position <= index: self.data = np.rollaxis(self.data, index, position) else: self.data = np.rollaxis(self.data, index, position + 1) def move_indices(self, labels, position, preserve_relative_order=False): if not isinstance(labels, list): labels = [labels] if preserve_relative_order: orig_labels = self.labels.copy() n_indices_to_move = 0 for label in orig_labels: if label in labels: self.move_index(label, len(self.labels) - 1) n_indices_to_move += 1 else: unique_labels = [] for label in labels: if label not in unique_labels: unique_labels.append(label) labels = unique_labels n_indices_to_move = 0 for label in labels: for i in range(self.labels.count(label)): self.move_index(label, len(self.labels) - 1) n_indices_to_move += 1 if position + n_indices_to_move > len(self.labels): raise ValueError("Specified position too far right.") for j in range(n_indices_to_move): old_index = len(self.labels) - n_indices_to_move + j label = self.labels[old_index] self.labels.pop(old_index) self.labels.insert(position + j, label) self.data = np.rollaxis(self.data, old_index, position + j) def conjugate(self): 
self.data = self.data.conjugate() def inv(self): self.data = np.linalg.inv(self.data) def add_suffix_to_labels(self, suffix): new_labels = [] for label in self.labels: new_labels.append(label + suffix) self.labels = new_labels def suf(self, suffix): t=self.copy() t.labels=[x+suffix for x in t.labels] return t def add_dummy_index(self, label, position=0): self.data = self.data[np.newaxis, :] self.labels.insert(0, label) self.move_index(label, position) def remove_all_dummy_indices(self, labels=None): orig_shape = self.shape for i, x in enumerate(self.labels): if labels != None: if x in labels and orig_shape[i] == 1: self.move_index(x, 0) self.data = self.data[0] self.labels = self.labels[1:] elif orig_shape[i] == 1: self.move_index(x, 0) self.data = self.data[0] self.labels = self.labels[1:]
MIT License
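A minimal usage sketch for Tensor.index_dimension from the tncontract record above, assuming tncontract and NumPy are installed; the array shape and labels are illustrative.

import numpy as np
from tncontract.tensor import Tensor

t = Tensor(np.zeros((2, 3, 4)), labels=["a", "b", "c"])
print(t.index_dimension("b"))  # 3 -- the dimension of the first index labelled "b"

Because the method uses labels.index(label), it always reports the first matching index when a label occurs more than once.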
olitheolix/aiokubernetes
aiokubernetes/models/v1beta2_daemon_set_status.py
V1beta2DaemonSetStatus.desired_number_scheduled
python
def desired_number_scheduled(self):
    return self._desired_number_scheduled
Gets the desired_number_scheduled of this V1beta2DaemonSetStatus.  # noqa: E501

The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/  # noqa: E501

:return: The desired_number_scheduled of this V1beta2DaemonSetStatus.  # noqa: E501
:rtype: int
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1beta2_daemon_set_status.py#L164-L172
import pprint import re from aiokubernetes.models.v1beta2_daemon_set_condition import V1beta2DaemonSetCondition class V1beta2DaemonSetStatus(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'collision_count': 'int', 'conditions': 'list[V1beta2DaemonSetCondition]', 'current_number_scheduled': 'int', 'desired_number_scheduled': 'int', 'number_available': 'int', 'number_misscheduled': 'int', 'number_ready': 'int', 'number_unavailable': 'int', 'observed_generation': 'int', 'updated_number_scheduled': 'int' } attribute_map = { 'collision_count': 'collisionCount', 'conditions': 'conditions', 'current_number_scheduled': 'currentNumberScheduled', 'desired_number_scheduled': 'desiredNumberScheduled', 'number_available': 'numberAvailable', 'number_misscheduled': 'numberMisscheduled', 'number_ready': 'numberReady', 'number_unavailable': 'numberUnavailable', 'observed_generation': 'observedGeneration', 'updated_number_scheduled': 'updatedNumberScheduled' } def __init__(self, collision_count=None, conditions=None, current_number_scheduled=None, desired_number_scheduled=None, number_available=None, number_misscheduled=None, number_ready=None, number_unavailable=None, observed_generation=None, updated_number_scheduled=None): self._collision_count = None self._conditions = None self._current_number_scheduled = None self._desired_number_scheduled = None self._number_available = None self._number_misscheduled = None self._number_ready = None self._number_unavailable = None self._observed_generation = None self._updated_number_scheduled = None self.discriminator = None if collision_count is not None: self.collision_count = collision_count if conditions is not None: self.conditions = conditions self.current_number_scheduled = current_number_scheduled self.desired_number_scheduled = desired_number_scheduled if number_available is not None: self.number_available = number_available self.number_misscheduled = number_misscheduled self.number_ready = number_ready if number_unavailable is not None: self.number_unavailable = number_unavailable if observed_generation is not None: self.observed_generation = observed_generation if updated_number_scheduled is not None: self.updated_number_scheduled = updated_number_scheduled @property def collision_count(self): return self._collision_count @collision_count.setter def collision_count(self, collision_count): self._collision_count = collision_count @property def conditions(self): return self._conditions @conditions.setter def conditions(self, conditions): self._conditions = conditions @property def current_number_scheduled(self): return self._current_number_scheduled @current_number_scheduled.setter def current_number_scheduled(self, current_number_scheduled): if current_number_scheduled is None: raise ValueError("Invalid value for `current_number_scheduled`, must not be `None`") self._current_number_scheduled = current_number_scheduled @property
Apache License 2.0
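A minimal sketch of reading desired_number_scheduled on a V1beta2DaemonSetStatus from the record above, assuming aiokubernetes is installed. Constructing the model by hand is illustrative; in practice these objects are deserialized from API responses. The four constructor arguments below are the ones the __init__ in the excerpt sets unconditionally.

from aiokubernetes.models.v1beta2_daemon_set_status import V1beta2DaemonSetStatus

status = V1beta2DaemonSetStatus(
    current_number_scheduled=3,
    desired_number_scheduled=5,   # illustrative values
    number_misscheduled=0,
    number_ready=3,
)
print(status.desired_number_scheduled)  # 5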
digital-concrete/light-sync
phue_lib.py
Light.effect
python
def effect(self):
    self._effect = self._get('effect')
    return self._effect
Check the effect setting of the light. [none|colorloop]
https://github.com/digital-concrete/light-sync/blob/b2f8405971b6204f4d43f5a63ae91381462913f2/phue_lib.py#L268-L271
import json import logging import os import platform import sys import socket if sys.version_info[0] > 2: PY3K = True else: PY3K = False if PY3K: import http.client as httplib else: import httplib logger = logging.getLogger('phue') if platform.system() == 'Windows': USER_HOME = 'USERPROFILE' else: USER_HOME = 'HOME' __version__ = '1.1' def is_string(data): if PY3K: return isinstance(data, str) else: return isinstance(data, str) or isinstance(data, unicode) class PhueException(Exception): def __init__(self, id, message): self.id = id self.message = message class PhueRegistrationException(PhueException): pass class PhueRequestTimeout(PhueException): pass class Light(object): def __init__(self, bridge, light_id): self.bridge = bridge self.light_id = light_id self._name = None self._on = None self._brightness = None self._colormode = None self._hue = None self._saturation = None self._xy = None self._colortemp = None self._effect = None self._alert = None self.transitiontime = None self._reset_bri_after_on = None self._reachable = None self._type = None def __repr__(self): return '<{0}.{1} object "{2}" at {3}>'.format( self.__class__.__module__, self.__class__.__name__, self.name, hex(id(self))) def _get(self, *args, **kwargs): return self.bridge.get_light(self.light_id, *args, **kwargs) def _set(self, *args, **kwargs): if self.transitiontime is not None: kwargs['transitiontime'] = self.transitiontime logger.debug("Setting with transitiontime = {0} ds = {1} s".format( self.transitiontime, float(self.transitiontime) / 10)) if (args[0] == 'on' and args[1] is False) or ( kwargs.get('on', True) is False): self._reset_bri_after_on = True return self.bridge.set_light(self.light_id, *args, **kwargs) @property def name(self): if PY3K: self._name = self._get('name') else: self._name = self._get('name').encode('utf-8') return self._name @name.setter def name(self, value): old_name = self.name self._name = value self._set('name', self._name) logger.debug("Renaming light from '{0}' to '{1}'".format( old_name, value)) self.bridge.lights_by_name[self.name] = self del self.bridge.lights_by_name[old_name] @property def on(self): self._on = self._get('on') return self._on @on.setter def on(self, value): if self._on and value is False: self._reset_bri_after_on = self.transitiontime is not None if self._reset_bri_after_on: logger.warning( 'Turned off light with transitiontime specified, brightness will be reset on power on') self._set('on', value) if self._on is False and value is True: if self._reset_bri_after_on: logger.warning( 'Light was turned off with transitiontime specified, brightness needs to be reset now.') self.brightness = self._brightness self._reset_bri_after_on = False self._on = value @property def colormode(self): self._colormode = self._get('colormode') return self._colormode @property def brightness(self): self._brightness = self._get('bri') return self._brightness @brightness.setter def brightness(self, value): self._brightness = value self._set('bri', self._brightness) @property def hue(self): self._hue = self._get('hue') return self._hue @hue.setter def hue(self, value): self._hue = int(value) self._set('hue', self._hue) @property def saturation(self): self._saturation = self._get('sat') return self._saturation @saturation.setter def saturation(self, value): self._saturation = value self._set('sat', self._saturation) @property def xy(self): self._xy = self._get('xy') return self._xy @xy.setter def xy(self, value): self._xy = value self._set('xy', self._xy) @property def colortemp(self): 
self._colortemp = self._get('ct') return self._colortemp @colortemp.setter def colortemp(self, value): if value < 154: logger.warn('154 mireds is coolest allowed color temp') elif value > 500: logger.warn('500 mireds is warmest allowed color temp') self._colortemp = value self._set('ct', self._colortemp) @property def colortemp_k(self): self._colortemp = self._get('ct') return int(round(1e6 / self._colortemp)) @colortemp_k.setter def colortemp_k(self, value): if value > 6500: logger.warn('6500 K is max allowed color temp') value = 6500 elif value < 2000: logger.warn('2000 K is min allowed color temp') value = 2000 colortemp_mireds = int(round(1e6 / value)) logger.debug("{0:d} K is {1} mireds".format(value, colortemp_mireds)) self.colortemp = colortemp_mireds @property
MIT License
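A short sketch of reading the effect property from the Light class in the bundled phue_lib module above. It assumes a reachable Hue bridge at the illustrative IP address, and that the module also provides the usual phue Bridge class (not shown in the excerpt).

from phue_lib import Bridge  # module name taken from the file name above

bridge = Bridge('192.168.1.2')   # illustrative bridge IP; requires an already-paired bridge
light = bridge.lights[0]         # assumes at least one light is registered
print(light.effect)              # 'none' or 'colorloop'

Each read of light.effect triggers a fresh request to the bridge, because the property delegates to self._get('effect').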
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/statistics_per_stream.py
StatisticsPerStream.dolby_vision_mode
python
def dolby_vision_mode(self):
    return self._dolby_vision_mode
Gets the dolby_vision_mode of this StatisticsPerStream.

:return: The dolby_vision_mode of this StatisticsPerStream.
:rtype: DolbyVisionPerStreamMode
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/statistics_per_stream.py#L763-L771
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.codec_config_type import CodecConfigType from bitmovin_api_sdk.models.dolby_vision_per_stream_mode import DolbyVisionPerStreamMode from bitmovin_api_sdk.models.encoding_mode import EncodingMode from bitmovin_api_sdk.models.input_factor import InputFactor from bitmovin_api_sdk.models.nex_guard_ab_watermarking_feature import NexGuardABWatermarkingFeature from bitmovin_api_sdk.models.pixel_format_bit_depth import PixelFormatBitDepth from bitmovin_api_sdk.models.psnr_per_stream_mode import PsnrPerStreamMode from bitmovin_api_sdk.models.statistics_per_title_stream import StatisticsPerTitleStream from bitmovin_api_sdk.models.statistics_resolution import StatisticsResolution import pprint import six class StatisticsPerStream(object): @poscheck_model def __init__(self, stream_id=None, codec_config_id=None, multiplicator=None, encoded_bytes=None, encoded_seconds=None, billable_minutes=None, width=None, height=None, rate=None, bitrate=None, codec=None, resolution=None, encoding_mode=None, encoding_mode_multiplicator=None, per_title_result_stream=None, per_title_multiplicator=None, psnr_mode=None, psnr_multiplicator=None, dolby_vision_mode=None, dolby_vision_multiplicator=None, preset=None, preset_multiplicator=None, live=None, live_multiplicator=None, enhanced_deinterlace=None, enhanced_deinterlace_multiplicator=None, dolby_vision_to_hdr=None, dolby_vision_to_hdr_multiplicator=None, dolby_vision_to_sdr=None, dolby_vision_to_sdr_multiplicator=None, nex_guard_ab_watermarking_type=None, nex_guard_ab_watermarking_multiplicator=None, pixel_format_bit_depth=None, pixel_format_multiplicator=None, input_factor=None): self._stream_id = None self._codec_config_id = None self._multiplicator = None self._encoded_bytes = None self._encoded_seconds = None self._billable_minutes = None self._width = None self._height = None self._rate = None self._bitrate = None self._codec = None self._resolution = None self._encoding_mode = None self._encoding_mode_multiplicator = None self._per_title_result_stream = None self._per_title_multiplicator = None self._psnr_mode = None self._psnr_multiplicator = None self._dolby_vision_mode = None self._dolby_vision_multiplicator = None self._preset = None self._preset_multiplicator = None self._live = None self._live_multiplicator = None self._enhanced_deinterlace = None self._enhanced_deinterlace_multiplicator = None self._dolby_vision_to_hdr = None self._dolby_vision_to_hdr_multiplicator = None self._dolby_vision_to_sdr = None self._dolby_vision_to_sdr_multiplicator = None self._nex_guard_ab_watermarking_type = None self._nex_guard_ab_watermarking_multiplicator = None self._pixel_format_bit_depth = None self._pixel_format_multiplicator = None self._input_factor = None self.discriminator = None if stream_id is not None: self.stream_id = stream_id if codec_config_id is not None: self.codec_config_id = codec_config_id if multiplicator is not None: self.multiplicator = multiplicator if encoded_bytes is not None: self.encoded_bytes = encoded_bytes if encoded_seconds is not None: self.encoded_seconds = encoded_seconds if billable_minutes is not None: self.billable_minutes = billable_minutes if width is not None: self.width = width if height is not None: self.height = height if rate is not None: self.rate = rate if bitrate is not None: self.bitrate = bitrate if codec is not None: self.codec = codec if resolution is not None: 
self.resolution = resolution if encoding_mode is not None: self.encoding_mode = encoding_mode if encoding_mode_multiplicator is not None: self.encoding_mode_multiplicator = encoding_mode_multiplicator if per_title_result_stream is not None: self.per_title_result_stream = per_title_result_stream if per_title_multiplicator is not None: self.per_title_multiplicator = per_title_multiplicator if psnr_mode is not None: self.psnr_mode = psnr_mode if psnr_multiplicator is not None: self.psnr_multiplicator = psnr_multiplicator if dolby_vision_mode is not None: self.dolby_vision_mode = dolby_vision_mode if dolby_vision_multiplicator is not None: self.dolby_vision_multiplicator = dolby_vision_multiplicator if preset is not None: self.preset = preset if preset_multiplicator is not None: self.preset_multiplicator = preset_multiplicator if live is not None: self.live = live if live_multiplicator is not None: self.live_multiplicator = live_multiplicator if enhanced_deinterlace is not None: self.enhanced_deinterlace = enhanced_deinterlace if enhanced_deinterlace_multiplicator is not None: self.enhanced_deinterlace_multiplicator = enhanced_deinterlace_multiplicator if dolby_vision_to_hdr is not None: self.dolby_vision_to_hdr = dolby_vision_to_hdr if dolby_vision_to_hdr_multiplicator is not None: self.dolby_vision_to_hdr_multiplicator = dolby_vision_to_hdr_multiplicator if dolby_vision_to_sdr is not None: self.dolby_vision_to_sdr = dolby_vision_to_sdr if dolby_vision_to_sdr_multiplicator is not None: self.dolby_vision_to_sdr_multiplicator = dolby_vision_to_sdr_multiplicator if nex_guard_ab_watermarking_type is not None: self.nex_guard_ab_watermarking_type = nex_guard_ab_watermarking_type if nex_guard_ab_watermarking_multiplicator is not None: self.nex_guard_ab_watermarking_multiplicator = nex_guard_ab_watermarking_multiplicator if pixel_format_bit_depth is not None: self.pixel_format_bit_depth = pixel_format_bit_depth if pixel_format_multiplicator is not None: self.pixel_format_multiplicator = pixel_format_multiplicator if input_factor is not None: self.input_factor = input_factor @property def openapi_types(self): types = { 'stream_id': 'string_types', 'codec_config_id': 'string_types', 'multiplicator': 'float', 'encoded_bytes': 'int', 'encoded_seconds': 'float', 'billable_minutes': 'float', 'width': 'int', 'height': 'int', 'rate': 'float', 'bitrate': 'int', 'codec': 'CodecConfigType', 'resolution': 'StatisticsResolution', 'encoding_mode': 'EncodingMode', 'encoding_mode_multiplicator': 'float', 'per_title_result_stream': 'StatisticsPerTitleStream', 'per_title_multiplicator': 'float', 'psnr_mode': 'PsnrPerStreamMode', 'psnr_multiplicator': 'float', 'dolby_vision_mode': 'DolbyVisionPerStreamMode', 'dolby_vision_multiplicator': 'float', 'preset': 'string_types', 'preset_multiplicator': 'float', 'live': 'bool', 'live_multiplicator': 'float', 'enhanced_deinterlace': 'bool', 'enhanced_deinterlace_multiplicator': 'float', 'dolby_vision_to_hdr': 'bool', 'dolby_vision_to_hdr_multiplicator': 'float', 'dolby_vision_to_sdr': 'bool', 'dolby_vision_to_sdr_multiplicator': 'float', 'nex_guard_ab_watermarking_type': 'NexGuardABWatermarkingFeature', 'nex_guard_ab_watermarking_multiplicator': 'float', 'pixel_format_bit_depth': 'PixelFormatBitDepth', 'pixel_format_multiplicator': 'float', 'input_factor': 'InputFactor' } return types @property def attribute_map(self): attributes = { 'stream_id': 'streamId', 'codec_config_id': 'codecConfigId', 'multiplicator': 'multiplicator', 'encoded_bytes': 'encodedBytes', 'encoded_seconds': 
'encodedSeconds', 'billable_minutes': 'billableMinutes', 'width': 'width', 'height': 'height', 'rate': 'rate', 'bitrate': 'bitrate', 'codec': 'codec', 'resolution': 'resolution', 'encoding_mode': 'encodingMode', 'encoding_mode_multiplicator': 'encodingModeMultiplicator', 'per_title_result_stream': 'perTitleResultStream', 'per_title_multiplicator': 'perTitleMultiplicator', 'psnr_mode': 'psnrMode', 'psnr_multiplicator': 'psnrMultiplicator', 'dolby_vision_mode': 'dolbyVisionMode', 'dolby_vision_multiplicator': 'dolbyVisionMultiplicator', 'preset': 'preset', 'preset_multiplicator': 'presetMultiplicator', 'live': 'live', 'live_multiplicator': 'liveMultiplicator', 'enhanced_deinterlace': 'enhancedDeinterlace', 'enhanced_deinterlace_multiplicator': 'enhancedDeinterlaceMultiplicator', 'dolby_vision_to_hdr': 'dolbyVisionToHdr', 'dolby_vision_to_hdr_multiplicator': 'dolbyVisionToHdrMultiplicator', 'dolby_vision_to_sdr': 'dolbyVisionToSdr', 'dolby_vision_to_sdr_multiplicator': 'dolbyVisionToSdrMultiplicator', 'nex_guard_ab_watermarking_type': 'nexGuardABWatermarkingType', 'nex_guard_ab_watermarking_multiplicator': 'nexGuardABWatermarkingMultiplicator', 'pixel_format_bit_depth': 'pixelFormatBitDepth', 'pixel_format_multiplicator': 'pixelFormatMultiplicator', 'input_factor': 'inputFactor' } return attributes @property def stream_id(self): return self._stream_id @stream_id.setter def stream_id(self, stream_id): if stream_id is not None: if not isinstance(stream_id, string_types): raise TypeError("Invalid type for `stream_id`, type has to be `string_types`") self._stream_id = stream_id @property def codec_config_id(self): return self._codec_config_id @codec_config_id.setter def codec_config_id(self, codec_config_id): if codec_config_id is not None: if not isinstance(codec_config_id, string_types): raise TypeError("Invalid type for `codec_config_id`, type has to be `string_types`") self._codec_config_id = codec_config_id @property def multiplicator(self): return self._multiplicator @multiplicator.setter def multiplicator(self, multiplicator): if multiplicator is not None: if not isinstance(multiplicator, (float, int)): raise TypeError("Invalid type for `multiplicator`, type has to be `float`") self._multiplicator = multiplicator @property def encoded_bytes(self): return self._encoded_bytes @encoded_bytes.setter def encoded_bytes(self, encoded_bytes): if encoded_bytes is not None: if not isinstance(encoded_bytes, int): raise TypeError("Invalid type for `encoded_bytes`, type has to be `int`") self._encoded_bytes = encoded_bytes @property def encoded_seconds(self): return self._encoded_seconds @encoded_seconds.setter def encoded_seconds(self, encoded_seconds): if encoded_seconds is not None: if not isinstance(encoded_seconds, (float, int)): raise TypeError("Invalid type for `encoded_seconds`, type has to be `float`") self._encoded_seconds = encoded_seconds @property def billable_minutes(self): return self._billable_minutes @billable_minutes.setter def billable_minutes(self, billable_minutes): if billable_minutes is not None: if not isinstance(billable_minutes, (float, int)): raise TypeError("Invalid type for `billable_minutes`, type has to be `float`") self._billable_minutes = billable_minutes @property def width(self): return self._width @width.setter def width(self, width): if width is not None: if not isinstance(width, int): raise TypeError("Invalid type for `width`, type has to be `int`") self._width = width @property def height(self): return self._height @height.setter def height(self, height): if 
height is not None: if not isinstance(height, int): raise TypeError("Invalid type for `height`, type has to be `int`") self._height = height @property def rate(self): return self._rate @rate.setter def rate(self, rate): if rate is not None: if not isinstance(rate, (float, int)): raise TypeError("Invalid type for `rate`, type has to be `float`") self._rate = rate @property def bitrate(self): return self._bitrate @bitrate.setter def bitrate(self, bitrate): if bitrate is not None: if not isinstance(bitrate, int): raise TypeError("Invalid type for `bitrate`, type has to be `int`") self._bitrate = bitrate @property def codec(self): return self._codec @codec.setter def codec(self, codec): if codec is not None: if not isinstance(codec, CodecConfigType): raise TypeError("Invalid type for `codec`, type has to be `CodecConfigType`") self._codec = codec @property def resolution(self): return self._resolution @resolution.setter def resolution(self, resolution): if resolution is not None: if not isinstance(resolution, StatisticsResolution): raise TypeError("Invalid type for `resolution`, type has to be `StatisticsResolution`") self._resolution = resolution @property def encoding_mode(self): return self._encoding_mode @encoding_mode.setter def encoding_mode(self, encoding_mode): if encoding_mode is not None: if not isinstance(encoding_mode, EncodingMode): raise TypeError("Invalid type for `encoding_mode`, type has to be `EncodingMode`") self._encoding_mode = encoding_mode @property def encoding_mode_multiplicator(self): return self._encoding_mode_multiplicator @encoding_mode_multiplicator.setter def encoding_mode_multiplicator(self, encoding_mode_multiplicator): if encoding_mode_multiplicator is not None: if not isinstance(encoding_mode_multiplicator, (float, int)): raise TypeError("Invalid type for `encoding_mode_multiplicator`, type has to be `float`") self._encoding_mode_multiplicator = encoding_mode_multiplicator @property def per_title_result_stream(self): return self._per_title_result_stream @per_title_result_stream.setter def per_title_result_stream(self, per_title_result_stream): if per_title_result_stream is not None: if not isinstance(per_title_result_stream, StatisticsPerTitleStream): raise TypeError("Invalid type for `per_title_result_stream`, type has to be `StatisticsPerTitleStream`") self._per_title_result_stream = per_title_result_stream @property def per_title_multiplicator(self): return self._per_title_multiplicator @per_title_multiplicator.setter def per_title_multiplicator(self, per_title_multiplicator): if per_title_multiplicator is not None: if not isinstance(per_title_multiplicator, (float, int)): raise TypeError("Invalid type for `per_title_multiplicator`, type has to be `float`") self._per_title_multiplicator = per_title_multiplicator @property def psnr_mode(self): return self._psnr_mode @psnr_mode.setter def psnr_mode(self, psnr_mode): if psnr_mode is not None: if not isinstance(psnr_mode, PsnrPerStreamMode): raise TypeError("Invalid type for `psnr_mode`, type has to be `PsnrPerStreamMode`") self._psnr_mode = psnr_mode @property def psnr_multiplicator(self): return self._psnr_multiplicator @psnr_multiplicator.setter def psnr_multiplicator(self, psnr_multiplicator): if psnr_multiplicator is not None: if not isinstance(psnr_multiplicator, (float, int)): raise TypeError("Invalid type for `psnr_multiplicator`, type has to be `float`") self._psnr_multiplicator = psnr_multiplicator @property
MIT License
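A minimal sketch of the dolby_vision_mode getter on StatisticsPerStream from the record above, assuming the bitmovin-api-sdk package is installed. Building the model by hand is illustrative; these objects normally come back from statistics API calls, and the field simply stays None until the API populates it with a DolbyVisionPerStreamMode value.

from bitmovin_api_sdk.models.statistics_per_stream import StatisticsPerStream

stats = StatisticsPerStream(width=1920, height=1080)  # illustrative fields
print(stats.dolby_vision_mode)                        # None -- not set in this sketch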
hyperledger/aries-cloudagent-python
aries_cloudagent/transport/outbound/manager.py
OutboundTransportManager.register_class
python
def register_class(
    self, transport_class: Type[BaseOutboundTransport], transport_id: str = None
) -> str:
    try:
        schemes = transport_class.schemes
    except AttributeError:
        raise OutboundTransportRegistrationError(
            f"Imported class {transport_class} does not "
            + "specify a required 'schemes' attribute"
        )
    if not transport_id:
        transport_id = transport_class.__qualname__

    for scheme in schemes:
        if scheme in self.registered_schemes:
            raise OutboundTransportRegistrationError(
                f"Cannot register transport '{transport_id}'"
                f"for '{scheme}' scheme because the scheme"
                "has already been registered"
            )

    self.registered_transports[transport_id] = transport_class

    for scheme in schemes:
        self.registered_schemes[scheme] = transport_id

    return transport_id
Register a new outbound transport class.

Args:
    transport_class: Transport class to register

Raises:
    OutboundTransportRegistrationError: If the imported class does not
        specify a schemes attribute
    OutboundTransportRegistrationError: If the scheme has already been
        registered
https://github.com/hyperledger/aries-cloudagent-python/blob/fec69f1a2301e4745fc9d40cea190050e3f595fa/aries_cloudagent/transport/outbound/manager.py#L130-L170
import asyncio import json import logging import time from typing import Callable, Type, Union from urllib.parse import urlparse from ...connections.models.connection_target import ConnectionTarget from ...core.profile import Profile from ...utils.classloader import ClassLoader, ModuleLoadError, ClassNotFoundError from ...utils.stats import Collector from ...utils.task_queue import CompletedTask, TaskQueue, task_exc_info from ...utils.tracing import trace_event, get_timer from ..wire_format import BaseWireFormat from .base import ( BaseOutboundTransport, OutboundDeliveryError, OutboundTransportRegistrationError, ) from .message import OutboundMessage LOGGER = logging.getLogger(__name__) MODULE_BASE_PATH = "aries_cloudagent.transport.outbound" class QueuedOutboundMessage: STATE_NEW = "new" STATE_PENDING = "pending" STATE_ENCODE = "encode" STATE_DELIVER = "deliver" STATE_RETRY = "retry" STATE_DONE = "done" def __init__( self, profile: Profile, message: OutboundMessage, target: ConnectionTarget, transport_id: str, ): self.profile = profile self.endpoint = target and target.endpoint self.error: Exception = None self.message = message self.payload: Union[str, bytes] = None self.retries = None self.retry_at: float = None self.state = self.STATE_NEW self.target = target self.task: asyncio.Task = None self.transport_id: str = transport_id self.metadata: dict = None self.api_key: str = None class OutboundTransportManager: MAX_RETRY_COUNT = 4 def __init__(self, profile: Profile, handle_not_delivered: Callable = None): self.root_profile = profile self.loop = asyncio.get_event_loop() self.handle_not_delivered = handle_not_delivered self.outbound_buffer = [] self.outbound_event = asyncio.Event() self.outbound_new = [] self.registered_schemes = {} self.registered_transports = {} self.running_transports = {} self.task_queue = TaskQueue(max_active=200) self._process_task: asyncio.Task = None if self.root_profile.settings.get("transport.max_outbound_retry"): self.MAX_RETRY_COUNT = self.root_profile.settings[ "transport.max_outbound_retry" ] async def setup(self): outbound_transports = ( self.root_profile.settings.get("transport.outbound_configs") or [] ) for outbound_transport in outbound_transports: self.register(outbound_transport) def register(self, module: str) -> str: try: imported_class = ClassLoader.load_subclass_of( BaseOutboundTransport, module, MODULE_BASE_PATH ) except (ModuleLoadError, ClassNotFoundError): raise OutboundTransportRegistrationError( f"Outbound transport module {module} could not be resolved." ) return self.register_class(imported_class)
Apache License 2.0
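A sketch of register_class from the record above using a stub transport class. The stub and the stand-in profile are hypothetical and exist only to exercise the 'schemes' contract the method checks; a real deployment registers BaseOutboundTransport subclasses against a real aries_cloudagent Profile.

from types import SimpleNamespace

from aries_cloudagent.transport.outbound.manager import OutboundTransportManager

# Stand-in profile: the constructor only reads profile.settings like a mapping.
fake_profile = SimpleNamespace(settings={})
mgr = OutboundTransportManager(fake_profile)

class WsTransport:  # hypothetical stub; real transports subclass BaseOutboundTransport
    schemes = ("ws", "wss")

print(mgr.register_class(WsTransport))   # 'WsTransport' -- falls back to __qualname__
print(mgr.registered_schemes["ws"])      # 'WsTransport'

Registering a second class that claims 'ws' or 'wss' would raise OutboundTransportRegistrationError, since each scheme may map to only one transport id.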
yadage/yadage
yadage/controllers.py
PersistentController.submit_nodes
python
def submit_nodes(self, nodeids):
    log.debug("transaction to submit")
    with self.transaction():
        nodes = [self.adageobj.dag.getNode(nodeid) for nodeid in nodeids]
        super(PersistentController, self).submit_nodes(nodes)
submit nodes to backend

:param nodeids: list of ids of nodes to be submitted
:return: None
https://github.com/yadage/yadage/blob/cbb26515f02265800b4f5156e6c099211121d6a8/yadage/controllers.py#L117-L128
import contextlib import importlib import logging import os from adage.wflowcontroller import BaseController from packtivity.syncbackends import defaultsyncbackend from .reset import collective_downstream, remove_rules, reset_steps, undo_rules from .wflow import YadageWorkflow from .handlers.utils import handler_decorator log = logging.getLogger(__name__) ctrlhandlers, controller = handler_decorator() class YadageController(BaseController): def __init__(self, *args, **kwargs): self.prepublishing_backend = defaultsyncbackend() self.disable_backend = False self.disable_prepublishing = kwargs.pop("disable_prepub", False) super(YadageController, self).__init__(*args, **kwargs) def sync_expected(self): for n in self.adageobj.dag.nodes(): if ( "YADAGE_IGNORE_PREPUBLISHING" in os.environ or self.disable_prepublishing ): continue node = self.adageobj.dag.getNode(n) node.expected_result = self.prepublishing_backend.prepublish( node.task.spec, node.task.parameters.json(), node.task.state ) def sync_backend(self): self.sync_expected() if not self.disable_backend: super(YadageController, self).sync_backend() @controller("frommodel") def frommodel_controller(ctrlstring, ctrlopts, model=None): if isinstance(model, YadageWorkflow): return YadageController(model, **ctrlopts) else: return PersistentController(model, **ctrlopts) @controller("http") def http_controller(ctrlstring, ctrlopts, model=None): try: from yadagehttpctrl.clientcontroller import YadageHTTPController ctrl = YadageHTTPController(server=ctrlstring, **ctrlopts) return ctrl except ImportError: log.exception("try installing yadagehttpctrl") @controller("py:") def frompython_controller(ctrlstring, ctrlopts, model=None): _, module, ctrlclass = ctrlstring.split(":") module = importlib.import_module(module) ctrlclass = getattr(module, ctrlclass) if ctrlopts.pop("pass_model", False): ctrlopts["model"] = model return ctrlclass(**ctrlopts) def setup_controller(model=None, controller="frommodel", ctrlopts=None): ctrlopts = ctrlopts or {} for k in ctrlhandlers.keys(): if controller.startswith(k): return ctrlhandlers[k](controller, ctrlopts, model) raise RuntimeError("unknown controller type %s" % controller) class PersistentController(YadageController): def __init__(self, model, backend=None): self.model = model super(PersistentController, self).__init__(self.model.load(), backend) @contextlib.contextmanager def transaction(self, sync=True): self.adageobj = self.model.load() if sync: log.debug("syncing to setup tx %s", self) super(PersistentController, self).sync_backend() yield isvalid = self.validate() if not isvalid: log.warning("commit is invalid %s", isvalid) if sync: log.debug("syncing to teardown tx %s", self) super(PersistentController, self).sync_backend() self.model.commit(self.adageobj)
MIT License
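A short sketch of submit_nodes on the PersistentController from the record above. The controller itself is assumed to exist already (for example the object returned by setup_controller for a persistent model, as shown in the context); the choice of node ids is illustrative.

# 'ctrl' is assumed to be an already-constructed PersistentController.
node_ids = list(ctrl.adageobj.dag.nodes())   # illustrative: ids taken straight from the DAG
ctrl.submit_nodes(node_ids)                  # opens a transaction, resolves the ids to nodes, submits them

Inside the transaction the controller reloads the model, syncs the backend, and commits the updated workflow state afterwards, so callers only ever pass plain node ids.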
databand-ai/dbnd
modules/dbnd/src/dbnd/_vendor/pendulum/date.py
Date.diff_formatter
python
def diff_formatter(self):
    if not self.__class__._diff_formatter:
        self.__class__._diff_formatter = DifferenceFormatter(self.__class__.translator())

    return self.__class__._diff_formatter
Returns a DifferenceFormatter instance.

:rtype: DifferenceFormatter
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd/src/dbnd/_vendor/pendulum/date.py#L616-L625
from __future__ import division import calendar import math from datetime import date, timedelta from dateutil.relativedelta import relativedelta from .period import Period from .formatting.difference_formatter import DifferenceFormatter from .mixins.default import TranslatableMixin, FormattableMixing, TestableMixin from .constants import ( DAYS_PER_WEEK, YEARS_PER_DECADE, YEARS_PER_CENTURY, MONTHS_PER_YEAR, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY ) from .exceptions import PendulumException class Date(TranslatableMixin, FormattableMixing, TestableMixin, date): _days = { SUNDAY: 'Sunday', MONDAY: 'Monday', TUESDAY: 'Tuesday', WEDNESDAY: 'Wednesday', THURSDAY: 'Thursday', FRIDAY: 'Friday', SATURDAY: 'Saturday' } _week_starts_at = MONDAY _week_ends_at = SUNDAY _weekend_days = [ SATURDAY, SUNDAY ] _MODIFIERS_VALID_UNITS = ['day', 'week', 'month', 'year', 'decade', 'century'] _diff_formatter = None @classmethod def instance(cls, dt): return cls(dt.year, dt.month, dt.day) @classmethod def create(cls, year=None, month=None, day=None): if any([year is None, month is None, day is None]): now = date.today() if year is None: year = now.year if month is None: month = now.month if day is None: day = now.day return cls(year, month, day) @classmethod def today(cls, tz=None): if cls.has_test_now(): return cls.get_test_now() return cls.create() @classmethod def yesterday(cls): return cls.today().subtract(days=1) @classmethod def tomorrow(cls): return cls.today().add(days=1) def year_(self, year): return self._setter(year=year) def month_(self, month): return self._setter(month=month) def day_(self, day): return self._setter(day=day) def _setter(self, **kwargs): return self.replace(**kwargs) @property def day_of_week(self): return self.isoweekday() % 7 @property def day_of_year(self): k = 1 if self.is_leap_year() else 2 return ( (275 * self.month) // 9 - k * ((self.month + 9) // 12) + self.day - 30 ) @property def week_of_year(self): return self.isocalendar()[1] @property def days_in_month(self): return calendar.monthrange(self.year, self.month)[1] @property def week_of_month(self): return int(math.ceil(self.day / DAYS_PER_WEEK)) @property def age(self): return self.diff().in_years() @property def quarter(self): return int(math.ceil(self.month / 3)) @classmethod def get_week_starts_at(cls): return cls._week_starts_at @classmethod def set_week_starts_at(cls, value): if value not in cls._days: raise ValueError('Invalid day of the week: {}'.format(value)) cls._week_starts_at = value @classmethod def get_week_ends_at(cls): return cls._week_ends_at @classmethod def set_week_ends_at(cls, value): if value not in cls._days: raise ValueError('Invalid day of the week: {}'.format(value)) cls._week_ends_at = value @classmethod def get_weekend_days(cls): return cls._weekend_days @classmethod def set_weekend_days(cls, values): for value in values: if value not in cls._days: raise ValueError('Invalid day of the week: {}' .format(value)) cls._weekend_days = values def to_date_string(self): return self.format('%Y-%m-%d', formatter='classic') def to_formatted_date_string(self): return self.format('%b %d, %Y', formatter='classic') def between(self, dt1, dt2, equal=True): if dt1 > dt2: dt1, dt2 = dt2, dt1 if equal: return self >= dt1 and self <= dt2 return self > dt1 and self < dt2 def closest(self, dt1, dt2): dt1 = self._get_date(dt1, True) dt2 = self._get_date(dt2, True) if self.diff(dt1).in_seconds() < self.diff(dt2).in_seconds(): return dt1 return dt2 def farthest(self, dt1, dt2): dt1 = 
self._get_date(dt1, True) dt2 = self._get_date(dt2, True) if self.diff(dt1).in_seconds() > self.diff(dt2).in_seconds(): return dt1 return dt2 def min_(self, dt=None): if dt is None: dt = Date.today() if self < dt: return self return self._get_date(dt, True) def minimum(self, dt=None): return self.min_(dt) def max_(self, dt=None): if dt is None: dt = Date.today() if self > dt: return self return self._get_date(dt, True) def maximum(self, dt=None): return self.max_(dt) def is_weekday(self): return not self.is_weekend() def is_weekend(self): return self.day_of_week in self._weekend_days def is_yesterday(self): return self == self.yesterday() def is_today(self): return self == self.today() def is_tomorrow(self): return self == self.tomorrow() def is_future(self): return self > self.today() def is_past(self): return self < self.today() def is_leap_year(self): return calendar.isleap(self.year) def is_long_year(self): return Date(self.year, 12, 28).isocalendar()[1] == 53 def is_same_day(self, dt): return self == dt def is_sunday(self): return self.day_of_week == SUNDAY def is_monday(self): return self.day_of_week == MONDAY def is_tuesday(self): return self.day_of_week == TUESDAY def is_wednesday(self): return self.day_of_week == WEDNESDAY def is_thursday(self): return self.day_of_week == THURSDAY def is_friday(self): return self.day_of_week == FRIDAY def is_saturday(self): return self.day_of_week == SATURDAY def is_birthday(self, dt=None): if dt is None: dt = Date.today() instance = self._get_date(dt, True) return (self.month, self.day) == (instance.month, instance.day) def add(self, years=0, months=0, weeks=0, days=0): delta = relativedelta( years=years, months=months, weeks=weeks, days=days, ) return self.instance(date(self.year, self.month, self.day) + delta) def subtract(self, years=0, months=0, weeks=0, days=0): delta = relativedelta( years=years, months=months, weeks=weeks, days=days ) return self.instance(date(self.year, self.month, self.day) - delta) def add_timedelta(self, delta): return self.add(days=delta.days) def subtract_timedelta(self, delta): return self.subtract(days=delta.days) def __add__(self, other): if not isinstance(other, timedelta): return NotImplemented return self.add_timedelta(other) def __sub__(self, other): if isinstance(other, timedelta): return self.subtract_timedelta(other) try: return self._get_date(other, True).diff(self, False) except ValueError: return NotImplemented @property
Apache License 2.0
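A minimal sketch showing that the diff_formatter property from the record above lazily creates a single DifferenceFormatter and caches it on the class, assuming the vendored pendulum package is importable under the path shown in the record (dbnd._vendor.pendulum).

from dbnd._vendor.pendulum.date import Date

a = Date.create(2020, 1, 1)
b = Date.create(2021, 6, 15)
print(a.diff_formatter is b.diff_formatter)  # True -- built once, then cached on the Date class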
rdflib/pyldapi
pyldapi/renderer_container.py
ContainerOfContainersRenderer.__init__
python
def __init__(self, request, instance_uri, label, comment, profiles, cofc_file_path,
             default_profile_token='mem'):
    super(ContainerOfContainersRenderer, self).__init__(
        request,
        instance_uri,
        label,
        comment,
        None,
        None,
        [],
        0,
        profiles=profiles,
        default_profile_token=default_profile_token,
    )
    self.members = []

    try:
        with open(cofc_file_path, 'rb') as file:
            g = Graph().parse(file=file, format='turtle')
            assert g, "Could not parse the CofC RDF file."
    except FileNotFoundError:
        raise CofCTtlError()
    except AssertionError:
        raise CofCTtlError()

    q = '''
        SELECT ?uri ?label
        WHERE {{
            # the URIs and labels of all the things of type rdf:Bag that are within (rdfs:member) the CofC
            ?uri a rdf:Bag ;
                 rdfs:label ?label .
            <{register_uri}> rdfs:member ?uri .
        }}
        '''.format(**{'register_uri': instance_uri})

    for r in g.query(q):
        self.members.append((r['uri'], r['label']))

    self.register_total_count = len(self.members)
Constructor

:param request: The Flask request object triggering this class object's creation.
:type request: :class:`flask.request`
:param instance_uri: The URI requested.
:type instance_uri: str
:param label: The label of the Register.
:type label: str
:param comment: A description of the Register.
:type comment: str
:param cofc_file_path: The path to the Register of Registers RDF file (used in API setup).
:type cofc_file_path: str
https://github.com/rdflib/pyldapi/blob/6e56f901ee264737ee3ab97fc947f7b4bbcff6a4/pyldapi/renderer_container.py#L344-L395
from pathlib import Path from fastapi import Response from fastapi.responses import JSONResponse from fastapi.templating import Jinja2Templates from rdflib import Graph, Namespace, URIRef, Literal, RDF, RDFS from pyldapi.renderer import Renderer from pyldapi.profile import Profile from pyldapi.exceptions import ProfilesMediatypesException, CofCTtlError from .data import RDF_MEDIATYPES, MEDIATYPE_NAMES templates = Jinja2Templates(directory="templates") class ContainerRenderer(Renderer): DEFAULT_ITEMS_PER_PAGE = 100 def __init__(self, request, instance_uri, label, comment, parent_container_uri, parent_container_label, members, members_total_count, *args, profiles=None, default_profile_token=None, super_register=None, page_size_max=1000): self.instance_uri = instance_uri if profiles is None: profiles = {} for k, v in profiles.items(): if k == 'mem': raise ProfilesMediatypesException( 'You must not manually add a profile with token \'mem\' as this is auto-created' ) profiles.update({ 'mem': Profile( 'https://w3id.org/profile/mem', 'Members Profile', 'A very basic RDF data model-only profile that lists the sub-items (members) of collections (rdf:Bag)', ['text/html'] + RDF_MEDIATYPES, 'text/html' ) }) if default_profile_token is None: default_profile_token = 'mem' super(ContainerRenderer, self).__init__( request, instance_uri, profiles, default_profile_token ) if self.vf_error is None: self.label = label self.comment = comment self.parent_container_uri = parent_container_uri self.parent_container_label = parent_container_label if members is not None: self.members = members else: self.members = [] self.members_total_count = members_total_count if request.query_params.get("per_page"): self.per_page = int(request.query_params.get("per_page")) else: self.per_page = ContainerRenderer.DEFAULT_ITEMS_PER_PAGE if request.query_params.get("page"): self.page = int(request.query_params.get("page")) else: self.page = 1 self.super_register = super_register self.page_size_max = page_size_max self.paging_error = self._paging() def _paging(self): self.last_page = int(round(self.members_total_count / self.per_page, 0)) + 1 if self.page > self.last_page: return 'You must enter either no value for page or an integer <= {} which is the last page number.' 
.format(self.last_page) if self.per_page > self.page_size_max: return 'You must choose a page size <= {}'.format(self.page_size_max) links = list() links.append('<http://www.w3.org/ns/ldp#Resource>; rel="type"') links.append('<http://www.w3.org/ns/ldp#Page>; rel="type"') other_qsas = [x + "=" + self.request.query_params[x] for x in self.request.query_params if x not in ["page", "per_page"]] if len(other_qsas) > 0: other_qsas_str = "&".join(other_qsas) + "&" else: other_qsas_str = '' self.first_page = 1 links.append( '<{}?{}per_page={}&page=1>; rel="first"'.format( self.instance_uri, other_qsas_str, self.per_page ) ) if self.page > 1: self.prev_page = self.page - 1 links.append('<{}?per_page={}&page={}>; rel="prev"'.format( self.instance_uri, self.per_page, self.prev_page )) else: self.prev_page = None if self.page < self.last_page: self.next_page = self.page + 1 links.append( '<{}?{}per_page={}&page={}>; rel="next"'.format( self.instance_uri, other_qsas_str, self.per_page, self.next_page ) ) else: self.next_page = None links.append( '<{}?{}per_page={}&page={}>; rel="last"'.format( self.instance_uri, other_qsas_str, self.per_page, self.last_page ) ) self.headers['Link'] += ', ' + ', '.join(links) return None def render( self, additional_alt_template_context=None, alt_template_context_replace=False, additional_mem_template_context=None, mem_template_context_replace=False ): response = super(ContainerRenderer, self).render( additional_alt_template_context=additional_alt_template_context, alt_template_context_replace=alt_template_context_replace ) if response is None and self.profile == 'mem': if self.paging_error is None: if self.mediatype == 'text/html': return self._render_mem_profile_html( additional_mem_template_context, mem_template_context_replace ) elif self.mediatype in RDF_MEDIATYPES: return self._render_mem_profile_rdf() else: return self._render_mem_profile_json() else: return Response(self.paging_error, status_code=400, media_type='text/plain') return response def _render_mem_profile_html( self, additional_mem_template_context=None, mem_template_context_replace=False ): _template_context = { 'uri': self.instance_uri, 'label': self.label, 'comment': self.comment, 'parent_container_uri': self.parent_container_uri, 'parent_container_label': self.parent_container_label, 'members': self.members, 'page': self.page, 'per_page': self.per_page, 'first_page': self.first_page, 'prev_page': self.prev_page, 'next_page': self.next_page, 'last_page': self.last_page, 'mediatype_names': MEDIATYPE_NAMES, 'request': self.request } if additional_mem_template_context is not None and isinstance(additional_mem_template_context, dict): if mem_template_context_replace: _template_context = additional_mem_template_context else: _template_context.update(additional_mem_template_context) return templates.TemplateResponse("mem.html", context=_template_context, headers=self.headers) def _generate_mem_profile_rdf(self): g = Graph() LDP = Namespace('http://www.w3.org/ns/ldp#') g.bind('ldp', LDP) XHV = Namespace('https://www.w3.org/1999/xhtml/vocab#') g.bind('xhv', XHV) u = URIRef(self.instance_uri) g.add((u, RDF.type, RDF.Bag)) g.add((u, RDFS.label, Literal(self.label))) g.add((u, RDFS.comment, Literal(self.comment, lang='en'))) for member in self.members: if "uri" in member: member_uri = URIRef(member["uri"]) g.add((u, RDFS.member, member_uri)) g.add((member_uri, RDFS.label, Literal(member["title"]))) elif isinstance(member, tuple): member_uri = URIRef(member[0]) g.add((u, RDFS.member, member_uri)) 
g.add((member_uri, RDFS.label, Literal(member[1]))) else: g.add((u, RDFS.member, URIRef(member))) other_qsas = [x + "=" + self.request.query_params[x] for x in self.request.query_params if x not in ["page", "per_page"]] if len(other_qsas) > 0: other_qsas_str = "&".join(other_qsas) + "&" else: other_qsas_str = '' page_uri_str = "{}?{}per_page={}&page={}".format(self.instance_uri, other_qsas_str, self.per_page, self.page) page_uri_str_nonum = "{}?{}per_page={}&page=".format(self.instance_uri, other_qsas_str, self.per_page) page_uri = URIRef(page_uri_str) g.add((page_uri, RDF.type, LDP.Page)) g.add((page_uri, LDP.pageOf, u)) g.add((page_uri, XHV.first, URIRef(page_uri_str_nonum + '1'))) g.add((page_uri, XHV.last, URIRef(page_uri_str_nonum + str(self.last_page)))) if self.page != 1: g.add((page_uri, XHV.prev, URIRef(page_uri_str_nonum + str(self.page - 1)))) if self.page != self.last_page: g.add((page_uri, XHV.next, URIRef(page_uri_str_nonum + str(self.page + 1)))) if self.parent_container_uri is not None: g.add((URIRef(self.parent_container_uri), RDF.Bag, u)) g.add((URIRef(self.parent_container_uri), RDFS.member, u)) if self.parent_container_label is not None: g.add((URIRef(self.parent_container_uri), RDFS.label, Literal(self.parent_container_label))) return g def _render_mem_profile_rdf(self): g = self._generate_mem_profile_rdf() return self._make_rdf_response(g) def _render_mem_profile_json(self): return JSONResponse( content={ 'uri': self.instance_uri, 'label': self.label, 'comment': self.comment, 'profiles': list(self.profiles.keys()), 'default_profile': self.default_profile_token, 'register_items': self.members }, media_type='application/json', headers=self.headers ) class ContainerOfContainersRenderer(ContainerRenderer):
BSD 3-Clause New or Revised License
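A sketch of wiring ContainerOfContainersRenderer from the record above into a FastAPI route. The URIs, labels and the cofc.ttl path are illustrative, and it assumes pyldapi's templates are available for the HTML profile; the constructor itself only needs the request, some register metadata and a Turtle file describing the member registers.

from fastapi import FastAPI, Request
from pyldapi.renderer_container import ContainerOfContainersRenderer

app = FastAPI()

@app.get("/")
async def register_of_registers(request: Request):
    renderer = ContainerOfContainersRenderer(
        request,
        instance_uri="https://example.org/",            # illustrative
        label="Register of Registers",
        comment="All registers served by this API",
        profiles=None,                                   # only the auto-added 'mem' profile
        cofc_file_path="cofc.ttl",                       # illustrative local Turtle file
    )
    return renderer.render()

A missing or unparsable Turtle file raises CofCTtlError, so the file is effectively part of the API's deployment artifacts.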
tunbehaun/phue-racing-flags
external/modified/rgbxy.py
ColorHelper.get_xy_point_from_rgb
python
def get_xy_point_from_rgb(self, red_i, green_i, blue_i):
    red = red_i / 255.0
    green = green_i / 255.0
    blue = blue_i / 255.0

    r = ((red + 0.055) / (1.0 + 0.055))**2.4 if (red > 0.04045) else (red / 12.92)
    g = ((green + 0.055) / (1.0 + 0.055))**2.4 if (green > 0.04045) else (green / 12.92)
    b = ((blue + 0.055) / (1.0 + 0.055))**2.4 if (blue > 0.04045) else (blue / 12.92)

    X = r * 0.664511 + g * 0.154324 + b * 0.162028
    Y = r * 0.283881 + g * 0.668433 + b * 0.047685
    Z = r * 0.000088 + g * 0.072310 + b * 0.986039

    if (X + Y + Z == 0):
        cx = 0
        cy = 0
    else:
        cx = X / (X + Y + Z)
        cy = Y / (X + Y + Z)

    xy_point = XYPoint(cx, cy)
    in_reach = self.check_point_in_lamps_reach(xy_point)

    if not in_reach:
        xy_point = self.get_closest_point_to_point(xy_point)

    return xy_point
Returns an XYPoint object containing the closest available CIE 1931 x, y coordinates based on the RGB input values.
https://github.com/tunbehaun/phue-racing-flags/blob/25a80520ab6a30ca4e3eb917e442aec2beb45d3c/external/modified/rgbxy.py#L150-L181
import math import random from collections import namedtuple __version__ = '0.5.1' XYPoint = namedtuple('XYPoint', ['x', 'y']) GamutA = ( XYPoint(0.704, 0.296), XYPoint(0.2151, 0.7106), XYPoint(0.138, 0.08), ) GamutB = ( XYPoint(0.675, 0.322), XYPoint(0.4091, 0.518), XYPoint(0.167, 0.04), ) GamutC = ( XYPoint(0.692, 0.308), XYPoint(0.17, 0.7), XYPoint(0.153, 0.048), ) def get_light_gamut(modelId): if modelId in ('LST001', 'LLC005', 'LLC006', 'LLC007', 'LLC010', 'LLC011', 'LLC012', 'LLC013', 'LLC014'): return GamutA elif modelId in ('LCT001', 'LCT007', 'LCT002', 'LCT003', 'LLM001'): return GamutB elif modelId in ('LCT010', 'LCT011', 'LCT012', 'LCT014', 'LCT015', 'LCT016', 'LLC020', 'LST002'): return GamutC else: raise ValueError return None class ColorHelper: def __init__(self, gamut=GamutB): self.Red = gamut[0] self.Lime = gamut[1] self.Blue = gamut[2] def hex_to_red(self, hex): return int(hex[0:2], 16) def hex_to_green(self, hex): return int(hex[2:4], 16) def hex_to_blue(self, hex): return int(hex[4:6], 16) def hex_to_rgb(self, h): rgb = (self.hex_to_red(h), self.hex_to_green(h), self.hex_to_blue(h)) return rgb def rgb_to_hex(self, r, g, b): return '%02x%02x%02x' % (r, g, b) def random_rgb_value(self): return random.randrange(0, 256) def cross_product(self, p1, p2): return (p1.x * p2.y - p1.y * p2.x) def check_point_in_lamps_reach(self, p): v1 = XYPoint(self.Lime.x - self.Red.x, self.Lime.y - self.Red.y) v2 = XYPoint(self.Blue.x - self.Red.x, self.Blue.y - self.Red.y) q = XYPoint(p.x - self.Red.x, p.y - self.Red.y) s = self.cross_product(q, v2) / self.cross_product(v1, v2) t = self.cross_product(v1, q) / self.cross_product(v1, v2) return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0) def get_closest_point_to_line(self, A, B, P): AP = XYPoint(P.x - A.x, P.y - A.y) AB = XYPoint(B.x - A.x, B.y - A.y) ab2 = AB.x * AB.x + AB.y * AB.y ap_ab = AP.x * AB.x + AP.y * AB.y t = ap_ab / ab2 if t < 0.0: t = 0.0 elif t > 1.0: t = 1.0 return XYPoint(A.x + AB.x * t, A.y + AB.y * t) def get_closest_point_to_point(self, xy_point): pAB = self.get_closest_point_to_line(self.Red, self.Lime, xy_point) pAC = self.get_closest_point_to_line(self.Blue, self.Red, xy_point) pBC = self.get_closest_point_to_line(self.Lime, self.Blue, xy_point) dAB = self.get_distance_between_two_points(xy_point, pAB) dAC = self.get_distance_between_two_points(xy_point, pAC) dBC = self.get_distance_between_two_points(xy_point, pBC) lowest = dAB closest_point = pAB if (dAC < lowest): lowest = dAC closest_point = pAC if (dBC < lowest): lowest = dBC closest_point = pBC cx = closest_point.x cy = closest_point.y return XYPoint(cx, cy) def get_distance_between_two_points(self, one, two): dx = one.x - two.x dy = one.y - two.y return math.sqrt(dx * dx + dy * dy)
MIT License
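A minimal standalone sketch of the same conversion math, for trying it outside the class: the helper name rgb_to_xy and the sample call are illustrative, and the sketch skips the gamut-clamping step that get_xy_point_from_rgb performs via check_point_in_lamps_reach.

# Standalone sRGB -> CIE xy conversion (no gamut clamping).
def rgb_to_xy(red_i, green_i, blue_i):
    # Normalize to 0..1 and apply the inverse sRGB gamma curve.
    def gamma(c):
        return ((c + 0.055) / 1.055) ** 2.4 if c > 0.04045 else c / 12.92
    r, g, b = (gamma(c / 255.0) for c in (red_i, green_i, blue_i))
    # Same wide-gamut RGB -> XYZ matrix as in the method above.
    X = r * 0.664511 + g * 0.154324 + b * 0.162028
    Y = r * 0.283881 + g * 0.668433 + b * 0.047685
    Z = r * 0.000088 + g * 0.072310 + b * 0.986039
    total = X + Y + Z
    return (0.0, 0.0) if total == 0 else (X / total, Y / total)

print(rgb_to_xy(255, 0, 0))   # roughly (0.70, 0.30), near the red corner of the gamut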
system73/tamarco
tamarco/core/settings/settings.py
SettingsView.cancel_watch_tasks
python
async def cancel_watch_tasks(self):
    await self.settings.cancel_watch_tasks()
Cancel all the pending watcher tasks of the settings in the etcd backend.
https://github.com/system73/tamarco/blob/c85bec267d39057a4cd5f1c9854d5e2840cebb1e/tamarco/core/settings/settings.py#L325-L327
import logging import os from typing import NewType, TypeVar from tamarco.core.patterns import Singleton from tamarco.core.settings.backends import DictSettingsBackend, EtcdSettingsBackend, YamlSettingsBackend from tamarco.core.settings.backends.interface import SettingsInterface, _EmptyArg, _Undefined from tamarco.core.settings.utils import dict_deep_update from tamarco.core.utils import get_etcd_configuration_from_environment_variables UNDEFINED = _Undefined logger = logging.getLogger("tamarco.settings") class SettingsNotLoadedYet(Exception): pass class SettingNotFound(Exception): def __init__(self, key): self.key = key Key = NewType("Key", str) Value = TypeVar("Value", str, int, float, dict) def get_yml_file_from_enviroment_variable(): tamarco_yml_file = os.environ.get("TAMARCO_YML_FILE", None) return tamarco_yml_file class Settings(SettingsInterface, metaclass=Singleton): def __init__(self): super().__init__() self.promised_settings = {} self.internal_backend = DictSettingsBackend({}) self.external_backend = None self.loop = None self.etcd_external = False def update_internal(self, dict_settings): dict_deep_update(self.internal_backend.settings, dict_settings) async def bind(self, loop): self.loop = loop async def start(self): self.internal_backend.set_loop(self.loop) await self._load_external_backend() await self._resolve_promised_settings() async def _load_external_backend(self): yaml_file = get_yml_file_from_enviroment_variable() etcd_config = get_etcd_configuration_from_environment_variables() if etcd_config: self.external_backend = EtcdSettingsBackend(etcd_config=etcd_config, loop=self.loop) await self.external_backend.check_etcd_health() self.etcd_external = True elif yaml_file: self.external_backend = YamlSettingsBackend(file=yaml_file, loop=self.loop) else: logger.warning("Could not get any settings external backend from the environment") async def _resolve_promised_settings(self): for key, proxies in self.promised_settings.items(): try: setting_value = await self.get(key) except Exception: logger.warning(f"Error loading promised setting : {key}") else: for proxy in proxies: object.__setattr__(proxy, "_obj", setting_value) def register_promised_setting(self, key, promised_setting): self.promised_settings[key].setdefault([]).append(promised_setting) async def get(self, key, default=_EmptyArg): logger.debug(f"Getting the setting: {key}") try: value = await self.internal_backend.get(key) if value != UNDEFINED: return value except KeyError: if self.external_backend: logger.debug(f"Setting {key} not found in internal cache, searching in external backend") return await self.get_external(key, default) if default != _EmptyArg: return default else: raise SettingNotFound(key) async def get_external(self, key, default=_EmptyArg): try: value = await self.external_backend.get(key, default) except Exception: logger.warning(f"Setting {key} not found in external backend") raise SettingNotFound(key) else: await self.internal_backend.set(key, value) return value async def set(self, key, value): logger.info(f"Changing the value of the setting: {key}") await self.internal_backend.set(key, value) if self.external_backend: await self.external_backend.set(key, value) async def delete(self, key): logger.info(f"Deleting the setting: {key}") await self.internal_backend.delete(key) if self.external_backend: await self.external_backend.delete(key) async def watch(self, key, callback): if self.etcd_external: await self.external_backend.watch(key, callback) else: logger.warning(f"Trying to watch the setting 
{key} when it is not in the ETCD backend") async def update_internal_settings(self, key, value): await self.internal_backend.set(key, value) logger.debug(f"The internal setting {key} has changed") async def watch_and_update(self, key): if self.etcd_external: await self.external_backend.watch(key, self.update_internal_settings) else: logger.warning(f"Trying to watch the setting {key} when it is not in the ETCD backend") async def stop(self): await self.cancel_watch_tasks() async def cancel_watch_tasks(self): if self.etcd_external: self.external_backend.cancel_watch_tasks() else: logger.warning(f"Trying to cancel all settings watcher tasks, but not ETCD backend found. Doing nothing") class SettingsView(SettingsInterface): def __init__(self, settings, prefix, microservice_name=None): self.prefix = prefix self.settings = settings self.microservice_name = microservice_name if microservice_name: framework_prefix, *setting_route = prefix.split(".") self.microservice_prefix = f"{framework_prefix}.microservices.{microservice_name}.{'.'.join(setting_route)}" async def get(self, key, default=_EmptyArg, raw=False): if not raw: general_key = f"{self.prefix}.{key}" if self.microservice_name: microservice_key = f"{self.microservice_prefix}.{key}" value = await self.settings.get(microservice_key, UNDEFINED) if value != UNDEFINED: return value logger.warning( f"Setting {microservice_key} not found in external backend, it will use {general_key} instead." ) return await self.settings.get(general_key, default) else: return await self.settings.get(key, default) async def set(self, key, value, raw=False): if not raw: key = f"{self.prefix}.{key}" return await self.settings.set(key, value) async def delete(self, key, raw=False): if not raw: key = f"{self.prefix}.{key}" return await self.settings.delete(key) async def watch(self, key, callback, raw=False): key_microservice = key if not raw: if self.microservice_name: key_microservice = f"{self.microservice_prefix}.{key}" key = f"{self.prefix}.{key}" await self.settings.watch(key, callback) if self.microservice_name: await self.settings.watch(key_microservice, callback) async def update_internal_settings(self, key, value): await self.settings.update_internal_settings(key, value) logger.debug(f"The internal setting {key} has changed")
MIT License
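A hedged usage sketch of the delegation chain, assuming the tamarco package is installed; FakeEtcdBackend is a stand-in of mine so the call can be exercised without a real etcd cluster.

import asyncio

# Hypothetical stand-in for the etcd backend, just to observe the cancel call.
class FakeEtcdBackend:
    def __init__(self):
        self.cancelled = False
    def cancel_watch_tasks(self):
        self.cancelled = True

async def main():
    from tamarco.core.settings.settings import Settings, SettingsView
    settings = Settings()
    settings.external_backend = FakeEtcdBackend()
    settings.etcd_external = True               # pretend the etcd backend is active
    view = SettingsView(settings, prefix="tamarco.my_resource")
    await view.cancel_watch_tasks()             # forwards to Settings.cancel_watch_tasks()
    print(settings.external_backend.cancelled)  # True

asyncio.run(main())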
clemsoncpsc-discord/clembot
ClemBot.Bot/bot/messaging/events.py
EventsMeta.on_guild_channel_delete
python
def on_guild_channel_delete(self):
    return 'on_guild_channel_delete'
Published when a text channel is deleted in a guild

Args:
    channel (discord.TextChannel): The deleted channel
https://github.com/clemsoncpsc-discord/clembot/blob/f625f42e367fdf4261685bc4374f042f5175df2a/ClemBot.Bot/bot/messaging/events.py#L345-L352
class EventsMeta(type): @property def on_example(self): return 'on_example' @property def on_guild_message_received(self): return '_on_guild_message_received' @property def on_dm_message_received(self): return '_on_dm_message_received' @property def on_raw_message_edit(self): return '_on_raw_message_edit' @property def on_message_edit(self): return '_on_message_edit' @property def on_raw_message_delete(self): return '_on_raw_message_delete' @property def on_message_delete(self): return '_on_message_delete' def on_reaction_add(self): return '_on_reaction_add' _on_raw_reaction_add = 'on_raw_reaction_add' @property def on_raw_reaction_add(self): return '_on_raw_reaction_add' @property def on_reaction_remove(self): return '_on_reaction_remove' @property def on_raw_reaction_remove(self): return '_on_raw_reaction_remove' @property def on_guild_joined(self): return '_on_guild_joined' @property def on_guild_leave(self): return '_on_guild_leave' @property def on_new_guild_initialized(self): return '_on_new_guild_initialized' @property def on_guild_role_create(self): return '_on_guild_role_create' @property def on_guild_role_update(self): return '_on_guild_role_update' @property def on_guild_role_delete(self): return '_on_guild_role_delete' @property def on_user_joined(self): return '_on_user_joined' @property def on_user_removed(self): return '_on_user_removed' @property def on_user_update(self): return '_on_user_update' @property def on_add_designated_channel(self): return '_on_add_designated_channel' @property def on_send_in_designated_channel(self): return 'on_send_in_designated_channel' @property def on_designated_message_sent(self): return 'on_designated_message_sent' @property def on_broadcast_designated_channel(self): return '_on_broadcast_designated_channel' @property def on_set_custom_prefix(self): return 'on_set_custom_prefix' @property def on_assignable_role_add(self): return 'on_assignable_role_add' @property def on_assignable_role_remove(self): return 'on_assignable_role_remove' @property def on_set_deletable(self): return '_on_set_deletable' @property def on_guild_channel_create(self): return 'on_guild_channel_create' @property
MIT License
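A minimal sketch of the underlying pattern: a property defined on a metaclass acts as a read-only, class-level constant, which is how EventsMeta exposes event keys. The class names here are illustrative, not ClemBot's.

class _EventsMeta(type):
    @property
    def on_guild_channel_delete(cls):
        return 'on_guild_channel_delete'

class Events(metaclass=_EventsMeta):
    pass

print(Events.on_guild_channel_delete)        # 'on_guild_channel_delete'
# Events.on_guild_channel_delete = 'other'   # would raise AttributeError: no setter defined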
deanishe/alfred-mailto
src/client.py
Client.get_default_app
python
def get_default_app(self):
    return self.wf.settings.get('default_app') or self.system_default_app
Return an info dict describing the default email client, or None

{'name': 'Application Name',
 'path': '/path/to/Application.app',
 'bundleid': 'com.application.bundleid'}
https://github.com/deanishe/alfred-mailto/blob/e9ba53682b43c98283233ef3c45ebf853b2850b4/src/client.py#L191-L201
from __future__ import print_function, unicode_literals, absolute_import from email.header import Header from fnmatch import fnmatch import os import re from time import time from urllib import quote from workflow.background import run_in_background, is_running from common import ONE_DAY, appname, bundleid import verbose_json as json log = None match_bundle_id = re.compile(r'kMDItemCFBundleIdentifier = "(.+)"').match MAX_APP_CACHE_AGE = ONE_DAY DEFAULT_RULES = { "spaces": True, "names": True, "mime": True, "no_commas": False, "inline_to": False } class Formatter(object): def __init__(self, client, wf): global log self.client = client self.wf = wf log = self.wf.logger client_rules = {} for path in [self.wf.workflowfile('client_rules.json'), self.wf.datafile('client_rules.json')]: if not os.path.exists(path): continue log.debug( 'Loading client formatting rules from {} ...'.format(path)) with open(path) as fp: client_rules.update(json.load(fp)) self.rules = DEFAULT_RULES for bundle_id in client_rules: if fnmatch(client, bundle_id): self.rules = client_rules.get(bundle_id) break for key in ('spaces', 'names', 'mime', 'no_commas', 'inline_to'): value = self.rules[key] setattr(self, 'use_{}'.format(key), value) log.debug(u'Loaded rules {!r} for client {!r}'.format(self.rules, client)) def get_url(self, contacts, use_names=False): log.debug(u"Building URL for app '{}'".format(self.client)) parts = [] encoded = False for contact in contacts: name, email = contact if not self.use_names or not use_names: parts.append(email) log.debug('[not use_names] {!r} --> {!r}'.format(contact, email)) continue elif name is None: parts.append(email) log.debug('[name not found] {!r} --> {!r}'.format(contact, email)) continue if self.use_mime: try: name = name.encode('ascii') except UnicodeEncodeError: name = str(Header(name, 'utf-8')) encoded = True if ',' in name: if self.use_no_commas: parts.append(email) log.debug('[use_no_commas] {!r} --> {!r}'.format(contact, email)) continue else: name = '"{}"'.format(name) addr = '{} <{}>'.format(name, email) log.debug('[default] {!r} --> {!r}'.format(contact, addr)) parts.append(addr) if self.use_spaces: result = ', '.join(parts) else: result = ','.join(parts) result = result.encode('utf-8') if encoded: result = quote(result, safe='@') if self.use_inline_to: return b'mailto:{}'.format(result) return b'mailto:?to={}'.format(result) class Client(object): def __init__(self, wf): global log self.wf = wf log = wf.logger self.all_email_apps = [] self.system_default_app = {} self.update()
MIT License
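A tiny standalone sketch of the same fallback logic, with made-up settings values: a user-chosen client wins, otherwise the system default is returned.

system_default_app = {'name': 'Mail', 'path': '/Applications/Mail.app',
                      'bundleid': 'com.apple.mail'}
settings = {}  # no user override saved yet

def get_default_app():
    # The user-chosen client wins; otherwise fall back to the system default.
    return settings.get('default_app') or system_default_app

print(get_default_app()['bundleid'])    # com.apple.mail
settings['default_app'] = {'name': 'Airmail', 'path': '/Applications/Airmail.app',
                           'bundleid': 'it.bloop.airmail2'}
print(get_default_app()['name'])        # Airmail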
deepgram/kur
setup.py
readme
python
def readme():
    with open('README.rst', 'rb') as fh:
        result = fh.read()
    result = result.decode('utf-8')
    token = '.. package_readme_ends_here'
    mark = result.find(token)
    if mark >= 0:
        result = result[:mark]
    token = '.. package_readme_starts_here'
    mark = result.find(token)
    if mark >= 0:
        result = result[mark+len(token):]
    chunks = []
    skip = False
    for chunk in result.split('\n\n'):
        if not chunk:
            pass
        elif chunk.strip().startswith('.. package_readme_ignore'):
            skip = True
        elif skip:
            skip = False
        else:
            chunks.append(chunk)
    result = '\n\n'.join(chunks)
    return result
Return the README text.
https://github.com/deepgram/kur/blob/fd0c120e50815c1e5be64e5dde964dcd47234556/setup.py#L53-L85
from __future__ import print_function

import sys


def error_message(msg):
    line_width = 60
    format_spec = '{{: ^{width}}}'.format(width=line_width)
    lines = [
        '',
        '',
        '='*line_width,
        '',
        'ERROR',
        '',
        msg,
        ''
        'See our troubleshooting page to get started:',
        '',
        'https://kur.deepgram.com/troubleshooting.html#installation',
        '',
        '='*line_width,
        '',
        "Uh, oh. There was an error. Look up there ^^^^ and you'll be",
        'training awesome models in no time!'
    ]
    for line in lines:
        print(format_spec.format(line), file=sys.stderr)
    sys.exit(1)


if sys.version_info < (3, 4):
    error_message('Kur requires Python 3.4 or later.')

import os
from setuptools import setup, find_packages
Apache License 2.0
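A self-contained sketch of the same trimming logic run on an in-memory string instead of README.rst; the sample text and the trim helper are illustrative.

sample = (
    "Intro kept out of the package description.\n\n"
    ".. package_readme_starts_here\n\n"
    "Kept paragraph one.\n\n"
    ".. package_readme_ignore\n\n"
    "Dropped because the previous marker flagged it.\n\n"
    "Kept paragraph two.\n\n"
    ".. package_readme_ends_here\n\n"
    "Trailing text kept out as well.\n"
)

def trim(result):
    # Cut at the end marker (keep the head), then at the start marker (keep the tail).
    for token, keep_tail in (('.. package_readme_ends_here', False),
                             ('.. package_readme_starts_here', True)):
        mark = result.find(token)
        if mark >= 0:
            result = result[mark + len(token):] if keep_tail else result[:mark]
    chunks, skip = [], False
    for chunk in result.split('\n\n'):
        if not chunk:
            continue
        elif chunk.strip().startswith('.. package_readme_ignore'):
            skip = True          # drop the next paragraph
        elif skip:
            skip = False
        else:
            chunks.append(chunk)
    return '\n\n'.join(chunks)

print(trim(sample))   # "Kept paragraph one.\n\nKept paragraph two."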
opengeoscience/geonotebook
geonotebook/vis/utils.py
discrete_colors
python
def discrete_colors(colormap, count):
    return [
        rgba2hex(colormap(float(i)))
        for i in range_count(0, 1, count)
    ]
Generate a list of evenly spaced colors from the given colormap.
https://github.com/opengeoscience/geonotebook/blob/a20a86909677ab02dc3e59c302f056ecb906b0e4/geonotebook/vis/utils.py#L49-L54
def rgba2hex(rgba):
    rgb = tuple([min(max(int(255 * i), 0), 255) for i in rgba[:3]])
    return "#{0:02x}{1:02x}{2:02x}".format(*rgb)


def range_count(start, stop, count):
    step = (stop - start) / float(count - 1)
    return [start + i * step for i in range(count)]


def generate_colormap(colormap, minimum, maximum):
    if hasattr(colormap, '__iter__'):
        return colormap
    else:
        col_list = ['#00007f', '#0000ff', '#0063ff', '#00d4ff', '#4dffa9',
                    '#a9ff4d', '#ffe500', '#ff7c00', '#ff1300', '#7f0000']
        quan_list = range_count(minimum, maximum, len(col_list))
        if hasattr(colormap, '__call__') and hasattr(colormap, 'N'):
            quan_list = range_count(minimum, maximum, colormap.N)
            col_list = [rgba2hex(colormap(i)) for i in range(colormap.N)]
        colormap = [
            {'color': c, 'quantity': q}
            for c, q in zip(col_list, quan_list)
        ]
        return colormap
Apache License 2.0
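Usage sketch: any callable that maps a float in [0, 1] to an RGBA tuple can serve as the colormap, so a plain lambda is enough; a matplotlib colormap such as cm.viridis would also qualify, assuming matplotlib is available.

from geonotebook.vis.utils import discrete_colors   # assumes the module above is importable

blue_to_red = lambda t: (t, 0.0, 1.0 - t, 1.0)       # simple RGBA-returning colormap
print(discrete_colors(blue_to_red, 5))
# ['#0000ff', '#3f00bf', '#7f007f', '#bf003f', '#ff0000']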
mobeets/bpcs
bpcs/text_to_image.py
get_word_color_map_fcn
python
def get_word_color_map_fcn(all_words):
    words = set(all_words)
    words.add(' ')
    ncolors = 256**3
    ncolors_per_word = ncolors/len(words)
    word_order = sorted(words)
    def get_word_color(word):
        ind = word_order.index(word)
        assert ind >= 0
        colors = digits_in_base_as_tuple(ind*ncolors_per_word, 256)
        while len(colors) < 3:
            colors = (0,) + colors
        assert len(colors) == 3
        return colors
    return get_word_color
Given a set of words, returns a function that maps each word to an RGB color, with the word colors spaced out as evenly as possible from one another.
https://github.com/mobeets/bpcs/blob/5a9fd5b73e40119c5ae8e42f9ee1a37e9da2cb4c/bpcs/text_to_image.py#L25-L44
import re
import string
from math import sqrt

import numpy as np
from PIL import Image

from .test_utils import show_html_diff


def digits_in_base_as_tuple(x, base):
    cur = x
    digs = []
    while cur:
        digs.append(cur % base)
        cur /= base
    return tuple(reversed(digs))
MIT License
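A Python 3 restatement of the same idea, since the module above relies on Python 2 integer division (cur /= base): word indices are spread evenly across the 256**3 RGB space and each index is expressed as base-256 digits. The names evenly_spaced_colors and color_of are mine.

def evenly_spaced_colors(all_words):
    words = sorted(set(all_words) | {' '})   # the space character is always included
    step = 256 ** 3 // len(words)            # even spacing across the RGB cube
    def color_of(word):
        value = words.index(word) * step
        # Extract the three base-256 digits (most significant first).
        return ((value >> 16) & 0xFF, (value >> 8) & 0xFF, value & 0xFF)
    return color_of

color_of = evenly_spaced_colors(['hello', 'world'])
print(color_of(' '), color_of('hello'), color_of('world'))
# (0, 0, 0) (85, 85, 85) (170, 170, 170)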
datastax/cstar_perf
tool/cstar_perf/tool/fab_common.py
run_python_script
python
def run_python_script(script_name, function_name, parameters):
    logger.info('Running {}.{} with parameters "{}"'.format(
        script_name, function_name, parameters
    ))
    resource_package = __name__
    resource_path = os.path.join('scripts', '{}.py'.format(script_name))
    script = pkg_resources.resource_string(resource_package, resource_path)
    script = script.replace('{function}', function_name).replace('{parameters}', parameters)
    return python(script)
Run a python function on the host
https://github.com/datastax/cstar_perf/blob/aceadd1d5a2331668647c53cca231ff9c1338eb4/tool/cstar_perf/tool/fab_common.py#L891-L903
from StringIO import StringIO import time import os import re import uuid import logging from fabric import api as fab from fabric.tasks import execute import yaml import pkg_resources from cluster_config import config as cluster_config from util import get_static_vnode_tokens from util import random_token import fab_dse as dse import fab_cassandra as cstar import fab_flamegraph as flamegraph import fab_profiler as profiler logging.basicConfig() logger = logging.getLogger('common') logger.setLevel(logging.INFO) fab.env.use_ssh_config = True fab.env.connection_attempts = 10 git_repos = [ ('apache', 'git://github.com/apache/cassandra.git'), ('knifewine', 'git://github.com/knifewine/cassandra.git'), ('mambocab', 'git://github.com/mambocab/cassandra.git'), ('jbellis', 'git://github.com/jbellis/cassandra.git'), ('josh-mckenzie', 'git://github.com/josh-mckenzie/cassandra.git'), ('marcuse', 'git://github.com/krummas/cassandra.git'), ('pcmanus', 'git://github.com/pcmanus/cassandra.git'), ('iamaleksey', 'git://github.com/iamaleksey/cassandra.git'), ('tjake', 'git://github.com/tjake/cassandra.git'), ('carlyeks', 'git://github.com/carlyeks/cassandra.git'), ('aweisberg', 'git://github.com/aweisberg/cassandra.git'), ('snazy', 'git://github.com/snazy/cassandra.git'), ('blambov', 'git://github.com/blambov/cassandra.git'), ('stef1927', 'git://github.com/stef1927/cassandra.git'), ('driftx', 'git://github.com/driftx/cassandra.git'), ('jeffjirsa', 'git://github.com/jeffjirsa/cassandra.git'), ('aboudreault', 'git://github.com/aboudreault/cassandra.git'), ('pauloricardomg','git://github.com/pauloricardomg/cassandra'), ('qzg', 'git://github.com/qzg/cassandra.git'), ('nitsanw', 'git://github.com/nitsanw/cassandra.git'), ('sbtourist', 'git://github.com/sbtourist/cassandra.git'), ('mshuler', 'git://github.com/mshuler/cassandra.git'), ('thobbs', 'git://github.com/thobbs/cassandra.git'), ('yukim', 'git://github.com/yukim/cassandra.git'), ('guyboltonking', 'git://github.com/guyboltonking/cassandra.git'), ] CMD_LINE_HOSTS_SPECIFIED = False if len(fab.env.hosts) > 0 : CMD_LINE_HOSTS_SPECIFIED = True logback_template = """<configuration scan="true"> <jmxConfigurator /> <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender"> <file>${cassandra.logdir}/system.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> <fileNamePattern>${cassandra.logdir}/system.log.%i.zip</fileNamePattern> <minIndex>1</minIndex> <maxIndex>20</maxIndex> </rollingPolicy> <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>20MB</maxFileSize> </triggeringPolicy> <encoder> <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern> <!-- old-style log format <pattern>%5level [%thread] %date{ISO8601} %F (line %L) %msg%n</pattern> --> </encoder> </appender> <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> <encoder> <pattern>%-5level %date{HH:mm:ss,SSS} %msg%n</pattern> </encoder> </appender> <root level="INFO"> <appender-ref ref="FILE" /> <appender-ref ref="STDOUT" /> </root> <logger name="com.thinkaurelius.thrift" level="ERROR"/> </configuration> """ log4j_template = """ log4j.rootLogger=INFO,stdout,R log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n log4j.appender.R=org.apache.log4j.RollingFileAppender log4j.appender.R.maxFileSize=20MB log4j.appender.R.maxBackupIndex=50 
log4j.appender.R.layout=org.apache.log4j.PatternLayout log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n log4j.appender.R.File=${cassandra.logdir}/system.log log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR """ logback_debug_template = """<configuration scan="true"> <jmxConfigurator /> <appender name="SYSTEMLOG" class="ch.qos.logback.core.rolling.RollingFileAppender"> <filter class="ch.qos.logback.classic.filter.ThresholdFilter"> <level>INFO</level> </filter> <file>${cassandra.logdir}/system.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> <fileNamePattern>${cassandra.logdir}/system.log.%i.zip</fileNamePattern> <minIndex>1</minIndex> <maxIndex>20</maxIndex> </rollingPolicy> <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>20MB</maxFileSize> </triggeringPolicy> <encoder> <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern> <!-- old-style log format <pattern>%5level [%thread] %date{ISO8601} %F (line %L) %msg%n</pattern> --> </encoder> </appender> <!-- DEBUGLOG rolling file appender to debug.log (all levels) --> <appender name="DEBUGLOG" class="ch.qos.logback.core.rolling.RollingFileAppender"> <file>${cassandra.logdir}/debug.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> <fileNamePattern>${cassandra.logdir}/debug.log.%i.zip</fileNamePattern> <minIndex>1</minIndex> <maxIndex>20</maxIndex> </rollingPolicy> <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>20MB</maxFileSize> </triggeringPolicy> <encoder> <pattern>%-5level [%thread] %date{ISO8601} %F:%L - %msg%n</pattern> </encoder> </appender> <!-- ASYNCLOG assynchronous appender to debug.log (all levels) --> <appender name="ASYNCDEBUGLOG" class="ch.qos.logback.classic.AsyncAppender"> <queueSize>1024</queueSize> <discardingThreshold>0</discardingThreshold> <includeCallerData>true</includeCallerData> <appender-ref ref="DEBUGLOG" /> </appender> <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> <encoder> <pattern>%-5level %date{HH:mm:ss,SSS} %msg%n</pattern> </encoder> </appender> <root level="INFO"> <appender-ref ref="SYSTEMLOG" /> <appender-ref ref="STDOUT" /> <appender-ref ref="ASYNCDEBUGLOG" /> <!-- Comment this line to disable debug.log --> </root> <logger name="org.apache.cassandra" level="DEBUG"/> <logger name="com.thinkaurelius.thrift" level="ERROR"/> </configuration> """ log4j_template = """ log4j.rootLogger=INFO,stdout,R log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n log4j.appender.R=org.apache.log4j.RollingFileAppender log4j.appender.R.maxFileSize=20MB log4j.appender.R.maxBackupIndex=50 log4j.appender.R.layout=org.apache.log4j.PatternLayout log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n log4j.appender.R.File=${cassandra.logdir}/system.log log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR """ DENIED_CSTAR_CONFIG = ['commitlog_directory', 'data_file_directories', 'saved_caches_directory', 'cdc_directory', 'cdc_overflow_directory'] CASSANDRA_STARTUP_LOG = os.path.join('~', 'nohup.out') def setup(my_config=None): global config def __get_balanced_tokens(node_count, partitioner='murmur3'): if partitioner == 'murmur3': return [str(((2**64 / node_count) * i) - 2**63) for i in range(node_count)] elif partitioner == 
'random': return [str(i*(2**127/node_count)) for i in range(0, node_count)] else: raise ValueError('Unknonwn partitioner: %s' % partitioner) default_config = { 'product': 'cassandra', 'revision': 'trunk', 'override_version': None, 'cluster_name': 'cstar_perf {random_string}'.format(random_string=random_token()), 'ant_tarball': 'http://www.apache.org/dist/ant/binaries/apache-ant-1.8.4-bin.tar.bz2', 'user': 'ryan', 'partitioner': 'murmur3', 'git_repo': 'git://github.com/apache/cassandra.git', 'use_vnodes': True, 'token_allocation': 'random', 'num_tokens': 256, 'data_file_directories': ['/var/lib/cassandra/data'], 'commitlog_directory': '/var/lib/cassandra/commitlog', 'saved_caches_directory': '/var/lib/cassandra/saved_caches', 'flush_directory': '/var/lib/cassandra/flush', 'cdc_directory': '/var/lib/cassandra/cdc', 'cdc_overflow_directory': '/var/lib/cassandra/cdc_overflow', 'log_dir': '~/fab/cassandra/logs', 'blockdev_readahead': None, 'block_devices': [], 'use_jna': True, 'env': '', 'java_home': '~/fab/java', 'yourkit_profiler': False, 'debug_logging': False } public_ips = "node0, node1, node2, node3" private_ips = "192.168.1.141,192.168.1.145,192.168.1.143,192.168.1.133" public_ips = public_ips.replace(" ","").split(",") private_ips = private_ips.replace(" ","").split(",") tokens = __get_balanced_tokens(len(public_ips), default_config['partitioner']) default_config.setdefault('hosts', {}) first_node = True for i, public, private, token in zip( xrange(len(public_ips)), public_ips, private_ips, tokens): if not default_config['hosts'].has_key(public): default_config['hosts'][public] = { 'hostname': 'node%s' % i, 'internal_ip': private, } if not default_config['use_vnodes']: default_config['hosts'][public]['initial_token'] = token if first_node: default_config['hosts'][public]['seed'] = True first_node = False if not my_config: config = default_config else: config = dict(default_config.items() + my_config.items()) for node in config['hosts'].values(): if not config['use_vnodes'] and not node.has_key('initial_token'): tokens = __get_balanced_tokens(len(config['hosts']), config['partitioner']) for node,token in zip(config['hosts'].values(), tokens): node['initial_token'] = token break config['seeds'] = [v.get('external_ip', v['internal_ip']) for v in config['hosts'].values() if v.get('seed',False)] if not CMD_LINE_HOSTS_SPECIFIED: fab.env.hosts = [h for h in config['hosts']] fab.env.user = config['user'] setup() setup(cluster_config) @fab.parallel def bootstrap(git_fetch=True, revision_override=None, replace_existing_dse_install=True): partitioner = config['partitioner'] fab.run('mkdir -p fab') if config['product'] in (None, ''): logger.warn("revision product was not set -- defaulting to cassandra") config['product'] = 'cassandra' elif config['product'] not in ('cassandra', 'dse'): raise ValueError("Invalid product. 
Should be cassandra or dse") product = dse if config['product'] == 'dse' else cstar if product.name == 'dse': rev_id = dse.bootstrap(config, replace_existing_dse_install=replace_existing_dse_install) else: rev_id = cstar.bootstrap(config, git_fetch=git_fetch, revision_override=revision_override) cassandra_path = product.get_cassandra_path() try: cfg = config['hosts'][fab.env.host] except KeyError: return rev_id if config['use_jna']: jna_jars = os.path.join(cassandra_path, 'lib/jna*.jar') jna_jar = os.path.join(cassandra_path, 'lib/jna.jar') jna_exists = fab.run('ls {}'.format(jna_jars), quiet=True) if jna_exists.return_code != 0: jna_candidates = ['/usr/share/java/jna/jna.jar', '/usr/share/java/jna.jar'] for jar in jna_candidates: if fab.run('ls {jar}'.format(jar=jar), quiet=True).return_code == 0: fab.run('ln -s {jar} {jna}'.format(jar=jar, jna=jna_jar)) break else: if not os.path.exists('fab/jna.jar'): request = download_file(JNA_LIB_URL, 'fab/jna.jar') if request.status_code != requests.codes.ok: raise AssertionError('Could not force JNA loading, no JNA jar found.') fab.put('fab/jna.jar', jna_jar) else: fab.run('rm -f {}'.format(os.path.join(cassandra_path, 'lib/jna*'))) conf_file = StringIO() fab.get(os.path.join(cassandra_path.replace('$HOME', '~'), 'conf', 'cassandra.yaml'), conf_file) conf_file.seek(0) cass_yaml = yaml.load(conf_file.read()) cstar_config_opts = product.get_cassandra_config_options(config) try: cstar_config_opts.remove('log') except ValueError: pass if product.name == 'dse': dse_config_options = product.get_dse_config_options(config) dse_conf_file = StringIO() dse_yaml_path = os.path.join(product.get_dse_conf_path(), 'dse.yaml') fab.get(dse_yaml_path.replace('$HOME', '~'), dse_conf_file) dse_conf_file.seek(0) dse_yaml = yaml.load(dse_conf_file.read()) configured_dse_yaml_settings = config.get('dse_yaml', {}) if configured_dse_yaml_settings: for option, value in configured_dse_yaml_settings.items(): if option not in dse_config_options: raise ValueError('Unknown dse.yaml option: {}'.format(option)) dse_yaml[option] = value dse_conf_file = StringIO() dse_conf_file.write(yaml.safe_dump(dse_yaml, encoding='utf-8', allow_unicode=True)) dse_conf_file.seek(0) fab.put(dse_conf_file, dse_yaml_path.replace('$HOME', '~')) for option, value in config.items(): if option in cstar_config_opts: cass_yaml[option] = value for option, value in config.get('yaml', {}).items(): if option in DENIED_CSTAR_CONFIG: raise ValueError( 'C* yaml option "{}" can only be set in the cluster config.'.format(option) ) elif option not in cstar_config_opts: raise ValueError('Unknown C* yaml option: {}'.format(option)) cass_yaml[option] = value if 'num_tokens' not in config.get('yaml', {}): if config.get('use_vnodes', True): cass_yaml['num_tokens'] = config['num_tokens'] else: cass_yaml['initial_token'] = cfg['initial_token'] cass_yaml['num_tokens'] = 1 cass_yaml['listen_address'] = cfg['internal_ip'] cass_yaml['broadcast_address'] = cfg.get('external_ip', cfg['internal_ip']) cass_yaml['seed_provider'][0]['parameters'][0]['seeds'] = ",".join(config['seeds']) if partitioner == 'random': cass_yaml['partitioner'] = 'org.apache.cassandra.dht.RandomPartitioner' elif partitioner == 'murmur3': cass_yaml['partitioner'] = 'org.apache.cassandra.dht.Murmur3Partitioner' cass_yaml['rpc_address'] = cfg['internal_ip'] if not config.has_key('endpoint_snitch'): for node in config['hosts'].values(): if node.get('datacenter',False): config['endpoint_snitch'] = "GossipingPropertyFileSnitch" cass_yaml['auto_bootstrap'] = 
False break else: config['endpoint_snitch'] = "SimpleSnitch" conf_dir = os.path.join(cassandra_path, 'conf/').replace('$HOME', '~') if config['endpoint_snitch'] == 'PropertyFileSnitch': cass_yaml['endpoint_snitch'] = 'PropertyFileSnitch' fab.run("echo 'default=dc1:r1' > {}".format(conf_dir+'cassandra-topology.properties')) for node in config['hosts'].values(): line = '%s=%s:%s' % (node['external_ip'], node.get('datacenter', 'dc1'), node.get('rack', 'r1')) fab.run("echo '{}' >> {}".format(line, conf_dir+'cassandra-topology.properties')) if config['endpoint_snitch'] == "GossipingPropertyFileSnitch": cass_yaml['endpoint_snitch'] = 'GossipingPropertyFileSnitch' fab.run("echo 'dc={dc}\nrack={rack}' > {out}".format( dc=cfg.get('datacenter','dc1'), rack=cfg.get('rack','r1'), out=conf_dir+'cassandra-rackdc.properties')) conf_file = StringIO() conf_file.write(yaml.safe_dump(cass_yaml, encoding='utf-8', allow_unicode=True)) conf_file.seek(0) fab.put(conf_file, conf_dir+'cassandra.yaml') logback_template_config = logback_debug_template if config.get('debug_logging', False) else logback_template logback_conf = StringIO() log_dir = fab.run("readlink -m {log_dir}".format(log_dir=config['log_dir'])) logback_conf.write(logback_template_config.replace("${cassandra.logdir}", log_dir)) logback_conf.seek(0) fab.put(logback_conf, conf_dir + 'logback.xml') log4j_conf = StringIO() log4j_conf.write(log4j_template.replace("${cassandra.logdir}",log_dir)) log4j_conf.seek(0) fab.put(log4j_conf, conf_dir+'log4j-server.properties') fincore_script = os.path.join(os.path.dirname(os.path.realpath(__file__)),'fincore_capture.py') fab.put(fincore_script, '~/fab/fincore_capture.py') return rev_id def _clean_up_cdc_directories(): if config.get('cdc_overflow_directory'): fab.run('rm -rf {overflow_dir}/*'.format(overflow_dir=config['cdc_overflow_directory'])) if config.get('cdc_directory'): fab.run('rm -rf {cdc_dir}/*'.format(cdc_dir=config['cdc_directory'])) @fab.parallel def destroy(leave_data=False, kill_delay=0): if leave_data: fab.run('JAVA_HOME={java_home} {nodetool_cmd} drain'.format(java_home=config['java_home'], nodetool_cmd=_nodetool_cmd()), quiet=True) fab.run('rm -rf {commitlog}/*'.format(commitlog=config['commitlog_directory'])) _clean_up_cdc_directories() if kill_delay: fab.run('killall java', quiet=True) time.sleep(kill_delay) fab.run('killall -9 java', quiet=True) fab.run('pkill -f "python.*fincore_capture"', quiet=True) fab.run('rm -rf fab/cassandra') fab.run('rm -rf fab/dse') fab.run('rm -rf fab/scripts') fab.run('rm -f {startup_log}'.format(startup_log=CASSANDRA_STARTUP_LOG)) assert type(config['data_file_directories']) == list for t in [config['saved_caches_directory'], config['commitlog_directory'], config['flush_directory'], config['log_dir']] + config['data_file_directories']: assert type(t) in (str, unicode) and len(t) > 1, '{t} doesn\'t look like a directory'.format(t=t) if not leave_data: for d in config['data_file_directories']: fab.run('rm -rf {data}/*'.format(data=d)) fab.run('rm -rf {saved_caches_directory}/*'.format(saved_caches_directory=config['saved_caches_directory'])) fab.run('rm -rf {commitlog}/*'.format(commitlog=config['commitlog_directory'])) fab.run('rm -rf {flushdir}/*'.format(flushdir=config['flush_directory'])) if config.get('hints_directory'): fab.run('rm -rf {hints_directory}/*'.format(hints_directory=config.get('hints_directory'))) _clean_up_cdc_directories() fab.run('rm -rf {log_dir}/*'.format(log_dir=config['log_dir'])) fab.run('rm -f /tmp/fincore.stats.log') @fab.parallel def 
start(): product = dse if config['product'] == 'dse' else cstar cassandra_path = product.get_cassandra_path() env = config.get('env', '') fab.puts('env is: {}'.format(env)) if isinstance(env, list) or isinstance(env, tuple): env = "\n".join(env) env += "\n" fab.puts('env is: {}'.format(env)) if not config['use_jna']: env += 'JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Dcassandra.boot_without_jna=true"\n\n' fab.run("mkdir -p ~/fab/cassandra/logs") log_dir = fab.run("readlink -m {log_dir}".format(log_dir=config['log_dir'])) try: ip_address = cluster_config['hosts'][fab.env.host]['internal_ip'] except: ip_address = fab.env.host env = "JVM_OPTS=\"$JVM_OPTS -Djava.rmi.server.hostname={hostname} -Xloggc:{log_dir}/gc.log\"\n\n".format( hostname=ip_address, log_dir=log_dir) + env env = "JVM_OPTS=\"$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n" + env if flamegraph.is_enabled(config): env += "JVM_OPTS=\"$JVM_OPTS -XX:+PreserveFramePointer\"\n" if profiler.yourkit_is_enabled(config): execute(profiler.yourkit_clean) env += profiler.yourkit_get_jvm_opts() fab.puts("running with token allocation type: {}".format(config['token_allocation'])) if config['use_vnodes'] and config['token_allocation'] in ('static-random', 'static-algorithmic'): env += "JVM_OPTS=\"$JVM_OPTS -Dcassandra.initial_token={}\"\n".format( get_static_vnode_tokens(fab.env.host, fab.env.hosts, partitioner=config['partitioner'], group=config['token_allocation'])) env += 'LOCAL_JMX=no\n' env += 'JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Dcom.sun.management.jmxremote.authenticate=false"\n\n' env_script = "{name}.sh".format(name=uuid.uuid1()) env_file = StringIO(env) fab.run('mkdir -p ~/fab/scripts') fab.put(env_file, '~/fab/scripts/{env_script}'.format(env_script=env_script)) fab.puts('env is: {}'.format(env)) if len(env_script) > 0: env_path = os.path.join(cassandra_path, 'conf/cassandra-env.sh') fab.run('echo >> ~/fab/scripts/{env_script}'.format(**locals())) fab.run('cat {env_path} >> ~/fab/scripts/{env_script}'.format(**locals())) fab.run('cp ~/fab/scripts/{env_script} {env_path}'.format(**locals())) product.start(config) @fab.parallel def stop(clean=True): product = dse if config['product'] == 'dse' else cstar product.stop(clean, config) def _nodetool_cmd(): product = dse if config['product'] == 'dse' else cstar bin_path = product.get_bin_path() return os.path.join(bin_path, 'nodetool') @fab.parallel def multi_nodetool(cmd): return fab.run('JAVA_HOME={java_home} {nodetool_cmd} {cmd}'.format(java_home=config['java_home'], nodetool_cmd=_nodetool_cmd(), cmd=cmd), warn_only=True) def ensure_running(retries=15, wait=10): time.sleep(15) for attempt in range(retries): ring = StringIO(fab.run('JAVA_HOME={java_home} {nodetool_bin} ring'.format( java_home=config['java_home'], nodetool_bin=_nodetool_cmd()))) broadcast_ips = [x.get('external_ip', x['internal_ip']) for x in config['hosts'].values()] nodes_up = dict((host,False) for host in broadcast_ips) for line in ring: for host in broadcast_ips: try: if host in line and " Up " in line: nodes_up[host] = True except UnicodeDecodeError: pass for node,up in nodes_up.items(): if not up: fab.puts("Node is not up (yet): %s" % node) if False not in nodes_up.values(): fab.puts("All nodes available!") return fab.puts("waiting %d seconds to try again.." 
% wait) time.sleep(wait) else: fab.abort("Timed out waiting for all nodes to startup") @fab.parallel def ensure_stopped(retries=15, wait=10): product = dse if config['product'] == 'dse' else cstar for attempt in range(retries): pgrep = fab.run('pgrep -f "java.*org.apache.*.CassandraDaemon"', quiet=True) if not product.is_running(): fab.puts('Cassandra shutdown.') return fab.puts("waiting %d seconds to try again.." % wait) time.sleep(wait) else: fab.abort("Timed out waiting for all nodes to stop") @fab.parallel def install_java(packages=None): dist = fab.run('lsb_release -is', quiet=True) if dist.return_code != 0: dist = fab.run('cat /etc/redhat-release', quiet=True) if dist.startswith('CentOS'): if not packages: packages = ['java-1.7.0-openjdk.x86_64', 'java-1.7.0-openjdk-devel.x86_64'] cmd = 'yum -y install {package}' elif dist.startswith('Ubuntu'): if not packages: packages = ['openjdk-7-jdk'] fab.run('apt-get update') cmd = 'apt-get -y install {package}' else: raise RuntimeError('Unknown distribution: %s' % dist) for package in packages: fab.run(cmd.format(package=package)) @fab.parallel def configure_hostnames(): cfg = config['hosts'][fab.env.host] fab.run('hostname {0}'.format(cfg['hostname'])) fab.run('echo "127.0.0.1 localhost.localdomain localhost"' ' > /etc/hosts') fab.run('echo "::1 localhost.localdomain localhost" >> /etc/hosts') for cfg in config['hosts'].values(): fab.run('echo "{ip} {hostname}" >> /etc/hosts'.format( ip=cfg['internal_ip'], hostname=cfg['hostname'])) @fab.parallel def copy_logs(local_directory): with fab.settings(warn_only=True): cfg = config['hosts'][fab.env.host] host_log_dir = os.path.join(local_directory, cfg['hostname']) if not os.path.exists(host_log_dir): os.makedirs(host_log_dir) fab.get(CASSANDRA_STARTUP_LOG, host_log_dir) fab.get(os.path.join(config['log_dir'], '*'), host_log_dir) @fab.parallel def start_fincore_capture(interval=10): fab.puts("Starting fincore_capture daemon...") fab.run('python2.7 fab/fincore_capture.py -i {interval}'.format(interval=interval)) @fab.parallel def stop_fincore_capture(): fab.puts("Stopping fincore_capture.") fab.run('pkill -f ".*python.*fincore_capture"', quiet=True) @fab.parallel def copy_fincore_logs(local_directory): cfg = config['hosts'][fab.env.host] location = os.path.join(local_directory, "fincore.{host}.log".format(host=cfg['hostname'])) fab.get('/tmp/fincore.stats.log', location) @fab.parallel def whoami(): fab.run('whoami') @fab.parallel def copy_root_setup(): fab.run('ln -s ~/fab/apache-ant-1.9.2 ~/fab/ant') @fab.parallel def set_device_read_ahead(read_ahead, devices): with fab.settings(user='root'): for device in devices: if 'docker' in device: continue fab.run('blockdev --setra {read_ahead} {device}'.format(read_ahead=read_ahead,device=device)) @fab.parallel def build_jbod_drives(device_mounts, md_device='/dev/md/striped', filesystem='ext4'): with fab.settings(user='root'): if isinstance(device_mounts, basestring): device_mounts = eval(device_mounts) fab.run('umount {md_device} && mdadm --stop {md_device}'.format(**locals()), quiet=True) mounted = fab.run('mount',quiet=True) for device in [d for d in device_mounts.keys() if d in mounted]: fab.run('umount -f {device}'.format(**locals())) fab.run('echo -e "{devices}" | xargs -n 1 -P {num} -iXX mkfs -t {filesystem} XX'.format(devices="\n".join(device_mounts.keys()), filesystem=filesystem, num=len(device_mounts))) for device,mountpoint in device_mounts.items(): fab.run('mount {device} {mountpoint}'.format(**locals())) fab.run('chmod 777 
{mountpoint}'.format(**locals())) @fab.parallel def build_striped_drives(devices, mount_point='/mnt/striped', filesystem='ext4', chunk_size=64, md_device='/dev/md/striped'): with fab.settings(user='root'): if isinstance(devices, basestring): devices = devices.split(";") mounted = fab.run('mount',quiet=True) for device in [d for d in devices + [mount_point] if d in mounted]: fab.run('umount -f {device}'.format(**locals())) fab.run('mdadm --stop {md_device}'.format(**locals()),quiet=True) fab.run('mdadm --create {md_device} -R --verbose --level=0 --metadata=1.2 --chunk={chunk_size} --raid-devices={num} {devices}'.format( md_device=md_device, num=len(devices), chunk_size=chunk_size, devices=" ".join(devices))) fab.run('mkfs -t {filesystem} {md_device}'.format(**locals())) fab.run('mkdir -p {mount_point} && mount {md_device} {mount_point}'.format(**locals())) fab.run('chmod 777 {mount_point}'.format(**locals())) def parse_output(output): line_pattern = re.compile('\[(?P<host>.+)\] out: (?P<line>.+)') results = {} for line in output: m = line_pattern.match(line) if m: host = m.group('host') if host not in results: results[host] = [] line = m.group('line').strip() if line: results[host].append(line) return results @fab.parallel def bash(script): script = StringIO(script) fab.run('mkdir -p ~/fab/scripts') script_path = '~/fab/scripts/{script_name}.sh'.format(script_name=uuid.uuid1()) fab.put(script, script_path) output = StringIO() fab.run('bash {script_path}'.format(script_path=script_path), stdout=output, stderr=output) output.seek(0) return output.read().splitlines() def runbg(cmd, envs, sockname="dtach"): env_vars = "" for var, value in envs.iteritems(): env_vars += "{}={} ".format(var, value) cmd_ = '{} dtach -n `mktemp -u /tmp/{}.XXXX` {}'.format(env_vars, sockname, cmd) logger.info("Running background task: {}".format(cmd_)) return fab.run(cmd_) @fab.parallel def python(script): script = StringIO(script) fab.run('mkdir -p ~/fab/scripts') script_path = '~/fab/scripts/{script_name}.py'.format(script_name=uuid.uuid1()) fab.put(script, script_path) output = StringIO() with fab.settings(warn_only=True): retval = fab.run( 'python {script_path}'.format(script_path=script_path), stdout=output, stderr=output ) output.seek(0) output = output.read() if retval.return_code != 0: logger.info(output) raise Exception('Error while running python script') return output.splitlines()
Apache License 2.0
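A local, standalone sketch of the template idea behind run_python_script: a script containing literal {function} and {parameters} placeholders is filled in with str.replace and then executed, here via subprocess on the local machine rather than fabric's python() helper on a remote host. The template text is made up.

import subprocess
import sys

SCRIPT_TEMPLATE = """\
def greet(name):
    print('hello, ' + name)

{function}({parameters})
"""

# Substitute the placeholders, then run the resulting script in a fresh interpreter.
script = SCRIPT_TEMPLATE.replace('{function}', 'greet').replace('{parameters}', "'cstar'")
result = subprocess.run([sys.executable, '-c', script], capture_output=True, text=True)
print(result.stdout)   # hello, cstar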
merqurio/neo4jupyter
neo4jupyter.py
init_notebook_mode
python
def init_notebook_mode():
    display(
        Javascript(data="require.config({ " +
                        " paths: { " +
                        " vis: '//cdnjs.cloudflare.com/ajax/libs/vis/4.8.2/vis.min' " +
                        " } " +
                        "}); " +
                        "require(['vis'], function(vis) { " +
                        " window.vis = vis; " +
                        "}); ",
                   css='https://cdnjs.cloudflare.com/ajax/libs/vis/4.8.2/vis.css')
    )
Displays a script tag that configures require.js to load vis.js (and its stylesheet) so that graphs can be rendered later in the notebook.
https://github.com/merqurio/neo4jupyter/blob/54659738d7058bc4738f20b688a0598bb274cf29/neo4jupyter.py#L23-L38
import os
import json
import uuid
import tempfile
from IPython.display import HTML, Javascript, display

DEFAULT_PHYSICS = {
    "physics": {
        "barnesHut": {
            "gravitationalConstant": -15150,
            "centralGravity": 3.45,
            "springLength": 261,
            "damping": 0.3
        }
    }
}


def get_visjs():
    return
MIT License
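Usage sketch, only meaningful inside a Jupyter notebook where require.js is available and the package is installed:

import neo4jupyter                  # assumes the neo4jupyter package is installed
neo4jupyter.init_notebook_mode()    # registers vis.js and its CSS with require.js
# Subsequent draw calls can then rely on window.vis in the notebook frontend.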
jmagnusson/netsuite
netsuite/soap_api/passport.py
TokenPassport._generate_timestamp
python
def _generate_timestamp(self) -> str:
    return str(int(datetime.now().timestamp()))
Generate timestamp

Returns:
    str: A seconds precision timestamp
https://github.com/jmagnusson/netsuite/blob/49e06426b2065bd7be977f0b26fe003b7487f735/netsuite/soap_api/passport.py#L35-L41
import base64
import hmac
import random
from datetime import datetime
from typing import Dict, TypeVar

from ..config import Config, TokenAuth

NetSuite = TypeVar("NetSuite")


class Passport:
    def get_element(self) -> str:
        raise NotImplementedError


class TokenPassport(Passport):
    def __init__(
        self,
        ns: NetSuite,
        *,
        account: str,
        consumer_key: str,
        consumer_secret: str,
        token_id: str,
        token_secret: str,
    ) -> None:
        self.ns = ns
        self.account = account
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.token_id = token_id
        self.token_secret = token_secret
MIT License
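A quick standalone check of what the method produces: whole seconds since the Unix epoch, so it tracks int(time.time()) up to clock resolution.

import time
from datetime import datetime

ts = str(int(datetime.now().timestamp()))
print(ts, abs(int(ts) - int(time.time())) <= 1)   # e.g. '1712345678 True'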
virtuesecurity/aws-extender
BappModules/boto/configservice/layer1.py
ConfigServiceConnection.describe_delivery_channel_status
python
def describe_delivery_channel_status(self, delivery_channel_names=None):
    params = {}
    if delivery_channel_names is not None:
        params['DeliveryChannelNames'] = delivery_channel_names
    return self.make_request(action='DescribeDeliveryChannelStatus',
                             body=json.dumps(params))
Returns the current status of the specified delivery channel. If a delivery channel is not specified, this action returns the current status of all delivery channels associated with the account.

:type delivery_channel_names: list
:param delivery_channel_names: A list of delivery channel names.
https://github.com/virtuesecurity/aws-extender/blob/3029dd26bd7bdf7f4148e1e92adf9f8c547cafbe/BappModules/boto/configservice/layer1.py#L188-L203
import boto from boto.compat import json from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo from boto.exception import JSONResponseError from boto.configservice import exceptions class ConfigServiceConnection(AWSQueryConnection): APIVersion = "2014-11-12" DefaultRegionName = "us-east-1" DefaultRegionEndpoint = "config.us-east-1.amazonaws.com" ServiceName = "ConfigService" TargetPrefix = "StarlingDoveService" ResponseError = JSONResponseError _faults = { "InvalidLimitException": exceptions.InvalidLimitException, "NoSuchBucketException": exceptions.NoSuchBucketException, "InvalidSNSTopicARNException": exceptions.InvalidSNSTopicARNException, "ResourceNotDiscoveredException": exceptions.ResourceNotDiscoveredException, "MaxNumberOfDeliveryChannelsExceededException": exceptions.MaxNumberOfDeliveryChannelsExceededException, "LastDeliveryChannelDeleteFailedException": exceptions.LastDeliveryChannelDeleteFailedException, "InsufficientDeliveryPolicyException": exceptions.InsufficientDeliveryPolicyException, "InvalidRoleException": exceptions.InvalidRoleException, "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, "NoSuchDeliveryChannelException": exceptions.NoSuchDeliveryChannelException, "NoSuchConfigurationRecorderException": exceptions.NoSuchConfigurationRecorderException, "InvalidS3KeyPrefixException": exceptions.InvalidS3KeyPrefixException, "InvalidDeliveryChannelNameException": exceptions.InvalidDeliveryChannelNameException, "NoRunningConfigurationRecorderException": exceptions.NoRunningConfigurationRecorderException, "ValidationException": exceptions.ValidationException, "NoAvailableConfigurationRecorderException": exceptions.NoAvailableConfigurationRecorderException, "InvalidNextTokenException": exceptions.InvalidNextTokenException, "InvalidConfigurationRecorderNameException": exceptions.InvalidConfigurationRecorderNameException, "NoAvailableDeliveryChannelException": exceptions.NoAvailableDeliveryChannelException, "MaxNumberOfConfigurationRecordersExceededException": exceptions.MaxNumberOfConfigurationRecordersExceededException, } def __init__(self, **kwargs): region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) if 'host' not in kwargs or kwargs['host'] is None: kwargs['host'] = region.endpoint super(ConfigServiceConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): return ['hmac-v4'] def delete_delivery_channel(self, delivery_channel_name): params = {'DeliveryChannelName': delivery_channel_name, } return self.make_request(action='DeleteDeliveryChannel', body=json.dumps(params)) def deliver_config_snapshot(self, delivery_channel_name): params = {'deliveryChannelName': delivery_channel_name, } return self.make_request(action='DeliverConfigSnapshot', body=json.dumps(params)) def describe_configuration_recorder_status(self, configuration_recorder_names=None): params = {} if configuration_recorder_names is not None: params['ConfigurationRecorderNames'] = configuration_recorder_names return self.make_request(action='DescribeConfigurationRecorderStatus', body=json.dumps(params)) def describe_configuration_recorders(self, configuration_recorder_names=None): params = {} if configuration_recorder_names is not None: params['ConfigurationRecorderNames'] = configuration_recorder_names return self.make_request(action='DescribeConfigurationRecorders', body=json.dumps(params))
MIT License
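A hedged usage sketch, assuming boto is installed and AWS credentials are discoverable through boto's usual lookup (environment variables or boto config); the channel name 'default' is illustrative.

from boto.configservice.layer1 import ConfigServiceConnection

conn = ConfigServiceConnection()             # region/host default to us-east-1 as shown above
status = conn.describe_delivery_channel_status(
    delivery_channel_names=['default'])      # omit the argument to query every channel
print(status)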
diptochakrabarty/flask-online-store
venv/lib/python3.6/site-packages/flask_jwt_extended/jwt_manager.py
JWTManager.user_loader_error_loader
python
def user_loader_error_loader(self, callback):
    self._user_loader_error_callback = callback
    return callback
This decorator sets the callback function that will be called if `None` is returned from the :meth:`~flask_jwt_extended.JWTManager.user_loader_callback_loader` callback function. The default implementation will return a 401 status code with the JSON:

{"msg": "Error loading the user <identity>"}

*HINT*: The callback must be a function that takes **one** argument, which is the identity of the user who failed to load, and must return a *Flask response*.
https://github.com/diptochakrabarty/flask-online-store/blob/74db206565aee1920a508d009a4b866d9a848c6a/venv/lib/python3.6/site-packages/flask_jwt_extended/jwt_manager.py#L364-L378
import datetime from warnings import warn from jwt import ( ExpiredSignatureError, InvalidTokenError, InvalidAudienceError, InvalidIssuerError, DecodeError ) try: from flask import _app_ctx_stack as ctx_stack except ImportError: from flask import _request_ctx_stack as ctx_stack from flask_jwt_extended.config import config from flask_jwt_extended.exceptions import ( JWTDecodeError, NoAuthorizationError, InvalidHeaderError, WrongTokenError, RevokedTokenError, FreshTokenRequired, CSRFError, UserLoadError, UserClaimsVerificationError ) from flask_jwt_extended.default_callbacks import ( default_expired_token_callback, default_user_claims_callback, default_user_identity_callback, default_invalid_token_callback, default_unauthorized_callback, default_needs_fresh_token_callback, default_revoked_token_callback, default_user_loader_error_callback, default_claims_verification_callback, default_verify_claims_failed_callback, default_decode_key_callback, default_encode_key_callback, default_jwt_headers_callback) from flask_jwt_extended.tokens import ( encode_refresh_token, encode_access_token ) from flask_jwt_extended.utils import get_jwt_identity class JWTManager(object): def __init__(self, app=None): self._user_claims_callback = default_user_claims_callback self._user_identity_callback = default_user_identity_callback self._expired_token_callback = default_expired_token_callback self._invalid_token_callback = default_invalid_token_callback self._unauthorized_callback = default_unauthorized_callback self._needs_fresh_token_callback = default_needs_fresh_token_callback self._revoked_token_callback = default_revoked_token_callback self._user_loader_callback = None self._user_loader_error_callback = default_user_loader_error_callback self._token_in_blacklist_callback = None self._claims_verification_callback = default_claims_verification_callback self._verify_claims_failed_callback = default_verify_claims_failed_callback self._decode_key_callback = default_decode_key_callback self._encode_key_callback = default_encode_key_callback self._jwt_additional_header_callback = default_jwt_headers_callback if app is not None: self.init_app(app) def init_app(self, app): if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['flask-jwt-extended'] = self self._set_default_configuration_options(app) self._set_error_handler_callbacks(app) def _set_error_handler_callbacks(self, app): @app.errorhandler(NoAuthorizationError) def handle_auth_error(e): return self._unauthorized_callback(str(e)) @app.errorhandler(CSRFError) def handle_csrf_error(e): return self._unauthorized_callback(str(e)) @app.errorhandler(ExpiredSignatureError) def handle_expired_error(e): try: token = ctx_stack.top.expired_jwt return self._expired_token_callback(token) except TypeError: msg = ( "jwt.expired_token_loader callback now takes the expired token " "as an additional paramter. 
Example: expired_callback(token)" ) warn(msg, DeprecationWarning) return self._expired_token_callback() @app.errorhandler(InvalidHeaderError) def handle_invalid_header_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(DecodeError) def handle_invalid_header_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(InvalidTokenError) def handle_invalid_token_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(JWTDecodeError) def handle_jwt_decode_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(WrongTokenError) def handle_wrong_token_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(InvalidAudienceError) def handle_invalid_audience_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(InvalidIssuerError) def handle_invalid_issuer_error(e): return self._invalid_token_callback(str(e)) @app.errorhandler(RevokedTokenError) def handle_revoked_token_error(e): return self._revoked_token_callback() @app.errorhandler(FreshTokenRequired) def handle_fresh_token_required(e): return self._needs_fresh_token_callback() @app.errorhandler(UserLoadError) def handler_user_load_error(e): identity = get_jwt_identity() return self._user_loader_error_callback(identity) @app.errorhandler(UserClaimsVerificationError) def handle_failed_user_claims_verification(e): return self._verify_claims_failed_callback() @staticmethod def _set_default_configuration_options(app): app.config.setdefault('JWT_TOKEN_LOCATION', ('headers',)) app.config.setdefault('JWT_HEADER_NAME', 'Authorization') app.config.setdefault('JWT_HEADER_TYPE', 'Bearer') app.config.setdefault('JWT_QUERY_STRING_NAME', 'jwt') app.config.setdefault('JWT_ACCESS_COOKIE_NAME', 'access_token_cookie') app.config.setdefault('JWT_REFRESH_COOKIE_NAME', 'refresh_token_cookie') app.config.setdefault('JWT_ACCESS_COOKIE_PATH', '/') app.config.setdefault('JWT_REFRESH_COOKIE_PATH', '/') app.config.setdefault('JWT_COOKIE_SECURE', False) app.config.setdefault('JWT_COOKIE_DOMAIN', None) app.config.setdefault('JWT_SESSION_COOKIE', True) app.config.setdefault('JWT_COOKIE_SAMESITE', None) app.config.setdefault('JWT_JSON_KEY', 'access_token') app.config.setdefault('JWT_REFRESH_JSON_KEY', 'refresh_token') app.config.setdefault('JWT_COOKIE_CSRF_PROTECT', True) app.config.setdefault('JWT_CSRF_METHODS', ['POST', 'PUT', 'PATCH', 'DELETE']) app.config.setdefault('JWT_ACCESS_CSRF_HEADER_NAME', 'X-CSRF-TOKEN') app.config.setdefault('JWT_REFRESH_CSRF_HEADER_NAME', 'X-CSRF-TOKEN') app.config.setdefault('JWT_CSRF_IN_COOKIES', True) app.config.setdefault('JWT_ACCESS_CSRF_COOKIE_NAME', 'csrf_access_token') app.config.setdefault('JWT_REFRESH_CSRF_COOKIE_NAME', 'csrf_refresh_token') app.config.setdefault('JWT_ACCESS_CSRF_COOKIE_PATH', '/') app.config.setdefault('JWT_REFRESH_CSRF_COOKIE_PATH', '/') app.config.setdefault('JWT_CSRF_CHECK_FORM', False) app.config.setdefault('JWT_ACCESS_CSRF_FIELD_NAME', 'csrf_token') app.config.setdefault('JWT_REFRESH_CSRF_FIELD_NAME', 'csrf_token') app.config.setdefault('JWT_ACCESS_TOKEN_EXPIRES', datetime.timedelta(minutes=15)) app.config.setdefault('JWT_REFRESH_TOKEN_EXPIRES', datetime.timedelta(days=30)) app.config.setdefault('JWT_ALGORITHM', 'HS256') app.config.setdefault('JWT_DECODE_ALGORITHMS', None) app.config.setdefault('JWT_SECRET_KEY', None) app.config.setdefault('JWT_PRIVATE_KEY', None) app.config.setdefault('JWT_PUBLIC_KEY', None) app.config.setdefault('JWT_BLACKLIST_ENABLED', False) app.config.setdefault('JWT_BLACKLIST_TOKEN_CHECKS', 
('access', 'refresh')) app.config.setdefault('JWT_IDENTITY_CLAIM', 'identity') app.config.setdefault('JWT_USER_CLAIMS', 'user_claims') app.config.setdefault('JWT_DECODE_AUDIENCE', None) app.config.setdefault('JWT_DECODE_ISSUER', None) app.config.setdefault('JWT_DECODE_LEEWAY', 0) app.config.setdefault('JWT_CLAIMS_IN_REFRESH_TOKEN', False) app.config.setdefault('JWT_ERROR_MESSAGE_KEY', 'msg') def user_claims_loader(self, callback): self._user_claims_callback = callback return callback def user_identity_loader(self, callback): self._user_identity_callback = callback return callback def expired_token_loader(self, callback): self._expired_token_callback = callback return callback def invalid_token_loader(self, callback): self._invalid_token_callback = callback return callback def unauthorized_loader(self, callback): self._unauthorized_callback = callback return callback def needs_fresh_token_loader(self, callback): self._needs_fresh_token_callback = callback return callback def revoked_token_loader(self, callback): self._revoked_token_callback = callback return callback def user_loader_callback_loader(self, callback): self._user_loader_callback = callback return callback
MIT License
dana-at-cp/cpauto
cpauto/objects/access.py
NATSection.show
python
def show(self, package='', name='', uid='', params={}): return self.__post('show-nat-section', package, name, uid, params)
Shows details of a NAT section within a package. https://sc1.checkpoint.com/documents/R80/APIs/#web/show-nat-section :param package: Package that the section belongs to identified by name. :param name: (optional) The name of an existing NAT section. :param uid: (optional) The unique identifier of an existing NAT section. :param params: (optional) A dictionary of additional, supported parameter names and values. :rtype: CoreClientResult
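A hedged usage sketch for the function above (not part of the source record): it assumes an authenticated cpauto CoreClient already exists, and the package and section names are placeholders.

# Usage sketch; `core_client` is assumed to be an authenticated cpauto CoreClient.
from cpauto.objects.access import NATSection

nat_section = NATSection(core_client)
result = nat_section.show(package='Standard',            # placeholder package name
                          name='Example NAT Section')     # placeholder section name
# `result` is a CoreClientResult wrapping the API response; the exact accessors are assumptions.
print(result.status_code)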
https://github.com/dana-at-cp/cpauto/blob/bade112950124677c906886d39f885164f167c59/cpauto/objects/access.py#L347-L358
from ._common import _CommonClient class AccessRule: def __init__(self, core_client): self.__cc = core_client self.__common_client = _CommonClient(core_client) def add(self, layer="", position="", params={}): return self.__common_client._add_with_layer('add-access-rule', layer, position, params) def show(self, layer='', name='', uid='', params={}): return self.__common_client._post_with_layer('show-access-rule', layer, name, uid, params) def set(self, layer='', name='', uid='', params={}): return self.__common_client._post_with_layer('set-access-rule', layer, name, uid, params) def delete(self, layer='', name='', uid='', params={}): return self.__common_client._post_with_layer('delete-access-rule', layer, name, uid, params) def show_all(self, name='', params={}): payload = { 'name': name } if params: payload = self.__cc.merge_payloads(payload, params) return self.__cc.http_post('show-access-rulebase', payload=payload) class AccessSection: def __init__(self, core_client): self.__cc = core_client self.__common_client = _CommonClient(core_client) def add(self, layer="", position="", params={}): return self.__common_client._add_with_layer('add-access-section', layer, position, params) def show(self, layer='', name='', uid='', params={}): return self.__common_client._post_with_layer('show-access-section', layer, name, uid, params) def set(self, layer='', name='', uid='', params={}): return self.__common_client._post_with_layer('set-access-section', layer, name, uid, params) def delete(self, layer='', name='', uid='', params={}): return self.__common_client._post_with_layer('delete-access-section', layer, name, uid, params) class AccessLayer: def __init__(self, core_client): self.__cc = core_client self.__common_client = _CommonClient(core_client) def add(self, name="", params={}): payload = { 'name': name } if params: payload = self.__cc.merge_payloads(payload, params) return self.__cc.http_post('add-access-layer', payload=payload) def show(self, name='', uid='', details_level=''): return self.__common_client._show('show-access-layer', name=name, uid=uid, details_level=details_level) def set(self, name='', uid='', params={}): return self.__common_client._set('set-access-layer', name=name, uid=uid, params=params) def delete(self, name='', uid='', params={}): return self.__common_client._delete('delete-access-layer', name=name, uid=uid, params=params) def show_all(self, limit=50, offset=0, order=[], details_level=''): return self.__common_client._show_all('show-access-layers', limit=limit, offset=offset, order=order, details_level=details_level) class NATRule: def __init__(self, core_client): self.__cc = core_client def __post(self, endpoint, package="", uid="", params={}): payload = { 'package': package } if uid: payload['uid'] = uid if params: payload = self.__cc.merge_payloads(payload, params) return self.__cc.http_post(endpoint, payload=payload) def add(self, package="", position="", params={}): payload = { 'package': package, 'position': position } if params: payload = self.__cc.merge_payloads(payload, params) return self.__cc.http_post('add-nat-rule', payload=payload) def show(self, package="", uid="", params={}): return self.__post('show-nat-rule', package, uid, params) def set(self, package="", uid="", params={}): return self.__post('set-nat-rule', package, uid, params) def delete(self, package="", uid="", params={}): return self.__post('delete-nat-rule', package, uid, params) def show_all(self, package="", params={}): payload = { 'package': package } if params: payload = 
self.__cc.merge_payloads(payload, params) return self.__cc.http_post('show-nat-rulebase', payload=payload) class NATSection: def __init__(self, core_client): self.__cc = core_client def __post(self, endpoint, package="", name="", uid="", params={}): payload = { 'package': package } if name: payload['name'] = name if uid: payload['uid'] = uid if params: payload = self.__cc.merge_payloads(payload, params) return self.__cc.http_post(endpoint, payload=payload) def add(self, package="", position="", params={}): payload = { 'package': package, 'position': position } if params: payload = self.__cc.merge_payloads(payload, params) return self.__cc.http_post('add-nat-section', payload=payload)
Apache License 2.0
eykamp/thingsboard_api_tools
thingsboard_api_tools/__init__.py
TbApi.delete_customer_by_id
python
def delete_customer_by_id(self, id): return self.delete(f"/api/customer/{id}", f"Error deleting customer '{id}'")
Returns True if successful, False if the customer wasn't found
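A hedged usage sketch (not from the source); the ThingsBoard URL and credentials are placeholders, and the nested id layout follows the usual ThingsBoard REST convention rather than anything shown in the record.

# Usage sketch; URL and credentials are placeholders.
from thingsboard_api_tools import TbApi

tbapi = TbApi("https://thingsboard.example.com", "tenant@example.com", "secret")
customer = tbapi.get_customer_by_name("Example Customer")
if customer is not None:
    # ThingsBoard entities carry nested ids; the ["id"]["id"] layout is an assumption here.
    deleted = tbapi.delete_customer_by_id(customer["id"]["id"])
    print("customer deleted:", deleted)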
https://github.com/eykamp/thingsboard_api_tools/blob/78d91111e648b4443c8355be6b0b1270ce764415/thingsboard_api_tools/__init__.py#L195-L199
import json import requests import time from http import HTTPStatus class TbApi: def __init__(self, url, username, password, token_timeout=600): self.mothership_url = url self.username = username self.password = password self.token_timeout = token_timeout self.token_time = 0 self.token = None self.verbose = False def get_token(self): if self.token is not None and time.time() - self.token_time < self.token_timeout: return self.token data = '{"username":"' + self.username + '", "password":"' + self.password + '"}' headers = {"Accept": "application/json", "Content-Type": "application/json"} url = self.mothership_url + "/api/auth/login" response = requests.post(url, data=data, headers=headers) self.validate_response(response, "Error requesting token") self.token = json.loads(response.text)["token"] self.token_time = time.time() return self.token def get_users(self): return self.get("/api/customers?limit=99999", "Error retrieving customers")["data"] def get_customer(self, name): customers = self.get(f"/api/customers?limit=99999&textSearch={name}", f"Can't find customer with name '{name}'") for customer in customers["data"]: if(customer["title"] == name): return customer return None def get_tenant_assets(self): return self.get("/api/tenant/assets?limit=99999", "Error retrieving assets for tenant")["data"] def get_tenant_devices(self): return self.get("/api/tenant/devices?limit=99999", "Error retrieving devices for tenant")["data"] def get_customer_devices(self, cust): cust_id = self.get_id(cust) return self.get(f"/api/customer/{cust_id}/devices?limit=99999", f"Error retrieving devices for customer '{cust_id}'")["data"] def get_public_user_id(self): return self.get_user_uuid("Public") def get_user_uuid(self, name): return self.get_id(self.get_customer(name)) def get_customer_by_id(self, cust_id): return self.get(f"/api/customer/{cust_id}", f"Could not retrieve customer with id '{cust_id}'") def get_customers_by_name(self, cust_name_prefix): return self.get(f"/api/customers?limit=99999&textSearch={cust_name_prefix}", f"Error retrieving customers with names starting with '{cust_name_prefix}'")["data"] def get_customer_by_name(self, cust_name): custs = self.get_customers_by_name(cust_name) for cust in custs: if cust["title"] == cust_name: return cust return None def update_customer(self, cust, name=None, address=None, address2=None, city=None, state=None, zip=None, country=None, email=None, phone=None, additional_info=None): if isinstance(cust, str): cust = self.get_customer_by_id(cust) if name is not None: cust["title"] = name if address is not None: cust["address"] = address if address2 is not None: cust["address2"] = address2 if city is not None: cust["city"] = city if state is not None: cust["state"] = state if zip is not None: cust["zip"] = zip if country is not None: cust["country"] = country if email is not None: cust["email"] = email if phone is not None: cust["phone"] = phone if additional_info is not None: cust["additionalInfo"] = additional_info return self.post("/api/customer", cust, "Error updating customer") def add_customer(self, name, address, address2, city, state, zip, country, email, phone, additional_info=None): data = { "title": name, "address": address, "address2": address2, "city": city, "state": state, "zip": zip, "country": country, "email": email, "phone": phone } if additional_info is not None: data["additionalInfo"] = additional_info return self.post("/api/customer", data, f"Error adding customer '{name}'")
MIT License
okpy/ok-client
client/api/assignment.py
Assignment.set_args
python
def set_args(self, **kwargs): self.cmd_args.update(**kwargs)
Set command-line arguments programmatically. For example: assignment.set_args( server='http://localhost:5000', no_browser=True, backup=True, timeout=60, )
https://github.com/okpy/ok-client/blob/3c5eca17100eed808023a815654cfe1c95179080/client/api/assignment.py#L286-L296
import uuid from datetime import timedelta import requests from client import exceptions as ex from client.sources.common import core from client.utils import auth, format, encryption from client.protocols.grading import grade from client.cli.common import messages import client import collections import glob import importlib import json import logging import os import textwrap from client.utils.printer import print_success, print_error, print_warning log = logging.getLogger(__name__) CONFIG_EXTENSION = '*.ok' def load_assignment(filepath=None, cmd_args=None): config = _get_config(filepath) if not isinstance(config, dict): raise ex.LoadingException('Config should be a dictionary') if cmd_args is None: cmd_args = Settings() return Assignment(cmd_args, **config) def _get_config(config): if config is None: configs = glob.glob(CONFIG_EXTENSION) if len(configs) > 1: raise ex.LoadingException('\n'.join([ 'Multiple .ok files found:', ' ' + ' '.join(configs), "Please specify a particular assignment's config file with", ' python3 ok --config <config file>' ])) elif not configs: raise ex.LoadingException('No .ok configuration file found') config = configs[0] elif not os.path.isfile(config): raise ex.LoadingException( 'Could not find config file: {}'.format(config)) try: with open(config, 'r') as f: result = json.load(f, object_pairs_hook=collections.OrderedDict) except IOError: raise ex.LoadingException('Error loading config: {}'.format(config)) except ValueError: raise ex.LoadingException( '{0} is a malformed .ok configuration file. ' 'Please re-download {0}.'.format(config)) else: log.info('Loaded config from {}'.format(config)) return result class Assignment(core.Serializable): name = core.String() endpoint = core.String(optional=True, default='') decryption_keypage = core.String(optional=True, default='') src = core.List(type=str, optional=True) tests = core.Dict(keys=str, values=str, ordered=True) default_tests = core.List(type=str, optional=True) protocols = core.List(type=str, optional=True) def grade(self, question, env=None, skip_locked_cases=False): if env is None: import __main__ env = __main__.__dict__ messages = {} tests = self._resolve_specified_tests([question], all_tests=False) for test in tests: try: for suite in test.suites: suite.skip_locked_cases = skip_locked_cases suite.console.skip_locked_cases = skip_locked_cases suite.console.hash_key = self.name except AttributeError: pass test_name = tests[0].name grade(tests, messages, env) return messages['grading'][test_name] def generate_encryption_key(self, keys_file): data = [(filename, encryption.generate_key()) for filename in self._get_files()] with open(keys_file, "w") as f: json.dump(data, f) def encrypt(self, keys_file, padding): with open(keys_file) as f: keys = dict(json.load(f)) for file in self._get_files(): if file in keys: self._encrypt_file(file, keys[file], padding) def decrypt(self, keys): decrypted_files, undecrypted_files = self.attempt_decryption(keys) if not undecrypted_files + decrypted_files: print_success("All files are decrypted") elif undecrypted_files: if keys: print_error("Unable to decrypt some files with the keys", ", ".join(keys)) else: print_error("No keys found, could not decrypt any files") print_error(" Non-decrypted files:", *undecrypted_files) def attempt_decryption(self, keys): if self.decryption_keypage: try: response = requests.get(self.decryption_keypage) response.raise_for_status() keys_data = response.content.decode('utf-8') keys = keys + encryption.get_keys(keys_data) except Exception as e: 
print_error( "Could not load decryption page {}: {}.".format(self.decryption_keypage, e)) print_error("You can pass in a key directly by running python3 ok --decrypt [KEY]") decrypted_files = [] undecrypted_files = [] for file in self._get_files(): with open(file) as f: if not encryption.is_encrypted(f.read()): continue for key in keys: success = self._decrypt_file(file, key) if success: decrypted_files.append(file) break else: undecrypted_files.append(file) return decrypted_files, undecrypted_files def _decrypt_file(self, path, key): success = False def decrypt(ciphertext): if not encryption.is_encrypted(ciphertext): return ciphertext try: plaintext = encryption.decrypt(ciphertext, key) nonlocal success success = True print_success("decrypted", path, "with", key) return plaintext except encryption.InvalidKeyException: return ciphertext self._in_place_edit(path, decrypt) return success def _encrypt_file(self, path, key, padding): def encrypt(data): if encryption.is_encrypted(data): try: data = encryption.decrypt(data, key) except encryption.InvalidKeyException: raise ValueError("Attempt to re-encrypt file with an invalid key") return encryption.encrypt(data, key, padding) self._in_place_edit(path, encrypt) @staticmethod def _in_place_edit(path, func): with open(path) as f: data = f.read() ciphertext = func(data) temporary_file = "." + uuid.uuid4().hex with open(temporary_file, "w") as f: f.write(ciphertext) os.replace(temporary_file, path) def _get_files(self): tests = [file for k, v in self.tests.items() for file in glob.glob(k) if v == 'ok_test' or v == 'scheme_test'] src = list(self.src) return sorted(set(tests + src)) @property def server_url(self): scheme = 'http' if self.cmd_args.insecure else 'https' return '{}://{}'.format(scheme, self.cmd_args.server) _TESTS_PACKAGE = 'client.sources' _PROTOCOL_PACKAGE = 'client.protocols' _PROTOCOLS = [ "testing", "file_contents", "grading", "analytics", "autostyle", "collaborate", "hinting", "lock", "scoring", "unlock", "trace", "backup", ] def __init__(self, args, **fields): self.cmd_args = args self.test_map = collections.OrderedDict() self.protocol_map = collections.OrderedDict() def post_instantiation(self): self._print_header() self._load_tests() self._load_protocols() self.specified_tests = self._resolve_specified_tests( self.cmd_args.question, self.cmd_args.all)
Apache License 2.0
nii-cloud/dodai-compute
nova/api/ec2/admin.py
AdminController.generate_x509_for_user
python
def generate_x509_for_user(self, context, name, project=None, **kwargs): if project is None: project = name project = manager.AuthManager().get_project(project) user = manager.AuthManager().get_user(name) msg = _("Getting x509 for user: %(name)s" " on project: %(project)s") % locals() LOG.audit(msg, context=context) return user_dict(user, base64.b64encode(project.get_credentials(user)))
Generates and returns an x509 certificate for a single user. Is usually called from a client that will wrap this with access and secret key info, and return a zip file.
https://github.com/nii-cloud/dodai-compute/blob/d9bea632913c0ddc6f59c6120f60daea369d09cc/nova/api/ec2/admin.py#L196-L208
import base64 import netaddr import urllib from nova import compute from nova import db from nova import exception from nova import flags from nova import log as logging from nova import utils from nova.api.ec2 import ec2utils from nova.auth import manager from nova.compute import vm_states FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.ec2.admin') def user_dict(user, base64_file=None): if user: return { 'username': user.id, 'accesskey': user.access, 'secretkey': user.secret, 'file': base64_file} else: return {} def project_dict(project): if project: return { 'projectname': project.id, 'project_manager_id': project.project_manager_id, 'description': project.description} else: return {} def host_dict(host, compute_service, instances, volume_service, volumes, now): rv = {'hostname': host, 'instance_count': len(instances), 'volume_count': len(volumes)} if compute_service: latest = compute_service['updated_at'] or compute_service['created_at'] delta = now - latest if delta.seconds <= FLAGS.service_down_time: rv['compute'] = 'up' else: rv['compute'] = 'down' if volume_service: latest = volume_service['updated_at'] or volume_service['created_at'] delta = now - latest if delta.seconds <= FLAGS.service_down_time: rv['volume'] = 'up' else: rv['volume'] = 'down' return rv def instance_dict(inst): return {'name': inst['name'], 'memory_mb': inst['memory_mb'], 'vcpus': inst['vcpus'], 'disk_gb': inst['local_gb'], 'flavor_id': inst['flavorid']} def vpn_dict(project, vpn_instance): rv = {'project_id': project.id, 'public_ip': project.vpn_ip, 'public_port': project.vpn_port} if vpn_instance: rv['instance_id'] = ec2utils.id_to_ec2_id(vpn_instance['id']) rv['created_at'] = utils.isotime(vpn_instance['created_at']) address = vpn_instance.get('fixed_ip', None) if address: rv['internal_ip'] = address['address'] if project.vpn_ip and project.vpn_port: if utils.vpn_ping(project.vpn_ip, project.vpn_port): rv['state'] = 'running' else: rv['state'] = 'down' else: rv['state'] = 'down - invalid project vpn config' else: rv['state'] = 'pending' return rv class AdminController(object): def __str__(self): return 'AdminController' def __init__(self): self.compute_api = compute.API() def describe_instance_types(self, context, **_kwargs): return {'instanceTypeSet': [instance_dict(v) for v in db.instance_type_get_all(context).values()]} def describe_user(self, _context, name, **_kwargs): return user_dict(manager.AuthManager().get_user(name)) def describe_users(self, _context, **_kwargs): return {'userSet': [user_dict(u) for u in manager.AuthManager().get_users()]} def register_user(self, context, name, **_kwargs): LOG.audit(_("Creating new user: %s"), name, context=context) return user_dict(manager.AuthManager().create_user(name)) def deregister_user(self, context, name, **_kwargs): LOG.audit(_("Deleting user: %s"), name, context=context) manager.AuthManager().delete_user(name) return True def describe_roles(self, context, project_roles=True, **kwargs): roles = manager.AuthManager().get_roles(project_roles) return {'roles': [{'role': r} for r in roles]} def describe_user_roles(self, context, user, project=None, **kwargs): roles = manager.AuthManager().get_user_roles(user, project=project) return {'roles': [{'role': r} for r in roles]} def modify_user_role(self, context, user, role, project=None, operation='add', **kwargs): if operation == 'add': if project: msg = _("Adding role %(role)s to user %(user)s" " for project %(project)s") % locals() LOG.audit(msg, context=context) else: msg = _("Adding sitewide role %(role)s 
to" " user %(user)s") % locals() LOG.audit(msg, context=context) manager.AuthManager().add_role(user, role, project) elif operation == 'remove': if project: msg = _("Removing role %(role)s from user %(user)s" " for project %(project)s") % locals() LOG.audit(msg, context=context) else: msg = _("Removing sitewide role %(role)s" " from user %(user)s") % locals() LOG.audit(msg, context=context) manager.AuthManager().remove_role(user, role, project) else: raise exception.ApiError(_('operation must be add or remove')) return True
Apache License 2.0
algorhythms/hackerrankalgorithms
Cut the sticks.py
Solution.solve
python
def solve(self, cipher): N, A = cipher A.sort() result = [] while A: result.append(len(A)) A = map(lambda x: x - A[0], A) A = filter(lambda x: x > 0, A) return "\n".join(map(str, result))
main solution function :param cipher: the cipher
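A worked example (not from the source) using the well-known HackerRank sample input; note the solution relies on Python 2 semantics, where map and filter return lists.

# Worked example: 6 sticks of lengths 5 4 4 2 2 8, cut by the shortest remaining stick each round.
cipher = (6, [5, 4, 4, 2, 2, 8])
print(Solution().solve(cipher))
# Prints the stick count before each cut:
# 6
# 4
# 2
# 1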
https://github.com/algorhythms/hackerrankalgorithms/blob/439bf2e31fd395d19d40f79e969153e50e5358b5/Cut the sticks.py#L13-L27
__author__ = 'Danyang' class Solution(object):
Apache License 2.0
vemel/handsdown
examples/rst_docstrings.py
RSTExample.reference
python
def reference():
This is a reference for ``RST-style`` docstrings. Check :data:`source` code to see how it works. :param my_param: Parameter example :param int typed_param: Typed parameter example :returns str: Return statement :raises ValueError: Raises example Code example:: data = { 'key': 'value', } print(data)
https://github.com/vemel/handsdown/blob/68bd0ca6b3ed738db888f18c7a0ccbc62c2d0e42/examples/rst_docstrings.py#L13-L30
class RSTExample: @staticmethod
MIT License
spiderclub/weibospider
page_get/user.py
get_newcard_by_name
python
def get_newcard_by_name(user_name): user = UserOper.get_user_by_name(user_name) if user: is_crawled = 1 else: url = NEWCARD_URL.format(quote(user_name), int(round(time.time() * 1000))) page = get_page(url) if page.strip() == '': return None, 0 uid = person.get_uid_and_samefollow_by_new_card(page) if uid == -1: return None, 0 user, is_crawled = get_profile(uid) return user, is_crawled
Get user by user_name through newcard method. Although it requires login, it is less likely to get banned since it requests without s.weibo.com. Arguments: user_name {str} -- [user's name] Returns: str, int -- [database user object, is_crawled]
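A hedged usage sketch (not from the source): it assumes the crawler's login cookies and configuration are already set up as the project requires, and the user name is a placeholder.

# Usage sketch; the user name is a placeholder and login cookies are assumed to be configured.
user, is_crawled = get_newcard_by_name('example_user')
if user is not None:
    print(user.uid, user.name, 'already crawled' if is_crawled else 'newly stored')
else:
    print('no such user found')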
https://github.com/spiderclub/weibospider/blob/e1f289871187da9e1c9096cd61984066c73625a8/page_get/user.py#L170-L194
import json import re import requests import time from urllib.parse import quote from db.models import User from logger import storage from .basic import get_page from page_parse import is_404 from config import get_samefollow_uid from db.dao import ( UserOper, SeedidsOper) from page_parse.user import ( enterprise, person, public) BASE_URL = 'http://weibo.com/p/{}{}/info?mod=pedit_more' NEWCARD_URL = 'https://www.weibo.com/aj/v6/user/newcard?ajwvr=6&name={}&type=1&callback=STK_{}39' SAMEFOLLOW_URL = 'https://weibo.com/p/100505{}/follow?relate=same_follow&amp;from=page_100505_profile&amp;wvr=6&amp;mod=bothfollow' def get_user_detail(user_id, html): user = person.get_detail(html, user_id) if user is not None: user.uid = user_id user.follows_num = person.get_friends(html) user.fans_num = person.get_fans(html) user.wb_num = person.get_status(html) return user def get_enterprise_detail(user_id, html): user = User(user_id) user.follows_num = enterprise.get_friends(html) user.fans_num = enterprise.get_fans(html) user.wb_num = enterprise.get_status(html) user.description = enterprise.get_description(html).encode('gbk', 'ignore').decode('gbk') return user def get_url_from_web(user_id): if not user_id: return None url = BASE_URL.format('100505', user_id) html = get_page(url, auth_level=1) if not is_404(html): domain = public.get_userdomain(html) if domain == '103505' or domain == '100306': url = BASE_URL.format(domain, user_id) html = get_page(url) user = get_user_detail(user_id, html) elif domain == '100505': user = get_user_detail(user_id, html) samefollow_uid = get_samefollow_uid() if samefollow_uid.strip() != '': samefollow_uid = samefollow_uid.split(',') url = SAMEFOLLOW_URL.format(user_id) isFanHtml = get_page(url, auth_level=2) person.get_isFan(isFanHtml, samefollow_uid, user_id) else: user = get_enterprise_detail(user_id, html) if user is None: return None user.name = public.get_username(html) user.head_img = public.get_headimg(html) user.verify_type = public.get_verifytype(html) user.verify_info = public.get_verifyreason(html, user.verify_type) user.level = public.get_level(html) if user.name: UserOper.add_one(user) storage.info('Has stored user {id} info successfully'.format(id=user_id)) return user else: return None else: return None def get_profile(user_id): user = UserOper.get_user_by_uid(user_id) if user: storage.info('user {id} has already crawled'.format(id=user_id)) SeedidsOper.set_seed_crawled(user_id, 1) is_crawled = 1 else: user = get_url_from_web(user_id) if user is not None: SeedidsOper.set_seed_crawled(user_id, 1) else: SeedidsOper.set_seed_crawled(user_id, 2) is_crawled = 0 return user, is_crawled def get_user_profile(user_id): user = UserOper.get_user_by_uid(user_id) if user: storage.info('user {id} has already crawled'.format(id=user_id)) else: user = get_url_from_web(user_id) return user def get_fans_or_followers_ids(user_id, crawl_type, verify_type): if crawl_type == 1 and verify_type == 1: fans_or_follows_url = 'http://weibo.com/p/100505{}/follow?relate=fans&page={}#Pl_Official_HisRelation__60' elif crawl_type == 2 and verify_type == 1: fans_or_follows_url = 'http://weibo.com/p/100505{}/follow?page={}#Pl_Official_HisRelation__60' elif crawl_type == 1 and verify_type == 2: fans_or_follows_url = 'http://weibo.com/p/100606{}/follow?relate=fans&page={}#Pl_Official_HisRelation__47' elif crawl_type == 2 and verify_type == 2: fans_or_follows_url = 'http://weibo.com/p/100606{}/follow?page={}#Pl_Official_HisRelation__47' cur_page = 1 max_page = 6 user_ids = list() while cur_page 
< max_page: url = fans_or_follows_url.format(user_id, cur_page) page = get_page(url) if cur_page == 1: urls_length = public.get_max_crawl_pages(page) if max_page > urls_length: max_page = urls_length + 1 user_ids.extend(public.get_fans_or_follows(page, user_id, crawl_type)) cur_page += 1 return user_ids
MIT License
peterdsharpe/aerosandbox
aerosandbox/library/propulsion_propeller.py
mass_hpa_propeller
python
def mass_hpa_propeller( diameter, max_power, include_variable_pitch_mechanism=False ): mass_propeller = ( 0.495 * (diameter / 1.25) ** 1.6 * np.softmax(0.6, max_power / 14914, hardness=5) ** 2 ) mass_variable_pitch_mech = 216.8 / 800 * mass_propeller if include_variable_pitch_mechanism: mass_propeller += mass_variable_pitch_mech return mass_propeller
Returns the estimated mass of a propeller assembly for low-disc-loading applications (human powered airplane, paramotor, etc.) :param diameter: diameter of the propeller [m] :param max_power: maximum power of the propeller [W] :param include_variable_pitch_mechanism: boolean, does this propeller have a variable pitch mechanism? :return: estimated weight [kg]
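A short worked call (not from the source); the diameter and power values are purely illustrative.

# Illustrative call: a 3 m propeller sized for roughly 300 W of shaft power.
mass = mass_hpa_propeller(diameter=3.0, max_power=300)
print("estimated propeller mass: %.3f kg" % mass)

# The variable-pitch mechanism adds a fixed fraction of the bare propeller mass.
mass_vp = mass_hpa_propeller(diameter=3.0, max_power=300, include_variable_pitch_mechanism=True)
print("with variable pitch mechanism: %.3f kg" % mass_vp)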
https://github.com/peterdsharpe/aerosandbox/blob/8fbf9449cba2f02e14424690ba2e34b438f21c69/aerosandbox/library/propulsion_propeller.py#L31-L57
import aerosandbox.numpy as np def propeller_shaft_power_from_thrust( thrust_force, area_propulsive, airspeed, rho, propeller_coefficient_of_performance=0.8, ): return 0.5 * thrust_force * airspeed * ( np.sqrt( thrust_force / (area_propulsive * airspeed ** 2 * rho / 2) + 1 ) + 1 ) / propeller_coefficient_of_performance
MIT License
napari/napari
napari/_vispy/experimental/vispy_tiled_image_layer.py
VispyTiledImageLayer._on_poll
python
def _on_poll(self, event=None) -> None: super()._on_poll() num_remaining = self._update_view() need_polling = num_remaining > 0 event.handled = need_polling
Called before we are drawn. This is called when the camera moves, or when we have chunks that need to be loaded. We update which tiles we are drawing based on which chunks are currently drawable.
https://github.com/napari/napari/blob/c4c987c880fe125da608edf427767eafe7f2b3f4/napari/_vispy/experimental/vispy_tiled_image_layer.py#L140-L160
from __future__ import annotations import logging from dataclasses import dataclass from typing import TYPE_CHECKING, List from vispy.scene.visuals import create_visual_node from ...utils.events import EmitterGroup from ...utils.perf import block_timer from ..layers.image import VispyImageLayer from .tile_grid import TileGrid from .tiled_image_visual import TiledImageVisual if TYPE_CHECKING: from ...layers.image.experimental import OctreeChunk from ...layers.image.image import Image TiledImageNode = create_visual_node(TiledImageVisual) LOGGER = logging.getLogger("napari.octree.visual") @dataclass class ChunkStats: drawable: int = 0 start: int = 0 remaining: int = 0 low: int = 0 final: int = 0 @property def deleted(self) -> int: return self.start - self.low @property def created(self) -> int: return self.final - self.low class VispyTiledImageLayer(VispyImageLayer): def __init__(self, layer: Image): visual = TiledImageNode( tile_shape=layer.tile_shape, image_converter=layer._raw_to_displayed, ) super().__init__(layer, visual) self.events = EmitterGroup(source=self, auto_connect=True, loaded=None) self.grid = TileGrid(self.node) self.layer.events.loaded.connect(self._on_loaded) @property def num_tiles(self) -> int: return self.node.num_tiles def set_data(self, node, data) -> None: raise NotImplementedError() def _update_tile_shape(self) -> None: tile_shape = self.layer.tile_shape if self.node.tile_shape != tile_shape: self.node.set_tile_shape(tile_shape)
BSD 3-Clause New or Revised License
phac-nml/sistr_cmd
sistr/src/parsers.py
fasta_format_check
python
def fasta_format_check(fasta_path, logger): header_count = 0 line_count = 1 nt_count = 0 with open(fasta_path) as f: for l in f: l = l.strip() if l == '': continue if l[0] == '>': header_count += 1 continue if header_count == 0 and l[0] != '>': error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' .format(line_count=line_count) logger.error(error_msg) raise Exception(error_msg) non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES if len(non_nucleotide_chars_in_line) > 0: error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' .format(line=line_count, non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line])) logger.error(error_msg) raise Exception(error_msg) nt_count += len(l) line_count += 1 if nt_count == 0: error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path) logger.error(error_msg) raise Exception(error_msg) logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count))
Check that a file is valid FASTA format. - First non-blank line needs to begin with a '>' header character. - Sequence can only contain valid IUPAC nucleotide characters Args: fasta_path (str): Path to the FASTA file to check logger: logging instance for error reporting Raises: Exception: If invalid FASTA format
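A minimal usage sketch (not from the source); the file path and logger name are placeholders.

# Minimal usage sketch; the path is a placeholder.
import logging
from sistr.src.parsers import fasta_format_check

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('fasta-check')
try:
    fasta_format_check('/path/to/assembly.fasta', log)
except Exception as err:
    log.error('Invalid FASTA input: %s', err)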
https://github.com/phac-nml/sistr_cmd/blob/5df7afc6d2283c7476356dc20ecb3f5114eb7211/sistr/src/parsers.py#L64-L110
VALID_NUCLEOTIDES = {'A', 'a', 'C', 'c', 'G', 'g', 'T', 't', 'R', 'r', 'Y', 'y', 'S', 's', 'W', 'w', 'K', 'k', 'M', 'm', 'B', 'b', 'D', 'd', 'H', 'h', 'V', 'v', 'N', 'n', 'X', 'x', } def parse_fasta(filepath): with open(filepath, 'r') as f: seqs = [] header = '' for line in f: line = line.strip() if line == '': continue if line[0] == '>': if header == '': header = line.replace('>','') else: yield header, ''.join(seqs) seqs = [] header = line.replace('>','') else: seqs.append(line) yield header, ''.join(seqs)
Apache License 2.0
bio2bel/bio2bel
src/bio2bel/manager/abstract_manager.py
AbstractManager.is_populated
python
def is_populated(self) -> bool:
Check if the database is already populated.
https://github.com/bio2bel/bio2bel/blob/f2c015c23e9e1f4b996716ec48f61687c5e347fe/src/bio2bel/manager/abstract_manager.py#L216-L217
import logging import os import sys from abc import ABCMeta, abstractmethod from functools import wraps from typing import List, Mapping, Type import click from more_click import verbose_option from sqlalchemy.ext.declarative.api import DeclarativeMeta from .cli_manager import CliMixin from .connection_manager import ConnectionManager from ..utils import _get_managers, clear_cache, get_data_dir __all__ = [ 'AbstractManager', 'get_bio2bel_manager_classes', ] log = logging.getLogger(__name__) class AbstractManagerMeta(ABCMeta): def __new__(mcs, name, bases, namespace, **kwargs): cls = super().__new__(mcs, name, bases, namespace, **kwargs) cls._populate_original = cls.populate @wraps(cls._populate_original) def populate_wrapped(self, *populate_args, **populate_kwargs): try: cls._populate_original(self, *populate_args, **populate_kwargs) except Exception: self._store_populate_failed() raise else: self._store_populate() cls.populate = populate_wrapped return cls class AbstractManager(ConnectionManager, CliMixin, metaclass=AbstractManagerMeta): @property @abstractmethod def _base(self) -> DeclarativeMeta: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.create_all() @abstractmethod
MIT License
rebiocoder/bioforum
venv/Lib/site-packages/PIL/ImageFile.py
ImageFile.load
python
def load(self): pixel = Image.Image.load(self) if self.tile is None: raise IOError("cannot load this image") if not self.tile: return pixel self.map = None use_mmap = self.filename and len(self.tile) == 1 use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info') readonly = 0 try: read = self.load_read use_mmap = False except AttributeError: read = self.fp.read try: seek = self.load_seek use_mmap = False except AttributeError: seek = self.fp.seek if use_mmap: decoder_name, extents, offset, args = self.tile[0] if decoder_name == "raw" and len(args) >= 3 and args[0] == self.mode and args[0] in Image._MAPMODES: try: if hasattr(Image.core, "map"): self.map = Image.core.map(self.filename) self.map.seek(offset) self.im = self.map.readimage( self.mode, self.size, args[1], args[2] ) else: import mmap with open(self.filename, "r") as fp: self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) self.im = Image.core.map_buffer( self.map, self.size, decoder_name, extents, offset, args ) readonly = 1 if self.palette: self.palette.dirty = 1 except (AttributeError, EnvironmentError, ImportError): self.map = None self.load_prepare() err_code = -3 if not self.map: self.tile.sort(key=_tilesort) try: prefix = self.tile_prefix except AttributeError: prefix = b"" for decoder_name, extents, offset, args in self.tile: decoder = Image._getdecoder(self.mode, decoder_name, args, self.decoderconfig) try: seek(offset) decoder.setimage(self.im, extents) if decoder.pulls_fd: decoder.setfd(self.fp) status, err_code = decoder.decode(b"") else: b = prefix while True: try: s = read(self.decodermaxblock) except (IndexError, struct.error): if LOAD_TRUNCATED_IMAGES: break else: raise IOError("image file is truncated") if not s: if LOAD_TRUNCATED_IMAGES: break else: self.tile = [] raise IOError("image file is truncated " "(%d bytes not processed)" % len(b)) b = b + s n, err_code = decoder.decode(b) if n < 0: break b = b[n:] finally: decoder.cleanup() self.tile = [] self.readonly = readonly self.load_end() if self._exclusive_fp and self._close_exclusive_fp_after_loading: self.fp.close() self.fp = None if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0: raise_ioerror(err_code) return Image.Image.load(self)
Load image data based on tile list
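A hedged note on typical use (not from the source): callers rarely invoke load() directly; it runs when pixel data is first needed after Image.open(), which only parses the header.

# Typical indirect use; the file name is a placeholder.
from PIL import Image

im = Image.open('photo.jpg')   # lazy: only the image header is parsed here
im.load()                      # decodes the tile list and releases the exclusive file pointer
print(im.mode, im.size)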
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/PIL/ImageFile.py#L135-L256
from . import Image from ._util import isPath import io import os import sys import struct MAXBLOCK = 65536 SAFEBLOCK = 1024*1024 LOAD_TRUNCATED_IMAGES = False ERRORS = { -1: "image buffer overrun error", -2: "decoding error", -3: "unknown error", -8: "bad configuration", -9: "out of memory error" } def raise_ioerror(error): try: message = Image.core.getcodecstatus(error) except AttributeError: message = ERRORS.get(error) if not message: message = "decoder error %d" % error raise IOError(message + " when reading image file") def _tilesort(t): return t[2] class ImageFile(Image.Image): def __init__(self, fp=None, filename=None): Image.Image.__init__(self) self._min_frame = 0 self.tile = None self.readonly = 1 self.decoderconfig = () self.decodermaxblock = MAXBLOCK if isPath(fp): self.fp = open(fp, "rb") self.filename = fp self._exclusive_fp = True else: self.fp = fp self.filename = filename self._exclusive_fp = None try: self._open() except (IndexError, TypeError, KeyError, EOFError, struct.error) as v: if self._exclusive_fp: self.fp.close() raise SyntaxError(v) if not self.mode or self.size[0] <= 0: raise SyntaxError("not identified by this driver") def draft(self, mode, size): pass def get_format_mimetype(self): if self.format is None: return return Image.MIME.get(self.format.upper()) def verify(self): if self._exclusive_fp: self.fp.close() self.fp = None
MIT License
vikifox/monster
elfinder/volumes/storage.py
ElfinderVolumeStorage._save
python
def _save(self, fp, dir_, name): tmp_file = tempfile.NamedTemporaryFile() tmp_file.write(fp.read()) fp.close() tmp_file.seek(0) path = self._join_path(dir_, name) self._options['storage'].save(path, DjangoFile(tmp_file)) tmp_file.close() return path
Create new file and write into it from file pointer. Return new file path or raise an ``Exception``.
https://github.com/vikifox/monster/blob/bac9b7da204c3eee344f55bb2187df38ef3b3d4c/elfinder/volumes/storage.py#L439-L456
import os, re, magic, time, tempfile, shutil, mimetypes try: from PIL import Image except ImportError: import Image from django.core.files.storage import FileSystemStorage from django.core.files.base import ContentFile from django.core.files import File as DjangoFile from importlib import import_module from elfinder.exceptions import NotAnImageError, ElfinderErrorMessages from base import ElfinderVolumeDriver class ElfinderVolumeStorage(ElfinderVolumeDriver): _driver_id = 's' def mount(self, opts): if "key_label" in opts['storageKwArgs'].keys(): self._key_label = opts['storageKwArgs']['key_label'] del opts['storageKwArgs']['key_label'] if not 'storage' in opts: if not 'storageClass' in opts: opts['storage'] = FileSystemStorage() else: if isinstance(opts['storageClass'], basestring): split = opts['storageClass'].split('.') storage_module = import_module('.'.join(split[:-1])) opts['storageClass'] = getattr(storage_module, split[-1]) if not 'storageKwArgs' in opts: opts['storageKwArgs'] = {} opts['storage'] = opts['storageClass'](**opts['storageKwArgs']) try: opts['storage'].listdir(self._root) opts['storage'].url(self._root) except NotImplementedError: raise Exception('Storage %s should implement both the listdir() and url() methods to be valid for use with yawd-elfinder.' % self._options['storage'].__class__) self._options['path'] = '.' if (not 'URL' in opts or not opts['URL']): self._options['URL'] = opts['storage'].url(self._root) if not 'alias' in opts or not opts['alias']: self._options['alias'] = opts['storage'].__class__.__name__ return super(ElfinderVolumeStorage, self).mount(opts) def _configure(self): if not self._isabs(self._options['tmbPath']): super(ElfinderVolumeStorage, self)._configure() if not self._options['tmbURL'] and self._options['URL']: self._options['tmbURL'] = self._options['URL'] + self._options['tmbPath'][len(self._root)+1:].replace(self._separator, '/') + '/' elif self._isabs(self._options['tmbPath']): raise Exception('tmbPath must be relative') try: self._options['storage'].delete(self.encode(str(time.time()))) except NotImplementedError: if not 'rm' in self._options['disabled']: self._options['disabled'].append('rm') except: pass if not 'rmDir' in self._options or not callable(self._options['rmDir']): if isinstance(self._options['storage'], FileSystemStorage): self._options['rmDir'] = self._rmdir_callable elif not 'rmdir' in self._options['disabled']: pass def _dirname(self, path): return self._separator.join(path.split(self._separator)[:-1]) def _basename(self, path): return path.split(self._separator)[-1] def _join_path(self, path1, path2): if self._separator == '\\' and re.match(r'([a-zA-Z]+:)?\\$', path2): return path2 elif path2.startswith(self._separator): return path2 if not path1.endswith(self._separator): return '%s%s%s' % (path1, self._separator, path2) else: return '%s%s' % (path1, path2) def _normpath(self, path): if path[-1] == self._separator: return path[:-1] return path def _get_available_name(self, dir_, name, ext, i): path = self._options['storage'].get_available_name(self._join_path(dir_, '%s%s' % (name, ext))) return self._basename(path) def _stat(self, path): stat = {} if not self._options['storage'].exists(path): raise os.error try: stat['mime'] = self.mimetype(path) try: stat['size'] = self._options['storage'].size(path) except NotImplementedError: stat['size'] = 0 except: stat['mime'] = 'directory' stat['size'] = 0 try: stat['ts'] = time.mktime(self._options['storage'].modified_time(path).timetuple()) except NotImplementedError: 
stat['ts'] = '' stat['read'] = True stat['write'] = True return stat def _subdirs(self, path): try: for entry in self._options['storage'].listdir(path)[0]: if not self._attr(self._join_path(path, entry), 'hidden'): return True except NotImplementedError: pass def _dimensions(self, path): try: im = self._openimage(path) return '%sx%s' % im.size except: raise NotAnImageError def _mimetype(self, path): file_name = str(path.split("/")[-1]).strip() if re.search(r'^\./proc/', path) or re.search(r'^\./sys/', path): if file_name in self._files: try: fp = self._fopen(path) mime = magic.Magic(mime=True).from_buffer(fp.read(10)) fp.close() return mime except: return "application/empty" if re.search(r'^\./dev/', path) and self._files[file_name] in 'l': return "application/empty" if file_name in self._files: if self._files[file_name] not in '-l': return "application/empty" fp = self._fopen(path) mime = magic.Magic(mime=True).from_buffer(fp.read(10)) fp.close() return mime def _scandir(self, path): try: all_ = self._options['storage'].listdir(path) return map(lambda x: self._join_path(path, x), all_[0]+all_[1]) except NotImplementedError: return [] def _fopen(self, path, mode='rb'): return self._options['storage'].open(path, mode) def _fclose(self, fp, **kwargs): return fp.close() def _openimage(self, path): fp = self._fopen(path) tmp_file = tempfile.TemporaryFile() tmp_file.write(fp.read()) fp.close() tmp_file.seek(0) im = Image.open(tmp_file) return im def _saveimage(self, im, path, form): tmp_file = tempfile.TemporaryFile() im.save(tmp_file, form) tmp_file.seek(0) fp = self._fopen(path, 'w+') fp.write(tmp_file.read()) tmp_file.close() fp.close() def _mkdir(self, path, mode=None): fname = '.%s-mkdir' % self.encode(path) self._mkfile(path, fname) self._unlink(self._join_path(path, fname)) return path def _mkfile(self, path, name): try: return self._options['storage'].save(self._join_path(path, name), ContentFile('')) except: raise os.error def _copy(self, source, target_dir, name): fp = self._fopen(source) tmp_file = tempfile.NamedTemporaryFile() tmp_file.write(fp.read()) fp.close() self._options['storage'].save(self._join_path(target_dir, name), DjangoFile(tmp_file)) tmp_file.close() def _move(self, source, target_dir, name): stat = self.stat(source) try: if stat['mime'] == 'directory': dest = self._join_path(target_dir, name) self._mkdir(dest) for p in self._get_cached_dir(source): self._move(p, dest, self._basename(p)) self._rmdir(source) else: self._copy(source, target_dir, name) self._unlink(source) except: raise os.error return self._join_path(target_dir, name) def _unlink(self, path): try: self._options['storage'].delete(path) return True except: return False def _rmdir(self, path): if 'rmDir' in self._options and callable(self._options['rmDir']): return self._options['rmDir'](path, self._options['storage']) raise os.error def _rmdir_callable(self, path, storage): return os.rmdir(self._join_path(storage.location, path))
Apache License 2.0
1password/ansible-onepasswordconnect-collection
plugins/module_utils/vault.py
update_item
python
def update_item(params, original_item, api_client, check_mode=False): try: vault_id = original_item["vault"]["id"] except KeyError: raise errors.MissingVaultID("Original item missing Vault ID") item_fields = fields.create( params.get("fields"), previous_fields=original_item.get("fields") ) updated_item = assemble_item( vault_id=vault_id, category=params["category"].upper(), title=params.get("name"), urls=params.get("urls"), favorite=params.get("favorite"), tags=params.get("tags"), fieldset=item_fields ) updated_item.update({ "id": original_item["id"], }) changed = recursive_diff(original_item, updated_item) if not bool(changed): original_item["fields"] = fields.flatten_fields(original_item["fields"]) return False, original_item if check_mode: updated_item["fields"] = fields.flatten_fieldset(updated_item.get("fields")) return changed, updated_item item = api_client.update_item(updated_item["vault"]["id"], item=updated_item) item["fields"] = fields.flatten_fieldset(item.get("fields")) return bool(changed), item
If Item with matching UUID or name exists, replaces all old Item properties. If Item not found, creates new Item. If the replacement Item is equal to the "original" item, no action is taken by Ansible. :param params: dict Values to replace the existing values. :param original_item The item returned by the server. Values may be copied from this item while updating. :param api_client: Connect API client :param check_mode: Whether Ansible is running in check mode. No changes saved if True. :return: (bool, dict) Where bool represents whether action modified an Item in 1Password.
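A rough sketch of how the surrounding module code might call this helper (not from the source): `api_client` and `existing_item` are assumed to be supplied by the Ansible module, and the field layout shown is illustrative only.

# Illustrative only: `api_client` and `existing_item` come from the surrounding module code.
params = {
    "name": "Database credentials",
    "category": "login",
    "fields": [
        {"label": "username", "value": "db_admin", "field_type": "string"},  # assumed field shape
    ],
    "tags": ["ansible"],
    "urls": [],
    "favorite": False,
}
# check_mode=True computes the diff without calling the 1Password Connect API.
changed, item = update_item(params, existing_item, api_client, check_mode=True)
print("would change:", changed)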
https://github.com/1password/ansible-onepasswordconnect-collection/blob/f13f6ceabe6e05010373bbec4158fa72a0fb975e/plugins/module_utils/vault.py#L74-L121
from __future__ import (absolute_import, division, print_function) __metaclass__ = type from collections import namedtuple from uuid import uuid4 from ansible.module_utils.common.dict_transformations import recursive_diff from ansible_collections.onepassword.connect.plugins.module_utils import errors, fields, const Section = namedtuple("Section", ["id", "label"]) def find_item(params, api_client): vault_id = params.get("vault_id") if not vault_id: raise errors.MissingVaultID item_name = params.get("title") item_id = params.get("uuid") try: if item_id: return api_client.get_item_by_id(vault_id, item_id) else: return api_client.get_item_by_name(vault_id, item_name) except errors.NotFoundError: return None def create_item(params, api_client, check_mode=False): vault_id = params.get("vault_id") if not vault_id: raise errors.MissingVaultID item_fields = fields.create(params.get("fields")) op_item = assemble_item( vault_id=vault_id, category=params["category"].upper(), title=params.get("name"), urls=params.get("urls"), favorite=params.get("favorite"), fieldset=item_fields, tags=params.get("tags") ) if check_mode: op_item["fields"] = fields.flatten_fieldset(op_item.get("fields")) return True, op_item new_item = api_client.create_item(params["vault_id"], item=op_item) new_item["fields"] = fields.flatten_fieldset(new_item.get("fields")) return True, new_item
MIT License
weso/cwr-dataapi
cwr/transmission.py
Transmission.trailer
python
def trailer(self): return self._trailer
The transmission trailer. This is a TransmissionTrailer. :return: the transmission trailer
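A small sketch built only from the classes visible in this record's context; the header and trailer values are placeholders.

# Sketch: wire a header and trailer into a Transmission, then read the trailer back.
header = TransmissionHeader(record_type='HDR', sender_id=1, sender_name='EXAMPLE SENDER', sender_type='PB')
trailer = TransmissionTrailer(record_type='TRL', group_count=1, transaction_count=2, record_count=10)
transmission = Transmission(header, trailer)
print(transmission.trailer)   # -> 1 groups, 2 transactions, 10 records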
https://github.com/weso/cwr-dataapi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/transmission.py#L367-L373
from cwr.record import Record __author__ = 'Bernardo Martínez Garrido' __license__ = 'MIT' __status__ = 'Development' class TransmissionHeader(Record): def __init__(self, record_type='', sender_id=0, sender_name='', sender_type='', creation_date_time=None, transmission_date=None, edi_standard='01.10', character_set='' ): super(TransmissionHeader, self).__init__(record_type) self._sender_id = sender_id self._sender_name = sender_name self._sender_type = sender_type self._creation_date_time = creation_date_time self._transmission_date = transmission_date self._edi_standard = edi_standard self._character_set = character_set def __str__(self): return '%s (%s, %s) on %s' % ( self._sender_name, self._sender_id, self._sender_type, self._transmission_date) def __repr__(self): return '<class %s>(sender_id=%r, sender_name=%r, sender_type=%r, ' 'creation_date=%r, transmission_date=%r)' % ( 'TransmissionHeader', self._sender_id, self._sender_name, self._sender_type, self._creation_date_time, self._transmission_date) @property def character_set(self): return self._character_set @character_set.setter def character_set(self, value): self._character_set = value @property def creation_date_time(self): return self._creation_date_time @creation_date_time.setter def creation_date_time(self, value): self._creation_date_time = value @property def edi_standard(self): return self._edi_standard @edi_standard.setter def edi_standard(self, value): self._edi_standard = value @property def sender_id(self): return self._sender_id @sender_id.setter def sender_id(self, value): self._sender_id = value @property def sender_name(self): return self._sender_name @sender_name.setter def sender_name(self, value): self._sender_name = value @property def sender_type(self): return self._sender_type @sender_type.setter def sender_type(self, value): self._sender_type = value @property def transmission_date(self): return self._transmission_date @transmission_date.setter def transmission_date(self, value): self._transmission_date = value class TransmissionTrailer(Record): def __init__(self, record_type='', group_count=0, transaction_count=0, record_count=0 ): super(TransmissionTrailer, self).__init__( record_type ) self._group_count = group_count self._transaction_count = transaction_count self._record_count = record_count def __str__(self): return '%s groups, %s transactions, %s records' % ( self._group_count, self._transaction_count, self._record_count) def __repr__(self): return '<class %s>(group_count=%r, transaction_count=%r, ' 'record_count=%r)' % ( 'TransmissionTrailer', self._group_count, self._transaction_count, self._record_count) @property def group_count(self): return self._group_count @group_count.setter def group_count(self, value): self._group_count = value @property def record_count(self): return self._record_count @record_count.setter def record_count(self, value): self._record_count = value @property def transaction_count(self): return self._transaction_count @transaction_count.setter def transaction_count(self, value): self._transaction_count = value class Transmission(object): def __init__(self, header, trailer, groups=None ): self._header = header self._trailer = trailer if not groups: self._groups = [] else: self._groups = groups def __str__(self): return '%s to %s [%s]' % ( self._header, self._trailer, self._groups) def __repr__(self): return '<class %s>(hdr=%r, trl=%r, groups=%r)' % ( 'Transmission', self._header, self._trailer, self._groups) @property def groups(self): return self._groups @groups.setter def 
groups(self, value): self._groups = value @property def header(self): return self._header @header.setter def header(self, value): self._header = value @property
MIT License
mpi4jax/mpi4jax
mpi4jax/_src/utils.py
to_mpi_ptr
python
def to_mpi_ptr(mpi_obj): return _np.uintp(_MPI._addressof(mpi_obj))
Returns a pointer to the underlying C MPI object
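A quick sketch (not from the source); the import path simply follows this record's function_path.

# Print the raw address of the MPI world communicator as an unsigned integer.
from mpi4py import MPI
from mpi4jax._src.utils import to_mpi_ptr

ptr = to_mpi_ptr(MPI.COMM_WORLD)
print(hex(int(ptr)))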
https://github.com/mpi4jax/mpi4jax/blob/e3ed6f00a5552099f260c6b1f68588917461403b/mpi4jax/_src/utils.py#L35-L39
import functools from mpi4py import MPI as _MPI import numpy as _np from jax.interpreters import xla from jax.lib import xla_client def default_primitive_impl(primitive): return functools.partial(xla.apply_primitive, primitive) def xla_constant_intc(c, val): return xla_client.ops.Constant(c, _np.intc(val)) def xla_constant_uintptr(c, val): return xla_client.ops.Constant(c, _np.uintp(val)) def to_mpi_handle(mpi_obj): return _np.uintp(_MPI._handleof(mpi_obj))
MIT License
kubevirt/client-python
kubevirt/models/v1_virtual_machine_instance_preset_list.py
V1VirtualMachineInstancePresetList.kind
python
def kind(self, kind): self._kind = kind
Sets the kind of this V1VirtualMachineInstancePresetList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param kind: The kind of this V1VirtualMachineInstancePresetList. :type: str
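A small sketch (not from the source); the kind string follows the usual Kubernetes convention for this list type.

# Construct an empty preset list and set its kind explicitly.
preset_list = V1VirtualMachineInstancePresetList(items=[])
preset_list.kind = 'VirtualMachineInstancePresetList'
print(preset_list.kind)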
https://github.com/kubevirt/client-python/blob/c9f9d3bc429f783076982b46b194d5f7669eab1b/kubevirt/models/v1_virtual_machine_instance_preset_list.py#L123-L132
from pprint import pformat from six import iteritems import re class V1VirtualMachineInstancePresetList(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'api_version': 'str', 'items': 'list[V1VirtualMachineInstancePreset]', 'kind': 'str', 'metadata': 'K8sIoApimachineryPkgApisMetaV1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None): self._api_version = None self._items = None self._kind = None self._metadata = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def items(self): return self._items @items.setter def items(self, items): if items is None: raise ValueError("Invalid value for `items`, must not be `None`") self._items = items @property def kind(self): return self._kind @kind.setter
Apache License 2.0
ckreibich/tracerouteparser.py
tracerouteparser.py
TracerouteParser.parse_data
python
def parse_data(self, data): self.parse_hdl(cStringIO.StringIO(data))
Parser entry point, given string of the whole traceroute output.
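A usage sketch with a short, made-up trace (not from the source); note the module imports cStringIO, so it targets Python 2.

# Sketch: parse a two-hop trace; host, IP and timings are placeholders.
sample = ("traceroute to example.com (93.184.216.34), 30 hops max, 60 byte packets\n"
          " 1  gateway (192.168.1.1)  0.724 ms  1.066 ms  1.387 ms\n"
          " 2  * * *\n")
trp = TracerouteParser()
trp.parse_data(sample)
print('%s (%s)' % (trp.dest_name, trp.dest_ip))
for hop in trp.hops:
    print(str(hop))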
https://github.com/ckreibich/tracerouteparser.py/blob/cda86c241941e99cdb3e85f1134a6814952c71ba/tracerouteparser.py#L110-L112
import cStringIO import re class Probe(object): def __init__(self): self.ipaddr = None self.name = None self.rtt = None self.anno = None def clone(self): copy = Probe() copy.ipaddr = self.ipaddr copy.name = self.name return copy class Hop(object): def __init__(self): self.idx = None self.probes = [] def add_probe(self, probe): self.probes.append(probe) def __str__(self): res = [] last_probe = None for probe in self.probes: if probe.name is None: res.append('*') continue anno = '' if probe.anno is None else ' ' + probe.anno if last_probe is None or last_probe.name != probe.name: res.append('%s (%s) %1.3f ms%s' % (probe.name, probe.ipaddr, probe.rtt, anno)) else: res.append('%1.3f ms%s' % (probe.rtt, anno)) last_probe = probe return ' '.join(res) class TracerouteParser(object): HEADER_RE = re.compile(r'traceroute to (\S+) \((\d+\.\d+\.\d+\.\d+)\)') def __init__(self): self.dest_ip = None self.dest_name = None self.hops = [] def __str__(self): res = ['traceroute to %s (%s)' % (self.dest_name, self.dest_ip) ] ctr = 1 for hop in self.hops: res.append('%2d %s' % (ctr, str(hop))) ctr += 1 return '\n'.join(res)
BSD 2-Clause Simplified License
theislab/sfaira
sfaira/data/store/single_store.py
DistributedStoreSingleFeatureSpace.organism
python
def organism(self): organisms = np.unique(list(self.organisms_by_key.values())) assert len(organisms) == 1, organisms return organisms[0]
Organism of store.
https://github.com/theislab/sfaira/blob/9590015acbc4f84454d3d75ff03b191c2472a219/sfaira/data/store/single_store.py#L102-L108
import abc import anndata import dask.array import dask.dataframe import numpy as np import os import pandas as pd import pickle import scipy.sparse from typing import Dict, List, Tuple, Union from sfaira.consts import AdataIdsSfaira, OCS from sfaira.data.dataloaders.base.utils import is_child, UNS_STRING_META_IN_OBS from sfaira.data.store.base import DistributedStoreBase from sfaira.data.store.generators import GeneratorAnndata, GeneratorDask, GeneratorSingle from sfaira.versions.genomes.genomes import GenomeContainer def _process_batch_size(batch_size: int, retrival_batch_size: int) -> Tuple[int, int]: if batch_size != 1: raise ValueError("batch size is only supported as 1") return batch_size, retrival_batch_size class DistributedStoreSingleFeatureSpace(DistributedStoreBase): _adata_by_key: Dict[str, anndata.AnnData] _indices: Dict[str, np.ndarray] _obs_by_key: Union[None, Dict[str, dask.dataframe.DataFrame]] data_source: str def __init__(self, adata_by_key: Dict[str, anndata.AnnData], indices: Dict[str, np.ndarray], obs_by_key: Union[None, Dict[str, dask.dataframe.DataFrame]] = None, data_source: str = "X"): self.adata_by_key = adata_by_key self.indices = indices self.obs_by_key = obs_by_key self.ontology_container = OCS self._genome_container = None self._adata_ids_sfaira = AdataIdsSfaira() self.data_source = data_source self._celltype_universe = None @property def idx(self) -> np.ndarray: idx_global = np.arange(0, np.sum([len(v) for v in self.indices.values()])) return idx_global @property def organisms_by_key(self) -> Dict[str, str]: ks = self.indices.keys() organisms = [self._adata_by_key[k].uns[self._adata_ids_sfaira.organism] for k in ks] organisms = [x[0] if (isinstance(x, list) or isinstance(x, tuple)) else x for x in organisms] return dict(list(zip(ks, organisms))) @property
BSD 3-Clause New or Revised License
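A self-contained sketch of the single-organism check performed by the organism property above; the dict below is a stand-in for organisms_by_key, since constructing a real store is out of scope here:

import numpy as np

# Stand-in for DistributedStoreSingleFeatureSpace.organisms_by_key.
organisms_by_key = {"dataset_a": "Homo sapiens", "dataset_b": "Homo sapiens"}

organisms = np.unique(list(organisms_by_key.values()))
assert len(organisms) == 1, organisms  # a mixed-organism store raises AssertionError
print(organisms[0])                    # -> Homo sapiens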
google-research/morph-net
morph_net/framework/op_regularizer_manager.py
OpRegularizerManager._slice_op_slice
python
def _slice_op_slice(self, op_slice, sizes, size_index, size_count,
                    new_op_slice_group):
    op = op_slice.op
    op_slices = self.get_op_slices(op)
    op_slice_sizes = op_handler_util.get_op_slice_sizes([op_slices])[0]
    op_slice_index = op_slices.index(op_slice)

    if op_slice in self._op_group_dict:
        del self._op_group_dict[op_slice]

    op_slice_sizes.pop(op_slice_index)
    is_resliced = [False] * len(op_slice_sizes)
    for i in range(size_count):
        op_slice_sizes.insert(op_slice_index + i, sizes[size_index + i])
        is_resliced.insert(op_slice_index + i, True)

    is_source = self._get_source_slices(op_slice_sizes, op_slices)
    slices = self._slice_op_with_sizes(op, op_slice_sizes, is_source,
                                       is_resliced)

    for i in range(size_count):
        new_op_slice_group[i].append(slices[op_slice_index + i])
Slices an OpSlice according to new sizes.

During reslicing, any OpSlice of an op could be resliced. Given the new sizes,
this method finds the index where the old OpSlice matches, and reslices the
OpSlice according to the new sizes. The new OpSlice are added to
new_op_slice_group by index, so that matching OpSlice can be grouped together
later.

Args:
  op_slice: OpSlice that should be sliced.
  sizes: List of integers specifying the new slice sizes.
  size_index: Integer specifying which index in sizes corresponds to op_slice.
  size_count: Integer specifying how many slices op_slice will be sliced into.
  new_op_slice_group: List of list of new OpSlice that should be grouped
    together.
https://github.com/google-research/morph-net/blob/49c5679e03c79e56ac013c7b62a88e5d893b9d14/morph_net/framework/op_regularizer_manager.py#L467-L515
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from morph_net.framework import concat_and_slice_regularizers from morph_net.framework import constant_op_regularizer from morph_net.framework import grouping_regularizers from morph_net.framework import op_handler_util import tensorflow.compat.v1 as tf from typing import List ITERATION_LIMIT = 1000000 class OpSlice(collections.namedtuple('OpSlice', ['op', 'slice'])): def __str__(self): return '{} {}'.format(self.op.name, self.slice) __repr__ = __str__ class Slice(collections.namedtuple('Slice', ['start_index', 'size'])): def __str__(self): return '({}, {})'.format(self.start_index, self.size) __repr__ = __str__ class OpRegularizerManager(object): def __init__( self, output_boundary: List[tf.Operation], op_handler_dict=None, create_grouping_regularizer=grouping_regularizers.MaxGroupingRegularizer, force_group=None, regularizer_blacklist=None, input_boundary: List[tf.Operation] = None, iteration_limit=ITERATION_LIMIT): self._op_slice_dict = {} self._op_group_dict = {} self._op_handler_dict = op_handler_dict or {} self._op_regularizer_dict = {} self._op_deque = collections.deque() self._all_ops = set() tf.logging.info('OpRegularizerManager starting analysis from: %s.', output_boundary) self._dfs_for_source_ops(output_boundary, input_boundary) tf.logging.info('OpRegularizerManager found %d ops and %d sources.', len(self._all_ops), len(self._op_deque)) iteration_count = 0 while self._op_deque and iteration_count < iteration_limit: op = self._op_deque.pop() self._op_handler_dict[op.type].assign_grouping(op, self) iteration_count += 1 if iteration_count >= iteration_limit: raise RuntimeError('OpRegularizerManager could not handle ops: %s' % ['%s (%s)' % (o.name, o.type) for o in self._op_deque]) force_group = force_group or [] if not isinstance(force_group, list): raise TypeError('force_group must be a list of regex.') self._force_group_ops(force_group) blacklist_regex = '' if regularizer_blacklist: if not isinstance(regularizer_blacklist, list): raise TypeError('regularizer_blacklist must be a list of regex.') blacklist_regex = '|'.join(regularizer_blacklist) groups = set(self._op_group_dict.values()) blacklist_used = False for group in groups: source_op_slices = [] regularizers = [] if op_handler_util.group_match(blacklist_regex, group.op_slices): tf.logging.info('OpGroup not regularized due to blacklist: %s.', group.op_slices) blacklist_used = True else: for source_op_slice in group.source_op_slices: handler = self._op_handler_dict[source_op_slice.op.type] source_op_slices.append(source_op_slice) regularizers.append(handler.create_regularizer(source_op_slice)) if regularizers: if len(regularizers) > 1: group_regularizer = create_grouping_regularizer(regularizers) else: group_regularizer = regularizers[0] else: group_regularizer = None for op_slice in group.op_slices: self._op_regularizer_dict[op_slice] = group_regularizer tf.logging.info('Source OpSlice %s for OpGroup: %s.', source_op_slices, group.op_slices) if blacklist_regex and not blacklist_used: raise ValueError('Blacklist regex never used: \'%s\'.' 
% blacklist_regex) tf.logging.info('OpRegularizerManager regularizing %d groups.', len(set(self._op_group_dict.values()))) self._all_ops = set(self._op_slice_dict.keys()) @property def ops(self): return self._all_ops def get_regularizer(self, op): op_slices = self.get_op_slices(op) regularizers = [ self._op_regularizer_dict.get(op_slice) for op_slice in op_slices ] if not any(regularizers): return None regularizers = [] for op_slice in op_slices: regularizer = self._op_regularizer_dict.get(op_slice) if regularizer is None: regularizer = constant_op_regularizer.ConstantOpRegularizer( op_slice.slice.size) self._op_regularizer_dict[op_slice] = regularizer regularizers.append(regularizer) if len(regularizers) == 1: return regularizers[0] else: return concat_and_slice_regularizers.ConcatRegularizer(regularizers) def create_op_group_for_op_slice(self, op_slice, is_source=True): omit_source_op_slices = [] if is_source else [op_slice] op_group = OpGroup(op_slice, omit_source_op_slices=omit_source_op_slices) self._op_group_dict[op_slice] = op_group return self.get_op_group(op_slice) def group_op_slices(self, op_slices, omit_source_op_slices=None): existing_op_groups = [] for op_slice in op_slices: op_group = self.get_op_group(op_slice) if op_group and op_group not in existing_op_groups: existing_op_groups.append(op_group) op_slices_to_update = [ os for og in existing_op_groups for os in og.op_slices ] for op_slice in op_slices: if op_slice not in op_slices_to_update: temp_op_group = self.create_op_group_for_op_slice( op_slice, is_source=self.is_source_op(op_slice.op)) existing_op_groups.append(temp_op_group) op_slices_to_update.append(op_slice) new_op_group = OpGroup( op_groups=existing_op_groups, omit_source_op_slices=omit_source_op_slices) for op_slice in op_slices_to_update: self._op_group_dict[op_slice] = new_op_group def slice_op(self, op, sizes): old_op_slices = self.get_op_slices(op) old_op_slice_sizes = op_handler_util.get_op_slice_sizes([old_op_slices])[0] if old_op_slice_sizes == sizes: return try: aligned_op_slice_sizes = op_handler_util.get_aligned_sizes( [old_op_slice_sizes, sizes]) except ValueError as e: raise ValueError('Error with op: %s: %s' % (op.name, e.args[0])) if sizes != aligned_op_slice_sizes: raise ValueError('Cannot slice op %s from sizes %s to %s' % (op.name, old_op_slice_sizes, sizes)) old_slice_index = 0 new_slice_index = 0 new_slice_count = 1 while (new_slice_index + new_slice_count <= len(aligned_op_slice_sizes) and old_slice_index < len(old_op_slice_sizes)): old_size = old_op_slice_sizes[old_slice_index] new_size = op_handler_util.get_total_slice_size(sizes, new_slice_index, new_slice_count) if old_size == new_size: if new_slice_count > 1: op_group = self.get_op_group(old_op_slices[old_slice_index]) if op_group: group_op_slices = op_group.op_slices else: group_op_slices = [old_op_slices[old_slice_index]] new_op_slice_group = [list() for _ in range(new_slice_count)] for group_op_slice in group_op_slices: self._slice_op_slice(group_op_slice, sizes, new_slice_index, new_slice_count, new_op_slice_group) if op_group: for i in range(new_slice_count): self.group_op_slices(new_op_slice_group[i]) old_slice_index += 1 new_slice_index += new_slice_count new_slice_count = 1 else: new_slice_count += 1 def process_ops(self, ops): new_ops = [ op for op in ops if op not in self._op_deque and op in self._all_ops ] self._op_deque.extend(new_ops) def process_ops_last(self, ops): new_ops = [op for op in ops if op not in self._op_deque] self._op_deque.extendleft(new_ops) def 
is_source_op(self, op): op_handler = self._op_handler_dict[op.type] return op_handler.is_source_op def is_passthrough(self, op): op_handler = self._op_handler_dict[op.type] return op_handler.is_passthrough def get_op_slices(self, op): if op not in self._op_slice_dict: size = op_handler_util.get_op_size(op) if size > 0: new_op_slice = OpSlice(op, Slice(0, size)) self._op_slice_dict[op] = [new_op_slice] else: self._op_slice_dict[op] = [] return self._op_slice_dict[op] def get_op_group(self, op_slice): return self._op_group_dict.get(op_slice)
Apache License 2.0
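The core of _slice_op_slice above is plain list bookkeeping: pop the old slice size and insert the new sizes at the same index. A toy illustration with arbitrary sizes, no TensorFlow required:

# Toy illustration of the size bookkeeping only; values are arbitrary.
op_slice_sizes = [4, 6, 5]   # existing slice sizes of an op
sizes = [2, 4]               # the old 6-channel slice is resliced into 2 + 4
op_slice_index, size_index, size_count = 1, 0, 2

op_slice_sizes.pop(op_slice_index)
is_resliced = [False] * len(op_slice_sizes)
for i in range(size_count):
    op_slice_sizes.insert(op_slice_index + i, sizes[size_index + i])
    is_resliced.insert(op_slice_index + i, True)

print(op_slice_sizes)  # [4, 2, 4, 5]
print(is_resliced)     # [False, True, True, False]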
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1beta1_allowed_host_path.py
V1beta1AllowedHostPath.read_only
python
def read_only(self):
    return self._read_only
Gets the read_only of this V1beta1AllowedHostPath.

When set to true, will allow host volumes matching the pathPrefix only if all
volume mounts are readOnly.

:return: The read_only of this V1beta1AllowedHostPath.
:rtype: bool
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1beta1_allowed_host_path.py#L84-L92
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1beta1AllowedHostPath(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'path_prefix': 'str', 'read_only': 'bool' } attribute_map = { 'path_prefix': 'pathPrefix', 'read_only': 'readOnly' } def __init__(self, path_prefix=None, read_only=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._path_prefix = None self._read_only = None self.discriminator = None if path_prefix is not None: self.path_prefix = path_prefix if read_only is not None: self.read_only = read_only @property def path_prefix(self): return self._path_prefix @path_prefix.setter def path_prefix(self, path_prefix): self._path_prefix = path_prefix @property
Apache License 2.0
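A short sketch of reading the property above after constructing the model. The keyword arguments come from the __init__ shown in the context; the import path assumes the usual model re-export of this generated client and a client version that still ships the v1beta1 policy models:

from kubernetes_asyncio.client import V1beta1AllowedHostPath  # assumed re-export

host_path = V1beta1AllowedHostPath(path_prefix="/var/log", read_only=True)
print(host_path.read_only)    # -> True
print(host_path.path_prefix)  # -> /var/log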
openstack/manila
manila/share/drivers/nexenta/ns4/nexenta_nas.py
NexentaNasDriver.delete_snapshot
python
def delete_snapshot(self, context, snapshot, share_server=None):
    LOG.debug('Deleting snapshot %(shr_name)s@%(snap_name)s.', {
        'shr_name': snapshot['share_name'],
        'snap_name': snapshot['name']})
    self.helper.delete_snapshot(snapshot['share_name'], snapshot['name'])
Delete a snapshot.
https://github.com/openstack/manila/blob/34d209484366cd921e052d37c5f9daef5e97af20/manila/share/drivers/nexenta/ns4/nexenta_nas.py#L103-L108
from oslo_log import log from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.nexenta.ns4 import nexenta_nfs_helper from manila.share.drivers.nexenta import options VERSION = '1.0' LOG = log.getLogger(__name__) class NexentaNasDriver(driver.ShareDriver): def __init__(self, *args, **kwargs): LOG.debug('Initializing Nexenta driver.') super(NexentaNasDriver, self).__init__(False, *args, **kwargs) self.configuration = kwargs.get('configuration') if self.configuration: self.configuration.append_config_values( options.nexenta_connection_opts) self.configuration.append_config_values( options.nexenta_nfs_opts) self.configuration.append_config_values( options.nexenta_dataset_opts) self.helper = nexenta_nfs_helper.NFSHelper(self.configuration) else: raise exception.BadConfigurationException( reason=_('Nexenta configuration missing.')) @property def share_backend_name(self): if not hasattr(self, '_share_backend_name'): self._share_backend_name = None if self.configuration: self._share_backend_name = self.configuration.safe_get( 'share_backend_name') if not self._share_backend_name: self._share_backend_name = 'NexentaStor4' return self._share_backend_name def do_setup(self, context): LOG.debug('Setting up the NexentaStor4 plugin.') return self.helper.do_setup() def check_for_setup_error(self): self.helper.check_for_setup_error() def create_share(self, context, share, share_server=None): LOG.debug('Creating share %s.', share['name']) return self.helper.create_filesystem(share) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): LOG.debug('Creating share from snapshot %s.', snapshot['name']) return self.helper.create_share_from_snapshot(share, snapshot) def delete_share(self, context, share, share_server=None): LOG.debug('Deleting share %s.', share['name']) self.helper.delete_share(share['name']) def extend_share(self, share, new_size, share_server=None): LOG.debug('Extending share %(name)s to %(size)sG.', { 'name': share['name'], 'size': new_size}) self.helper.set_quota(share['name'], new_size) def create_snapshot(self, context, snapshot, share_server=None): LOG.debug('Creating a snapshot of share %s.', snapshot['share_name']) snap_id = self.helper.create_snapshot( snapshot['share_name'], snapshot['name']) LOG.info('Created snapshot %s.', snap_id)
Apache License 2.0
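A hedged sketch exercising delete_snapshot above without a NexentaStor appliance: the unbound method is called on a mock object, so only the snapshot keys the method actually reads are needed (the identifiers are made up):

from unittest import mock

from manila.share.drivers.nexenta.ns4.nexenta_nas import NexentaNasDriver

driver = mock.Mock()  # stands in for a fully configured driver instance
snapshot = {'share_name': 'share-1', 'name': 'snap-1'}  # made-up identifiers

NexentaNasDriver.delete_snapshot(driver, None, snapshot)
driver.helper.delete_snapshot.assert_called_once_with('share-1', 'snap-1')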
iagcl/watchmen
elasticsearch/roll_indexes/packages/elasticsearch/client/cat.py
CatClient.thread_pool
python
def thread_pool(self, thread_pool_patterns=None, params=None):
    return self.transport.perform_request(
        'GET',
        _make_path('_cat', 'thread_pool', thread_pool_patterns),
        params=params)
Get information about thread pools.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html>`_

:arg thread_pool_patterns: A comma-separated list of regular-expressions
    to filter the thread pools in the output
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
    master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
    node
:arg s: Comma-separated list of column names or column aliases to sort by
:arg size: The multiplier in which to display values, valid choices are:
    '', 'k', 'm', 'g', 't', 'p'
:arg v: Verbose mode. Display column headers, default False
https://github.com/iagcl/watchmen/blob/d329b357e6fde3ad91e972988b160a33c12afc2a/elasticsearch/roll_indexes/packages/elasticsearch/client/cat.py#L268-L289
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH class CatClient(NamespacedClient): @query_params('format', 'h', 'help', 'local', 'master_timeout', 's', 'v') def aliases(self, name=None, params=None): return self.transport.perform_request('GET', _make_path('_cat', 'aliases', name), params=params) @query_params('bytes', 'format', 'h', 'help', 'local', 'master_timeout', 's', 'v') def allocation(self, node_id=None, params=None): return self.transport.perform_request('GET', _make_path('_cat', 'allocation', node_id), params=params) @query_params('format', 'h', 'help', 'local', 'master_timeout', 's', 'v') def count(self, index=None, params=None): return self.transport.perform_request('GET', _make_path('_cat', 'count', index), params=params) @query_params('format', 'h', 'help', 'local', 'master_timeout', 's', 'ts', 'v') def health(self, params=None): return self.transport.perform_request('GET', '/_cat/health', params=params) @query_params('help', 's') def help(self, params=None): return self.transport.perform_request('GET', '/_cat', params=params) @query_params('bytes', 'format', 'h', 'health', 'help', 'local', 'master_timeout', 'pri', 's', 'v') def indices(self, index=None, params=None): return self.transport.perform_request('GET', _make_path('_cat', 'indices', index), params=params) @query_params('format', 'h', 'help', 'local', 'master_timeout', 's', 'v') def master(self, params=None): return self.transport.perform_request('GET', '/_cat/master', params=params) @query_params('format', 'full_id', 'h', 'help', 'local', 'master_timeout', 's', 'v') def nodes(self, params=None): return self.transport.perform_request('GET', '/_cat/nodes', params=params) @query_params('bytes', 'format', 'h', 'help', 'master_timeout', 's', 'v') def recovery(self, index=None, params=None): return self.transport.perform_request('GET', _make_path('_cat', 'recovery', index), params=params) @query_params('format', 'h', 'help', 'local', 'master_timeout', 's', 'v') def shards(self, index=None, params=None): return self.transport.perform_request('GET', _make_path('_cat', 'shards', index), params=params) @query_params('format', 'h', 'help', 's', 'v') def segments(self, index=None, params=None): return self.transport.perform_request('GET', _make_path('_cat', 'segments', index), params=params) @query_params('format', 'h', 'help', 'local', 'master_timeout', 's', 'v') def pending_tasks(self, params=None): return self.transport.perform_request('GET', '/_cat/pending_tasks', params=params) @query_params('format', 'h', 'help', 'local', 'master_timeout', 's', 'size', 'v')
Apache License 2.0
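A hedged usage sketch for thread_pool above. It assumes the upstream elasticsearch-py package (from which this module is vendored) and a reachable cluster; host, patterns and columns are illustrative:

from elasticsearch import Elasticsearch  # upstream package this client is vendored from

es = Elasticsearch(["localhost:9200"])   # illustrative endpoint

# Roughly GET /_cat/thread_pool/write,search?v=true&h=name,active,queue
print(es.cat.thread_pool(
    thread_pool_patterns="write,search",
    params={"v": "true", "h": "name,active,queue"}))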
tbone-framework/tbone
tbone/testing/resources.py
DummyResource.request_body
python
async def request_body(self):
    if isinstance(self.request.body, dict):
        return json.dumps(self.request.body)
    return self.request.body
Returns the body of the current request. The resource expects a text-formatted body, so a dict body is serialized to JSON before it is returned.
https://github.com/tbone-framework/tbone/blob/5a6672d8bbac449a0ab9e99560609f671fe84d4d/tbone/testing/resources.py#L22-L29
import json from . import * class DummyResource(object): @classmethod def build_http_response(cls, payload, status=200): return Response( payload=payload, headers={'Content-Type': 'application/json'}, status=status )
MIT License
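A minimal sketch of awaiting request_body above. DummyResource is instantiated bare and handed a stand-in request object, which assumes its constructor takes no required arguments (the __init__ is not shown in the context):

import asyncio
from types import SimpleNamespace

from tbone.testing.resources import DummyResource

async def main():
    resource = DummyResource()  # assumes no required constructor arguments
    resource.request = SimpleNamespace(body={"name": "alice"})  # only .body is read
    print(await resource.request_body())  # dict body -> JSON text

    resource.request = SimpleNamespace(body='{"name": "bob"}')
    print(await resource.request_body())  # text body returned unchanged

asyncio.run(main())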
rwl/muntjac
muntjac/terminal/composite_error_message.py
CompositeErrorMessage.getErrorLevel
python
def getErrorLevel(self):
    return self._level
The error level is the largest error level among the contained error messages.

@see: L{muntjac.terminal.IErrorMessage.getErrorLevel}
https://github.com/rwl/muntjac/blob/8db97712edd81b4d25deaaa48587d2a08010f2c8/muntjac/terminal/composite_error_message.py#L42-L47
import sys from muntjac.terminal.error_message import IErrorMessage from muntjac.terminal.paintable import IRepaintRequestListener class CompositeErrorMessage(IErrorMessage): def __init__(self, errorMessages): self._errors = None self._level = None self._errors = list() self._level = -sys.maxint - 1 for m in errorMessages: self.addErrorMessage(m) if len(self._errors) == 0: raise ValueError, 'Composite error message must have at least one error'
Apache License 2.0
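A conceptual sketch of the largest-level rule behind getErrorLevel above. The stub below is not the Muntjac API; it only mimics the getErrorLevel accessor so the max-reduction can be shown in isolation:

# Stub standing in for muntjac.terminal.error_message.IErrorMessage.
class StubError(object):
    def __init__(self, level):
        self._level = level

    def getErrorLevel(self):
        return self._level

errors = [StubError(0), StubError(2), StubError(1)]  # illustrative levels

# The composite keeps the largest of the contained levels.
print(max(e.getErrorLevel() for e in errors))  # -> 2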
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/linux_benchmarks/mesh_network_benchmark.py
RunNetperf
python
def RunNetperf(vm, benchmark_name, servers, result):
    cmd = ''
    if FLAGS.duration_in_seconds:
        cmd_duration_suffix = '-l %s' % FLAGS.duration_in_seconds
    else:
        cmd_duration_suffix = ''
    for server in servers:
        if vm != server:
            cmd += ('./netperf -t '
                    '{benchmark_name} -H {server_ip} -i {iterations} '
                    '{cmd_suffix} & ').format(
                        benchmark_name=benchmark_name,
                        server_ip=server.internal_ip,
                        iterations=FLAGS.num_iterations,
                        cmd_suffix=cmd_duration_suffix)
    netperf_cmd = ''
    for _ in range(FLAGS.num_connections):
        netperf_cmd += cmd
    netperf_cmd += 'wait'

    output, _ = vm.RemoteCommand(netperf_cmd)
    logging.info(output)

    match = re.findall(r'(\d+\.\d+)\s+\n', output)
    value = 0
    expected_num_match = (len(servers) - 1) * FLAGS.num_connections
    if len(match) != expected_num_match:
        raise errors.Benchmarks.RunError(
            'Netserver not reachable. Expecting %s results, got %s.' %
            (expected_num_match, len(match)))
    for res in match:
        if benchmark_name == 'TCP_RR':
            value += 1.0 / float(res) * 1000.0
        else:
            value += float(res)

    with RESULT_LOCK:
        result[VALUE_INDEX] += value
Spawns netperf on a remote VM, parses results.

Args:
  vm: The VM running netperf.
  benchmark_name: The netperf benchmark to run.
  servers: VMs running netserver.
  result: The result variable shared by all threads.
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/linux_benchmarks/mesh_network_benchmark.py#L93-L136
import logging import re import threading from absl import flags from perfkitbenchmarker import configs from perfkitbenchmarker import errors from perfkitbenchmarker import sample from perfkitbenchmarker import vm_util from perfkitbenchmarker.linux_packages import netperf from six.moves import range flags.DEFINE_integer('num_connections', 1, 'Number of connections between each pair of vms.') flags.DEFINE_integer('num_iterations', 1, 'Number of iterations for each run.') FLAGS = flags.FLAGS BENCHMARK_NAME = 'mesh_network' BENCHMARK_CONFIG = """ mesh_network: description: > Measures VM to VM cross section bandwidth in a mesh network. Specify the number of VMs in the network with --num_vms. vm_groups: default: vm_spec: *default_single_core """ NETPERF_BENCHMARKSS = ['TCP_RR', 'TCP_STREAM'] VALUE_INDEX = 1 RESULT_LOCK = threading.Lock() def GetConfig(user_config): config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) config['vm_groups']['default']['vm_count'] = FLAGS.num_vms if FLAGS.num_vms < 2: config['vm_groups']['default']['vm_count'] = 2 return config def PrepareVM(vm): vm.RemoteCommand('./netserver') def Prepare(benchmark_spec): vms = benchmark_spec.vms logging.info('Preparing netperf on %s', vms[0]) vms[0].Install('netperf') for vm in vms: vms[0].MoveFile(vm, netperf.NETPERF_PATH) vms[0].MoveFile(vm, netperf.NETSERVER_PATH) vm_util.RunThreaded(PrepareVM, vms, len(vms))
Apache License 2.0
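A self-contained sketch of the result-parsing half of RunNetperf above: the regex and the TCP_RR conversion are copied from the function, while the output text is fabricated to mimic trailing netperf throughput columns:

import re

# Fabricated netperf-like output; only the trailing "<float><spaces>\n" tokens match.
output = ("87380  16384  16384    10.00    941.23   \n"
          "87380  16384  16384    10.00    938.10   \n")

benchmark_name = 'TCP_STREAM'
match = re.findall(r'(\d+\.\d+)\s+\n', output)

value = 0.0
for res in match:
    if benchmark_name == 'TCP_RR':
        value += 1.0 / float(res) * 1000.0  # invert latency-style results, per the function
    else:
        value += float(res)                 # sum throughput across connections

print(match)            # ['941.23', '938.10']
print(round(value, 2))  # 1879.33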
hirofumi0810/neural_sp
neural_sp/models/seq2seq/encoders/subsampling.py
AddSubsampler.forward
python
def forward(self, xs, xlens, batch_first=True):
    if self.factor == 1:
        return xs, xlens

    if batch_first:
        bs, xmax, idim = xs.size()
        xs_even = xs[:, ::self.factor]
        if xmax % 2 == 0:
            xs_odd = xs[:, 1::self.factor]
        else:
            xs_odd = torch.cat([xs, xs.new_zeros(bs, 1, idim)], dim=1)[:, 1::self.factor]
    else:
        xmax, bs, idim = xs.size()
        xs_even = xs[::self.factor]
        if xmax % 2 == 0:
            xs_odd = xs[1::self.factor]
        else:
            xs_odd = torch.cat([xs, xs.new_zeros(1, bs, idim)], dim=0)[1::self.factor]

    xs = xs_odd + xs_even
    xlens = [max(1, math.ceil(i.item() / self.factor)) for i in xlens]
    xlens = torch.IntTensor(xlens)
    return xs, xlens
Forward pass.

Args:
    xs (FloatTensor): `[B, T, F]` or `[T, B, F]`
    xlens (IntTensor): `[B]` (on CPU)
    batch_first (bool): operate batch-first tensor
Returns:
    xs (FloatTensor): `[B, T', F']` or `[T', B, F']`
    xlens (IntTensor): `[B]` (on CPU)
https://github.com/hirofumi0810/neural_sp/blob/b91877c6d2a11f06026480ab422176274d88cbf2/neural_sp/models/seq2seq/encoders/subsampling.py#L138-L172
import math import torch import torch.nn as nn from neural_sp.models.seq2seq.encoders.conv import update_lens_1d class ConcatSubsampler(nn.Module): def __init__(self, subsampling_factor, n_units): super(ConcatSubsampler, self).__init__() self.factor = subsampling_factor if subsampling_factor > 1: self.proj = nn.Linear(n_units * subsampling_factor, n_units) def forward(self, xs, xlens, batch_first=True): if self.factor == 1: return xs, xlens if batch_first: xs = xs.transpose(1, 0).contiguous() xs = [torch.cat([xs[t - r:t - r + 1] for r in range(self.factor - 1, -1, -1)], dim=-1) for t in range(xs.size(0)) if (t + 1) % self.factor == 0] xs = torch.cat(xs, dim=0) xs = torch.relu(self.proj(xs)) if batch_first: xs = xs.transpose(1, 0) xlens = [max(1, i.item() // self.factor) for i in xlens] xlens = torch.IntTensor(xlens) return xs, xlens class Conv1dSubsampler(nn.Module): def __init__(self, subsampling_factor, n_units, kernel_size=3): super(Conv1dSubsampler, self).__init__() assert kernel_size % 2 == 1, "Kernel size should be odd for 'same' conv." self.factor = subsampling_factor if subsampling_factor > 1: self.conv1d = nn.Conv1d(in_channels=n_units, out_channels=n_units, kernel_size=kernel_size, stride=subsampling_factor, padding=(kernel_size - 1) // 2) def forward(self, xs, xlens, batch_first=True): if self.factor == 1: return xs, xlens if batch_first: xs = self.conv1d(xs.transpose(2, 1)) xs = xs.transpose(2, 1).contiguous() else: xs = self.conv1d(xs.permute(1, 2, 0)) xs = xs.permute(2, 0, 1).contiguous() xs = torch.relu(xs) xlens = update_lens_1d(xlens, self.conv1d) return xs, xlens class DropSubsampler(nn.Module): def __init__(self, subsampling_factor): super(DropSubsampler, self).__init__() self.factor = subsampling_factor def forward(self, xs, xlens, batch_first=True): if self.factor == 1: return xs, xlens if batch_first: xs = xs[:, ::self.factor] else: xs = xs[::self.factor] xlens = [max(1, math.ceil(i.item() / self.factor)) for i in xlens] xlens = torch.IntTensor(xlens) return xs, xlens class AddSubsampler(nn.Module): def __init__(self, subsampling_factor): super(AddSubsampler, self).__init__() self.factor = subsampling_factor assert subsampling_factor <= 2
Apache License 2.0
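A runnable sketch of the forward pass above with a subsampling factor of 2; shapes follow the docstring and the tensors are random, so only the output shapes and lengths are meaningful:

import torch

from neural_sp.models.seq2seq.encoders.subsampling import AddSubsampler

B, T, F = 4, 11, 80                      # batch, frames, feature dim (arbitrary)
xs = torch.randn(B, T, F)                # `[B, T, F]`, batch-first
xlens = torch.IntTensor([11, 9, 7, 5])   # per-utterance lengths (on CPU)

sub = AddSubsampler(subsampling_factor=2)
xs_out, xlens_out = sub(xs, xlens, batch_first=True)

print(xs_out.shape)  # torch.Size([4, 6, 80]), i.e. ceil(11 / 2) frames
print(xlens_out)     # tensor([6, 5, 4, 3], dtype=torch.int32)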