repository_name       string (lengths 7-107)
function_path         string (lengths 4-190)
function_identifier   string (lengths 1-236)
language              string (1 class)
function              string (lengths 9-647k)
docstring             string (lengths 5-488k)
function_url          string (lengths 71-285)
context               string (lengths 0-2.51M)
license               string (5 classes)
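Each record pairs one extracted Python function with its docstring, source URL, surrounding file context, and license. A minimal sketch of iterating such a dataset with the Hugging Face datasets library follows; the dataset path "user/python-functions-with-context" is a hypothetical placeholder, not the real identifier.

from datasets import load_dataset

# Hypothetical dataset path -- substitute the actual repository id.
ds = load_dataset("user/python-functions-with-context", split="train")

for row in ds.select(range(3)):
    # Each record pairs a function body with its docstring, URL, file context and license.
    print(row["repository_name"], row["function_identifier"])
    print(row["docstring"][:80])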
nttcom/eclcli
eclcli/bare/bareclient/v2/servers.py
ServerManager.add_fixed_ip
python
def add_fixed_ip(self, server, network_id):
    self._action('addFixedIp', server, {'networkId': network_id})
Add an IP address on a network.

:param server: The :class:`Server` (or its ID) to add an IP to.
:param network_id: The ID of the network the IP should be on.
https://github.com/nttcom/eclcli/blob/25946165882b352c16df4077f5470d3c5e4b910e/eclcli/bare/bareclient/v2/servers.py#L612-L619
import base64 import os try: import json except ImportError: import simplejson as json try: from oslo_utils import encodeutils except ImportError: from oslo.utils import encodeutils import six from six.moves.urllib import parse from .. import base from .. import crypto REBOOT_SOFT, REBOOT_HARD = 'SOFT', 'HARD' class Server(base.Resource): HUMAN_ID = True def __repr__(self): return '<Server: %s>' % getattr(self, 'name', 'unknown-name') def delete(self): self.manager.delete(self) def update(self, name=None): self.manager.update(self, name=name) def get_console_output(self, length=None): return self.manager.get_console_output(self, length) def get_management_console(self): return self.manager.get_management_console(self) def get_vnc_console(self, console_type): return self.manager.get_vnc_console(self, console_type) def get_spice_console(self, console_type): return self.manager.get_spice_console(self, console_type) def get_rdp_console(self, console_type): return self.manager.get_rdp_console(self, console_type) def get_serial_console(self, console_type): return self.manager.get_serial_console(self, console_type) def get_password(self, private_key=None): return self.manager.get_password(self, private_key) def clear_password(self): return self.manager.clear_password(self) def add_fixed_ip(self, network_id): self.manager.add_fixed_ip(self, network_id) def add_floating_ip(self, address, fixed_address=None): self.manager.add_floating_ip(self, address, fixed_address) def remove_floating_ip(self, address): self.manager.remove_floating_ip(self, address) def stop(self): self.manager.stop(self) def force_delete(self): self.manager.force_delete(self) def restore(self): self.manager.restore(self) def start(self): self.manager.start(self) def pause(self): self.manager.pause(self) def unpause(self): self.manager.unpause(self) def lock(self): self.manager.lock(self) def unlock(self): self.manager.unlock(self) def suspend(self): self.manager.suspend(self) def resume(self): self.manager.resume(self) def rescue(self, password=None, image=None): return self.manager.rescue(self, password, image) def unrescue(self): self.manager.unrescue(self) def shelve(self): self.manager.shelve(self) def shelve_offload(self): self.manager.shelve_offload(self) def unshelve(self): self.manager.unshelve(self) def diagnostics(self): return self.manager.diagnostics(self) def migrate(self): self.manager.migrate(self) def remove_fixed_ip(self, address): self.manager.remove_fixed_ip(self, address) def change_password(self, password): self.manager.change_password(self, password) def reboot(self, reboot_type=REBOOT_SOFT): self.manager.reboot(self, reboot_type) def rebuild(self, image, password=None, preserve_ephemeral=False, **kwargs): return self.manager.rebuild(self, image, password=password, preserve_ephemeral=preserve_ephemeral, **kwargs) def resize(self, flavor, **kwargs): self.manager.resize(self, flavor, **kwargs) def create_image(self, image_name, metadata=None): return self.manager.create_image(self, image_name, metadata) def backup(self, backup_name, backup_type, rotation): self.manager.backup(self, backup_name, backup_type, rotation) def confirm_resize(self): self.manager.confirm_resize(self) def revert_resize(self): self.manager.revert_resize(self) @property def networks(self): networks = {} try: for network_label, address_list in self.addresses.items(): networks[network_label] = [a['addr'] for a in address_list] return networks except Exception: return {} def live_migrate(self, host=None, block_migration=False, 
disk_over_commit=False): self.manager.live_migrate(self, host, block_migration, disk_over_commit) def reset_state(self, state='error'): self.manager.reset_state(self, state) def reset_network(self): self.manager.reset_network(self) def add_security_group(self, security_group): self.manager.add_security_group(self, security_group) def remove_security_group(self, security_group): self.manager.remove_security_group(self, security_group) def list_security_group(self): return self.manager.list_security_group(self) def evacuate(self, host=None, on_shared_storage=True, password=None): return self.manager.evacuate(self, host, on_shared_storage, password) def interface_list(self): return self.manager.interface_list(self) def interface_attach(self, port_id, net_id, fixed_ip): return self.manager.interface_attach(self, port_id, net_id, fixed_ip) def interface_detach(self, port_id): return self.manager.interface_detach(self, port_id) class ServerManager(base.BootingManagerWithFind): resource_class = Server def _boot(self, resource_url, response_key, name, image, flavor, meta=None, files=None, userdata=None, return_raw=False, key_name=None, availability_zone=None, nics=None, admin_pass=None, disk_config=None, **kwargs): body = { "server": { "name": name, "flavorRef": str(base.getid(flavor)), } } image = str(base.getid(image)) if image: body['server'].update({'imageRef': image}) if userdata: if os.path.exists(userdata): with open(userdata, "r") as fuserdata: userdata = fuserdata.read() if six.PY3: userdata = userdata.encode("utf-8") else: userdata = encodeutils.safe_encode(userdata) userdata_b64 = base64.b64encode(userdata).decode('utf-8') body["server"]["user_data"] = userdata_b64 if meta: body["server"]["metadata"] = meta if key_name: body["server"]["key_name"] = key_name if admin_pass: body["server"]["adminPass"] = admin_pass if files: personality = body['server']['personality'] = [] for filepath, file_or_string in sorted(files.items(), key=lambda x: x[0]): if hasattr(file_or_string, 'read'): data = file_or_string.read() else: data = file_or_string if six.PY3 and isinstance(data, str): data = data.encode('utf-8') cont = base64.b64encode(data).decode('utf-8') personality.append({ 'path': filepath, 'contents': cont, }) if availability_zone: body["server"]["availability_zone"] = availability_zone if nics is not None: body['server']['networks'] = nics if disk_config is not None: disk_config_dict = json.loads(disk_config) for k, v in disk_config_dict.items(): body['server'][k] = v return self._create(resource_url, body, response_key, return_raw=return_raw, **kwargs) def get(self, server): return self._get("/servers/%s" % base.getid(server), "server") def list(self, detailed=True, search_opts=None, marker=None, limit=None, sort_keys=None, sort_dirs=None): if search_opts is None: search_opts = {} qparams = {} for opt, val in six.iteritems(search_opts): if val: qparams[opt] = val if marker: qparams['marker'] = marker if limit: qparams['limit'] = limit if qparams or sort_keys or sort_dirs: items = list(qparams.items()) if sort_keys: items.extend(('sort_key', sort_key) for sort_key in sort_keys) if sort_dirs: items.extend(('sort_dir', sort_dir) for sort_dir in sort_dirs) new_qparams = sorted(items, key=lambda x: x[0]) query_string = "?%s" % parse.urlencode(new_qparams) else: query_string = "" detail = "" if detailed: detail = "/detail" return self._list("/servers%s%s" % (detail, query_string), "servers")
Apache License 2.0
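A minimal usage sketch, assuming servers is a ServerManager already bound to an authenticated client and my_server is a Server instance (or ID) obtained from it; both names and the network ID are illustrative, not part of the library.

# Attach a fixed IP from the given network to an existing baremetal server.
network_id = "0123-example-network-id"   # hypothetical ID for illustration
servers.add_fixed_ip(my_server, network_id)

# The Server resource exposes the same action directly:
my_server.add_fixed_ip(network_id)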
gitpython-developers/smmap
smmap/util.py
MapRegion.__init__
python
def __init__(self, path_or_fd, ofs, size, flags=0):
    self._b = ofs
    self._size = 0
    self._uc = 0

    if isinstance(path_or_fd, int):
        fd = path_or_fd
    else:
        fd = os.open(path_or_fd, os.O_RDONLY | getattr(os, 'O_BINARY', 0) | flags)

    try:
        kwargs = dict(access=ACCESS_READ, offset=ofs)
        corrected_size = size
        sizeofs = ofs
        actual_size = min(os.fstat(fd).st_size - sizeofs, corrected_size)
        self._mf = mmap(fd, actual_size, **kwargs)
        self._size = len(self._mf)
    finally:
        if isinstance(path_or_fd, str):
            os.close(fd)

    self.increment_client_count()
Initialize a region and allocate the memory map.

:param path_or_fd: path to the file to map, or an already opened file descriptor
:param ofs: **aligned** offset into the file to be mapped
:param size: if size is larger than the file on disk, the whole file will be mapped and the size automatically adjusted
:param flags: additional flags to be given when opening the file
:raise Exception: if no memory can be allocated
https://github.com/gitpython-developers/smmap/blob/f2f2eb6ee9a406bfaa5f8be927ad8ab8367cdbb0/smmap/util.py#L99-L136
import os import sys from mmap import mmap, ACCESS_READ from mmap import ALLOCATIONGRANULARITY __all__ = ["align_to_mmap", "is_64_bit", "MapWindow", "MapRegion", "MapRegionList", "ALLOCATIONGRANULARITY"] def align_to_mmap(num, round_up): res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY if round_up and (res != num): res += ALLOCATIONGRANULARITY return res def is_64_bit(): return sys.maxsize > (1 << 32) - 1 class MapWindow: __slots__ = ( 'ofs', 'size' ) def __init__(self, offset, size): self.ofs = offset self.size = size def __repr__(self): return "MapWindow(%i, %i)" % (self.ofs, self.size) @classmethod def from_region(cls, region): return cls(region._b, region.size()) def ofs_end(self): return self.ofs + self.size def align(self): nofs = align_to_mmap(self.ofs, 0) self.size += self.ofs - nofs self.ofs = nofs self.size = align_to_mmap(self.size, 1) def extend_left_to(self, window, max_size): rofs = self.ofs - window.ofs_end() nsize = rofs + self.size rofs -= nsize - min(nsize, max_size) self.ofs = self.ofs - rofs self.size += rofs def extend_right_to(self, window, max_size): self.size = min(self.size + (window.ofs - self.ofs_end()), max_size) class MapRegion: __slots__ = [ '_b', '_mf', '_uc', '_size', '__weakref__' ]
BSD 3-Clause New or Revised License
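A minimal sketch of allocating a region directly, assuming a readable, non-empty file at the illustrative path "data.bin"; in normal use regions are managed by smmap's mapping managers rather than constructed by hand.

from smmap.util import MapRegion, ALLOCATIONGRANULARITY

# Map the first window of the file; ofs must be aligned to ALLOCATIONGRANULARITY.
region = MapRegion("data.bin", 0, ALLOCATIONGRANULARITY)
print(region.size())   # actual mapped size, clamped to the file size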
polycortex/pymuse
pymuse/pipeline.py
Pipeline.get_output_queue
python
def get_output_queue(self, queue_index=0) -> StoppableQueue:
    return self._output_queues[queue_index]
Return a reference to the output queue selected by queue_index.
https://github.com/polycortex/pymuse/blob/0b157eb7bdc03209207480711bc32657a88d0fa7/pymuse/pipeline.py#L27-L29
from pymuse.pipelinestages.pipeline_stage import PipelineStage from pymuse.utils.stoppablequeue import StoppableQueue from pymuse.signal import Signal from pymuse.constants import PIPELINE_QUEUE_SIZE class PipelineFork(): def __init__(self, *branches): self.forked_branches: list = list(branches) class Pipeline(): def __init__(self, input_signal: Signal, *stages): self._output_queues = [] self._stages: list = list(stages) self._link_stages(self._stages) self._stages[0]._queue_in = input_signal.signal_queue
MIT License
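A minimal sketch of reading from a pipeline's output queue, assuming pipeline is an already constructed and started Pipeline; the stage wiring is omitted because it depends on the rest of the pymuse setup.

# Fetch the first output queue and consume one processed item.
queue = pipeline.get_output_queue(0)
# Assumes StoppableQueue keeps the standard queue.Queue get() interface.
item = queue.get(timeout=1)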
cortex-lab/phy
phy/plot/gloo/globject.py
GLObject._update
python
def _update(self):
    pass
Dummy update method
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/phy/plot/gloo/globject.py#L150-L153
import logging log = logging.getLogger(__name__) class GLObject(object): _idcount = 0 def __init__(self): self._handle = -1 self._target = None self._need_setup = True self._need_create = True self._need_update = True self._need_delete = False GLObject._idcount += 1 self._id = GLObject._idcount @property def need_create(self): return self._need_create @property def need_update(self): return self._need_update @property def need_setup(self): return self._need_setup @property def need_delete(self): return self._need_delete def delete(self): self._delete() self._handle = -1 self._need_setup = True self._need_create = True self._need_update = True self._need_delete = False def activate(self): if hasattr(self, "base") and isinstance(self.base, GLObject): self.base.activate() return if self.need_create: self._create() self._need_create = False self._activate() if self.need_setup: self._setup() self._need_setup = False if self.need_update: log.log(5, "%s need update" % self.handle) self._update() self._need_update = False def deactivate(self): if hasattr(self, "base") and isinstance(self.base, GLObject): self.base.deactivate() else: self._deactivate() @property def handle(self): if hasattr(self, "base") and isinstance(self.base, GLObject): if hasattr(self.base, "_handle"): return self.base._handle return self._handle @property def target(self): if hasattr(self, "base") and isinstance(self.base, GLObject): return self.base._target return self._target def _create(self): pass def _delete(self): pass def _activate(self): pass def _deactivate(self): pass def _setup(self): pass
BSD 3-Clause New or Revised License
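_update is a hook in GLObject's template-method lifecycle: activate() calls _create, _setup and _update only while the corresponding need_* flags are set. A minimal sketch of a subclass overriding the hook; DummyBuffer is an illustrative name, not part of phy.

class DummyBuffer(GLObject):
    def _update(self):
        # Upload pending data to the GPU here; the base class clears
        # _need_update after activate() has called this hook once.
        print("uploading data for object", self._id)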
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_pod_spec.py
V1PodSpec.service_account
python
def service_account(self):
    return self._service_account
Gets the service_account of this V1PodSpec.  # noqa: E501

DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.  # noqa: E501

:return: The service_account of this V1PodSpec.  # noqa: E501
:rtype: str
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_pod_spec.py#L814-L822
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1PodSpec(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'active_deadline_seconds': 'int', 'affinity': 'V1Affinity', 'automount_service_account_token': 'bool', 'containers': 'list[V1Container]', 'dns_config': 'V1PodDNSConfig', 'dns_policy': 'str', 'enable_service_links': 'bool', 'ephemeral_containers': 'list[V1EphemeralContainer]', 'host_aliases': 'list[V1HostAlias]', 'host_ipc': 'bool', 'host_network': 'bool', 'host_pid': 'bool', 'hostname': 'str', 'image_pull_secrets': 'list[V1LocalObjectReference]', 'init_containers': 'list[V1Container]', 'node_name': 'str', 'node_selector': 'dict(str, str)', 'overhead': 'dict(str, str)', 'preemption_policy': 'str', 'priority': 'int', 'priority_class_name': 'str', 'readiness_gates': 'list[V1PodReadinessGate]', 'restart_policy': 'str', 'runtime_class_name': 'str', 'scheduler_name': 'str', 'security_context': 'V1PodSecurityContext', 'service_account': 'str', 'service_account_name': 'str', 'share_process_namespace': 'bool', 'subdomain': 'str', 'termination_grace_period_seconds': 'int', 'tolerations': 'list[V1Toleration]', 'topology_spread_constraints': 'list[V1TopologySpreadConstraint]', 'volumes': 'list[V1Volume]' } attribute_map = { 'active_deadline_seconds': 'activeDeadlineSeconds', 'affinity': 'affinity', 'automount_service_account_token': 'automountServiceAccountToken', 'containers': 'containers', 'dns_config': 'dnsConfig', 'dns_policy': 'dnsPolicy', 'enable_service_links': 'enableServiceLinks', 'ephemeral_containers': 'ephemeralContainers', 'host_aliases': 'hostAliases', 'host_ipc': 'hostIPC', 'host_network': 'hostNetwork', 'host_pid': 'hostPID', 'hostname': 'hostname', 'image_pull_secrets': 'imagePullSecrets', 'init_containers': 'initContainers', 'node_name': 'nodeName', 'node_selector': 'nodeSelector', 'overhead': 'overhead', 'preemption_policy': 'preemptionPolicy', 'priority': 'priority', 'priority_class_name': 'priorityClassName', 'readiness_gates': 'readinessGates', 'restart_policy': 'restartPolicy', 'runtime_class_name': 'runtimeClassName', 'scheduler_name': 'schedulerName', 'security_context': 'securityContext', 'service_account': 'serviceAccount', 'service_account_name': 'serviceAccountName', 'share_process_namespace': 'shareProcessNamespace', 'subdomain': 'subdomain', 'termination_grace_period_seconds': 'terminationGracePeriodSeconds', 'tolerations': 'tolerations', 'topology_spread_constraints': 'topologySpreadConstraints', 'volumes': 'volumes' } def __init__(self, active_deadline_seconds=None, affinity=None, automount_service_account_token=None, containers=None, dns_config=None, dns_policy=None, enable_service_links=None, ephemeral_containers=None, host_aliases=None, host_ipc=None, host_network=None, host_pid=None, hostname=None, image_pull_secrets=None, init_containers=None, node_name=None, node_selector=None, overhead=None, preemption_policy=None, priority=None, priority_class_name=None, readiness_gates=None, restart_policy=None, runtime_class_name=None, scheduler_name=None, security_context=None, service_account=None, service_account_name=None, share_process_namespace=None, subdomain=None, termination_grace_period_seconds=None, tolerations=None, topology_spread_constraints=None, volumes=None, local_vars_configuration=None): if local_vars_configuration is None: 
local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._active_deadline_seconds = None self._affinity = None self._automount_service_account_token = None self._containers = None self._dns_config = None self._dns_policy = None self._enable_service_links = None self._ephemeral_containers = None self._host_aliases = None self._host_ipc = None self._host_network = None self._host_pid = None self._hostname = None self._image_pull_secrets = None self._init_containers = None self._node_name = None self._node_selector = None self._overhead = None self._preemption_policy = None self._priority = None self._priority_class_name = None self._readiness_gates = None self._restart_policy = None self._runtime_class_name = None self._scheduler_name = None self._security_context = None self._service_account = None self._service_account_name = None self._share_process_namespace = None self._subdomain = None self._termination_grace_period_seconds = None self._tolerations = None self._topology_spread_constraints = None self._volumes = None self.discriminator = None if active_deadline_seconds is not None: self.active_deadline_seconds = active_deadline_seconds if affinity is not None: self.affinity = affinity if automount_service_account_token is not None: self.automount_service_account_token = automount_service_account_token self.containers = containers if dns_config is not None: self.dns_config = dns_config if dns_policy is not None: self.dns_policy = dns_policy if enable_service_links is not None: self.enable_service_links = enable_service_links if ephemeral_containers is not None: self.ephemeral_containers = ephemeral_containers if host_aliases is not None: self.host_aliases = host_aliases if host_ipc is not None: self.host_ipc = host_ipc if host_network is not None: self.host_network = host_network if host_pid is not None: self.host_pid = host_pid if hostname is not None: self.hostname = hostname if image_pull_secrets is not None: self.image_pull_secrets = image_pull_secrets if init_containers is not None: self.init_containers = init_containers if node_name is not None: self.node_name = node_name if node_selector is not None: self.node_selector = node_selector if overhead is not None: self.overhead = overhead if preemption_policy is not None: self.preemption_policy = preemption_policy if priority is not None: self.priority = priority if priority_class_name is not None: self.priority_class_name = priority_class_name if readiness_gates is not None: self.readiness_gates = readiness_gates if restart_policy is not None: self.restart_policy = restart_policy if runtime_class_name is not None: self.runtime_class_name = runtime_class_name if scheduler_name is not None: self.scheduler_name = scheduler_name if security_context is not None: self.security_context = security_context if service_account is not None: self.service_account = service_account if service_account_name is not None: self.service_account_name = service_account_name if share_process_namespace is not None: self.share_process_namespace = share_process_namespace if subdomain is not None: self.subdomain = subdomain if termination_grace_period_seconds is not None: self.termination_grace_period_seconds = termination_grace_period_seconds if tolerations is not None: self.tolerations = tolerations if topology_spread_constraints is not None: self.topology_spread_constraints = topology_spread_constraints if volumes is not None: self.volumes = volumes @property def active_deadline_seconds(self): return 
self._active_deadline_seconds @active_deadline_seconds.setter def active_deadline_seconds(self, active_deadline_seconds): self._active_deadline_seconds = active_deadline_seconds @property def affinity(self): return self._affinity @affinity.setter def affinity(self, affinity): self._affinity = affinity @property def automount_service_account_token(self): return self._automount_service_account_token @automount_service_account_token.setter def automount_service_account_token(self, automount_service_account_token): self._automount_service_account_token = automount_service_account_token @property def containers(self): return self._containers @containers.setter def containers(self, containers): if self.local_vars_configuration.client_side_validation and containers is None: raise ValueError("Invalid value for `containers`, must not be `None`") self._containers = containers @property def dns_config(self): return self._dns_config @dns_config.setter def dns_config(self, dns_config): self._dns_config = dns_config @property def dns_policy(self): return self._dns_policy @dns_policy.setter def dns_policy(self, dns_policy): self._dns_policy = dns_policy @property def enable_service_links(self): return self._enable_service_links @enable_service_links.setter def enable_service_links(self, enable_service_links): self._enable_service_links = enable_service_links @property def ephemeral_containers(self): return self._ephemeral_containers @ephemeral_containers.setter def ephemeral_containers(self, ephemeral_containers): self._ephemeral_containers = ephemeral_containers @property def host_aliases(self): return self._host_aliases @host_aliases.setter def host_aliases(self, host_aliases): self._host_aliases = host_aliases @property def host_ipc(self): return self._host_ipc @host_ipc.setter def host_ipc(self, host_ipc): self._host_ipc = host_ipc @property def host_network(self): return self._host_network @host_network.setter def host_network(self, host_network): self._host_network = host_network @property def host_pid(self): return self._host_pid @host_pid.setter def host_pid(self, host_pid): self._host_pid = host_pid @property def hostname(self): return self._hostname @hostname.setter def hostname(self, hostname): self._hostname = hostname @property def image_pull_secrets(self): return self._image_pull_secrets @image_pull_secrets.setter def image_pull_secrets(self, image_pull_secrets): self._image_pull_secrets = image_pull_secrets @property def init_containers(self): return self._init_containers @init_containers.setter def init_containers(self, init_containers): self._init_containers = init_containers @property def node_name(self): return self._node_name @node_name.setter def node_name(self, node_name): self._node_name = node_name @property def node_selector(self): return self._node_selector @node_selector.setter def node_selector(self, node_selector): self._node_selector = node_selector @property def overhead(self): return self._overhead @overhead.setter def overhead(self, overhead): self._overhead = overhead @property def preemption_policy(self): return self._preemption_policy @preemption_policy.setter def preemption_policy(self, preemption_policy): self._preemption_policy = preemption_policy @property def priority(self): return self._priority @priority.setter def priority(self, priority): self._priority = priority @property def priority_class_name(self): return self._priority_class_name @priority_class_name.setter def priority_class_name(self, priority_class_name): self._priority_class_name = 
priority_class_name @property def readiness_gates(self): return self._readiness_gates @readiness_gates.setter def readiness_gates(self, readiness_gates): self._readiness_gates = readiness_gates @property def restart_policy(self): return self._restart_policy @restart_policy.setter def restart_policy(self, restart_policy): self._restart_policy = restart_policy @property def runtime_class_name(self): return self._runtime_class_name @runtime_class_name.setter def runtime_class_name(self, runtime_class_name): self._runtime_class_name = runtime_class_name @property def scheduler_name(self): return self._scheduler_name @scheduler_name.setter def scheduler_name(self, scheduler_name): self._scheduler_name = scheduler_name @property def security_context(self): return self._security_context @security_context.setter def security_context(self, security_context): self._security_context = security_context @property
Apache License 2.0
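A short sketch showing the deprecated alias next to its replacement, using illustrative values only.

from kubernetes_asyncio.client.models.v1_pod_spec import V1PodSpec

spec = V1PodSpec(containers=[], service_account_name="build-bot")
spec.service_account = "build-bot"   # deprecated alias, kept for compatibility
print(spec.service_account)          # "build-bot"
print(spec.service_account_name)     # preferred field going forward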
jwlodek/py_cui
py_cui/widget_set.py
WidgetSet.add_text_block
python
def add_text_block(self, title, row, column, row_span=1, column_span=1, padx=1, pady=0, initial_text=''):
    id = 'Widget{}'.format(len(self._widgets.keys()))
    new_text_block = widgets.ScrollTextBlock(id, title, self._grid, row, column, row_span, column_span, padx, pady, self._logger, initial_text)
    self._widgets[id] = new_text_block
    if self._selected_widget is None:
        self.set_selected_widget(id)
    self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_text_block))))
    return new_text_block
Function that adds a new text block to the CUI grid

Parameters
----------
title : str
    The title of the text block
row : int
    The row value, from the top down
column : int
    The column value, from left to right
row_span=1 : int
    The number of rows to span across
column_span=1 : int
    The number of columns to span across
padx=1 : int
    Number of padding characters in the x direction
pady=0 : int
    Number of padding characters in the y direction
initial_text='' : str
    Initial text for the text block

Returns
-------
new_text_block : ScrollTextBlock
    A reference to the created text block object.
https://github.com/jwlodek/py_cui/blob/e5f86515bb12929073ec62d27f061092d8040c23/py_cui/widget_set.py#L239-L283
import shutil import py_cui.widgets as widgets import py_cui.grid as grid import py_cui.controls as controls class WidgetSet: def __init__(self, num_rows, num_cols, logger, simulated_terminal=None): self._widgets = {} self._keybindings = {} self._simulated_terminal = simulated_terminal if self._simulated_terminal is None: term_size = shutil.get_terminal_size() height = term_size.lines width = term_size.columns else: height = self._simulated_terminal[0] width = self._simulated_terminal[1] self._height = height self._width = width self._height = self._height - 4 self._grid = grid.Grid(num_rows, num_cols, self._height, self._width, logger) self._selected_widget = None self._logger = logger def set_selected_widget(self, widget_id): if widget_id in self._widgets.keys(): self._selected_widget = widget_id def get_widgets(self): return self._widgets def add_key_command(self, key, command): self._keybindings[key] = command def add_scroll_menu(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0): id = 'Widget{}'.format(len(self._widgets.keys())) new_scroll_menu = widgets.ScrollMenu(id, title, self._grid, row, column, row_span, column_span, padx, pady, self._logger) self._widgets[id] = new_scroll_menu if self._selected_widget is None: self.set_selected_widget(id) self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_scroll_menu)))) return new_scroll_menu def add_checkbox_menu(self, title, row, column, row_span=1, column_span=1, padx=1, pady=0, checked_char='X'): id = 'Widget{}'.format(len(self._widgets.keys())) new_checkbox_menu = widgets.CheckBoxMenu(id, title, self._grid, row, column, row_span, column_span, padx, pady, self._logger, checked_char) self._widgets[id] = new_checkbox_menu if self._selected_widget is None: self.set_selected_widget(id) self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_checkbox_menu)))) return new_checkbox_menu def add_text_box(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = '', password = False): id = 'Widget{}'.format(len(self._widgets.keys())) new_text_box = widgets.TextBox(id, title, self._grid, row, column, row_span, column_span, padx, pady, self._logger, initial_text, password) self._widgets[id] = new_text_box if self._selected_widget is None: self.set_selected_widget(id) self._logger.info('Adding widget {} w/ ID {} of type {}'.format(title, id, str(type(new_text_box)))) return new_text_box
BSD 3-Clause New or Revised License
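A minimal sketch using the same signature through py_cui's top-level PyCUI class, which mirrors WidgetSet.add_text_block; the window dimensions, title, and initial text are illustrative.

import py_cui

root = py_cui.PyCUI(3, 3)   # 3x3 grid
log_block = root.add_text_block('Log', 0, 0, row_span=2, column_span=3,
                                initial_text='ready')
# root.start() would hand control to the curses event loop.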
graviti-ai/tensorbay-python-sdk
tensorbay/geometry/vector.py
Vector2D.__new__
python
def __new__(
    cls: Type[_V2],
    *args: float,
    **kwargs: float
) -> _V2:
    obj: _V2 = object.__new__(cls)
    return obj
Create a :class:`Vector2D` instance.

Arguments:
    args: The coordinates of the 2D vector.
    kwargs: The coordinates of the 2D vector.

Returns:
    The created :class:`Vector2D` instance.
https://github.com/graviti-ai/tensorbay-python-sdk/blob/db60d259869d6a528ee1ad84103d2b9bab1bd72e/tensorbay/geometry/vector.py#L225-L239
from itertools import zip_longest from math import hypot, sqrt from sys import version_info from typing import Dict, Iterable, Optional, Sequence, Tuple, Type, TypeVar, Union from tensorbay.utility import ReprType, UserSequence if version_info >= (3, 8): _hypot_for_n_dimension = hypot else: def _hypot_for_n_dimension(*coordinates: float) -> float: return sqrt(sum(x * x for x in coordinates)) _V = TypeVar("_V", bound="Vector") _V2 = TypeVar("_V2", bound="Vector2D") _V3 = TypeVar("_V3", bound="Vector3D") _T = Union["Vector2D", "Vector3D"] class Vector(UserSequence[float]): _data: Tuple[float, ...] _repr_type = ReprType.INSTANCE _DIMENSION: Optional[int] = None def __new__( cls: Type[_V], x: float, y: float, z: Optional[float] = None, ) -> _T: ReturnType = Vector2D if z is None else Vector3D obj: _T = object.__new__(ReturnType) return obj def __bool__(self) -> bool: return any(self._data) def __neg__(self: _V) -> _V: result: _V = object.__new__(self.__class__) result._data = tuple(-coordinate for coordinate in self._data) return result def __add__(self: _V, other: Iterable[float]) -> _V: try: result: _V = object.__new__(self.__class__) result._data = tuple(i + j for i, j in zip_longest(self._data, other)) return result except TypeError: return NotImplemented def __radd__(self: _V, other: Sequence[float]) -> _V: return self.__add__(other) def __sub__(self: _V, other: Iterable[float]) -> _V: try: result: _V = object.__new__(self.__class__) result._data = tuple(i - j for i, j in zip_longest(self._data, other)) return result except TypeError: return NotImplemented def __rsub__(self: _V, other: Iterable[float]) -> _V: try: result: _V = object.__new__(self.__class__) result._data = tuple(i - j for i, j in zip_longest(other, self._data)) return result except TypeError: return NotImplemented def __mul__(self: _V, other: float) -> _V: try: if isinstance(other, (int, float)): result: _V = object.__new__(self.__class__) result._data = tuple(i * other for i in self._data) return result except TypeError: pass return NotImplemented def __rmul__(self: _V, other: float) -> _V: return self.__mul__(other) def __truediv__(self: _V, other: float) -> _V: try: if isinstance(other, (int, float)): result: _V = object.__new__(self.__class__) result._data = tuple(i / other for i in self._data) return result except TypeError: pass return NotImplemented def __floordiv__(self: _V, other: float) -> _V: try: if isinstance(other, (int, float)): result: _V = object.__new__(self.__class__) result._data = tuple(i // other for i in self._data) return result except TypeError: pass return NotImplemented def __abs__(self) -> float: return _hypot_for_n_dimension(*self._data) def _repr_head(self) -> str: return f"{self.__class__.__name__}{self._data}" @staticmethod def loads(contents: Dict[str, float]) -> _T: if "z" in contents: return Vector3D.loads(contents) return Vector2D.loads(contents) class Vector2D(Vector): _DIMENSION = 2
MIT License
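A short sketch of constructing vectors; the dispatch in Vector.__new__ picks Vector2D or Vector3D depending on whether a z coordinate is given.

from tensorbay.geometry.vector import Vector, Vector2D

v = Vector2D(1.0, 2.0)
w = Vector(3.0, 4.0)   # no z coordinate, so this also yields a Vector2D
print(v + w)           # element-wise addition from the Vector base class
print(abs(w))          # Euclidean norm, 5.0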
tryolabs/luminoth
luminoth/models/ssd/utils.py
generate_anchors_reference
python
def generate_anchors_reference(ratios, scales, num_anchors, feature_map_shape):
    heights = np.zeros(num_anchors)
    widths = np.zeros(num_anchors)

    if len(scales) > 1:
        widths[0] = heights[0] = (np.sqrt(scales[0] * scales[1]) *
                                  feature_map_shape[0])
    else:
        heights[0] = scales[0] * feature_map_shape[0] * 0.99
        widths[0] = scales[0] * feature_map_shape[1] * 0.99

    ratios = ratios[:num_anchors - 1]
    heights[1:] = scales[0] / np.sqrt(ratios) * feature_map_shape[0]
    widths[1:] = scales[0] * np.sqrt(ratios) * feature_map_shape[1]

    x_center = y_center = 0.5

    anchors = np.column_stack([
        x_center - widths / 2,
        y_center - heights / 2,
        x_center + widths / 2,
        y_center + heights / 2,
    ])

    return anchors
Generate the default anchors for one feature map, which we will later convolve to generate all the anchors of that feature map.
https://github.com/tryolabs/luminoth/blob/9109d8b98bcbb0ad4e8e0c928da0b20627fa774f/luminoth/models/ssd/utils.py#L33-L72
import numpy as np import tensorflow as tf def adjust_bboxes(bboxes, old_height, old_width, new_height, new_width): x_min = bboxes[:, 0] / old_width y_min = bboxes[:, 1] / old_height x_max = bboxes[:, 2] / old_width y_max = bboxes[:, 3] / old_height x_min = x_min * new_width y_min = y_min * new_height x_max = x_max * new_width y_max = y_max * new_height return np.stack([x_min, y_min, x_max, y_max], axis=1)
BSD 3-Clause New or Revised License
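A small sketch of generating a reference anchor set for a single feature map; the ratios, scales, and shape are illustrative values, not Luminoth defaults.

import numpy as np
from luminoth.models.ssd.utils import generate_anchors_reference

ratios = np.array([1.0, 2.0, 0.5])
scales = np.array([0.2, 0.34])
anchors = generate_anchors_reference(ratios, scales, num_anchors=4,
                                     feature_map_shape=(19, 19))
print(anchors.shape)   # (4, 4): one (x_min, y_min, x_max, y_max) row per anchor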
openstack/manila
manila/share/drivers/netapp/dataontap/cluster_mode/performance.py
PerformanceLibrary._init_counter_info
python
def _init_counter_info(self):
    self.system_object_name = None
    self.avg_processor_busy_base_counter_name = None

    try:
        if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
            self.system_object_name = 'system:constituent'
            self.avg_processor_busy_base_counter_name = (
                self._get_base_counter_name('system:constituent',
                                            'avg_processor_busy'))
        elif self.zapi_client.features.SYSTEM_METRICS:
            self.system_object_name = 'system'
            self.avg_processor_busy_base_counter_name = (
                self._get_base_counter_name('system', 'avg_processor_busy'))
    except netapp_api.NaApiError:
        if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
            self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
        else:
            self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
        LOG.exception('Could not get performance base counter '
                      'name. Performance-based scheduler '
                      'functions may not be available.')
Set a few counter names based on Data ONTAP version.
https://github.com/openstack/manila/blob/34d209484366cd921e052d37c5f9daef5e97af20/manila/share/drivers/netapp/dataontap/cluster_mode/performance.py#L41-L65
import copy from oslo_log import log as logging from manila import exception from manila.i18n import _ from manila.share.drivers.netapp.dataontap.client import api as netapp_api LOG = logging.getLogger(__name__) DEFAULT_UTILIZATION = 50 class PerformanceLibrary(object): def __init__(self, zapi_client): self.zapi_client = zapi_client self.performance_counters = {} self.pool_utilization = {} self._init_counter_info()
Apache License 2.0
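A small sketch of how the selected names are consumed, assuming perf_library is a PerformanceLibrary built with a real Data ONTAP client; no API is implied beyond the attributes the method above sets.

# _init_counter_info runs from __init__, so after construction the chosen
# performance object name and base counter are plain attributes.
print(perf_library.system_object_name)
print(perf_library.avg_processor_busy_base_counter_name)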
docusign/docusign-python-client
docusign_esign/models/chunked_upload_part.py
ChunkedUploadPart.to_dict
python
def to_dict(self):
    result = {}

    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    if issubclass(ChunkedUploadPart, dict):
        for key, value in self.items():
            result[key] = value

    return result
Returns the model properties as a dict
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/chunked_upload_part.py#L104-L129
import pprint import re import six from docusign_esign.client.configuration import Configuration class ChunkedUploadPart(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'sequence': 'str', 'size': 'str' } attribute_map = { 'sequence': 'sequence', 'size': 'size' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._sequence = None self._size = None self.discriminator = None setattr(self, "_{}".format('sequence'), kwargs.get('sequence', None)) setattr(self, "_{}".format('size'), kwargs.get('size', None)) @property def sequence(self): return self._sequence @sequence.setter def sequence(self, sequence): self._sequence = sequence @property def size(self): return self._size @size.setter def size(self, size): self._size = size
MIT License
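A short sketch of serializing a part back to a plain dict; the sequence and size values are illustrative.

from docusign_esign.models.chunked_upload_part import ChunkedUploadPart

part = ChunkedUploadPart(sequence="0", size="1048576")
print(part.to_dict())   # {'sequence': '0', 'size': '1048576'}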
hetznercloud/hcloud-python
hcloud/firewalls/client.py
FirewallsClient.get_all
python
def get_all(self, label_selector=None, name=None, sort=None):
    return super(FirewallsClient, self).get_all(
        label_selector=label_selector, name=name, sort=sort
    )
Get all Firewalls from this account

:param label_selector: str (optional)
       Can be used to filter Firewalls by labels. The response will only contain Firewalls matching the label selector values.
:param name: str (optional)
       Can be used to filter Firewalls by their name.
:param sort: List[str] (optional)
       Choices: id name created (You can add one of ":asc", ":desc" to modify sort order; ":asc" is the default.)
:return: List[:class:`BoundFirewall <hcloud.firewalls.client.BoundFirewall>`]
https://github.com/hetznercloud/hcloud-python/blob/fad870790a19621fd130fd28d564a8d6ba7a566c/hcloud/firewalls/client.py#L258-L272
from hcloud.actions.client import BoundAction from hcloud.core.client import BoundModelBase, ClientEntityBase, GetEntityByNameMixin from hcloud.core.domain import add_meta_to_result from hcloud.firewalls.domain import ( Firewall, CreateFirewallResponse, FirewallRule, FirewallResource, FirewallResourceLabelSelector, ) class BoundFirewall(BoundModelBase): model = Firewall def __init__(self, client, data, complete=True): rules = data.get("rules", []) if rules: rules = [ FirewallRule( direction=rule["direction"], source_ips=rule["source_ips"], destination_ips=rule["destination_ips"], protocol=rule["protocol"], port=rule["port"], description=rule["description"], ) for rule in rules ] data["rules"] = rules applied_to = data.get("applied_to", []) if applied_to: from hcloud.servers.client import BoundServer ats = [] for a in applied_to: if a["type"] == FirewallResource.TYPE_SERVER: ats.append( FirewallResource( type=a["type"], server=BoundServer( client._client.servers, a["server"], complete=False ), ) ) elif a["type"] == FirewallResource.TYPE_LABEL_SELECTOR: ats.append( FirewallResource( type=a["type"], label_selector=FirewallResourceLabelSelector( selector=a["label_selector"]["selector"] ), ) ) data["applied_to"] = ats super(BoundFirewall, self).__init__(client, data, complete) def get_actions_list(self, status=None, sort=None, page=None, per_page=None): return self._client.get_actions_list(self, status, sort, page, per_page) def get_actions(self, status=None, sort=None): return self._client.get_actions(self, status, sort) def update(self, name=None, labels=None): return self._client.update(self, labels, name) def delete(self): return self._client.delete(self) def set_rules(self, rules): return self._client.set_rules(self, rules) def apply_to_resources(self, resources): return self._client.apply_to_resources(self, resources) def remove_from_resources(self, resources): return self._client.remove_from_resources(self, resources) class FirewallsClient(ClientEntityBase, GetEntityByNameMixin): results_list_attribute_name = "firewalls" def get_actions_list( self, firewall, status=None, sort=None, page=None, per_page=None, ): params = {} if status is not None: params["status"] = status if sort is not None: params["sort"] = sort if page is not None: params["page"] = page if per_page is not None: params["per_page"] = per_page response = self._client.request( url="/firewalls/{firewall_id}/actions".format(firewall_id=firewall.id), method="GET", params=params, ) actions = [ BoundAction(self._client.actions, action_data) for action_data in response["actions"] ] return add_meta_to_result(actions, response, "actions") def get_actions( self, firewall, status=None, sort=None, ): return super(FirewallsClient, self).get_actions( firewall, status=status, sort=sort ) def get_by_id(self, id): response = self._client.request( url="/firewalls/{firewall_id}".format(firewall_id=id), method="GET" ) return BoundFirewall(self, response["firewall"]) def get_list( self, label_selector=None, page=None, per_page=None, name=None, sort=None, ): params = {} if label_selector is not None: params["label_selector"] = label_selector if page is not None: params["page"] = page if per_page is not None: params["per_page"] = per_page if name is not None: params["name"] = name if sort is not None: params["sort"] = sort response = self._client.request(url="/firewalls", method="GET", params=params) firewalls = [ BoundFirewall(self, firewall_data) for firewall_data in response["firewalls"] ] return self._add_meta_to_result(firewalls, response)
MIT License
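A minimal sketch of listing Firewalls, assuming a valid API token; the token and label selector values are illustrative.

from hcloud import Client

client = Client(token="project-token")   # hypothetical token
firewalls = client.firewalls.get_all(label_selector="env==prod", sort=["name:asc"])
for fw in firewalls:
    print(fw.id, fw.name)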
autonomousvision/neat
aim_mt_2d/data.py
scale_and_crop_seg
python
def scale_and_crop_seg(image, scale=1, crop=256):
    (width, height) = (int(image.width / scale), int(image.height / scale))
    if scale != 1:
        im_resized = image.resize((width, height), resample=Image.NEAREST)
    else:
        im_resized = image
    im_resized = np.asarray(im_resized)
    start_y = height // 2 - crop // 2
    start_x = width // 2 - crop // 2
    cropped_image = im_resized[start_y:start_y + crop, start_x:start_x + crop]
    if len(cropped_image.shape) == 2:
        cropped_image = cropped_image.reshape((crop, crop, 1))
    cropped_image = np.transpose(cropped_image, (2, 0, 1))
    return cropped_image
Scale and crop a seg image, returning a channels-first numpy array.
https://github.com/autonomousvision/neat/blob/686f4a0b5b5bf20c99f323e9542f5b68808df2de/aim_mt_2d/data.py#L337-L358
import os import json from PIL import Image import numpy as np import torch from torch.utils.data import Dataset class CARLA_waypoint(Dataset): def __init__(self, root, config, points_per_batch=1024): self.seq_len = config.seq_len self.pred_len = config.pred_len self.points_per_batch = points_per_batch self.ignore_sides = config.ignore_sides self.ignore_rear = config.ignore_rear self.input_resolution = config.input_resolution self.scale = config.scale self.converter = np.uint8(config.converter) self.front = [] self.left = [] self.right = [] self.rear = [] self.seg_front = [] self.seg_left = [] self.seg_right = [] self.seg_rear = [] self.depth_front = [] self.depth_left = [] self.depth_right = [] self.depth_rear = [] self.x = [] self.y = [] self.x_command = [] self.y_command = [] self.theta = [] self.steer = [] self.throttle = [] self.brake = [] self.command = [] self.velocity = [] for sub_root in root: preload_file = os.path.join(sub_root, 'aim_multitask_pl_'+str(self.seq_len)+'_'+str(self.pred_len)+'.npy') if not os.path.exists(preload_file): preload_front = [] preload_left = [] preload_right = [] preload_rear = [] preload_seg_front = [] preload_seg_left = [] preload_seg_right = [] preload_seg_rear = [] preload_depth_front = [] preload_depth_left = [] preload_depth_right = [] preload_depth_rear = [] preload_x = [] preload_y = [] preload_x_command = [] preload_y_command = [] preload_theta = [] preload_steer = [] preload_throttle = [] preload_brake = [] preload_command = [] preload_velocity = [] root_files = os.listdir(sub_root) routes = [folder for folder in root_files if not os.path.isfile(os.path.join(sub_root,folder))] for route in routes: route_dir = os.path.join(sub_root, route) print(route_dir) num_seq = (len(os.listdir(route_dir+"/rgb_front/"))-self.pred_len-2)//self.seq_len for seq in range(num_seq): fronts = [] lefts = [] rights = [] rears = [] seg_fronts = [] seg_lefts = [] seg_rights = [] seg_rears = [] depth_fronts = [] depth_lefts = [] depth_rights = [] depth_rears = [] xs = [] ys = [] thetas = [] for i in range(self.seq_len): filename = f"{str(seq*self.seq_len+1+i).zfill(4)}.png" fronts.append(route_dir+"/rgb_front/"+filename) lefts.append(route_dir+"/rgb_left/"+filename) rights.append(route_dir+"/rgb_right/"+filename) rears.append(route_dir+"/rgb_rear/"+filename) seg_fronts.append(route_dir+"/seg_front/"+filename) seg_lefts.append(route_dir+"/seg_left/"+filename) seg_rights.append(route_dir+"/seg_right/"+filename) seg_rears.append(route_dir+"/seg_rear/"+filename) depth_fronts.append(route_dir+"/depth_front/"+filename) depth_lefts.append(route_dir+"/depth_left/"+filename) depth_rights.append(route_dir+"/depth_right/"+filename) depth_rears.append(route_dir+"/depth_rear/"+filename) with open(route_dir + f"/measurements/{str(seq*self.seq_len+1+i).zfill(4)}.json", "r") as read_file: data = json.load(read_file) xs.append(data['x']) ys.append(data['y']) thetas.append(data['theta']) preload_x_command.append(data['x_command']) preload_y_command.append(data['y_command']) preload_steer.append(data['steer']) preload_throttle.append(data['throttle']) preload_brake.append(data['brake']) preload_command.append(data['command']) preload_velocity.append(data['speed']) for i in range(self.seq_len, self.seq_len + self.pred_len): with open(route_dir + f"/measurements/{str(seq*self.seq_len+1+i).zfill(4)}.json", "r") as read_file: data = json.load(read_file) xs.append(data['x']) ys.append(data['y']) if np.isnan(data['theta']): thetas.append(0) else: thetas.append(data['theta']) 
preload_front.append(fronts) preload_left.append(lefts) preload_right.append(rights) preload_rear.append(rears) preload_seg_front.append(seg_fronts) preload_seg_left.append(seg_lefts) preload_seg_right.append(seg_rights) preload_seg_rear.append(seg_rears) preload_depth_front.append(depth_fronts) preload_depth_left.append(depth_lefts) preload_depth_right.append(depth_rights) preload_depth_rear.append(depth_rears) preload_x.append(xs) preload_y.append(ys) preload_theta.append(thetas) preload_dict = {} preload_dict['front'] = preload_front preload_dict['left'] = preload_left preload_dict['right'] = preload_right preload_dict['rear'] = preload_rear preload_dict['seg_front'] = preload_seg_front preload_dict['seg_left'] = preload_seg_left preload_dict['seg_right'] = preload_seg_right preload_dict['seg_rear'] = preload_seg_rear preload_dict['depth_front'] = preload_depth_front preload_dict['depth_left'] = preload_depth_left preload_dict['depth_right'] = preload_depth_right preload_dict['depth_rear'] = preload_depth_rear preload_dict['x'] = preload_x preload_dict['y'] = preload_y preload_dict['x_command'] = preload_x_command preload_dict['y_command'] = preload_y_command preload_dict['theta'] = preload_theta preload_dict['steer'] = preload_steer preload_dict['throttle'] = preload_throttle preload_dict['brake'] = preload_brake preload_dict['command'] = preload_command preload_dict['velocity'] = preload_velocity np.save(preload_file, preload_dict) preload_dict = np.load(preload_file, allow_pickle=True) self.front += preload_dict.item()['front'] self.left += preload_dict.item()['left'] self.right += preload_dict.item()['right'] self.rear += preload_dict.item()['rear'] self.seg_front += preload_dict.item()['seg_front'] self.seg_left += preload_dict.item()['seg_left'] self.seg_right += preload_dict.item()['seg_right'] self.seg_rear += preload_dict.item()['seg_rear'] self.depth_front += preload_dict.item()['depth_front'] self.depth_left += preload_dict.item()['depth_left'] self.depth_right += preload_dict.item()['depth_right'] self.depth_rear += preload_dict.item()['depth_rear'] self.x += preload_dict.item()['x'] self.y += preload_dict.item()['y'] self.x_command += preload_dict.item()['x_command'] self.y_command += preload_dict.item()['y_command'] self.theta += preload_dict.item()['theta'] self.steer += preload_dict.item()['steer'] self.throttle += preload_dict.item()['throttle'] self.brake += preload_dict.item()['brake'] self.command += preload_dict.item()['command'] self.velocity += preload_dict.item()['velocity'] print("Preloading " + str(len(preload_dict.item()['front'])) + " sequences from " + preload_file) def __len__(self): return len(self.front) def __getitem__(self, index): data = dict() data['fronts'] = [] data['lefts'] = [] data['rights'] = [] data['rears'] = [] data['seg_fronts'] = [] data['seg_lefts'] = [] data['seg_rights'] = [] data['seg_rears'] = [] data['depth_fronts'] = [] data['depth_lefts'] = [] data['depth_rights'] = [] data['depth_rears'] = [] seq_fronts = self.front[index] seq_lefts = self.left[index] seq_rights = self.right[index] seq_rears = self.rear[index] seq_seg_fronts = self.seg_front[index] seq_seg_lefts = self.seg_left[index] seq_seg_rights = self.seg_right[index] seq_seg_rears = self.seg_rear[index] seq_depth_fronts = self.depth_front[index] seq_depth_lefts = self.depth_left[index] seq_depth_rights = self.depth_right[index] seq_depth_rears = self.depth_rear[index] seq_x = self.x[index] seq_y = self.y[index] seq_theta = self.theta[index] for i in range(self.seq_len): 
data['fronts'].append(torch.from_numpy(np.array( scale_and_crop_image(Image.open(seq_fronts[i]), scale=self.scale, crop=self.input_resolution)))) if not self.ignore_sides: data['lefts'].append(torch.from_numpy(np.array( scale_and_crop_image(Image.open(seq_lefts[i]), scale=self.scale, crop=self.input_resolution)))) data['rights'].append(torch.from_numpy(np.array( scale_and_crop_image(Image.open(seq_rights[i]), scale=self.scale, crop=self.input_resolution)))) if not self.ignore_rear: data['rears'].append(torch.from_numpy(np.array( scale_and_crop_image(Image.open(seq_rears[i]), scale=self.scale, crop=self.input_resolution)))) data['seg_fronts'].append(torch.from_numpy(self.converter[scale_and_crop_seg(Image.open(seq_seg_fronts[i]), scale=self.scale, crop=self.input_resolution)])) if not self.ignore_sides: data['seg_lefts'].append(torch.from_numpy(self.converter[scale_and_crop_seg(Image.open(seq_seg_lefts[i]), scale=self.scale, crop=self.input_resolution)])) data['seg_rights'].append(torch.from_numpy(self.converter[scale_and_crop_seg(Image.open(seq_seg_rights[i]), scale=self.scale, crop=self.input_resolution)])) if not self.ignore_rear: data['seg_rears'].append(torch.from_numpy(self.converter[scale_and_crop_seg(Image.open(seq_seg_rears[i]), scale=self.scale, crop=self.input_resolution)])) data['depth_fronts'].append(torch.from_numpy(get_depth(scale_and_crop_image(Image.open(seq_depth_fronts[i]), scale=self.scale, crop=self.input_resolution)))) if not self.ignore_sides: data['depth_lefts'].append(torch.from_numpy(get_depth(scale_and_crop_image(Image.open(seq_depth_lefts[i]), scale=self.scale, crop=self.input_resolution)))) data['depth_rights'].append(torch.from_numpy(get_depth(scale_and_crop_image(Image.open(seq_depth_rights[i]), scale=self.scale, crop=self.input_resolution)))) if not self.ignore_rear: data['depth_rears'].append(torch.from_numpy(get_depth(scale_and_crop_image(Image.open(seq_depth_rears[i]), scale=self.scale, crop=self.input_resolution)))) if np.isnan(seq_theta[i]): seq_theta[i] = 0. ego_x = seq_x[i] ego_y = seq_y[i] ego_theta = seq_theta[i] waypoints = [] for i in range(self.seq_len + self.pred_len): local_waypoint = transform_2d_points(np.zeros((1,3)), np.pi/2-seq_theta[i], -seq_x[i], -seq_y[i], np.pi/2-ego_theta, -ego_x, -ego_y) waypoints.append(tuple(local_waypoint[0,:2])) data['waypoints'] = waypoints R = np.array([ [np.cos(np.pi/2+ego_theta), -np.sin(np.pi/2+ego_theta)], [np.sin(np.pi/2+ego_theta), np.cos(np.pi/2+ego_theta)] ]) local_command_point = np.array([self.x_command[index]-ego_x, self.y_command[index]-ego_y]) local_command_point = R.T.dot(local_command_point) data['target_point'] = tuple(local_command_point) data['steer'] = self.steer[index] data['throttle'] = self.throttle[index] data['brake'] = self.brake[index] data['command'] = self.command[index] data['velocity'] = self.velocity[index] return data def get_depth(data): data = np.transpose(data, (1,2,0)) data = data.astype(np.float32) normalized = np.dot(data, [65536.0, 256.0, 1.0]) normalized /= (256 * 256 * 256 - 1) return normalized
MIT License
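A small sketch of preparing one segmentation frame, assuming an illustrative file path; the output is channels-first, matching what the dataset class feeds to torch.from_numpy.

from PIL import Image
from aim_mt_2d.data import scale_and_crop_seg

seg = Image.open("seg_front_0001.png")      # hypothetical path
seg_array = scale_and_crop_seg(seg, scale=1, crop=256)
print(seg_array.shape)   # (C, 256, 256); (1, 256, 256) for single-channel masks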
sap/ewm-cloud-robotics
python-modules/robcoewmordermanager/robcoewmordermanager/orderreservationcontroller.py
OrderReservationController.open_reservations
python
def open_reservations(self) -> DefaultDict[str, Dict]:
    with self._open_reservations_lock:
        return self._open_reservations
Return the open_reservations defaultdict.
https://github.com/sap/ewm-cloud-robotics/blob/5a7eedc3d70743a882cf6a31497b3689a7991009/python-modules/robcoewmordermanager/robcoewmordermanager/orderreservationcontroller.py#L113-L116
import logging from collections import defaultdict from threading import RLock from typing import DefaultDict, Dict, List from cattr import structure from robcoewmtypes.helper import get_sample_cr from robcoewmtypes.auction import OrderReservationStatus from robcoewmtypes.warehouseorder import WarehouseOrderIdent from k8scrhandler.k8scrhandler import K8sCRHandler _LOGGER = logging.getLogger(__name__) class OrderReservationController(K8sCRHandler): def __init__(self) -> None: template_cr = get_sample_cr('orderreservation') super().__init__( 'ewm.sap.com', 'v1alpha1', 'orderreservations', 'default', template_cr, {} ) self._open_reservations: DefaultDict[str, Dict] = defaultdict(dict) self._open_reservations_lock = RLock() self.register_callback( 'GetOpenReservations', ['ADDED', 'MODIFIED', 'REPROCESS'], self._get_open_reservations_cb) self.register_callback( 'DeleteOpenReservations', ['DELETED'], self._delete_open_reservations_cb) def get_reserved_warehouseorders(self) -> List[WarehouseOrderIdent]: reserved_whos: List[WarehouseOrderIdent] = [] cr_resp = self.list_all_cr() for custom_res in cr_resp: if not custom_res.get('status'): continue status = structure(custom_res.get('status'), OrderReservationStatus) if status.status not in OrderReservationStatus.IN_PROCESS_STATUS: continue for who in status.warehouseorders: who_ident = WarehouseOrderIdent(lgnum=who.lgnum, who=who.who) reserved_whos.append(who_ident) return reserved_whos def _delete_open_reservations_cb(self, name: str, custom_res: Dict) -> None: with self._open_reservations_lock: for res in self.open_reservations.values(): if name in res: res.pop(name) def _get_open_reservations_cb(self, name: str, custom_res: Dict) -> None: with self._open_reservations_lock: for res in self.open_reservations.values(): if name in res: res.pop(name) res_open = False if not custom_res.get('status'): res_open = True else: status = structure(custom_res.get('status'), OrderReservationStatus) if status.status in OrderReservationStatus.IN_PROCESS_STATUS: res_open = True if res_open: owner_refs = custom_res['metadata'].get('ownerReferences') if owner_refs is not None: for owner_ref in owner_refs: if owner_ref['kind'] == 'Auctioneer': self._open_reservations[owner_ref['name']][name] = True @property
Apache License 2.0
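A small sketch of consuming the property, assuming reservation_controller is an OrderReservationController whose watch callbacks are already running; the loop variable names are illustrative.

# open_reservations maps each Auctioneer name to the reservation CRs that
# are still in process; access to the dict is guarded by an RLock.
open_res = reservation_controller.open_reservations
for auctioneer, reservations in open_res.items():
    print(auctioneer, list(reservations))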
jrief/django-websocket-redis
ws4redis/websocket.py
Header.encode_header
python
def encode_header(cls, fin, opcode, mask, length, flags):
    first_byte = opcode
    second_byte = 0
    if six.PY2:
        extra = ''
    else:
        extra = b''

    if fin:
        first_byte |= cls.FIN_MASK
    if flags & cls.RSV0_MASK:
        first_byte |= cls.RSV0_MASK
    if flags & cls.RSV1_MASK:
        first_byte |= cls.RSV1_MASK
    if flags & cls.RSV2_MASK:
        first_byte |= cls.RSV2_MASK

    if length < 126:
        second_byte += length
    elif length <= 0xffff:
        second_byte += 126
        extra = struct.pack('!H', length)
    elif length <= 0xffffffffffffffff:
        second_byte += 127
        extra = struct.pack('!Q', length)
    else:
        raise FrameTooLargeException

    if mask:
        second_byte |= cls.MASK_MASK
        extra += mask

    if six.PY3:
        return bytes([first_byte, second_byte]) + extra
    return chr(first_byte) + chr(second_byte) + extra
Encodes a WebSocket header.

:param fin: Whether this is the final frame for this opcode.
:param opcode: The opcode of the payload, see `OPCODE_*`
:param mask: Whether the payload is masked.
:param length: The length of the frame.
:param flags: The RSV* flags.
:return: A bytestring encoded header.
https://github.com/jrief/django-websocket-redis/blob/318dde37d0d8371c898c2341336f518192fd06db/ws4redis/websocket.py#L384-L425
import logging import six import struct from socket import error as socket_error from ws4redis.utf8validator import Utf8Validator from ws4redis.exceptions import WebSocketError, FrameTooLargeException logger = logging.getLogger('django.request') if six.PY3: xrange = range class WebSocket(object): __slots__ = ('_closed', 'stream', 'utf8validator', 'utf8validate_last') OPCODE_CONTINUATION = 0x00 OPCODE_TEXT = 0x01 OPCODE_BINARY = 0x02 OPCODE_CLOSE = 0x08 OPCODE_PING = 0x09 OPCODE_PONG = 0x0a def __init__(self, wsgi_input): self._closed = False self.stream = Stream(wsgi_input) self.utf8validator = Utf8Validator() self.utf8validate_last = None def __del__(self): try: self.close() except: pass def _decode_bytes(self, bytestring): if not bytestring: return u'' try: return bytestring.decode('utf-8') except UnicodeDecodeError: self.close(1007) raise def _encode_bytes(self, text): if isinstance(text, six.binary_type): return text if not isinstance(text, six.text_type): text = six.text_type(text or '') return text.encode('utf-8') def _is_valid_close_code(self, code): if code < 1000: return False if 1004 <= code <= 1006: return False if 1012 <= code <= 1016: return False if code == 1100: return False if 2000 <= code <= 2999: return False return True def get_file_descriptor(self): return self.stream.fileno @property def closed(self): return self._closed def handle_close(self, header, payload): if not payload: self.close(1000, None) return if len(payload) < 2: raise WebSocketError('Invalid close frame: {0} {1}'.format(header, payload)) rv = payload[:2] if six.PY2: code = struct.unpack('!H', str(rv))[0] else: code = struct.unpack('!H', bytes(rv))[0] payload = payload[2:] if payload: validator = Utf8Validator() val = validator.validate(payload) if not val[0]: raise UnicodeError if not self._is_valid_close_code(code): raise WebSocketError('Invalid close code {0}'.format(code)) self.close(code, payload) def handle_ping(self, header, payload): self.send_frame(payload, self.OPCODE_PONG) def handle_pong(self, header, payload): pass def read_frame(self): header = Header.decode_header(self.stream) if header.flags: raise WebSocketError if not header.length: return header, '' try: payload = self.stream.read(header.length) except socket_error: payload = '' except Exception: logger.debug("{}: {}".format(type(e), six.text_type(e))) payload = '' if len(payload) != header.length: raise WebSocketError('Unexpected EOF reading frame payload') if header.mask: payload = header.unmask_payload(payload) return header, payload def validate_utf8(self, payload): self.utf8validate_last = self.utf8validator.validate(payload) if not self.utf8validate_last[0]: raise UnicodeError("Encountered invalid UTF-8 while processing " "text message at payload octet index " "{0:d}".format(self.utf8validate_last[3])) def read_message(self): opcode = None message = None while True: header, payload = self.read_frame() f_opcode = header.opcode if f_opcode in (self.OPCODE_TEXT, self.OPCODE_BINARY): if opcode: raise WebSocketError("The opcode in non-fin frame is expected to be zero, got {0!r}".format(f_opcode)) self.utf8validator.reset() self.utf8validate_last = (True, True, 0, 0) opcode = f_opcode elif f_opcode == self.OPCODE_CONTINUATION: if not opcode: raise WebSocketError("Unexpected frame with opcode=0") elif f_opcode == self.OPCODE_PING: self.handle_ping(header, payload) continue elif f_opcode == self.OPCODE_PONG: self.handle_pong(header, payload) continue elif f_opcode == self.OPCODE_CLOSE: self.handle_close(header, payload) return else: raise 
WebSocketError("Unexpected opcode={0!r}".format(f_opcode)) if opcode == self.OPCODE_TEXT: self.validate_utf8(payload) if six.PY3: payload = payload.decode() if message is None: message = six.text_type() if opcode == self.OPCODE_TEXT else six.binary_type() message += payload if header.fin: break if opcode == self.OPCODE_TEXT: if six.PY2: self.validate_utf8(message) else: self.validate_utf8(message.encode()) return message else: return bytearray(message) def receive(self): if self._closed: raise WebSocketError("Connection is already closed") try: return self.read_message() except UnicodeError as e: logger.info('websocket.receive: UnicodeError {}'.format(e)) self.close(1007) except WebSocketError as e: logger.info('websocket.receive: WebSocketError {}'.format(e)) self.close(1002) except Exception as e: logger.info('websocket.receive: Unknown error {}'.format(e)) raise e def flush(self): pass def send_frame(self, message, opcode): if self._closed: raise WebSocketError("Connection is already closed") if opcode == self.OPCODE_TEXT: message = self._encode_bytes(message) elif opcode == self.OPCODE_BINARY: message = six.binary_type(message) header = Header.encode_header(True, opcode, '', len(message), 0) try: self.stream.write(header + message) except socket_error: raise WebSocketError("Socket is dead") def send(self, message, binary=False): if binary is None: binary = not isinstance(message, six.string_types) opcode = self.OPCODE_BINARY if binary else self.OPCODE_TEXT try: self.send_frame(message, opcode) except WebSocketError: raise WebSocketError("Socket is dead") def close(self, code=1000, message=''): try: message = self._encode_bytes(message) self.send_frame( struct.pack('!H%ds' % len(message), code, message), opcode=self.OPCODE_CLOSE) except WebSocketError: logger.debug("Failed to write closing frame -> closing socket") finally: logger.debug("Closed WebSocket") self._closed = True self.stream = None class Stream(object): __slots__ = ('read', 'write', 'fileno') def __init__(self, wsgi_input): if six.PY2: self.read = wsgi_input._sock.recv self.write = wsgi_input._sock.sendall else: self.read = wsgi_input.raw._sock.recv self.write = wsgi_input.raw._sock.sendall self.fileno = wsgi_input.fileno() class Header(object): __slots__ = ('fin', 'mask', 'opcode', 'flags', 'length') FIN_MASK = 0x80 OPCODE_MASK = 0x0f MASK_MASK = 0x80 LENGTH_MASK = 0x7f RSV0_MASK = 0x40 RSV1_MASK = 0x20 RSV2_MASK = 0x10 HEADER_FLAG_MASK = RSV0_MASK | RSV1_MASK | RSV2_MASK def __init__(self, fin=0, opcode=0, flags=0, length=0): self.mask = '' self.fin = fin self.opcode = opcode self.flags = flags self.length = length def mask_payload(self, payload): payload = bytearray(payload) mask = bytearray(self.mask) for i in xrange(self.length): payload[i] ^= mask[i % 4] if six.PY3: return bytes(payload) return str(payload) unmask_payload = mask_payload def __repr__(self): return ("<Header fin={0} opcode={1} length={2} flags={3} at " "0x{4:x}>").format(self.fin, self.opcode, self.length, self.flags, id(self)) @classmethod def decode_header(cls, stream): read = stream.read data = read(2) if len(data) != 2: raise WebSocketError("Unexpected EOF while decoding header") first_byte, second_byte = struct.unpack('!BB', data) header = cls( fin=first_byte & cls.FIN_MASK == cls.FIN_MASK, opcode=first_byte & cls.OPCODE_MASK, flags=first_byte & cls.HEADER_FLAG_MASK, length=second_byte & cls.LENGTH_MASK) has_mask = second_byte & cls.MASK_MASK == cls.MASK_MASK if header.opcode > 0x07: if not header.fin: raise WebSocketError('Received fragmented 
control frame: {0!r}'.format(data)) if header.length > 125: raise FrameTooLargeException('Control frame cannot be larger than 125 bytes: {0!r}'.format(data)) if header.length == 126: data = read(2) if len(data) != 2: raise WebSocketError('Unexpected EOF while decoding header') header.length = struct.unpack('!H', data)[0] elif header.length == 127: data = read(8) if len(data) != 8: raise WebSocketError('Unexpected EOF while decoding header') header.length = struct.unpack('!Q', data)[0] if has_mask: mask = read(4) if len(mask) != 4: raise WebSocketError('Unexpected EOF while decoding header') header.mask = mask return header @classmethod
MIT License
modoboa/modoboa
modoboa/admin/app_settings.py
AdminParametersForm.clean_dkim_keys_storage_dir
python
def clean_dkim_keys_storage_dir(self):
    storage_dir = self.cleaned_data.get("dkim_keys_storage_dir", "")
    if storage_dir:
        if not os.path.isdir(storage_dir):
            raise forms.ValidationError(
                ugettext_lazy("Directory not found.")
            )
        code, output = exec_cmd("which openssl")
        if code:
            raise forms.ValidationError(
                ugettext_lazy(
                    "openssl not found, please make sure it is installed.")
            )
    return storage_dir
Check that the directory exists and that the openssl binary is available.
https://github.com/modoboa/modoboa/blob/ac93e1f9d7dfe434a428409b9a6478f97807f153/modoboa/admin/app_settings.py#L247-L261
import collections import os from django import forms from django.conf import settings from django.utils.encoding import force_text from django.utils.translation import ugettext as _, ugettext_lazy from django.forms.fields import GenericIPAddressField from modoboa.lib.form_utils import SeparatorField, YesNoField from modoboa.lib.sysutils import exec_cmd from modoboa.parameters import forms as param_forms from . import constants class AdminParametersForm(param_forms.AdminParametersForm): app = "admin" dom_sep = SeparatorField(label=ugettext_lazy("Domains")) enable_mx_checks = YesNoField( label=ugettext_lazy("Enable MX checks"), initial=True, help_text=ugettext_lazy( "Check that every domain has a valid MX record" ) ) valid_mxs = forms.CharField( label=ugettext_lazy("Valid MXs"), initial="", help_text=ugettext_lazy( "A list of IP or network address every MX record should match." " A warning will be sent if a record does not respect it." ), widget=forms.Textarea, required=False ) domains_must_have_authorized_mx = YesNoField( label=ugettext_lazy("New domains must use authorized MX(s)"), initial=False, help_text=ugettext_lazy( "Prevent the creation of a new domain if its MX record does " "not use one of the defined addresses." ) ) enable_spf_checks = YesNoField( label=ugettext_lazy("Enable SPF checks"), initial=True, help_text=ugettext_lazy( "Check if every domain has a valid SPF record" ) ) enable_dkim_checks = YesNoField( label=ugettext_lazy("Enable DKIM checks"), initial=True, help_text=ugettext_lazy( "Check if every domain with DKIM signin enabled has a valid DNS " "record" ) ) enable_dmarc_checks = YesNoField( label=ugettext_lazy("Enable DMARC checks"), initial=True, help_text=ugettext_lazy( "Check if every domain has a valid DMARC record" ) ) enable_autoconfig_checks = YesNoField( label=ugettext_lazy("Enable autoconfig checks"), initial=True, help_text=ugettext_lazy( "Check if every domain has a valid records for autoconfiguration" ) ) enable_dnsbl_checks = YesNoField( label=ugettext_lazy("Enable DNSBL checks"), initial=True, help_text=ugettext_lazy( "Check every domain against major DNSBL providers" ) ) custom_dns_server = GenericIPAddressField( label=ugettext_lazy("Custom DNS server"), required=False, help_text=ugettext_lazy( "Use a custom DNS server instead of local server configuration" ) ) dkim_keys_storage_dir = forms.CharField( label=ugettext_lazy("DKIM keys storage directory"), initial="", help_text=ugettext_lazy( "Absolute path of the directory where DKIM private keys will " "be stored. Make sure this directory belongs to root user " "and is not readable by the outside world." ), required=False ) dkim_default_key_length = forms.ChoiceField( label=ugettext_lazy("Default DKIM key length"), initial=2048, choices=constants.DKIM_KEY_LENGTHS, help_text=ugettext_lazy( "Default length in bits for newly generated DKIM keys." ) ) default_domain_quota = forms.IntegerField( label=ugettext_lazy("Default domain quota"), initial=0, help_text=ugettext_lazy( "Default quota (in MB) applied to freshly created domains with no " "value specified. A value of 0 means no quota." ) ) default_domain_message_limit = forms.IntegerField( label=ugettext_lazy("Default domain sending limit"), required=False, help_text=ugettext_lazy( "Number of messages freshly created domains will be " "allowed to send per day. Leave empty for no limit." 
) ) mbsep = SeparatorField(label=ugettext_lazy("Mailboxes")) handle_mailboxes = YesNoField( label=ugettext_lazy("Handle mailboxes on filesystem"), initial=False, help_text=ugettext_lazy( "Rename or remove mailboxes on the filesystem when they get" " renamed or removed within Modoboa" ) ) default_mailbox_quota = forms.IntegerField( label=ugettext_lazy("Default mailbox quota"), initial=0, help_text=ugettext_lazy( "Default mailbox quota (in MB) applied to freshly created " "mailboxes with no value specified. A value of 0 means no quota." ) ) default_mailbox_message_limit = forms.IntegerField( label=ugettext_lazy("Default mailbox sending limit"), required=False, help_text=ugettext_lazy( "Number of messages freshly created mailboxes will be " "allowed to send per day. Leave empty for no limit." ) ) auto_account_removal = YesNoField( label=ugettext_lazy("Automatic account removal"), initial=False, help_text=ugettext_lazy( "When a mailbox is removed, also remove the associated account") ) auto_create_domain_and_mailbox = YesNoField( label=ugettext_lazy("Automatic domain/mailbox creation"), initial=True, help_text=ugettext_lazy( "Create a domain and a mailbox when an account is automatically " "created." ) ) create_alias_on_mbox_rename = YesNoField( label=ugettext_lazy("Create an alias when a mailbox is renamed"), initial=False, help_text=ugettext_lazy( "Create an alias using the old address when a mailbox is renamed." ) ) visibility_rules = { "valid_mxs": "enable_mx_checks=True", "domains_must_have_authorized_mx": "enable_mx_checks=True" } def __init__(self, *args, **kwargs): super(AdminParametersForm, self).__init__(*args, **kwargs) self.field_widths = { "default_domain_quota": 2, "default_mailbox_quota": 2 } hide_fields = False dpath = None code, output = exec_cmd("which dovecot") if not code: dpath = force_text(output).strip() else: known_paths = getattr( settings, "DOVECOT_LOOKUP_PATH", ("/usr/sbin/dovecot", "/usr/local/sbin/dovecot") ) for fpath in known_paths: if os.path.isfile(fpath) and os.access(fpath, os.X_OK): dpath = fpath if dpath: try: code, version = exec_cmd("%s --version" % dpath) except OSError: hide_fields = True else: version = force_text(version) if code or not version.strip().startswith("2"): hide_fields = True else: hide_fields = True if hide_fields: del self.fields["handle_mailboxes"] def clean_default_domain_quota(self): if self.cleaned_data["default_domain_quota"] < 0: raise forms.ValidationError( ugettext_lazy("Must be a positive integer") ) return self.cleaned_data["default_domain_quota"] def clean_default_mailbox_quota(self): if self.cleaned_data["default_mailbox_quota"] < 0: raise forms.ValidationError( ugettext_lazy("Must be a positive integer") ) return self.cleaned_data["default_mailbox_quota"]
ISC License
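The form hook above only runs inside Django's validation cycle; lifted out of the form for illustration, the same two checks reduce to a directory test plus an openssl lookup. The helper below is a sketch and not part of modoboa itself.

import os
import subprocess

def check_dkim_storage_dir(storage_dir):
    # Mirrors clean_dkim_keys_storage_dir: when a directory is configured it must
    # exist, and openssl must be resolvable on PATH.
    if storage_dir:
        if not os.path.isdir(storage_dir):
            raise ValueError("Directory not found.")
        if subprocess.call(["which", "openssl"], stdout=subprocess.DEVNULL) != 0:
            raise ValueError("openssl not found, please make sure it is installed.")
    return storage_dir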
openstack-archive/dragonflow
dragonflow/cli/utils.py
print_dict
python
def print_dict(dct, dict_property="Property", wrap=0):
    pt = prettytable.PrettyTable([dict_property, 'Value'])
    pt.align = 'l'
    for k, v in dct.items():
        if isinstance(v, dict):
            v = six.text_type(keys_and_vals_to_strs(v))
        if wrap > 0:
            v = textwrap.fill(six.text_type(v), wrap)
        if v and isinstance(v, six.string_types) and r'\n' in v:
            lines = v.strip().split(r'\n')
            col1 = k
            for line in lines:
                pt.add_row([col1, line])
                col1 = ''
        elif isinstance(v, list):
            val = str([str(i) for i in v])
            if val is None:
                val = '-'
            pt.add_row([k, val])
        else:
            if v is None:
                v = '-'
            pt.add_row([k, v])
    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string()).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string()))
Print a `dict` as a table of two columns.

:param dct: `dict` to print
:param dict_property: name of the first column
:param wrap: wrapping for the second column
https://github.com/openstack-archive/dragonflow/blob/4dc36ed6490e2ed53b47dece883cdbd78ea96033/dragonflow/cli/utils.py#L130-L166
from oslo_utils import encodeutils import prettytable import six import textwrap from dragonflow._i18n import _ from dragonflow.common import exceptions def get_list_table_columns_and_formatters(fields, objs, exclude_fields=(), filters=None): if objs and isinstance(objs, list): obj = objs[0] else: obj = None fields = None columns = [] formatters = {} if fields: non_existent_fields = [] exclude_fields = set(exclude_fields) for field in fields: if field not in obj: non_existent_fields.append(field) continue if field in exclude_fields: continue field_title, formatter = make_field_formatter(field, filters) columns.append(field_title) formatters[field_title] = formatter exclude_fields.add(field) if non_existent_fields: raise exceptions.CommandError( non_existent_fields=non_existent_fields) return columns, formatters def keys_and_vals_to_strs(dictionary): def to_str(k_or_v): if isinstance(k_or_v, dict): return keys_and_vals_to_strs(k_or_v) elif isinstance(k_or_v, six.text_type): return str(k_or_v) else: return k_or_v return dict((to_str(k), to_str(v)) for k, v in dictionary.items()) def _format_field_name(attr): parts = attr.rsplit(':', 1) name = parts[-1].replace('_', ' ') if name.isupper() or name.islower(): name = name.title() parts[-1] = name return ': '.join(parts) def make_field_formatter(attr, filters=None): filter_ = None if filters: filter_ = filters.get(attr) def get_field(obj): field = getattr(obj, attr, '') if field and filter_: field = filter_(field) return field name = _format_field_name(attr) formatter = get_field return name, formatter
Apache License 2.0
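A short usage sketch, assuming dragonflow is installed; the sample record below is made up:

from dragonflow.cli.utils import print_dict

# Values may be strings, lists or nested dicts; long strings are wrapped at `wrap` chars.
print_dict(
    {"id": "lport-1", "topic": "tenant-a", "ips": ["10.0.0.3", "10.0.0.4"]},
    dict_property="Field",
    wrap=72,
)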
vida-nyu/reprozip
reprozip/reprozip/common.py
record_usage
python
def record_usage(**kwargs):
    if _usage_report is not None:
        _usage_report.note(kwargs)
Records some info in the current usage report.
https://github.com/vida-nyu/reprozip/blob/6bc445000c3d54875a2d96ac6ac89b47f7887bd0/reprozip/reprozip/common.py#L712-L716
from __future__ import division, print_function, unicode_literals import atexit import contextlib import copy from datetime import datetime from distutils.version import LooseVersion import functools import gzip import logging import logging.handlers import os from rpaths import PosixPath, Path import sys import tarfile import usagestats import yaml from .utils import iteritems, itervalues, unicode_, stderr, UniqueNames, escape, optional_return_type, isodatetime, hsize, join_root, copyfile logger = logging.getLogger(__name__.split('.', 1)[0]) FILE_READ = 0x01 FILE_WRITE = 0x02 FILE_WDIR = 0x04 FILE_STAT = 0x08 FILE_LINK = 0x10 class File(object): comment = None def __init__(self, path, size=None): self.path = path self.size = size def __eq__(self, other): return (isinstance(other, File) and self.path == other.path) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.path) class Package(object): def __init__(self, name, version, files=None, packfiles=True, size=None): self.name = name self.version = version self.files = list(files) if files is not None else [] self.packfiles = packfiles self.size = size def __eq__(self, other): return (isinstance(other, Package) and self.name == other.name and self.version == other.version) def __ne__(self, other): return not self.__eq__(other) def add_file(self, file_): self.files.append(file_) def __unicode__(self): return '%s (%s)' % (self.name, self.version) __str__ = __unicode__ class RPZPack(object): def __init__(self, pack): self.pack = Path(pack) self.tar = tarfile.open(str(self.pack), 'r:*') f = self.tar.extractfile('METADATA/version') version = f.read() f.close() if version.startswith(b'REPROZIP VERSION '): try: version = int(version[17:].rstrip()) except ValueError: version = None if version in (1, 2): self.version = version self.data_prefix = PosixPath(b'DATA') else: raise ValueError( "Unknown format version %r (maybe you should upgrade " "reprounzip? 
I only know versions 1 and 2" % version) else: raise ValueError("File doesn't appear to be a RPZ pack") if self.version == 1: self.data = self.tar elif version == 2: self.data = tarfile.open( fileobj=self.tar.extractfile('DATA.tar.gz'), mode='r:*') else: assert False def remove_data_prefix(self, path): if not isinstance(path, PosixPath): path = PosixPath(path) components = path.components[1:] if not components: return path.__class__('') return path.__class__(*components) def open_config(self): return self.tar.extractfile('METADATA/config.yml') def extract_config(self, target): self._extract_file(self.tar.getmember('METADATA/config.yml'), target) def _extract_file(self, member, target): member = copy.copy(member) member.name = str(target.components[-1]) self.tar.extract(member, path=str(Path.cwd() / target.parent)) target.chmod(0o644) assert target.is_file() def _extract_file_gz(self, member, target): f_in = self.tar.extractfile(member) f_in_gz = gzip.open(f_in) f_out = target.open('wb') try: chunk = f_in_gz.read(4096) while len(chunk) == 4096: f_out.write(chunk) chunk = f_in_gz.read(4096) if chunk: f_out.write(chunk) finally: f_out.close() f_in_gz.close() f_in.close() @contextlib.contextmanager def with_config(self): fd, tmp = Path.tempfile(prefix='reprounzip_') os.close(fd) self.extract_config(tmp) yield tmp tmp.remove() def extract_trace(self, target): target = Path(target) if self.version == 1: member = self.tar.getmember('METADATA/trace.sqlite3') self._extract_file(member, target) elif self.version == 2: try: member = self.tar.getmember('METADATA/trace.sqlite3.gz') except KeyError: pass else: self._extract_file_gz(member, target) return member = self.tar.getmember('METADATA/trace.sqlite3') self._extract_file(member, target) else: assert False @contextlib.contextmanager def with_trace(self): fd, tmp = Path.tempfile(prefix='reprounzip_') os.close(fd) self.extract_trace(tmp) yield tmp tmp.remove() def list_data(self): return [copy.copy(m) for m in self.data.getmembers() if m.name.startswith('DATA/')] def data_filenames(self): return set(PosixPath(m.name[4:]) for m in self.data.getmembers() if m.name.startswith('DATA/')) def get_data(self, path): path = PosixPath(path) path = join_root(PosixPath(b'DATA'), path) return copy.copy(self.data.getmember(path)) def extract_data(self, root, members): self.data.extractall(str(root), members) def copy_data_tar(self, target): if self.version == 1: self.pack.copyfile(target) elif self.version == 2: with target.open('wb') as fp: data = self.tar.extractfile('DATA.tar.gz') copyfile(data, fp) data.close() def close(self): if self.data is not self.tar: self.data.close() self.tar.close() self.data = self.tar = None class InvalidConfig(ValueError): def read_files(files, File=File): if files is None: return [] return [File(PosixPath(f)) for f in files] def read_packages(packages, File=File, Package=Package): if packages is None: return [] new_pkgs = [] for pkg in packages: pkg['files'] = read_files(pkg['files'], File) new_pkgs.append(Package(**pkg)) return new_pkgs Config = optional_return_type(['runs', 'packages', 'other_files'], ['inputs_outputs', 'additional_patterns', 'format_version']) @functools.total_ordering class InputOutputFile(object): def __init__(self, path, read_runs, write_runs): self.path = path self.read_runs = read_runs self.write_runs = write_runs def __eq__(self, other): return ((self.path, self.read_runs, self.write_runs) == (other.path, other.read_runs, other.write_runs)) def __lt__(self, other): return self.path < other.path def 
__repr__(self): return "<InputOutputFile(path=%r, read_runs=%r, write_runs=%r)>" % ( self.path, self.read_runs, self.write_runs) def load_iofiles(config, runs): files_list = config.get('inputs_outputs') or [] if 'inputs_outputs' not in config: for i, run in enumerate(runs): for rkey, wkey in (('input_files', 'read_by_runs'), ('output_files', 'written_by_runs')): for k, p in iteritems(run.pop(rkey, {})): files_list.append({'name': k, 'path': p, wkey: [i]}) files = {} paths = {} required_keys = set(['name', 'path']) optional_keys = set(['read_by_runs', 'written_by_runs']) uniquenames = UniqueNames() for i, f in enumerate(files_list): keys = set(f) if (not keys.issubset(required_keys | optional_keys) or not keys.issuperset(required_keys)): raise InvalidConfig("File #%d has invalid keys") name = f['name'] path = PosixPath(f['path']) readers = sorted(f.get('read_by_runs', [])) writers = sorted(f.get('written_by_runs', [])) if ( not isinstance(readers, (tuple, list)) or not all(isinstance(e, int) for e in readers) ): raise InvalidConfig("read_by_runs should be a list of integers") if ( not isinstance(writers, (tuple, list)) or not all(isinstance(e, int) for e in writers) ): raise InvalidConfig("written_by_runs should be a list of integers") if name in files: if files[name].path != path: old_name, name = name, uniquenames(name) logger.warning("File name appears multiple times: %s\n" "Using name %s instead", old_name, name) else: uniquenames.insert(name) if path in paths: if paths[path] == name: logger.warning("File appears multiple times: %s", name) else: logger.warning("Two files have the same path (but different " "names): %s, %s\nUsing name %s", name, paths[path], paths[path]) name = paths[path] files[name].read_runs.update(readers) files[name].write_runs.update(writers) else: paths[path] = name files[name] = InputOutputFile(path, readers, writers) return files def load_config(filename, canonical, File=File, Package=Package): with filename.open(encoding='utf-8') as fp: config = yaml.safe_load(fp) ver = LooseVersion(config['version']) keys_ = set(config) if 'version' not in keys_: raise InvalidConfig("Missing version") elif not LooseVersion('0.2') <= ver < LooseVersion('0.9'): pkgname = (__package__ or __name__).split('.', 1)[0] raise InvalidConfig("Loading configuration file in unknown format %s; " "this probably means that you should upgrade " "%s" % (ver, pkgname)) unknown_keys = keys_ - set(['pack_id', 'version', 'runs', 'inputs_outputs', 'packages', 'other_files', 'additional_patterns', 'input_files', 'output_files']) if unknown_keys: logger.warning("Unrecognized sections in configuration: %s", ', '.join(unknown_keys)) runs = config.get('runs') or [] packages = read_packages(config.get('packages'), File, Package) other_files = read_files(config.get('other_files'), File) inputs_outputs = load_iofiles(config, runs) for i, run in enumerate(runs): run['input_files'] = dict((n, f.path) for n, f in iteritems(inputs_outputs) if i in f.read_runs) run['output_files'] = dict((n, f.path) for n, f in iteritems(inputs_outputs) if i in f.write_runs) for i, run in enumerate(runs): if run.get('id') is None: run['id'] = "run%d" % i record_usage_package(runs, packages, other_files, inputs_outputs, pack_id=config.get('pack_id')) kwargs = {'format_version': ver, 'inputs_outputs': inputs_outputs} if canonical: if 'additional_patterns' in config: raise InvalidConfig("Canonical configuration file shouldn't have " "additional_patterns key anymore") else: kwargs['additional_patterns'] = 
config.get('additional_patterns') or [] return Config(runs, packages, other_files, **kwargs) def write_file(fp, fi, indent=0): fp.write("%s - \"%s\"%s\n" % ( " " * indent, escape(unicode_(fi.path)), ' # %s' % fi.comment if fi.comment is not None else '')) def write_package(fp, pkg, indent=0): indent_str = " " * indent fp.write("%s - name: \"%s\"\n" % (indent_str, escape(pkg.name))) fp.write("%s version: \"%s\"\n" % (indent_str, escape(pkg.version))) if pkg.size is not None: fp.write("%s size: %d\n" % (indent_str, pkg.size)) fp.write("%s packfiles: %s\n" % (indent_str, 'true' if pkg.packfiles else 'false')) fp.write("%s files:\n" "%s # Total files used: %s\n" % ( indent_str, indent_str, hsize(sum(fi.size for fi in pkg.files if fi.size is not None)))) if pkg.size is not None: fp.write("%s # Installed package size: %s\n" % ( indent_str, hsize(pkg.size))) for fi in sorted(pkg.files, key=lambda fi_: fi_.path): write_file(fp, fi, indent + 1) def save_config(filename, runs, packages, other_files, reprozip_version, inputs_outputs=None, canonical=False, pack_id=None): dump = lambda x: yaml.safe_dump(x, encoding='utf-8', allow_unicode=True) with filename.open('w', encoding='utf-8', newline='\n') as fp: fp.write("""\ # ReproZip configuration file # This file was generated by reprozip {version} at {date} {what} # Run info{pack_id} version: "{format!s}" """.format(pack_id=(('\npack_id: "%s"' % pack_id) if pack_id is not None else ''), version=escape(reprozip_version), format='0.8', date=isodatetime(), what=("# It was generated by the packer and you shouldn't need to " "edit it" if canonical else "# You might want to edit this file before running the " "packer\n# See 'reprozip pack -h' for help"))) fp.write("runs:\n") for i, run in enumerate(runs): run = dict((k, v) for k, v in iteritems(run) if k not in ('input_files', 'output_files')) fp.write("# Run %d\n" % i) fp.write(dump([run]).decode('utf-8')) fp.write("\n") fp.write("""\ # Input and output files # Inputs are files that are only read by a run; reprounzip can replace these # files on demand to run the experiment with custom data. # Outputs are files that are generated by a run; reprounzip can extract these # files from the experiment on demand, for the user to examine. # The name field is the identifier the user will use to access these files. 
inputs_outputs:""") for n, f in iteritems(inputs_outputs): fp.write("""\ - name: {name} path: {path} written_by_runs: {writers} read_by_runs: {readers}""".format(name=n, path=unicode_(f.path), readers=repr(f.read_runs), writers=repr(f.write_runs))) fp.write("""\ # Files to pack # All the files below were used by the program; they will be included in the # generated package # These files come from packages; we can thus choose not to include them, as it # will simply be possible to install that package on the destination system # They are included anyway by default packages: """) for pkg in sorted(packages, key=lambda p: p.name): write_package(fp, pkg) fp.write("""\ # These files do not appear to come with an installed package -- you probably # want them packed other_files: """) for f in sorted(other_files, key=lambda fi: fi.path): write_file(fp, f) if not canonical: fp.write("""\ # If you want to include additional files in the pack, you can list additional # patterns of files that will be included additional_patterns: # Example: # - /etc/apache2/** # Everything under apache2/ # - /var/log/apache2/*.log # Log files directly under apache2/ # - /var/lib/lxc/*/rootfs/home/**/*.py # All Python files of all users in # # that container """) class LoggingDateFormatter(logging.Formatter): converter = datetime.fromtimestamp def formatTime(self, record, datefmt=None): ct = self.converter(record.created) t = ct.strftime("%H:%M:%S") s = "%s.%03d" % (t, record.msecs) return s def setup_logging(tag, verbosity): levels = [logging.CRITICAL, logging.WARNING, logging.INFO, logging.DEBUG] console_level = levels[min(verbosity, 3)] file_level = logging.INFO min_level = min(console_level, file_level) fmt = "[%s] %%(asctime)s %%(levelname)s: %%(message)s" % tag formatter = LoggingDateFormatter(fmt) handler = logging.StreamHandler() handler.setLevel(console_level) handler.setFormatter(formatter) rootlogger = logging.root rootlogger.setLevel(min_level) rootlogger.addHandler(handler) if os.environ.get('REPROZIP_NO_LOGFILE', '').lower() in ('', 'false', '0', 'off'): dotrpz = Path('~/.reprozip').expand_user() try: if not dotrpz.is_dir(): dotrpz.mkdir() filehandler = logging.handlers.RotatingFileHandler( str(dotrpz / 'log'), mode='a', delay=False, maxBytes=400000, backupCount=5) except (IOError, OSError): logger.warning("Couldn't create log file %s", dotrpz / 'log') else: filehandler.setFormatter(formatter) filehandler.setLevel(file_level) rootlogger.addHandler(filehandler) filehandler.emit(logging.root.makeRecord( __name__.split('.', 1)[0], logging.INFO, "(log start)", 0, "Log opened %s %s", (datetime.now().strftime("%Y-%m-%d"), sys.argv), None)) logging.getLogger('urllib3').setLevel(logging.INFO) _usage_report = None def setup_usage_report(name, version): global _usage_report certificate_file = get_reprozip_ca_certificate() _usage_report = usagestats.Stats( '~/.reprozip/usage_stats', usagestats.Prompt(enable='%s usage_report --enable' % name, disable='%s usage_report --disable' % name), os.environ.get('REPROZIP_USAGE_URL', 'https://stats.reprozip.org/'), version='%s %s' % (name, version), unique_user_id=True, env_var='REPROZIP_USAGE_STATS', ssl_verify=certificate_file.path) try: os.getcwd().encode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): record_usage(cwd_ascii=False) else: record_usage(cwd_ascii=True) def enable_usage_report(enable): if enable: _usage_report.enable_reporting() stderr.write("Thank you, usage reports will be sent automatically " "from now on.\n") else: _usage_report.disable_reporting() 
stderr.write("Usage reports will not be collected nor sent.\n")
BSD 3-Clause New or Revised License
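record_usage is a no-op until the module-level report object exists; a minimal sketch of the intended call order (the version string is illustrative):

from reprozip.common import setup_usage_report, record_usage

setup_usage_report("reprozip", "1.0")        # creates the module-level usagestats.Stats object
record_usage(command="trace", success=True)  # noted on the report; nothing is sent unless reporting was enabled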
erigones/esdc-ce
api/vm/snapshot/utils.py
filter_snap_type
python
def filter_snap_type(query_filter, data):
    stype = data.get('type', None)

    if stype:
        try:
            stype = int(stype)
            if stype not in dict(Snapshot.TYPE):
                raise ValueError
            query_filter['type'] = stype
        except Exception:
            raise InvalidInput('Invalid snapshot type')

    return query_filter
Validate snapshot type and update dictionary used for queryset filtering
https://github.com/erigones/esdc-ce/blob/f83a62d0d430e3c8f9aac23d958583b0efce4312/api/vm/snapshot/utils.py#L87-L101
from api.exceptions import ObjectNotFound, InvalidInput, VmIsNotOperational from vms.models import Vm, Snapshot VM_STATUS_OPERATIONAL = frozenset([Vm.RUNNING, Vm.STOPPED, Vm.STOPPING]) def is_vm_operational(fun): def wrap(view, vm, *args, **kwargs): if vm.status not in VM_STATUS_OPERATIONAL: raise VmIsNotOperational return fun(view, vm, *args, **kwargs) return wrap def detail_dict(name, ser, data=None): if data is None: data = ser.detail_dict() data['disk_id'] = ser.object.disk_id data[name] = ser.object.name return data def get_disk_id(request, vm, data=None, key='disk_id', default=1, disk_id=None): assert data is not None or disk_id is not None if disk_id is None: disk_id = data.get(key, default) try: disk_id = int(disk_id) if not disk_id > 0: raise ValueError disk = vm.json_active_get_disks()[disk_id - 1] zfs_filesystem = disk['zfs_filesystem'] real_disk_id = Snapshot.get_real_disk_id(disk) except Exception: raise InvalidInput('Invalid %s' % key) return disk_id, real_disk_id, zfs_filesystem def get_snapshots(request, vm, real_disk_id, data): snapnames = data.get('snapnames', None) if not (snapnames and isinstance(snapnames, (list, tuple))): raise InvalidInput('Invalid snapnames') snapnames = map(str, snapnames) snaps = Snapshot.objects.select_related('vm').filter(vm=vm, disk_id=real_disk_id, name__in=snapnames) if not snaps: raise ObjectNotFound(model=Snapshot) return snaps, snapnames def filter_disk_id(vm, query_filter, data, default=None): disk_id = data.get('disk_id', default) if disk_id is not None: try: disk_id = int(disk_id) if not disk_id > 0: raise ValueError if vm: query_filter['disk_id'] = Snapshot.get_disk_id(vm, disk_id) else: query_filter['vm_disk_id'] = disk_id - 1 except Exception: raise InvalidInput('Invalid disk_id') return query_filter
Apache License 2.0
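A usage sketch: the helper coerces the incoming value and mutates the filter dict that later feeds a Snapshot queryset. It assumes 1 is a valid Snapshot.TYPE value; the request data is made up:

from api.vm.snapshot.utils import filter_snap_type

query_filter = {}
query_filter = filter_snap_type(query_filter, {'type': '1'})
# query_filter is now {'type': 1}; an unknown or non-integer type raises InvalidInput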
kozea/cairocffi
cairocffi/matrix.py
Matrix.copy
python
def copy(self):
    return type(self)(*self.as_tuple())
Return a new copy of this matrix.
https://github.com/kozea/cairocffi/blob/2473d1bb82a52ca781edec595a95951509db2969/cairocffi/matrix.py#L71-L73
from . import _check_status, cairo, ffi class Matrix(object): def __init__(self, xx=1, yx=0, xy=0, yy=1, x0=0, y0=0): self._pointer = ffi.new('cairo_matrix_t *') cairo.cairo_matrix_init(self._pointer, xx, yx, xy, yy, x0, y0) @classmethod def init_rotate(cls, radians): result = cls() cairo.cairo_matrix_init_rotate(result._pointer, radians) return result def as_tuple(self): ptr = self._pointer return (ptr.xx, ptr.yx, ptr.xy, ptr.yy, ptr.x0, ptr.y0)
BSD 3-Clause New or Revised License
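A usage sketch showing that copy() produces an independent matrix with identical components (cairocffi must be installed and able to load the cairo library):

from cairocffi import Matrix

m = Matrix.init_rotate(0.5)
m2 = m.copy()
assert m2.as_tuple() == m.as_tuple()   # same xx, yx, xy, yy, x0, y0 components
assert m2 is not m                     # but backed by a distinct cairo_matrix_t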
jonathanfeng/new_horizons
venv/lib/python3.7/site-packages/jinja2/filters.py
do_random
python
def do_random(context, seq):
    try:
        return random.choice(seq)
    except IndexError:
        return context.environment.undefined("No random item, sequence was empty.")
Return a random item from the sequence.
https://github.com/jonathanfeng/new_horizons/blob/0ec21c8f8423932611e1e0bf24548dcef912bc54/venv/lib/python3.7/site-packages/jinja2/filters.py#L534-L539
import math import random import re import warnings from collections import namedtuple from itertools import chain from itertools import groupby from markupsafe import escape from markupsafe import Markup from markupsafe import soft_unicode from ._compat import abc from ._compat import imap from ._compat import iteritems from ._compat import string_types from ._compat import text_type from .exceptions import FilterArgumentError from .runtime import Undefined from .utils import htmlsafe_json_dumps from .utils import pformat from .utils import unicode_urlencode from .utils import urlize _word_re = re.compile(r"\w+", re.UNICODE) _word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE) def contextfilter(f): f.contextfilter = True return f def evalcontextfilter(f): f.evalcontextfilter = True return f def environmentfilter(f): f.environmentfilter = True return f def ignore_case(value): return value.lower() if isinstance(value, string_types) else value def make_attrgetter(environment, attribute, postprocess=None, default=None): attribute = _prepare_attribute_parts(attribute) def attrgetter(item): for part in attribute: item = environment.getitem(item, part) if default and isinstance(item, Undefined): item = default if postprocess is not None: item = postprocess(item) return item return attrgetter def make_multi_attrgetter(environment, attribute, postprocess=None): attribute_parts = ( attribute.split(",") if isinstance(attribute, string_types) else [attribute] ) attribute = [ _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts ] def attrgetter(item): items = [None] * len(attribute) for i, attribute_part in enumerate(attribute): item_i = item for part in attribute_part: item_i = environment.getitem(item_i, part) if postprocess is not None: item_i = postprocess(item_i) items[i] = item_i return items return attrgetter def _prepare_attribute_parts(attr): if attr is None: return [] elif isinstance(attr, string_types): return [int(x) if x.isdigit() else x for x in attr.split(".")] else: return [attr] def do_forceescape(value): if hasattr(value, "__html__"): value = value.__html__() return escape(text_type(value)) def do_urlencode(value): if isinstance(value, string_types) or not isinstance(value, abc.Iterable): return unicode_urlencode(value) if isinstance(value, dict): items = iteritems(value) else: items = iter(value) return u"&".join( "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True)) for k, v in items ) @evalcontextfilter def do_replace(eval_ctx, s, old, new, count=None): if count is None: count = -1 if not eval_ctx.autoescape: return text_type(s).replace(text_type(old), text_type(new), count) if ( hasattr(old, "__html__") or hasattr(new, "__html__") and not hasattr(s, "__html__") ): s = escape(s) else: s = soft_unicode(s) return s.replace(soft_unicode(old), soft_unicode(new), count) def do_upper(s): return soft_unicode(s).upper() def do_lower(s): return soft_unicode(s).lower() @evalcontextfilter def do_xmlattr(_eval_ctx, d, autospace=True): rv = u" ".join( u'%s="%s"' % (escape(key), escape(value)) for key, value in iteritems(d) if value is not None and not isinstance(value, Undefined) ) if autospace and rv: rv = u" " + rv if _eval_ctx.autoescape: rv = Markup(rv) return rv def do_capitalize(s): return soft_unicode(s).capitalize() def do_title(s): return "".join( [ item[0].upper() + item[1:].lower() for item in _word_beginning_split_re.split(soft_unicode(s)) if item ] ) def do_dictsort(value, case_sensitive=False, by="key", 
reverse=False): if by == "key": pos = 0 elif by == "value": pos = 1 else: raise FilterArgumentError('You can only sort by either "key" or "value"') def sort_func(item): value = item[pos] if not case_sensitive: value = ignore_case(value) return value return sorted(value.items(), key=sort_func, reverse=reverse) @environmentfilter def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None): key_func = make_multi_attrgetter( environment, attribute, postprocess=ignore_case if not case_sensitive else None ) return sorted(value, key=key_func, reverse=reverse) @environmentfilter def do_unique(environment, value, case_sensitive=False, attribute=None): getter = make_attrgetter( environment, attribute, postprocess=ignore_case if not case_sensitive else None ) seen = set() for item in value: key = getter(item) if key not in seen: seen.add(key) yield item def _min_or_max(environment, value, func, case_sensitive, attribute): it = iter(value) try: first = next(it) except StopIteration: return environment.undefined("No aggregated item, sequence was empty.") key_func = make_attrgetter( environment, attribute, postprocess=ignore_case if not case_sensitive else None ) return func(chain([first], it), key=key_func) @environmentfilter def do_min(environment, value, case_sensitive=False, attribute=None): return _min_or_max(environment, value, min, case_sensitive, attribute) @environmentfilter def do_max(environment, value, case_sensitive=False, attribute=None): return _min_or_max(environment, value, max, case_sensitive, attribute) def do_default(value, default_value=u"", boolean=False): if isinstance(value, Undefined) or (boolean and not value): return default_value return value @evalcontextfilter def do_join(eval_ctx, value, d=u"", attribute=None): if attribute is not None: value = imap(make_attrgetter(eval_ctx.environment, attribute), value) if not eval_ctx.autoescape: return text_type(d).join(imap(text_type, value)) if not hasattr(d, "__html__"): value = list(value) do_escape = False for idx, item in enumerate(value): if hasattr(item, "__html__"): do_escape = True else: value[idx] = text_type(item) if do_escape: d = escape(d) else: d = text_type(d) return d.join(value) return soft_unicode(d).join(imap(soft_unicode, value)) def do_center(value, width=80): return text_type(value).center(width) @environmentfilter def do_first(environment, seq): try: return next(iter(seq)) except StopIteration: return environment.undefined("No first item, sequence was empty.") @environmentfilter def do_last(environment, seq): try: return next(iter(reversed(seq))) except StopIteration: return environment.undefined("No last item, sequence was empty.") @contextfilter
MIT License
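In templates the filter is registered under the name `random`; a minimal sketch:

from jinja2 import Environment

env = Environment()
print(env.from_string("{{ [1, 2, 3] | random }}").render())   # prints one of 1, 2 or 3
print(env.from_string("{{ [] | random }}").render())          # empty sequence -> the environment's Undefined (renders as "" by default)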
linkedin/oncall
src/oncall/api/v0/team_iris_escalate.py
on_post
python
def on_post(req, resp, team):
    data = load_json_body(req)
    plan = data.get('plan')
    dynamic = False
    if plan == URGENT:
        plan_settings = iris.settings['urgent_plan']
        dynamic = True
    elif plan == MEDIUM:
        plan_settings = iris.settings['medium_plan']
        dynamic = True
    elif plan == CUSTOM or plan is None:
        connection = db.connect()
        cursor = connection.cursor()
        cursor.execute('SELECT iris_plan FROM team WHERE name = %s', team)
        if cursor.rowcount == 0:
            cursor.close()
            connection.close()
            raise HTTPBadRequest('Iris escalation failed',
                                 'No escalation plan specified '
                                 'and team has no custom escalation plan defined')
        plan_name = cursor.fetchone()[0]
        cursor.close()
        connection.close()
    else:
        raise HTTPBadRequest('Iris escalation failed', 'Invalid escalation plan')

    requester = req.context.get('user')
    if not requester:
        requester = req.context['app']
    data['requester'] = requester
    if 'description' not in data or data['description'] == '':
        raise HTTPBadRequest('Iris escalation failed',
                             'Escalation cannot have an empty description')

    try:
        if dynamic:
            plan_name = plan_settings['name']
            targets = plan_settings['dynamic_targets']
            for t in targets:
                if 'target' not in t:
                    t['target'] = team
            re = iris.client.post(iris.client.url + 'incidents',
                                  json={'plan': plan_name,
                                        'context': data,
                                        'dynamic_targets': targets})
            re.raise_for_status()
            incident_id = re.json()
        else:
            incident_id = iris.client.incident(plan_name, context=data)
    except (ValueError, ConnectionError, HTTPError) as e:
        raise HTTPBadRequest('Iris escalation failed', 'Iris client error: %s' % e)
    resp.body = str(incident_id)
Escalate to a team using Iris. Configured in the 'iris_plan_integration' section of the
configuration file. Escalation plan is specified via keyword, currently: 'urgent',
'medium', or 'custom'. These keywords correspond to the plan specified in the
iris_plan_integration urgent_plan key, the iris integration medium_plan key, and the
team's iris plan defined in the DB, respectively. If no plan is specified, the team's
custom plan will be used. If iris plan integration is not activated, this endpoint will
be disabled.

**Example request:**

.. sourcecode:: http

    POST /v0/events HTTP/1.1
    Content-Type: application/json

    {
        "description": "Something bad happened!",
        "plan": "urgent"
    }

:statuscode 200: Incident created
:statuscode 400: Escalation failed, missing description/No escalation plan specified for team/Iris client error.
https://github.com/linkedin/oncall/blob/36bdf087e1994c7733e11f290d23392003038965/src/oncall/api/v0/team_iris_escalate.py#L13-L87
from ... import db, iris from ...utils import load_json_body from ...auth import login_required from ...constants import URGENT, MEDIUM, CUSTOM from falcon import HTTPBadRequest from requests import ConnectionError, HTTPError @login_required
BSD 2-Clause Simplified License
sdispater/orator
orator/query/builder.py
QueryBuilder.where
python
def where(self, column, operator=Null(), value=None, boolean="and"):
    if isinstance(column, dict):
        nested = self.new_query()
        for key, value in column.items():
            nested.where(key, "=", value)

        return self.where_nested(nested, boolean)

    if isinstance(column, QueryBuilder):
        return self.where_nested(column, boolean)

    if isinstance(column, list):
        nested = self.new_query()
        for condition in column:
            if isinstance(condition, list) and len(condition) == 3:
                nested.where(condition[0], condition[1], condition[2])
            else:
                raise ArgumentError("Invalid conditions in where() clause")

        return self.where_nested(nested, boolean)

    if value is None:
        if not isinstance(operator, Null):
            value = operator
            operator = "="
        else:
            raise ArgumentError("Value must be provided")

    if operator not in self._operators:
        value = operator
        operator = "="

    if isinstance(value, QueryBuilder):
        return self._where_sub(column, operator, value, boolean)

    if value is None:
        return self.where_null(column, boolean, operator != "=")

    type = "basic"

    self.wheres.append(
        {
            "type": type,
            "column": column,
            "operator": operator,
            "value": value,
            "boolean": boolean,
        }
    )

    if not isinstance(value, QueryExpression):
        self.add_binding(value, "where")

    return self
Add a where clause to the query

:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str or QueryBuilder

:param operator: The operator of the where clause
:type operator: str

:param value: The value of the where clause
:type value: mixed

:param boolean: The boolean of the where clause
:type boolean: str

:return: The current QueryBuilder instance
:rtype: QueryBuilder
https://github.com/sdispater/orator/blob/0666e522be914db285b6936e3c36801fc1a9c2e7/orator/query/builder.py#L346-L419
import re import copy import datetime from itertools import chain from collections import OrderedDict from .expression import QueryExpression from .join_clause import JoinClause from ..pagination import Paginator, LengthAwarePaginator from ..utils import basestring, Null from ..exceptions import ArgumentError from ..support import Collection class QueryBuilder(object): _operators = [ "=", "<", ">", "<=", ">=", "<>", "!=", "like", "like binary", "not like", "between", "ilike", "&", "|", "^", "<<", ">>", "rlike", "regexp", "not regexp", "~", "~*", "!~", "!~*", "similar to", "not similar to", ] def __init__(self, connection, grammar, processor): self._grammar = grammar self._processor = processor self._connection = connection self._bindings = OrderedDict() for type in ["select", "join", "where", "having", "order"]: self._bindings[type] = [] self.aggregate_ = None self.columns = [] self.distinct_ = False self.from__ = "" self.joins = [] self.wheres = [] self.groups = [] self.havings = [] self.orders = [] self.limit_ = None self.offset_ = None self.unions = [] self.union_limit = None self.union_offset = None self.union_orders = [] self.lock_ = None self._backups = {} self._use_write_connection = False def select(self, *columns): if not columns: columns = ["*"] self.columns = list(columns) return self def select_raw(self, expression, bindings=None): self.add_select(QueryExpression(expression)) if bindings: self.add_binding(bindings, "select") return self def select_sub(self, query, as_): if isinstance(query, QueryBuilder): bindings = query.get_bindings() query = query.to_sql() elif isinstance(query, basestring): bindings = [] else: raise ArgumentError("Invalid subselect") return self.select_raw( "(%s) AS %s" % (query, self._grammar.wrap(as_)), bindings ) def add_select(self, *column): if not column: column = [] self.columns += list(column) return self def distinct(self): self.distinct_ = True return self def from_(self, table): self.from__ = table return self def join(self, table, one=None, operator=None, two=None, type="inner", where=False): if isinstance(table, JoinClause): self.joins.append(table) else: if one is None: raise ArgumentError('Missing "one" argument') join = JoinClause(table, type) self.joins.append(join.on(one, operator, two, "and", where)) return self def join_where(self, table, one, operator, two, type="inner"): return self.join(table, one, operator, two, type, True) def left_join(self, table, one=None, operator=None, two=None): if isinstance(table, JoinClause): table.type = "left" return self.join(table, one, operator, two, "left") def left_join_where(self, table, one, operator, two): return self.join_where(table, one, operator, two, "left") def right_join(self, table, one=None, operator=None, two=None): if isinstance(table, JoinClause): table.type = "right" return self.join(table, one, operator, two, "right") def right_join_where(self, table, one, operator, two): return self.join_where(table, one, operator, two, "right")
MIT License
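A usage sketch of the call shapes where() accepts, assuming `db` is an orator DatabaseManager; the table and column names are illustrative:

query = db.table('users')
query.where('votes', '>', 100)                        # column, operator, value
query.where('name', 'John')                           # two-argument form: operator defaults to '='
query.where({'age': 21, 'active': 1})                 # dict form becomes a nested AND group
query.where([['age', '>', 21], ['votes', '>', 0]])    # list of [column, operator, value] triples
users = query.get()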
botfront/rasa-for-botfront
rasa/core/policies/policy.py
Policy.featurizer
python
def featurizer(self):
    return self.__featurizer
Returns the policy's featurizer.
https://github.com/botfront/rasa-for-botfront/blob/6e0e48d0059e197b5f686df1e27935769c3641b7/rasa/core/policies/policy.py#L125-L127
import copy import json import logging from enum import Enum from pathlib import Path from typing import ( Any, List, Optional, Text, Dict, Callable, Type, Union, Tuple, TYPE_CHECKING, ) import numpy as np from rasa.core.exceptions import UnsupportedDialogueModelError from rasa.shared.core.events import Event import rasa.shared.utils.common import rasa.utils.common import rasa.shared.utils.io from rasa.shared.core.domain import Domain from rasa.core.featurizers.single_state_featurizer import SingleStateFeaturizer from rasa.core.featurizers.tracker_featurizers import ( TrackerFeaturizer, MaxHistoryTrackerFeaturizer, FEATURIZER_FILE, ) from rasa.shared.nlu.interpreter import NaturalLanguageInterpreter from rasa.shared.core.trackers import DialogueStateTracker from rasa.shared.core.generator import TrackerWithCachedStates from rasa.core.constants import DEFAULT_POLICY_PRIORITY from rasa.shared.core.constants import USER, SLOTS, PREVIOUS_ACTION, ACTIVE_LOOP from rasa.shared.nlu.constants import ENTITIES, INTENT, TEXT, ACTION_TEXT, ACTION_NAME from rasa.utils.tensorflow.constants import EPOCHS if TYPE_CHECKING: from rasa.shared.nlu.training_data.features import Features logger = logging.getLogger(__name__) class SupportedData(Enum): ML_DATA = 1 RULE_DATA = 2 ML_AND_RULE_DATA = 3 @staticmethod def trackers_for_policy( policy: Union["Policy", Type["Policy"]], trackers: Union[List[DialogueStateTracker], List[TrackerWithCachedStates]], ) -> Union[List[DialogueStateTracker], List[TrackerWithCachedStates]]: supported_data = policy.supported_data() if supported_data == SupportedData.RULE_DATA: return [tracker for tracker in trackers if tracker.is_rule_tracker] if supported_data == SupportedData.ML_DATA: return [tracker for tracker in trackers if not tracker.is_rule_tracker] return trackers class Policy: @staticmethod def supported_data() -> SupportedData: return SupportedData.ML_DATA @staticmethod def _standard_featurizer() -> MaxHistoryTrackerFeaturizer: return MaxHistoryTrackerFeaturizer(SingleStateFeaturizer()) @classmethod def _create_featurizer( cls, featurizer: Optional[TrackerFeaturizer] = None ) -> TrackerFeaturizer: if featurizer: return copy.deepcopy(featurizer) else: return cls._standard_featurizer() def __init__( self, featurizer: Optional[TrackerFeaturizer] = None, priority: int = DEFAULT_POLICY_PRIORITY, should_finetune: bool = False, **kwargs: Any, ) -> None: self.__featurizer = self._create_featurizer(featurizer) self.priority = priority self.finetune_mode = should_finetune @property
Apache License 2.0
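The property simply exposes the TrackerFeaturizer built in Policy.__init__; a sketch using MemoizationPolicy purely as an example Policy subclass from the same codebase:

from rasa.core.policies.memoization import MemoizationPolicy

policy = MemoizationPolicy()
featurizer = policy.featurizer          # the TrackerFeaturizer created by Policy._create_featurizer
print(type(featurizer).__name__)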
qiboteam/qibo
src/qibo/hamiltonians.py
XXZ
python
def XXZ(nqubits, delta=0.5, dense=True):
    if dense:
        condition = lambda i, j: i in {j % nqubits, (j+1) % nqubits}
        hx = _build_spin_model(nqubits, matrices.X, condition)
        hy = _build_spin_model(nqubits, matrices.Y, condition)
        hz = _build_spin_model(nqubits, matrices.Z, condition)
        matrix = hx + hy + delta * hz
        return Hamiltonian(nqubits, matrix)

    hx = K.np.kron(matrices.X, matrices.X)
    hy = K.np.kron(matrices.Y, matrices.Y)
    hz = K.np.kron(matrices.Z, matrices.Z)
    matrix = hx + hy + delta * hz
    terms = [HamiltonianTerm(matrix, i, i + 1) for i in range(nqubits - 1)]
    terms.append(HamiltonianTerm(matrix, nqubits - 1, 0))
    ham = SymbolicHamiltonian()
    ham.terms = terms
    return ham
Heisenberg XXZ model with periodic boundary conditions.

.. math::
    H = \\sum _{i=0}^N \\left ( X_iX_{i + 1} + Y_iY_{i + 1} + \\delta Z_iZ_{i + 1} \\right ).

Args:
    nqubits (int): number of quantum bits.
    delta (float): coefficient for the Z component (default 0.5).
    dense (bool): If ``True`` it creates the Hamiltonian as a
        :class:`qibo.core.hamiltonians.Hamiltonian`, otherwise it creates
        a :class:`qibo.core.hamiltonians.SymbolicHamiltonian`.

Example:
    .. testcode::

        from qibo.hamiltonians import XXZ
        h = XXZ(3)  # initialized XXZ model with 3 qubits
https://github.com/qiboteam/qibo/blob/d8bd2d3de0d8eb12a428a9125302e318480e982a/src/qibo/hamiltonians.py#L31-L66
from qibo import matrices, K from qibo.config import raise_error from qibo.core.hamiltonians import Hamiltonian, SymbolicHamiltonian, TrotterHamiltonian from qibo.core.terms import HamiltonianTerm def multikron(matrix_list): h = 1 for m in matrix_list: h = K.np.kron(h, m) return h def _build_spin_model(nqubits, matrix, condition): h = sum(multikron( (matrix if condition(i, j) else matrices.I for j in range(nqubits))) for i in range(nqubits)) return h
Apache License 2.0
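Extending the docstring example to contrast the two return types selected by `dense` (the 8x8 shape follows from the 2**nqubits Hilbert-space dimension):

from qibo.hamiltonians import XXZ

dense = XXZ(3, delta=0.5, dense=True)    # full-matrix Hamiltonian
local = XXZ(3, delta=0.5, dense=False)   # SymbolicHamiltonian assembled from two-qubit terms
print(dense.matrix.shape)                # (8, 8)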
red-hat-storage/ocs-ci
ocs_ci/ocs/node.py
add_new_node_and_label_it
python
def add_new_node_and_label_it(machineset_name, num_nodes=1, mark_for_ocs_label=True):
    initial_nodes = get_worker_nodes()
    log.info(f"Current available worker nodes are {initial_nodes}")
    machineset_replica_count = machine.get_replica_count(machineset_name)
    log.info(f"{machineset_name} has replica count: {machineset_replica_count}")
    log.info(f"Increasing the replica count by {num_nodes}")
    machine.add_node(machineset_name, count=machineset_replica_count + num_nodes)
    log.info(
        f"{machineset_name} now has replica "
        f"count: {machineset_replica_count + num_nodes}"
    )
    log.info("Waiting for the new node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    nodes_after_new_spun_node = get_worker_nodes()
    new_spun_nodes = list(set(nodes_after_new_spun_node) - set(initial_nodes))
    log.info(f"New spun nodes: {new_spun_nodes}")
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind="node")
        for new_spun_node in new_spun_nodes:
            if is_node_labeled(new_spun_node):
                logging.info(
                    f"node {new_spun_node} is already labeled with the OCS storage label"
                )
            else:
                node_obj.add_label(
                    resource_name=new_spun_node, label=constants.OPERATOR_NODE_LABEL
                )
                logging.info(
                    f"Successfully labeled {new_spun_node} with OCS storage label"
                )
    return new_spun_nodes
Add a new node for ipi and label it

Args:
    machineset_name (str): Name of the machine set
    num_nodes (int): number of nodes to add
    mark_for_ocs_label (bool): True if label the new node
        eg: add_new_node_and_label_it("new-tdesala-zlqzn-worker-us-east-2a")

Returns:
    list: new spun node names
https://github.com/red-hat-storage/ocs-ci/blob/81bc3dd3c2bccbf875ffa8fa5fa2eb0ac9d52b7e/ocs_ci/ocs/node.py#L315-L370
import copy import logging import re import time from prettytable import PrettyTable from collections import defaultdict from operator import itemgetter from subprocess import TimeoutExpired from semantic_version import Version from ocs_ci.ocs.machine import get_machine_objs from ocs_ci.framework import config from ocs_ci.ocs.exceptions import ( TimeoutExpiredError, NotAllNodesCreated, CommandFailed, ResourceNotFoundError, ) from ocs_ci.ocs.ocp import OCP from ocs_ci.ocs.resources.ocs import OCS from ocs_ci.ocs import constants, exceptions, ocp, defaults from ocs_ci.utility.utils import TimeoutSampler, convert_device_size from ocs_ci.ocs import machine from ocs_ci.ocs.resources import pod from ocs_ci.utility.utils import set_selinux_permissions from ocs_ci.ocs.resources.pv import ( get_pv_objs_in_sc, verify_new_pvs_available_in_sc, delete_released_pvs_in_sc, get_pv_size, get_node_pv_objs, ) log = logging.getLogger(__name__) def get_node_objs(node_names=None): nodes_obj = OCP(kind="node") node_dicts = nodes_obj.get()["items"] if not node_names: nodes = [OCS(**node_obj) for node_obj in node_dicts] else: nodes = [ OCS(**node_obj) for node_obj in node_dicts if (node_obj.get("metadata").get("name") in node_names) ] assert nodes, "Failed to get the nodes OCS objects" return nodes def get_nodes(node_type=constants.WORKER_MACHINE, num_of_nodes=None): if ( config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM and node_type == constants.WORKER_MACHINE ): typed_nodes = [ node for node in get_node_objs() if node_type in node.ocp.get_resource(resource_name=node.name, column="ROLES") and constants.INFRA_MACHINE not in node.ocp.get_resource(resource_name=node.name, column="ROLES") ] else: typed_nodes = [ node for node in get_node_objs() if node_type in node.ocp.get_resource(resource_name=node.name, column="ROLES") ] if num_of_nodes: typed_nodes = typed_nodes[:num_of_nodes] return typed_nodes def get_all_nodes(): ocp_node_obj = ocp.OCP(kind=constants.NODE) node_items = ocp_node_obj.get().get("items") return [node["metadata"]["name"] for node in node_items] def wait_for_nodes_status(node_names=None, status=constants.NODE_READY, timeout=180): try: if not node_names: for sample in TimeoutSampler(60, 3, get_node_objs): if sample: node_names = [node.name for node in sample] break nodes_not_in_state = copy.deepcopy(node_names) log.info(f"Waiting for nodes {node_names} to reach status {status}") for sample in TimeoutSampler(timeout, 3, get_node_objs, nodes_not_in_state): for node in sample: if node.ocp.get_resource_status(node.name) == status: log.info(f"Node {node.name} reached status {status}") nodes_not_in_state.remove(node.name) if not nodes_not_in_state: break log.info(f"The following nodes reached status {status}: {node_names}") except TimeoutExpiredError: log.error( f"The following nodes haven't reached status {status}: " f"{nodes_not_in_state}" ) raise exceptions.ResourceWrongStatusException( node_names, [n.describe() for n in get_node_objs(node_names)] ) def unschedule_nodes(node_names): ocp = OCP(kind="node") node_names_str = " ".join(node_names) log.info(f"Unscheduling nodes {node_names_str}") ocp.exec_oc_cmd(f"adm cordon {node_names_str}") wait_for_nodes_status(node_names, status=constants.NODE_READY_SCHEDULING_DISABLED) def schedule_nodes(node_names): ocp = OCP(kind="node") node_names_str = " ".join(node_names) ocp.exec_oc_cmd(f"adm uncordon {node_names_str}") log.info(f"Scheduling nodes {node_names_str}") wait_for_nodes_status(node_names) def drain_nodes(node_names): ocp = 
OCP(kind="node") node_names_str = " ".join(node_names) log.info(f"Draining nodes {node_names_str}") try: ocp.exec_oc_cmd( f"adm drain {node_names_str} --force=true --ignore-daemonsets " f"--delete-local-data", timeout=1800, ) except TimeoutExpired: ct_pod = pod.get_ceph_tools_pod() ceph_status = ct_pod.exec_cmd_on_pod("ceph status", out_yaml_format=False) log.error(f"Drain command failed to complete. Ceph status: {ceph_status}") raise def get_typed_worker_nodes(os_id="rhcos"): worker_nodes = get_nodes(node_type="worker") return [ node for node in worker_nodes if node.get().get("metadata").get("labels").get("node.openshift.io/os_id") == os_id ] def remove_nodes(nodes): ocp = OCP(kind="node") node_names = [node.get().get("metadata").get("name") for node in nodes] node_names_str = " ".join(node_names) unschedule_nodes(node_names) drain_nodes(node_names) log.info(f"Deleting nodes {node_names_str}") ocp.exec_oc_cmd(f"delete nodes {node_names_str}") def get_node_ips(node_type="worker"): ocp = OCP(kind=constants.NODE) if node_type == "worker": nodes = ocp.get(selector=constants.WORKER_LABEL).get("items") if node_type == "master:": nodes = ocp.get(selector=constants.MASTER_LABEL).get("items") if config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM: raise NotImplementedError elif config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM: return [ each["address"] for node in nodes for each in node["status"]["addresses"] if each["type"] == "ExternalIP" ] else: raise NotImplementedError def get_node_ip_addresses(ipkind): ocp = OCP(kind=constants.NODE) masternodes = ocp.get(selector=constants.MASTER_LABEL).get("items") workernodes = ocp.get(selector=constants.WORKER_LABEL).get("items") nodes = masternodes + workernodes return { node["metadata"]["name"]: each["address"] for node in nodes for each in node["status"]["addresses"] if each["type"] == ipkind }
MIT License
zwicker-group/py-pde
pde/trackers/trackers.py
DataTracker.handle
python
def handle(self, field: FieldBase, t: float) -> None:
    self.times.append(t)
    if self._num_args == 1:
        self.data.append(self._callback(field))
    else:
        self.data.append(self._callback(field, t))
handle data supplied to this tracker

Args:
    field (:class:`~pde.fields.FieldBase`):
        The current state of the simulation
    t (float):
        The associated time
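A hypothetical usage sketch (not part of this record): driving handle() directly with a stand-in field object to show how times and data accumulate. The import path follows the function_path above, and the fake field only needs a `.data` array; everything else here is assumed.

# Minimal sketch, assuming py-pde is installed so DataTracker can be imported.
import numpy as np
from types import SimpleNamespace
from pde.trackers.trackers import DataTracker

tracker = DataTracker(func=lambda field, t: float(field.data.mean()), interval=1)
fake_field = SimpleNamespace(data=np.array([0.0, 1.0, 2.0]))  # stand-in for a FieldBase

for t in (0.0, 0.5, 1.0):
    tracker.handle(fake_field, t)

print(tracker.times)  # [0.0, 0.5, 1.0]
print(tracker.data)   # [1.0, 1.0, 1.0]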
https://github.com/zwicker-group/py-pde/blob/0549f7c74a52705e1d29e62d27b5578251c2054c/pde/trackers/trackers.py#L557-L570
import inspect import os.path import sys import time from datetime import timedelta from pathlib import Path from typing import List from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, Union import numpy as np from ..fields import FieldCollection from ..fields.base import FieldBase from ..tools.docstrings import fill_in_docstring from ..tools.misc import module_available from ..tools.output import get_progress_bar_class from ..tools.parse_duration import parse_duration from ..tools.typing import Real from .base import FinishedSimulation, InfoDict, TrackerBase from .intervals import IntervalData, RealtimeIntervals if TYPE_CHECKING: import pandas from ..visualization.movies import Movie class CallbackTracker(TrackerBase): @fill_in_docstring def __init__(self, func: Callable, interval: IntervalData = 1): super().__init__(interval=interval) self._callback = func self._num_args = len(inspect.signature(func).parameters) if not 0 < self._num_args < 3: raise ValueError( "`func` must be a function accepting one or two arguments, not " f"{self._num_args}" ) def handle(self, field: FieldBase, t: float) -> None: if self._num_args == 1: self._callback(field) else: self._callback(field, t) class ProgressTracker(TrackerBase): name = "progress" @fill_in_docstring def __init__( self, interval: IntervalData = None, ndigits: int = 5, leave: bool = True ): if interval is None: interval = RealtimeIntervals(duration=1) super().__init__(interval=interval) self.ndigits = ndigits self.leave = leave def initialize(self, field: FieldBase, info: InfoDict = None) -> float: result = super().initialize(field, info) controller_info = {} if info is None else info.get("controller", {}) pb_cls = get_progress_bar_class() self.progress_bar = pb_cls( total=controller_info.get("t_end"), initial=controller_info.get("t_start", 0), leave=self.leave, ) self.progress_bar.set_description("Initializing") return result def handle(self, field: FieldBase, t: float) -> None: if self.progress_bar.total: t_new = min(t, self.progress_bar.total) else: t_new = t self.progress_bar.n = round(t_new, self.ndigits) self.progress_bar.set_description("") def finalize(self, info: InfoDict = None) -> None: super().finalize(info) self.progress_bar.set_description("") controller_info = {} if info is None else info.get("controller", {}) t_final = controller_info.get("t_final", -np.inf) t_end = controller_info.get("t_end", -np.inf) if t_final >= t_end and self.progress_bar.total: self.progress_bar.n = self.progress_bar.total self.progress_bar.refresh() if ( controller_info.get("successful", False) and self.leave and hasattr(self.progress_bar, "sp") ): try: self.progress_bar.sp(bar_style="success") except TypeError: self.progress_bar.close() else: self.progress_bar.disable = True else: self.progress_bar.close() def __del__(self): if hasattr(self, "progress_bar") and not self.progress_bar.disable: self.progress_bar.close() class PrintTracker(TrackerBase): name = "print" @fill_in_docstring def __init__(self, interval: IntervalData = 1, stream: IO[str] = sys.stdout): super().__init__(interval=interval) self.stream = stream def handle(self, field: FieldBase, t: float) -> None: data = f"c={field.data.mean():.3g}±{field.data.std():.3g}" self.stream.write(f"t={t:g}, {data}\n") self.stream.flush() class PlotTracker(TrackerBase): @fill_in_docstring def __init__( self, interval: IntervalData = 1, *, title: Union[str, Callable] = "Time: {time:g}", output_file: Optional[str] = None, movie: Union[str, Path, "Movie"] = None, show: bool = None, max_fps: 
float = np.inf, plot_args: Dict[str, Any] = None, ): from ..visualization.movies import Movie super().__init__(interval=interval) self.title = title self.output_file = output_file self.max_fps = max_fps self.plot_args = {} if plot_args is None else plot_args.copy() self.plot_args["action"] = "create" if movie is None: self.movie: Optional[Movie] = None self._save_movie = False elif isinstance(movie, Movie): self.movie = movie self._save_movie = False elif isinstance(movie, (str, Path)): self.movie = Movie(filename=str(movie)) self._save_movie = True else: raise TypeError(f"Unknown type of `movie`: {movie.__class__.__name__}") self._write_images = self._save_movie or self.output_file if show is None: self.show = not self._write_images else: self.show = show def initialize(self, state: FieldBase, info: InfoDict = None) -> float: from ..tools.plotting import get_plotting_context self._context = get_plotting_context(title="Initializing...", show=self.show) with self._context: self._plot_reference = state.plot(**self.plot_args) if self._context.supports_update: if hasattr(state.plot, "update_method"): if state.plot.update_method is None: if state.plot.mpl_class == "axes": self._update_method = "update_ax" elif state.plot.mpl_class == "figure": self._update_method = "update_fig" else: mpl_class = state.plot.mpl_class raise RuntimeError( f"Unknown mpl_class on plot method: {mpl_class}" ) else: self._update_method = "update_data" else: raise RuntimeError( "PlotTracker does not work since the state of type " f"{state.__class__.__name__} does not use the plot protocol of " "`pde.tools.plotting`." ) else: self._update_method = "replot" self._logger.info(f'Update method: "{self._update_method}"') self._last_update = time.monotonic() return super().initialize(state, info=info) def handle(self, state: FieldBase, t: float) -> None: if not self._write_images: time_passed = time.monotonic() - self._last_update if time_passed < 1 / self.max_fps: return if callable(self.title): self._context.title = str(self.title(state, t)) else: self._context.title = self.title.format(time=t) with self._context: if self._update_method == "update_data": update_func = getattr(state, state.plot.update_method) update_func(self._plot_reference) elif self._update_method == "update_fig": fig = self._context.fig fig.clf() state.plot(fig=fig, **self.plot_args) elif self._update_method == "update_ax": fig = self._context.fig fig.clf() ax = fig.add_subplot(1, 1, 1) state.plot(ax=ax, **self.plot_args) elif self._update_method == "replot": state.plot(**self.plot_args) else: raise RuntimeError(f"Unknown update method `{self._update_method}`") if self.output_file and self._context.fig is not None: self._context.fig.savefig(self.output_file) if self.movie: self.movie.add_figure(self._context.fig) self._last_update = time.monotonic() def finalize(self, info: InfoDict = None) -> None: super().finalize(info) if self._save_movie: self.movie.save() self.movie._end() if not self.show: self._context.close() class LivePlotTracker(PlotTracker): name = "plot" @fill_in_docstring def __init__( self, interval: IntervalData = "0:03", *, show: bool = True, max_fps: float = 2, **kwargs, ): super().__init__(interval=interval, show=show, max_fps=max_fps, **kwargs) class DataTracker(CallbackTracker): @fill_in_docstring def __init__( self, func: Callable, interval: IntervalData = 1, filename: str = None ): super().__init__(func=func, interval=interval) self.filename = filename self.times: List[float] = [] self.data: List[Any] = []
MIT License
myriadrf/pylms7002soapy
pyLMS7002Soapy/LMS7002_CDS.py
LMS7002_CDS.CDS_MCLK1
python
def CDS_MCLK1(self):
    return self._readReg('CFG0', 'CDS_MCLK1<1:0>')
Get the value of CDS_MCLK1<1:0>
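A hypothetical usage sketch, relying only on the constructor and properties shown in this record; `chip` stands for an already-initialised LMS7002 object and is an assumption here.

cds = LMS7002_CDS(chip)
print(cds.CDS_MCLK1)    # CFG0 field CDS_MCLK1<1:0>, an integer in 0..3
cds.CDS_MCLK2 = 2       # the matching CDS_MCLK2 setter validates the 0..3 range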
https://github.com/myriadrf/pylms7002soapy/blob/4f828eb9282c302dc6b187d91df5e77c8a6f2d61/pyLMS7002Soapy/LMS7002_CDS.py#L44-L48
from pyLMS7002Soapy.LMS7002_base import LMS7002_base


class LMS7002_CDS(LMS7002_base):
    __slots__ = []

    def __init__(self, chip):
        self.chip = chip
        self.channel = None
        self.prefix = "CDS_"

    @property
    def CDS_MCLK2(self):
        return self._readReg('CFG0', 'CDS_MCLK2<1:0>')

    @CDS_MCLK2.setter
    def CDS_MCLK2(self, value):
        if not (0 <= value <= 3):
            raise ValueError("Value must be [0..3]")
        self._writeReg('CFG0', 'CDS_MCLK2<1:0>', value)

    @property
Apache License 2.0
amccaugh/phidl
phidl/device_layout.py
_GeometryHelper.y
python
def y(self, destination):
    destination = (self.center[0], destination)
    self.move(destination = destination, origin = self.center, axis = 'y')
Sets the y-coordinate of the center of the bounding box.

Parameters
----------
destination : int or float
    y-coordinate of the bbox center.
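A hypothetical usage sketch (not from the record): any phidl object exposing this bounding-box helper can be recentred vertically by assigning to `.y`; the rectangle geometry below is assumed for illustration.

# Minimal sketch: shifting a phidl Device vertically via the .y property.
import phidl.geometry as pg

d = pg.rectangle(size=(10, 5))  # bbox spans y = 0..5, so the bbox center is at y = 2.5
d.y = 0                         # moves the device so its bbox center sits at y = 0
print(d.bbox)                   # approximately [[0, -2.5], [10, 2.5]]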
https://github.com/amccaugh/phidl/blob/eae1cea6de172db72207e926c8a6500bc9dfcddf/phidl/device_layout.py#L519-L528
from __future__ import division from __future__ import print_function from __future__ import absolute_import import gdspy from copy import deepcopy as _deepcopy import numpy as np from numpy import sqrt, mod, pi, sin, cos from numpy.linalg import norm import warnings import hashlib from phidl.constants import _CSS3_NAMES_TO_HEX import gdspy.library gdspy.library.use_current_library = False __version__ = '1.6.0' def _rotate_points(points, angle = 45, center = (0, 0)): if angle == 0: return points angle = angle * pi/180 ca = cos(angle) sa = sin(angle) sa = np.array((-sa, sa)) c0 = np.array(center) if np.asarray(points).ndim == 2: return (points - c0)*ca + (points - c0)[:,::-1]*sa + c0 if np.asarray(points).ndim == 1: return (points - c0)*ca + (points - c0)[::-1]*sa + c0 def _reflect_points(points, p1 = (0, 0), p2 = (1, 0)): points = np.array(points); p1 = np.array(p1); p2 = np.array(p2) if np.asarray(points).ndim == 1: return 2*(p1 + (p2-p1)*np.dot((p2-p1),(points-p1))/norm(p2-p1)**2) - points if np.asarray(points).ndim == 2: return np.array([2*(p1 + (p2-p1)*np.dot((p2-p1),(p-p1))/norm(p2-p1)**2) - p for p in points]) def _is_iterable(items): return isinstance(items, (list, tuple, set, np.ndarray)) def _parse_coordinate(c): if isinstance(c, Port): return c.midpoint elif np.array(c).size == 2: return c else: raise ValueError('[PHIDL] Could not parse coordinate, input should be array-like (e.g. [1.5,2.3] or a Port') def _parse_move(origin, destination, axis): if destination is None: destination = origin origin = [0,0] d = _parse_coordinate(destination) o = _parse_coordinate(origin) if axis == 'x': d = (d[0], o[1]) if axis == 'y': d = (o[0], d[1]) dx,dy = np.array(d) - o return dx,dy def _distribute(elements, direction = 'x', spacing = 100, separation = True, edge = None): if len(elements) == 0: return elements if direction not in ({'x','y'}): raise ValueError("[PHIDL] distribute(): 'direction' argument must be either 'x' or'y'") if (direction == 'x') and (edge not in ({'x', 'xmin', 'xmax'})) and (separation == False): raise ValueError("[PHIDL] distribute(): When `separation` == False and direction == 'x'," + " the `edge` argument must be one of {'x', 'xmin', 'xmax'}") if (direction == 'y') and (edge not in ({'y', 'ymin', 'ymax'})) and (separation == False): raise ValueError("[PHIDL] distribute(): When `separation` == False and direction == 'y'," + " the `edge` argument must be one of {'y', 'ymin', 'ymax'}") if (direction == 'y'): sizes = [e.ysize for e in elements] if (direction == 'x'): sizes = [e.xsize for e in elements] spacing = np.array([spacing]*len(elements)) if separation == True: if direction == 'x': edge = 'xmin' if direction == 'y': edge = 'ymin' else: sizes = np.zeros(len(spacing)) start = elements[0].__getattribute__(edge) positions = np.cumsum(np.concatenate(([start], (spacing + sizes)))) for n, e in enumerate(elements): e.__setattr__(edge, positions[n]) return elements def _align(elements, alignment = 'ymax'): if len(elements) == 0: return elements if alignment not in (['x','y','xmin', 'xmax', 'ymin','ymax']): raise ValueError("[PHIDL] 'alignment' argument must be one of 'x','y','xmin', 'xmax', 'ymin','ymax'") value = Group(elements).__getattribute__(alignment) for e in elements: e.__setattr__(alignment, value) return elements def _line_distances(points, start, end): if np.all(start == end): return np.linalg.norm(points - start, axis=1) vec = end - start cross = np.cross(vec, start - points) return np.divide(abs(cross), np.linalg.norm(vec)) def _simplify(points, tolerance=0): M 
= np.asarray(points) start, end = M[0], M[-1] dists = _line_distances(M, start, end) index = np.argmax(dists) dmax = dists[index] if dmax > tolerance: result1 = _simplify(M[:index + 1], tolerance) result2 = _simplify(M[index:], tolerance) result = np.vstack((result1[:-1], result2)) else: result = np.array([start, end]) return result def reset(): Layer.layer_dict = {} Device._next_uid = 0 class LayerSet(object): def __init__(self): self._layers = {} def add_layer(self, name = 'unnamed', gds_layer = 0, gds_datatype = 0, description = None, color = None, inverted = False, alpha = 0.6, dither = None): new_layer = Layer(gds_layer = gds_layer, gds_datatype = gds_datatype, name = name, description = description, inverted = inverted, color = color, alpha = alpha, dither = dither) if name in self._layers: raise ValueError('[PHIDL] LayerSet: Tried to add layer named ' '"%s"' % (name) + ', but a layer with that ' 'name already exists in this LayerSet') else: self._layers[name] = new_layer def __getitem__(self, val): try: return self._layers[val] except: raise ValueError('[PHIDL] LayerSet: Tried to access layer ' 'named "%s"' % (val) + ' which does not exist') def __repr__(self): return ('LayerSet (%s layers total)' % (len(self._layers))) class Layer(object): layer_dict = {} def __init__(self, gds_layer = 0, gds_datatype = 0, name = 'unnamed', description = None, inverted = False, color = None, alpha = 0.6, dither = None): if isinstance(gds_layer, Layer): l = gds_layer gds_datatype = l.gds_datatype name = l.name description = l.description alpha = l.alpha dither = l.dither inverted = l.inverted gds_layer = l.gds_layer self.gds_layer = gds_layer self.gds_datatype = gds_datatype self.name = name self.description = description self.inverted = inverted self.alpha = alpha self.dither = dither try: if color is None: self.color = None elif np.size(color) == 3: color = np.array(color) if np.any(color > 1) or np.any(color < 0): raise ValueError color = np.array(np.round(color*255), dtype = int) self.color = "#{:02x}{:02x}{:02x}".format(*color) elif color[0] == '#': if len(color) != 7: raise ValueError int(color[1:],16) self.color = color else: self.color = _CSS3_NAMES_TO_HEX[color.lower()] except: raise ValueError("[PHIDL] Layer() color must be specified as a " + "0-1 RGB triplet, (e.g. [0.5, 0.1, 0.9]), an HTML hex color string " + "(e.g. '#a31df4'), or a CSS3 color name (e.g. 
'gold' or " + "see http://www.w3schools.com/colors/colors_names.asp )") Layer.layer_dict[(gds_layer, gds_datatype)] = self def __repr__(self): return ('Layer (name %s, GDS layer %s, GDS datatype %s, description %s, color %s)' % (self.name, self.gds_layer, self.gds_datatype, self.description, self.color)) def _parse_layer(layer): if isinstance(layer, Layer): gds_layer, gds_datatype = layer.gds_layer, layer.gds_datatype elif np.shape(layer) == (2,): gds_layer, gds_datatype = layer[0], layer[1] elif np.shape(layer) == (1,): gds_layer, gds_datatype = layer[0], 0 elif layer is None: gds_layer, gds_datatype = 0, 0 elif isinstance(layer, (int, float)): gds_layer, gds_datatype = layer, 0 else: raise ValueError("""[PHIDL] _parse_layer() was passed something that could not be interpreted as a layer: layer = %s""" % layer) return (gds_layer, gds_datatype) class _GeometryHelper(object): @property def center(self): return np.sum(self.bbox,0)/2 @center.setter def center(self, destination): self.move(destination = destination, origin = self.center) @property def x(self): return np.sum(self.bbox,0)[0]/2 @x.setter def x(self, destination): destination = (destination, self.center[1]) self.move(destination = destination, origin = self.center, axis = 'x') @property def y(self): return np.sum(self.bbox,0)[1]/2 @y.setter
MIT License
maebert/shellcraft
src/shellcraft/grammar.py
Grammar.parse_grammar
python
def parse_grammar(self, grammar):
    weight_re = r"%((?:[\d]*\.)?[\d]+)"
    result = defaultdict(list)
    for line in grammar.splitlines():
        if "->" in line:
            symbol, extension = line.split("->")
            for extension in extension.split("|"):
                weight = re.search(weight_re, extension)
                if weight:
                    extension = re.sub(weight_re, "", extension)
                    weight = float(weight.group(1))
                else:
                    weight = 1.0
                result[symbol.strip()].append((extension.strip(), weight))
    for symbol, extensions in result.items():
        total_weight = sum(ext[1] for ext in extensions)
        result[symbol] = [(ext[0], ext[1] / total_weight) for ext in extensions]
    return dict(result)
Return a dictionary mapping symbols to extensions.

Example:
    >>> grammar = '''
    @s -> @n @v
    @s -> @n @v @n
    @n -> dog | cat
    @v -> chases %3 | eats %2.0'''
    >>> parse_grammar(grammar)
    {
        "@s": [["@n @v", 0.3], ["@n @v @n", 0.7]],
        "@v": [["chases", 0.75], ["eats", 0.25]],
        "@n": [["dog", 0.5], ["cat", 0.5]]
    }

Args:
    grammar: str

Returns:
    dict
https://github.com/maebert/shellcraft/blob/bab16de366073d18327f6bf92294f42c5728cd25/src/shellcraft/grammar.py#L62-L112
from collections import defaultdict
import random
import re
import sys
import os

VERBS = {"research": "researching", "mine": "mining", "craft": "crafting"}


class MaximumDepthExceeded(Exception):
    pass


class SymbolNotFound(Exception):
    pass


class Grammar(object):
    grammars = {}

    def __init__(self, grammar_string):
        self.grammar = self.parse_grammar(grammar_string)

    @classmethod
    def load(cls, grammar_file):
        with open(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "data",
                grammar_file + ".grammar",
            )
        ) as f:
            cls.grammars[grammar_file] = cls(f.read())
            return cls.grammars[grammar_file]

    def weighted_choice(self, options, weights):
        target = random.random() * sum(weights)
        acc = 0
        for idx, weight in enumerate(weights):
            acc += weight
            if acc > target:
                return options[idx]
MIT License
hpd/opensphericalcamera
python/osc/bubl.py
Bublcam.stream
python
def stream(self):
    acquired = False
    url = self._request("commands/execute")
    body = json.dumps({"name": "camera._bublStream",
                       "parameters": {
                           "sessionId": self.sid
                       }})
    try:
        response = requests.post(url, data=body, stream=True)
    except Exception, e:
        self._httpError(e)
        return acquired

    if response.status_code == 200:
        response = req.json()
        bublStreamPort = response['_bublStreamPort']
        bublStreamEndPoint = response['_bublStreamEndPoint']
    else:
        bublStreamPort = None
        bublStreamEndPoint = None
        self._oscError(response)

    return (bublStreamPort, bublStreamEndPoint)
_bublStream

Return the port and end point to use for rtsp video streaming

Reference:
https://github.com/BublTechnology/osc-client/blob/master/lib/BublOscClient.js#L59
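A hypothetical usage sketch (not from the record): the startSession()/closeSession() calls are assumed to come from the OSC base client that sets self.sid and are not shown in this snippet.

# Minimal sketch, Python 2 style to match the module above.
camera = Bublcam("192.168.0.100", 80)
camera.startSession()                      # assumed base-class call that establishes self.sid
port, endpoint = camera.stream()
if port is not None:
    print("RTSP stream on port %s at endpoint %s" % (port, endpoint))
camera.closeSession()                      # assumed base-class call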
https://github.com/hpd/opensphericalcamera/blob/2456f6003777fe4403b41d39db1bb4ab7f3deffb/python/osc/bubl.py#L255-L287
import json import requests import timeit import osc __author__ = 'Haarm-Pieter Duiker' __copyright__ = 'Copyright (C) 2016 - Duiker Research Corp' __license__ = '' __maintainer__ = 'Haarm-Pieter Duiker' __email__ = 'support@duikerresearch.org' __status__ = 'Production' __major_version__ = '1' __minor_version__ = '0' __change_version__ = '0' __version__ = '.'.join((__major_version__, __minor_version__, __change_version__)) __all__ = ['Bublcam'] class Bublcam(osc.OpenSphericalCamera): def __init__(self, ip_base="192.168.0.100", httpPort=80): osc.OpenSphericalCamera.__init__(self, ip_base, httpPort) def updateFirmware(self, firmwareFilename): url = self._request("_bublUpdate") with open(firmwareFilename, 'rb') as handle: body = handle.read() try: req = requests.post(url, data=body, headers={'Content-Type': 'application/octet-stream'}) except Exception, e: self._httpError(e) return None if req.status_code == 200: response = req.json() else: self._oscError(req) response = None return response def bublGetImage(self, fileUri): acquired = False if fileUri: url = self._request("_bublGetImage/%s" % fileUri) fileName = fileUri.split("/")[1] try: response = requests.get(url, stream=True) except Exception, e: self._httpError(e) return acquired if response.status_code == 200: with open(fileName, 'wb') as handle: for block in response.iter_content(1024): handle.write(block) acquired = True else: self._oscError(req) return acquired def stop(self, commandId): url = self._request("commands/_bublStop") body = json.dumps({ "id": commandId }) try: req = requests.post(url, data=body) except Exception, e: self._httpError(e) return None if req.status_code == 200: response = req.json() else: self._oscError(req) response = None return response def poll(self, commandId, fingerprint, waitTimeout): url = self._request("commands/_bublPoll") body = json.dumps({ "id": commandId, "fingerprint" : fingerprint, "waitTimeout" : waitTimeout }) try: req = requests.post(url, data=body) except Exception, e: self._httpError(e) return None if req.status_code == 200: response = req.json() else: self._oscError(req) response = None return response def captureVideo(self): url = self._request("commands/execute") body = json.dumps({"name": "camera._bublCaptureVideo", "parameters": { "sessionId": self.sid } }) try: req = requests.post(url, data=body) except Exception, e: self._httpError(e) return None if req.status_code == 200: response = req.json() else: self._oscError(req) response = None return response def shutdown(self, shutdownDelay): url = self._request("commands/execute") body = json.dumps({"name": "camera._bublShutdown", "parameters": { "sessionId": self.sid, "shutdownDelay" : shutdownDelay } }) try: req = requests.post(url, data=body) except Exception, e: self._httpError(e) return None if req.status_code == 200: response = req.json() else: self._oscError(req) response = None return response
MIT License
lukaabra/subcrawl
subtitles.py
SubtitleDownloader.download_from_opensubtitles
python
def download_from_opensubtitles(self):
    with ServerProxy("https://api.opensubtitles.org/xml-rpc") as proxy:
        self.opensubs_token = self.log_in_opensubtitles(proxy)

        if self.opensubs_token != "error":
            self.prompt_label.setText("Connected to OpenSubtitles database")

            for payload_for_sub_search in (self._create_payload_for_subtitle_searching(entry)
                                           for entry in self.interactor.retrieve("selected_movies")):
                self._perform_query_and_store(payload_for_sub_search, proxy)
            self.interactor.commit_and_renew_cursor()

            self._perform_file_download(proxy)

            self.interactor.clear_db("search_subs")
            self.interactor.clear_db("download_subs")

            self.prompt_label.setText("Finishing up ...")
            proxy.LogOut(self.opensubs_token)

            self.prompt_label.setText("Download finished! Downloaded {} files".format(self.downloaded_files))
            self.downloaded_files = 0
Logs the user into the OpenSubtitles API. If the log in is successful then payloads are created for querying the OpenSubtitles database. The query result is passed to the download function. Meanwhile the Prompt Label in the GUI is updated with information. After all the downloading and querying is finished, the user is logged out.
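For context, a hypothetical stand-alone sketch of the raw OpenSubtitles XML-RPC calls this method orchestrates (LogIn, SearchSubtitles, DownloadSubtitles, LogOut); the user-agent string and the query values are placeholders, not taken from this record.

# Minimal sketch of the XML-RPC flow wrapped by download_from_opensubtitles().
from xmlrpc.client import ServerProxy

with ServerProxy("https://api.opensubtitles.org/xml-rpc") as proxy:
    login = proxy.LogIn("", "", "en", "YOUR_REGISTERED_USER_AGENT")  # placeholder user agent
    token = login["token"]
    results = proxy.SearchSubtitles(token, [{"query": "Some Movie", "sublanguageid": "eng"}], {"limit": 10})
    if results["status"] == "200 OK" and results["data"]:
        sub_id = results["data"][0]["IDSubtitleFile"]
        payload = proxy.DownloadSubtitles(token, [sub_id])  # base64-encoded gzip per file
    proxy.LogOut(token)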
https://github.com/lukaabra/subcrawl/blob/85da48f513dceed2ed9f9c60f09914996357e7f9/subtitles.py#L224-L253
import json import gzip import os import base64 from socket import gaierror from http.client import ResponseNotReady from xmlrpc.client import ServerProxy, ProtocolError, Fault, expat class SubtitlePreference(object): def __init__(self): self.language_name = "Albanian" self.language_iso2 = "sq" self.language_iso3 = "alb" self.sub_source_preference = ("OpenSubtitles", "SubDB") def add_language(self, language_preference: str): with open("resources/iso 639 2.json", "r") as languages_file: languages_json = json.load(languages_file) for language in languages_json: if language_preference == language["English_Name"]: self.language_name = language["English_Name"] self.language_iso2 = language["Alpha2_Code"] self.language_iso3 = language["Alpha3b_Code"] def change_sub_source(self, sub_source_list: list): self.sub_source_preference = tuple(sub_source_list) def __str__(self): return "Subtitle language preference:\t{0.language_name} - {0.language_iso2} - {0.language_iso3}\n" "Subtitle sources preference: {0.sub_source_preference}\n".format(self) class SubtitleDownloader(object): def __init__(self, subtitle_preference: SubtitlePreference, prompt_label, progress_bar, interactor): self.preference = subtitle_preference self.prompt_label = prompt_label self.progress_bar = progress_bar self.interactor = interactor self.downloaded_files = 0 self.opensubs_token = None self.sub_file_extensions = (".RAR", ".ZIP", ".SRT") def _create_payload_for_subtitle_searching(self, entry: tuple) -> dict: try: entry_id = entry[0] entry_title = entry[4] movie_directory, _ = os.path.split(entry[2]) except KeyError: payload_for_sub_search = dict() else: payload_for_sub_search = {"imdbid": entry_id, "query": entry_title, "sublanguageid": self.preference.language_iso3, "movie directory": movie_directory} return payload_for_sub_search def _perform_query_and_store(self, payload_for_sub_search: dict, proxy: ServerProxy): try: query_result = proxy.SearchSubtitles(self.opensubs_token, [payload_for_sub_search], {"limit": 10}) except Fault: self.prompt_label.setText("A fault has occurred") except ProtocolError: self.prompt_label.setText("A ProtocolError has occurred.") else: if query_result["status"] == "200 OK": if query_result["data"]: payload_for_download = self._create_download_data(query_result["data"], payload_for_sub_search) self.interactor.add_subtitle_search_data_to_db(payload_for_download) else: self.prompt_label.setText("There is no subtitles in this language for {}". 
format(payload_for_sub_search["query"])) else: self.prompt_label.setText("Wrong status code: {}".format(query_result["status"])) def _create_download_data(self, query_results: dict, payload_for_sub_search: dict): for result in query_results: subtitle_name, download_link, sub_id = result["SubFileName"], result["SubDownloadLink"], result["IDSubtitleFile"] movie_id = payload_for_sub_search["imdbid"] movie_directory = payload_for_sub_search["movie directory"] if subtitle_name.upper().endswith(self.sub_file_extensions): payload_for_download = {"imdbid": movie_id, "file name": subtitle_name, "IDSubtitleFile": sub_id, "movie directory": movie_directory} return payload_for_download def _perform_file_download(self, proxy): subtitle_ids = [sub_id for sub_id, _, __, ___ in self.interactor.retrieve("search_subs")] while len(subtitle_ids) >= 19: self._download_file(proxy, subtitle_ids[:19]) subtitle_ids = subtitle_ids[19:] print(len(subtitle_ids)) if subtitle_ids: self._download_file(proxy, subtitle_ids) def _download_file(self, proxy, subtitle_ids): download_data = dict() try: download_data = proxy.DownloadSubtitles(self.opensubs_token, subtitle_ids) except ProtocolError as e: download_data["status"] = e self.prompt_label.setText("There has been a ProtocolError during downloading") except ResponseNotReady as e: download_data["status"] = e self.prompt_label.setText("There has been a ResponseNotReady Error during downloading") if download_data["status"] == "200 OK": self._store_byte_data_to_db(download_data) self._get_stored_byte_data() else: self.prompt_label.setText("There was an error while trying to download your file: {}" .format(download_data["status"])) def _store_byte_data_to_db(self, download_data): for individual_download_dict in download_data["data"]: self.interactor.add_subtitle_download_data_to_db(tuple(individual_download_dict.values())) self.interactor.commit_and_renew_cursor() def _get_stored_byte_data(self): for sub_id, byte_data in self.interactor.retrieve("download_subs"): search_condition = ("subs_id", sub_id) for _, __, sub_name, movie_directory in self.interactor.retrieve("search_subs", search_condition): subtitle_path = movie_directory + "\\" + sub_name + ".gzip" self._write_file(byte_data, subtitle_path) break def _write_file(self, byte_data: str, subtitle_path: str): with open(subtitle_path, "wb") as subtitle_file: subtitle_file.write(base64.decodebytes(byte_data.encode())) with gzip.open(subtitle_path, 'rb') as gzip_file: content = gzip_file.read() with open(subtitle_path[:-4], 'wb') as srt_file: srt_file.write(content) self.downloaded_files += 1 os.remove(subtitle_path)
MIT License
sibirrer/lenstronomy
lenstronomy/Util/util.py
hyper2F2_array
python
def hyper2F2_array(a, b, c, d, x):
    if isinstance(x, int) or isinstance(x, float):
        out = mpmath.hyp2f2(a, b, c, d, x)
    else:
        n = len(x)
        out = np.zeros(n)
        for i in range(n):
            out[i] = mpmath.hyp2f2(a, b, c, d, x[i])
    return out
:param a:
:param b:
:param c:
:param d:
:param x:
:return:
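A hypothetical usage example (assuming lenstronomy, mpmath and numpy are installed and the module exports the function as its path suggests):

import numpy as np
from lenstronomy.Util.util import hyper2F2_array

print(hyper2F2_array(1.0, 1.0, 2.0, 2.0, 0.5))                          # scalar input -> mpmath value
print(hyper2F2_array(1.0, 1.0, 2.0, 2.0, np.array([0.1, 0.5, 1.0])))    # array input -> numpy array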
https://github.com/sibirrer/lenstronomy/blob/e6d0e179a98ecb0c4db25cdf7cfb73e83c6aeded/lenstronomy/Util/util.py#L559-L576
__author__ = 'Simon Birrer' import numpy as np import mpmath import itertools from lenstronomy.Util.numba_util import jit from lenstronomy.Util.package_util import exporter export, __all__ = exporter() @export def merge_dicts(*dict_args): result = {} for dictionary in dict_args: result.update(dictionary) return result @export def approx_theta_E(ximg, yimg): dis = [] xinds, yinds = [0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3] for (i, j) in zip(xinds, yinds): dx, dy = ximg[i] - ximg[j], yimg[i] - yimg[j] dr = (dx ** 2 + dy ** 2) ** 0.5 dis.append(dr) dis = np.array(dis) greatest = np.argmax(dis) dr_greatest = dis[greatest] dis[greatest] = 0 second_greatest = np.argmax(dis) dr_second = dis[second_greatest] return 0.5 * (dr_greatest * dr_second) ** 0.5 @export def sort_image_index(ximg, yimg, xref, yref): assert len(xref) == len(ximg) ximg, yimg = np.array(ximg), np.array(yimg) x_self = np.array(list(itertools.permutations(ximg))) y_self = np.array(list(itertools.permutations(yimg))) indexes = [0, 1, 2, 3] index_iterations = list(itertools.permutations(indexes)) delta_r = [] for i in range(0, int(len(x_self))): dr = 0 for j in range(0, int(len(x_self[0]))): dr += (x_self[i][j] - xref[j]) ** 2 + (y_self[i][j] - yref[j]) ** 2 delta_r.append(dr ** .5) min_indexes = np.array(index_iterations[np.argmin(delta_r)]) return min_indexes @export @jit() def rotate(xcoords, ycoords, angle): return xcoords * np.cos(angle) + ycoords * np.sin(angle), -xcoords * np.sin(angle) + ycoords * np.cos(angle) @export def map_coord2pix(ra, dec, x_0, y_0, M): x, y = M.dot(np.array([ra, dec])) return x + x_0, y + y_0 @export def array2image(array, nx=0, ny=0): if nx == 0 or ny == 0: n = int(np.sqrt(len(array))) if n ** 2 != len(array): raise ValueError("lenght of input array given as %s is not square of integer number!" % (len(array))) nx, ny = n, n image = array.reshape(int(nx), int(ny)) return image @export def image2array(image): nx, ny = image.shape imgh = np.reshape(image, nx * ny) return imgh @export def array2cube(array, n_1, n_23): n = int(np.sqrt(n_23)) if n ** 2 != n_23: raise ValueError("2nd and 3rd dims (%s) are not square of integer number!" % n_23) n_2, n_3 = n, n cube = array.reshape(n_1, n_2, n_3) return cube @export def cube2array(cube): n_1, n_2, n_3 = cube.shape array = cube.reshape(n_1 * n_2 * n_3) return array @export def make_grid(numPix, deltapix, subgrid_res=1, left_lower=False): if isinstance(numPix, (tuple, list, np.ndarray)): assert len(numPix) == 2 if any(x != round(x) for x in numPix): raise ValueError("numPix contains non-integers: %s" % numPix) numPix = np.asarray(numPix, dtype=np.int) else: if numPix != round(numPix): raise ValueError("Attempt to specify non-int numPix: %s" % numPix) numPix = np.array([numPix, numPix], dtype=np.int) numPix_eff = (numPix * subgrid_res).astype(np.int) deltapix_eff = deltapix / float(subgrid_res) x_grid = np.tile(np.arange(numPix_eff[0]), numPix_eff[1]) * deltapix_eff y_grid = np.repeat(np.arange(numPix_eff[1]), numPix_eff[0]) * deltapix_eff if left_lower is True: shift = -1. / 2 + 1. 
/ (2 * subgrid_res) * np.array([1, 1]) else: shift = deltapix_eff * (numPix_eff - 1) / 2 return x_grid - shift[0], y_grid - shift[1] @export def make_grid_transformed(numPix, Mpix2Angle): x_grid, y_grid = make_grid(numPix, deltapix=1) ra_grid, dec_grid = map_coord2pix(x_grid, y_grid, 0, 0, Mpix2Angle) return ra_grid, dec_grid @export def make_grid_with_coordtransform(numPix, deltapix, subgrid_res=1, center_ra=0, center_dec=0, left_lower=False, inverse=True): numPix_eff = numPix * subgrid_res deltapix_eff = deltapix / float(subgrid_res) a = np.arange(numPix_eff) matrix = np.dstack(np.meshgrid(a, a)).reshape(-1, 2) if inverse is True: delta_x = -deltapix_eff else: delta_x = deltapix_eff if left_lower is True: ra_grid = matrix[:, 0] * delta_x dec_grid = matrix[:, 1] * deltapix_eff else: ra_grid = (matrix[:, 0] - (numPix_eff - 1) / 2.) * delta_x dec_grid = (matrix[:, 1] - (numPix_eff - 1) / 2.) * deltapix_eff shift = (subgrid_res - 1) / (2. * subgrid_res) * deltapix ra_grid -= shift + center_ra dec_grid -= shift + center_dec ra_at_xy_0 = ra_grid[0] dec_at_xy_0 = dec_grid[0] Mpix2coord = np.array([[delta_x, 0], [0, deltapix_eff]]) Mcoord2pix = np.linalg.inv(Mpix2coord) x_at_radec_0, y_at_radec_0 = map_coord2pix(-ra_at_xy_0, -dec_at_xy_0, x_0=0, y_0=0, M=Mcoord2pix) return ra_grid, dec_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix @export def grid_from_coordinate_transform(nx, ny, Mpix2coord, ra_at_xy_0, dec_at_xy_0): a = np.arange(nx) b = np.arange(ny) matrix = np.dstack(np.meshgrid(a, b)).reshape(-1, 2) x_grid = matrix[:, 0] y_grid = matrix[:, 1] ra_grid = x_grid * Mpix2coord[0, 0] + y_grid * Mpix2coord[0, 1] + ra_at_xy_0 dec_grid = x_grid * Mpix2coord[1, 0] + y_grid * Mpix2coord[1, 1] + dec_at_xy_0 return ra_grid, dec_grid @export def get_axes(x, y): n = int(np.sqrt(len(x))) if n ** 2 != len(x): raise ValueError("lenght of input array given as %s is not square of integer number!" 
% (len(x))) x_image = x.reshape(n, n) y_image = y.reshape(n, n) x_axes = x_image[0, :] y_axes = y_image[:, 0] return x_axes, y_axes @export def averaging(grid, numGrid, numPix): Nbig = numGrid Nsmall = numPix small = grid.reshape([int(Nsmall), int(Nbig / Nsmall), int(Nsmall), int(Nbig / Nsmall)]).mean(3).mean(1) return small @export def displaceAbs(x, y, sourcePos_x, sourcePos_y): x_mapped = x - sourcePos_x y_mapped = y - sourcePos_y absmapped = np.sqrt(x_mapped ** 2 + y_mapped ** 2) return absmapped @export def get_distance(x_mins, y_mins, x_true, y_true): if len(x_mins) != len(x_true): return 10 ** 10 dist = 0 x_true_list = np.array(x_true) y_true_list = np.array(y_true) for i in range(0, len(x_mins)): dist_list = (x_mins[i] - x_true_list) ** 2 + (y_mins[i] - y_true_list) ** 2 dist += min(dist_list) k = np.where(dist_list == min(dist_list)) if type(k) != int: k = k[0] x_true_list = np.delete(x_true_list, k) y_true_list = np.delete(y_true_list, k) return dist @export def compare_distance(x_mapped, y_mapped): X2 = 0 for i in range(0, len(x_mapped) - 1): for j in range(i + 1, len(x_mapped)): dx = x_mapped[i] - x_mapped[j] dy = y_mapped[i] - y_mapped[j] X2 += dx ** 2 + dy ** 2 return X2 @export def min_square_dist(x_1, y_1, x_2, y_2): dist = np.zeros_like(x_1) for i in range(len(x_1)): dist[i] = np.min((x_1[i] - x_2) ** 2 + (y_1[i] - y_2) ** 2) return dist @export def selectBest(array, criteria, numSelect, highest=True): n = len(array) m = len(criteria) if n != m: raise ValueError('Elements in array (%s) not equal to elements in criteria (%s)' % (n, m)) if n < numSelect: return array array_sorted = array[criteria.argsort()] if highest: result = array_sorted[n - numSelect:] else: result = array_sorted[0:numSelect] return result[::-1] @export def select_best(array, criteria, num_select, highest=True): n = len(array) m = len(criteria) if n != m: raise ValueError('Elements in array (%s) not equal to elements in criteria (%s)' % (n, m)) if n < num_select: return array array = np.array(array) if highest is True: indexes = criteria.argsort()[::-1][:num_select] else: indexes = criteria.argsort()[::-1][n - num_select:] return array[indexes] @export def points_on_circle(radius, num_points, connect_ends=True): if connect_ends: angle = np.linspace(0, 2 * np.pi, num_points) else: angle = np.linspace(0, 2 * np.pi * (1 - 1./num_points), num_points) x_coord = np.cos(angle) * radius y_coord = np.sin(angle) * radius return x_coord, y_coord @export @jit() def neighborSelect(a, x, y): dim = int(np.sqrt(len(a))) values = [] x_mins = [] y_mins = [] for i in range(dim + 1, len(a) - dim - 1): if (a[i] < a[i - 1] and a[i] < a[i + 1] and a[i] < a[i - dim] and a[i] < a[i + dim] and a[i] < a[i - (dim - 1)] and a[i] < a[i - (dim + 1)] and a[i] < a[i + (dim - 1)] and a[i] < a[i + (dim + 1)]): if (a[i] < a[(i - 2 * dim - 1) % dim ** 2] and a[i] < a[(i - 2 * dim + 1) % dim ** 2] and a[i] < a[(i - dim - 2) % dim ** 2] and a[i] < a[(i - dim + 2) % dim ** 2] and a[i] < a[(i + dim - 2) % dim ** 2] and a[i] < a[(i + dim + 2) % dim ** 2] and a[i] < a[(i + 2 * dim - 1) % dim ** 2] and a[i] < a[(i + 2 * dim + 1) % dim ** 2] and a[i] < a[(i + 2 * dim) % dim ** 2] and a[i] < a[(i - 2 * dim) % dim ** 2] and a[i] < a[(i - 2) % dim ** 2] and a[i] < a[(i + 2) % dim ** 2]): x_mins.append(x[i]) y_mins.append(y[i]) values.append(a[i]) return np.array(x_mins), np.array(y_mins), np.array(values) @export def fwhm2sigma(fwhm): sigma = fwhm / (2 * np.sqrt(2 * np.log(2))) return sigma @export def sigma2fwhm(sigma): fwhm = sigma * (2 * np.sqrt(2 
* np.log(2))) return fwhm @export
MIT License
nvidia/apex
apex/contrib/optimizers/distributed_fused_adam_v2.py
DistributedFusedAdamV2.has_overflow
python
def has_overflow(self):
    has_overflow = self._has_overflow
    self._has_overflow = False
    return has_overflow
Check if overflows were detected by any call to the step(...) method. Clears the overflow flag.
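A hypothetical usage sketch (not from the source): only set_global_scale() and the has_overflow property appear in this record; step() is the standard torch optimizer entry point, and the loss-scale bookkeeping below is purely illustrative.

optimizer.set_global_scale(loss_scale)
optimizer.step()
if optimizer.has_overflow:       # reads and clears the overflow flag
    loss_scale /= 2.0            # the update could not be trusted; back off the scale
else:
    loss_scale *= 1.001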
https://github.com/nvidia/apex/blob/14ccf5986401104121d0ef286a29386904af3bb7/apex/contrib/optimizers/distributed_fused_adam_v2.py#L497-L503
import math import torch import importlib import amp_C from apex.multi_tensor_apply import multi_tensor_applier class DistributedFusedAdamV2(torch.optim.Optimizer): def __init__(self, params, lr=1e-3, bias_correction = True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt = False, weight_decay=0., max_grad_norm=0., amsgrad=False, use_mt=False, amp_scale_adjustment=1.0, overlap_reductions=True, full_pipeline=True, compute_L2_grad_norm=False, distributed_weight_update=0, dwu_group_size=0, dwu_num_blocks=4, dwu_num_rs_pg=1, dwu_num_ar_pg=4, dwu_num_ag_pg=0, revert_method=1, flat_mt=False, dwu_num_chunks=4, predivide=True, e5m2_allgather=False, do_not_flatten_model=False): global fused_adam_cuda fused_adam_cuda = importlib.import_module("fused_adam_cuda") self._amp_scale_adjustment = amp_scale_adjustment if use_mt: raise RuntimeError('DistributedFusedAdam does not support use_mt.') if amsgrad: raise RuntimeError('DistributedFusedAdam does not support the AMSGrad variant.') defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(DistributedFusedAdamV2, self).__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 self._overflow_buf = torch.cuda.IntTensor([0]) self._has_overflow = False assert (len(self.param_groups) == 1), "More than one parameter group is not supported." self._revert_method = revert_method if self._revert_method > 1: print("revert_method -> double buffer fp32 parameters, will consume more memory") self._last_step = False self._overlap_reductions = overlap_reductions self._global_scale = None self._num_blocks = dwu_num_blocks self._num_chunks = dwu_num_chunks self._predivide = predivide self._e5m2_allgather = e5m2_allgather self._do_not_flatten_model = do_not_flatten_model self._full_pipeline = full_pipeline self._compute_L2_grad_norm = compute_L2_grad_norm self._L2_grad_norm = None self._group_size = torch.cuda.device_count() if dwu_group_size <= 0 else dwu_group_size self._world_size = torch.distributed.get_world_size() self._num_groups = self._world_size // self._group_size self._rank_in_group = torch.distributed.get_rank() % self._group_size p_offset = 0 p_i = 0 self._param_state = None self._model_params = [] self._grads_info = [] self._grad_accs = [] for group in self.param_groups: self._param_group = group prev = None for p in group['params']: torch.distributed.broadcast(p,0) if not p.requires_grad: continue self._model_params.append(p) state = self.state[p] if len(state) == 0: state['step'] = 0 if self._param_state is None: self._param_state = state p_grads_size = p.numel() def wrapper(param, param_i, param_grads_size, param_offset): param_tmp = param.expand_as(param) grad_acc = param_tmp.grad_fn.next_functions[0][0] def allreduce_hook(*unused): self._do_overlapped_reduction(param_i, param_grads_size, param_offset, param) grad_acc.register_hook(allreduce_hook) self._grad_accs.append(grad_acc) self._grads_info.append({"param_grads_size":p_grads_size, "param_offset":p_offset}) wrapper(p, p_i, p_grads_size, p_offset) p_offset += p_grads_size if prev is not None and (prev.data_ptr() + prev.numel() * prev.element_size() != p.data_ptr()): p_offset = ((p_offset + 63) // 64) * 64 prev = p p_i += 1 self._grads_generated = [False]*len(self._grads_info) self._flat_mt = flat_mt self._grads = [] if self._overlap_reductions: self._current_block = self._num_blocks self._net_total_param_size = p_offset self._total_param_size = p_offset dwu_min_page_size = 256 * self._num_blocks * 
self._num_chunks * self._group_size self._total_param_size = ((self._total_param_size + dwu_min_page_size - 1) // dwu_min_page_size) * dwu_min_page_size self._block_size = self._total_param_size // self._num_blocks self._shard_size = self._block_size // self._group_size self._chunk_size = self._shard_size // self._num_chunks print("self._net_total_param_size=%d, self._total_param_size=%d, dwu_min_page_size=%d, self._block_size=%d, self._shard_size=%d, self._chunk_size=%d" % (self._net_total_param_size, self._total_param_size,dwu_min_page_size,self._block_size,self._shard_size,self._chunk_size)) self._low_param_i = [0]*self._num_blocks for block_id in range(self._num_blocks-1,-1,-1): p_i = len(self._grads_info)-1 while p_i > 0 and self._grads_info[p_i]["param_offset"] > block_id*self._block_size: p_i -= 1 self._low_param_i[block_id] = p_i print(self._low_param_i) self._flat_grads = torch.zeros([self._total_param_size], dtype=torch.float16, device='cuda') self._new_params = torch.zeros([self._total_param_size], dtype=torch.uint8 if self._e5m2_allgather else torch.float16, device='cuda') self._mega_shard_size = self._num_blocks * self._num_chunks * self._chunk_size self._fp32_p = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda') self._fp32_m = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda') self._fp32_v = torch.zeros([self._mega_shard_size], dtype=torch.float32, device='cuda') self._fp16_p = torch.zeros([self._mega_shard_size], dtype=torch.uint8 if self._e5m2_allgather else torch.float16, device='cuda') self._fp16_g = torch.zeros([self._mega_shard_size], dtype=torch.float16, device='cuda') self._individual_flat_grads = [] for p_i, (grads_info, p) in enumerate(zip(self._grads_info, self._model_params)): self._individual_flat_grads.append(self._flat_grads[grads_info["param_offset"]:grads_info["param_offset"]+grads_info["param_grads_size"]].view_as(p)) def _flat_split(p): def __blockify(p): return [p[block_id*self._block_size:(block_id+1)*self._block_size] for block_id in range(self._num_blocks)] def __shardify(p): return [p[shard_id*self._shard_size:(shard_id+1)*self._shard_size] for shard_id in range(self._group_size)] def __chunkify(p): return [p[chunk_id*self._chunk_size:(chunk_id+1)*self._chunk_size] for chunk_id in range(self._group_size)] list_of_blocks = __blockify(self._flat_grads) list_of_list_of_shards = [__shardify(block) for block in list_of_blocks] list_of_list_of_list_of_chunks = [[__chunkify(shard) for shard in shards] for shards in list_of_list_of_shards] return list_of_blocks, list_of_list_of_shards, list_of_list_of_list_of_chunks self._flat_grads_blocks, self._flat_grads_shards, self._flat_grads_chunks = _flat_split(self._flat_grads) def _full_packed_split(p): def __shardify(p): return [p[mega_shard*self._mega_shard_size:(mega_shard+1)*self._mega_shard_size] for mega_shard in range(self._group_size)] def __blockify(p): return [p[block_id*self._num_chunks*self._chunk_size:(block_id+1)*self._num_chunks*self._chunk_size] for block_id in range(self._num_blocks)] def __chunkify(p): return [p[chunk_id*self._chunk_size:(chunk_id+1)*self._chunk_size] for chunk_id in range(self._num_chunks)] list_of_mega_shards = __shardify(p) list_of_list_of_mega_blocks = [__blockify(mega_shard) for mega_shard in list_of_mega_shards] list_of_list_of_list_of_mega_chunks = [[__chunkify(mega_block) for mega_block in mega_blocks] for mega_blocks in list_of_list_of_mega_blocks] return list_of_mega_shards, list_of_list_of_mega_blocks, 
list_of_list_of_list_of_mega_chunks self._new_params_mega_shards, self._new_params_mega_blocks, self._new_params_mega_chunks = _full_packed_split(self._new_params) def _packed_split(p): def __packed_blockify(p): packed_block_size = self._num_chunks*self._chunk_size return [p[block_id*packed_block_size:(block_id+1)*packed_block_size] for block_id in range(self._num_blocks)] def __packed_chunkify(p): return [p[chunk_id*self._chunk_size:(chunk_id+1)*self._chunk_size] for chunk_id in range(self._num_chunks)] list_of_blocks = __packed_blockify(p) list_of_list_of_chunks = [__packed_chunkify(block) for block in list_of_blocks] return list_of_blocks, list_of_list_of_chunks self._fp32_p_blocks, self._fp32_p_chunks = _packed_split(self._fp32_p) self._fp32_m_blocks, self._fp32_m_chunks = _packed_split(self._fp32_m) self._fp32_v_blocks, self._fp32_v_chunks = _packed_split(self._fp32_v) self._fp16_p_blocks, self._fp16_p_chunks = _packed_split(self._fp16_p) self._fp16_g_blocks, self._fp16_g_chunks = _packed_split(self._fp16_g) self._packed_flat_to_model_params = [] for shard_id in range(self._group_size): for block_id in range(self._num_blocks): flat_shard_start = (block_id * self._group_size + shard_id) * self._shard_size flat_shard_end = flat_shard_start + self._shard_size for p, grads_info in zip(self._model_params, self._grads_info): flat_grad_start = grads_info["param_offset"] flat_grad_end = flat_grad_start + grads_info["param_grads_size"] clipped_start = (lambda a,b: a if a > b else b)(flat_grad_start, flat_shard_start) clipped_end = (lambda a,b: a if a < b else b)(flat_grad_end, flat_shard_end) if clipped_start < clipped_end: grad_offset = clipped_start - flat_grad_start grad_length = clipped_end - clipped_start shard_offset = clipped_start - flat_shard_start model_param_fragment = p.view(-1)[grad_offset:grad_offset+grad_length] new_param_packed_fragment = self._new_params_mega_blocks[shard_id][block_id][shard_offset:shard_offset+grad_length] self._packed_flat_to_model_params.append( (new_param_packed_fragment, model_param_fragment) ) if shard_id == self._rank_in_group: master_param_fragment = self._fp32_p_blocks[block_id][shard_offset:shard_offset+grad_length] print("model_param_fragment.size()=%s, new_param_packed_fragment.size()=%s, master_param_fragment.size()=%s" % (str(model_param_fragment.size()), str(new_param_packed_fragment.size()), str(master_param_fragment.size()))) master_param_fragment.copy_(model_param_fragment) p_in, p_out = zip(*self._packed_flat_to_model_params) self._packed_flat_to_model_params = [p_in, p_out] self._distributed_weight_update = distributed_weight_update self._num_rs_pg = dwu_num_rs_pg self._num_ar_pg = dwu_num_ar_pg self._num_ag_pg = dwu_num_ag_pg if self._num_groups > 1: self._ar_pg = [] for dev_i in range(self._group_size): ranks = [dev_i+j*self._group_size for j in range(self._num_groups)] for i in range(self._num_ar_pg): grp = torch.distributed.new_group(ranks=ranks) if torch.distributed.get_rank() in ranks: self._ar_pg.append(grp) self._ar_st = [torch.cuda.Stream() for _ in range(self._num_ar_pg)] for ar_pg in self._ar_pg: torch.distributed.all_reduce(self._overflow_buf,group=ar_pg) rs_ranks = [] for group_i in range(self._num_groups): rs_ranks.append([group_i*self._group_size+j for j in range(self._group_size)]) self._rs_pg = [] for group_i in range(self._num_groups): ranks = rs_ranks[group_i] for i in range(self._num_rs_pg): grp = torch.distributed.new_group(ranks=ranks) if torch.distributed.get_rank() in ranks: self._rs_pg.append(grp) if 
self._compute_L2_grad_norm and torch.distributed.get_rank() in ranks: self._l2_grad_norm_pg = torch.distributed.new_group(ranks=ranks) torch.distributed.all_reduce(self._overflow_buf,group=self._l2_grad_norm_pg) self._rs_st = [torch.cuda.Stream() for _ in range(self._num_rs_pg)] for rs_pg in self._rs_pg: torch.distributed.all_reduce(self._overflow_buf,group=rs_pg) if self._num_ag_pg == 0: self._ag_pg = self._rs_pg self._ag_st = self._rs_st self._num_ag_pg = self._num_rs_pg else: self._ag_pg = [] for group_i in range(self._num_groups): ranks = rs_ranks[group_i] for i in range(self._num_ag_pg): grp = torch.distributed.new_group(ranks=ranks) if torch.distributed.get_rank() in ranks: self._ag_pg.append(grp) self._ag_st = [torch.cuda.Stream() for _ in range(self._num_ag_pg)] for ag_pg in self._ag_pg: torch.distributed.all_reduce(self._overflow_buf,group=ag_pg) self._l2_grad_norm_st = torch.cuda.Stream() if self._compute_L2_grad_norm else None self._completion_st = torch.cuda.Stream() self._reductions_works = [None]*self._num_blocks self._allgather_works = [None]*self._num_blocks import inspect assert ('no_copy' in inspect.getfullargspec(torch.distributed.reduce_scatter).args), "This version of c10d does not support no_copy option" def set_last_step(self, last_step): self._last_step = last_step def _get_flush_block(self): flush_block = [] if self._current_block > 0 and self._grads_generated[self._low_param_i[self._current_block-1]]: num_grads = len(self._grads_generated) contiguous_idx = num_grads while contiguous_idx > 0 and self._grads_generated[contiguous_idx-1]: contiguous_idx -= 1 if contiguous_idx < num_grads and self._grads_info[contiguous_idx]["param_offset"] <= (self._current_block-1)*self._block_size: self._current_block -= 1 start = self._current_block * self._block_size end = (self._current_block+1) * self._block_size flush_block = [start, end] return flush_block def _pipeline_block_reductions(self, block_id): self._flatten_grad_mt(1.0/self._world_size if self._predivide else 1.0) works = [None]*self._num_chunks rs_stream = self._rs_st[block_id%self._num_rs_pg] rs_stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(rs_stream): rs_work = torch.distributed.reduce_scatter(self._fp16_g_blocks[block_id],self._flat_grads_shards[block_id],group=self._rs_pg[block_id%self._num_rs_pg],async_op=True,no_copy=True) for chunk_id in range(self._num_chunks): works[chunk_id] = rs_work if self._num_groups > 1: for chunk_id in range(self._num_chunks): glob_chunk_id = block_id * self._num_chunks + chunk_id ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg] with torch.cuda.stream(ar_stream): rs_work.wait() works[chunk_id] = torch.distributed.all_reduce(self._fp16_g_chunks[block_id][chunk_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True) self._reductions_works[block_id] = works if self._compute_L2_grad_norm and block_id == 0: with torch.cuda.stream(self._l2_grad_norm_st): for block_id in range(self._num_blocks): for chunk_id in range(self._num_chunks): self._reductions_works[block_id][chunk_id].wait() l2_grad_norm_sq = torch.empty([1], device='cuda') l2_grad_norm_sq = self._fp16_g.norm(dtype=torch.float32, p=2)**2 torch.distributed.all_reduce(l2_grad_norm_sq, group=self._l2_grad_norm_pg) self._L2_grad_norm = l2_grad_norm_sq.sqrt().item() def __launch_step_kernel(self, p, p_copy, m, v, g): combined_scale = self._global_scale if self._param_group['max_grad_norm'] > 0 and math.isfinite(self.L2_grad_norm): combined_scale = self._param_group['max_grad_norm'] / 
(self.L2_grad_norm / self._global_scale + 1e-6) combined_scale = self._global_scale / min(1, combined_scale) bias_correction = 1 if self._param_group['bias_correction'] else 0 beta1, beta2 = self._param_group['betas'] fused_adam_cuda.reversible_adam( p, p_copy, m, v, g, self._param_group['lr'], beta1, beta2, self._param_group['eps'], combined_scale, self._param_state['step']+1, self.eps_mode, bias_correction, self._param_group['weight_decay']) def _pipeline_block_step(self, block_id): ag_stream = self._ag_st[block_id%self._num_ag_pg] with torch.cuda.stream(ag_stream): for chunk_id in range(self._num_chunks): self._reductions_works[block_id][chunk_id].wait() self.__launch_step_kernel( self._fp32_p_blocks[block_id], self._fp16_p_blocks[block_id], self._fp32_m_blocks[block_id], self._fp32_v_blocks[block_id], self._fp16_g_blocks[block_id]) if block_id == 0: for other_ag_stream in self._ag_st: self._completion_st.wait_stream(other_ag_stream) with torch.cuda.stream(self._completion_st): torch.distributed.all_gather(self._new_params_mega_shards, self._fp16_p, group=self._ag_pg[0], no_copy=True) def _pipeline_step(self): with torch.cuda.stream(self._completion_st): for block_id in range(self._num_blocks): for chunk_id in range(self._num_chunks): self._reductions_works[block_id][chunk_id].wait() self.__launch_step_kernel( self._fp32_p, self._fp16_p, self._fp32_m, self._fp32_v, self._fp16_g) torch.distributed.all_gather(self._new_params_mega_shards, self._fp16_p, group=self._ag_pg[0], no_copy=True) def _flatten_grad_mt(self, scale): if self._flat_mt and len(self._grads) > 0: self._overflow_buf.zero_() multi_tensor_applier( amp_C.multi_tensor_scale, self._overflow_buf, list(zip(*self._grads)), scale) self._grads = [] def _do_overlapped_reduction(self, param_i, param_grads_size, param_offset, param): if self._flat_mt: self._grads.append( (param.grad, self._individual_flat_grads[param_i]) ) else: torch.div(param.grad, self._world_size if self._predivide else 1.0, out=self._individual_flat_grads[param_i]) self._grads_generated[param_i]=True if not self._last_step: if self._overlap_reductions: flush_block = self._get_flush_block() while flush_block: block_id = flush_block[0] // self._block_size self._pipeline_block_reductions(block_id) if self._full_pipeline: self._pipeline_block_step(block_id) flush_block = self._get_flush_block() def set_global_scale(self, global_scale): self._global_scale = global_scale @property def global_scale(self): return self._global_scale @property
BSD 3-Clause New or Revised License
hyroai/gamla
gamla/functional.py
sort_by
python
def sort_by(key: Callable):
    def sort_by(seq: Iterable):
        return sorted(seq, key=key)

    return sort_by
Return a new list containing all items from the iterable in ascending order, sorted by a key.

>>> sort_by(len)(["hi!", "my", "name", "is"])
['my', 'is', 'hi!', 'name']
https://github.com/hyroai/gamla/blob/371d895ecff2833ca68d13025b695b26cd68ac17/gamla/functional.py#L33-L42
import dataclasses
import functools
import hashlib
import heapq
import inspect
import itertools
import json
import random
from concurrent import futures
from operator import truediv
from typing import (
    AbstractSet,
    Any,
    Callable,
    Collection,
    Dict,
    Iterable,
    List,
    Sequence,
    Text,
    Tuple,
    TypeVar,
    Union,
)

import heapq_max
import toolz

from gamla import currying, excepts_decorator, operator
from gamla.optimized import sync
MIT License
renfredxh/pyllettown
tmx.py
ObjectLayer.collide
python
def collide(self, rect, propname):
    r = []
    for object in self.get_in_region(rect.left, rect.top, rect.right, rect.bottom):
        if propname in object or propname in self.properties:
            r.append(object)
    return r
Find all objects the rect is touching that have the indicated property name set.
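A hypothetical usage sketch: the tilemap/layer lookup and the "triggers"/"blocker" names are assumptions about how the rest of this tmx module is used, not something shown in the snippet.

import pygame

player_rect = pygame.Rect(64, 96, 32, 32)
triggers = tilemap.layers['triggers']              # assumed: an ObjectLayer from the loaded map
for obj in triggers.collide(player_rect, 'blocker'):
    print("touching blocker object at", obj.left, obj.top)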
https://github.com/renfredxh/pyllettown/blob/dfbc1965ec84270565f8e95878abcc4b4c1e6b57/tmx.py#L589-L598
import sys import struct import pygame from pygame.locals import * from pygame import Rect from xml.etree import ElementTree from base64 import b64decode from zlib import decompress class Tile(object): def __init__(self, gid, surface, tileset): self.gid = gid self.surface = surface self.tile_width = tileset.tile_width self.tile_height = tileset.tile_height self.properties = {} @classmethod def fromSurface(cls, surface): class ts: tile_width, tile_height = surface.get_size() return cls(0, surface, ts) def loadxml(self, tag): props = tag.find('properties') if props is None: return for c in props.findall('property'): name = c.attrib['name'] value = c.attrib['value'] if value.isdigit(): value = int(value) self.properties[name] = value def __repr__(self): return '<Tile %d>' % self.gid class Tileset(object): def __init__(self, name, tile_width, tile_height, firstgid): self.name = name self.tile_width = tile_width self.tile_height = tile_height self.firstgid = firstgid self.tiles = [] self.properties = {} @classmethod def fromxml(cls, tag, firstgid=None): if 'source' in tag.attrib: firstgid = int(tag.attrib['firstgid']) with open(tag.attrib['source']) as f: tileset = ElementTree.fromstring(f.read()) return cls.fromxml(tileset, firstgid) name = tag.attrib['name'] if firstgid is None: firstgid = int(tag.attrib['firstgid']) tile_width = int(tag.attrib['tilewidth']) tile_height = int(tag.attrib['tileheight']) tileset = cls(name, tile_width, tile_height, firstgid) for c in tag.getchildren(): if c.tag == "image": tileset.add_image(c.attrib['source']) elif c.tag == 'tile': gid = tileset.firstgid + int(c.attrib['id']) tileset.get_tile(gid).loadxml(c) return tileset def add_image(self, file): image = pygame.image.load(file).convert_alpha() if not image: sys.exit("Error creating new Tileset: file %s not found" % file) id = self.firstgid for line in range(image.get_height() // self.tile_height): for column in range(image.get_width() // self.tile_width): pos = Rect(column * self.tile_width, line * self.tile_height, self.tile_width, self.tile_height) self.tiles.append(Tile(id, image.subsurface(pos), self)) id += 1 def get_tile(self, gid): return self.tiles[gid - self.firstgid] class Tilesets(dict): def add(self, tileset): for i, tile in enumerate(tileset.tiles): i += tileset.firstgid self[i] = tile class Cell(object): def __init__(self, x, y, px, py, tile): self.x, self.y = x, y self.px, self.py = px, py self.tile = tile self.topleft = (px, py) self.left = px self.right = px + tile.tile_width self.top = py self.bottom = py + tile.tile_height self.center = (px + tile.tile_width // 2, py + tile.tile_height // 2) self._added_properties = {} self._deleted_properties = set() def __repr__(self): return '<Cell %s,%s %d>' % (self.px, self.py, self.tile.gid) def __contains__(self, key): if key in self._deleted_properties: return False return key in self._added_properties or key in self.tile.properties def __getitem__(self, key): if key in self._deleted_properties: raise KeyError(key) if key in self._added_properties: return self._added_properties[key] if key in self.tile.properties: return self.tile.properties[key] raise KeyError(key) def __setitem__(self, key, value): self._added_properties[key] = value def __delitem__(self, key): self._deleted_properties.add(key) def intersects(self, other): if self.px + self.tile.tile_width < other.x: return False if other.x + other.width - 1 < self.px: return False if self.py + self.tile.tile_height < other.y: return False if other.y + other.height - 1 < self.py: return False 
return True class LayerIterator(object): def __init__(self, layer): self.layer = layer self.i, self.j = 0, 0 def __next__(self): if self.i == self.layer.width - 1: self.j += 1 self.i = 0 if self.j == self.layer.height - 1: raise StopIteration() value = self.layer[self.i, self.j] self.i += 1 return value class Layer(object): def __init__(self, name, visible, map): self.name = name self.visible = visible self.position = (0, 0) self.px_width = map.px_width self.px_height = map.px_height self.tile_width = map.tile_width self.tile_height = map.tile_height self.width = map.width self.height = map.height self.tilesets = map.tilesets self.group = pygame.sprite.Group() self.properties = {} self.cells = {} def __repr__(self): return '<Layer "%s" at 0x%x>' % (self.name, id(self)) def __getitem__(self, pos): return self.cells.get(pos) def __setitem__(self, pos, tile): x, y = pos px = x * self.tile_width py = y * self.tile_width self.cells[pos] = Cell(x, y, px, py, tile) def __iter__(self): return LayerIterator(self) @classmethod def fromxml(cls, tag, map): layer = cls(tag.attrib['name'], int(tag.attrib.get('visible', 1)), map) data = tag.find('data') if data is None: raise ValueError('layer %s does not contain <data>' % layer.name) data = data.text.strip() data = data.encode() data = decompress(b64decode(data)) data = struct.unpack('<%di' % (len(data)/4,), data) assert len(data) == layer.width * layer.height for i, gid in enumerate(data): if gid < 1: continue tile = map.tilesets[gid] x = i % layer.width y = i // layer.width layer.cells[x,y] = Cell(x, y, x*map.tile_width, y*map.tile_height, tile) return layer def update(self, dt, *args): pass def set_view(self, x, y, w, h, viewport_ox=0, viewport_oy=0): self.view_x, self.view_y = x, y self.view_w, self.view_h = w, h x -= viewport_ox y -= viewport_oy self.position = (x, y) def draw(self, surface): ox, oy = self.position w, h = self.view_w, self.view_h for x in range(ox, ox + w + self.tile_width, self.tile_width): i = x // self.tile_width for y in range(oy, oy + h + self.tile_height, self.tile_height): j = y // self.tile_height if (i, j) not in self.cells: continue cell = self.cells[i, j] surface.blit(cell.tile.surface, (cell.px - ox, cell.py - oy)) def find(self, *properties): r = [] for propname in properties: for cell in list(self.cells.values()): if cell and propname in cell: r.append(cell) return r def match(self, **properties): r = [] for propname in properties: for cell in list(self.cells.values()): if propname not in cell: continue if properties[propname] == cell[propname]: r.append(cell) return r def collide(self, rect, propname): r = [] for cell in self.get_in_region(rect.left, rect.top, rect.right, rect.bottom): if not cell.intersects(rect): continue if propname in cell: r.append(cell) return r def get_in_region(self, x1, y1, x2, y2): i1 = max(0, x1 // self.tile_width) j1 = max(0, y1 // self.tile_height) i2 = min(self.width, x2 // self.tile_width + 1) j2 = min(self.height, y2 // self.tile_height + 1) return [self.cells[i, j] for i in range(int(i1), int(i2)) for j in range(int(j1), int(j2)) if (i, j) in self.cells] def get_at(self, x, y): i = x // self.tile_width j = y // self.tile_height return self.cells.get((i, j)) def neighbors(self, index): i, j = index n = [] if i < self.width - 1: n.append((i + 1, j)) if i > 0: n.append((i - 1, j)) if j < self.height - 1: n.append((i, j + 1)) if j > 0: n.append((i, j - 1)) return n class Object(object): def __init__(self, type, x, y, width=0, height=0, name=None, gid=None, tile=None, visible=1): 
self.type = type self.px = x self.left = x if tile: y -= tile.tile_height width = tile.tile_width height = tile.tile_height self.py = y self.top = y self.width = width self.right = x + width self.height = height self.bottom = y + height self.name = name self.gid = gid self.tile = tile self.visible = visible self.properties = {} self._added_properties = {} self._deleted_properties = set() def __repr__(self): if self.tile: return '<Object %s,%s %s,%s tile=%d>' % (self.px, self.py, self.width, self.height, self.gid) else: return '<Object %s,%s %s,%s>' % (self.px, self.py, self.width, self.height) def __contains__(self, key): if key in self._deleted_properties: return False if key in self._added_properties: return True if key in self.properties: return True return self.tile and key in self.tile.properties def __getitem__(self, key): if key in self._deleted_properties: raise KeyError(key) if key in self._added_properties: return self._added_properties[key] if key in self.properties: return self.properties[key] if self.tile and key in self.tile.properties: return self.tile.properties[key] raise KeyError(key) def __setitem__(self, key, value): self._added_properties[key] = value def __delitem__(self, key): self._deleted_properties.add(key) def draw(self, surface, view_x, view_y): if not self.visible: return x, y = (self.px - view_x, self.py - view_y) if self.tile: surface.blit(self.tile.surface, (x, y)) else: r = pygame.Rect((x, y), (self.width, self.height)) pygame.draw.rect(surface, (255, 100, 100), r, 2) @classmethod def fromxml(cls, tag, map): if 'gid' in tag.attrib: gid = int(tag.attrib['gid']) tile = map.tilesets[gid] w = tile.tile_width h = tile.tile_height else: gid = None tile = None w = int(tag.attrib['width']) h = int(tag.attrib['height']) o = cls(tag.attrib.get('type', 'rect'), int(tag.attrib['x']), int(tag.attrib['y']), w, h, tag.attrib.get('name'), gid, tile, int(tag.attrib.get('visible', 1))) props = tag.find('properties') if props is None: return o for c in props.findall('property'): name = c.attrib['name'] value = c.attrib['value'] if value.isdigit(): value = int(value) o.properties[name] = value return o def intersects(self, x1, y1, x2, y2): if x2 < self.px: return False if y2 < self.py: return False if x1 > self.px + self.width: return False if y1 > self.py + self.height: return False return True class ObjectLayer(object): def __init__(self, name, color, objects, opacity=1, visible=1, position=(0, 0)): self.name = name self.color = color self.objects = objects self.opacity = opacity self.visible = visible self.position = position self.properties = {} def __repr__(self): return '<ObjectLayer "%s" at 0x%x>' % (self.name, id(self)) @classmethod def fromxml(cls, tag, map): layer = cls(tag.attrib['name'], tag.attrib.get('color'), [], float(tag.attrib.get('opacity', 1)), int(tag.attrib.get('visible', 1))) for object in tag.findall('object'): layer.objects.append(Object.fromxml(object, map)) for c in tag.findall('property'): name = c.attrib['name'] value = c.attrib['value'] if value.isdigit(): value = int(value) layer.properties[name] = value return layer def update(self, dt, *args): pass def set_view(self, x, y, w, h, viewport_ox=0, viewport_oy=0): self.view_x, self.view_y = x, y self.view_w, self.view_h = w, h x -= viewport_ox y -= viewport_oy self.position = (x, y) def draw(self, surface): if not self.visible: return ox, oy = self.position w, h = self.view_w, self.view_h for object in self.objects: object.draw(surface, self.view_x, self.view_y) def find(self, *properties): r = [] 
for propname in properties: for object in self.objects: if object and propname in object or propname in self.properties: r.append(object) return r def match(self, **properties): r = [] for propname in properties: for object in self.objects: if propname in object: val = object[propname] elif propname in self.properties: val = self.properties[propname] else: continue if properties[propname] == val: r.append(object) return r
MIT License
opsdroid/opsdroid
opsdroid/connector/matrix/connector.py
ensure_room_id_and_send
python
def ensure_room_id_and_send(func):
    @functools.wraps(func)
    async def ensure_room_id(self, event):
        if not event.target.startswith(("!", "#")):
            event.target = self.room_ids[event.target]
        if not event.target.startswith("!"):
            response = await self.connection.room_resolve_alias(event.target)
            if isinstance(response, nio.RoomResolveAliasError):
                _LOGGER.error(
                    f"Error resolving room id for {event.target}: {response.message} (status code {response.status_code})"
                )
            else:
                event.target = response.room_id

        try:
            return_val = await func(self, event)
        except aiohttp.client_exceptions.ServerDisconnectedError:
            _LOGGER.debug(_("Server had disconnected, retrying send."))
            return_val = await func(self, event)

        if isinstance(return_val, nio.responses.ErrorResponse):
            raise MatrixException(return_val)

        return return_val

    return ensure_room_id
Ensure that the target for the event is a matrix room id. Also retry the function call if the server disconnects.
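For illustration, a minimal, self-contained sketch of the decorator pattern described above. The DemoConnector class, its room_ids mapping, and the _send_message method are hypothetical stand-ins, and the decorator body is deliberately simplified (no nio alias resolution or retry) just to show how the wrapped coroutine sees an already-resolved Matrix room id.

# Hypothetical, simplified stand-in for the real decorator: it only performs the
# alias-to-room-id substitution before the wrapped send coroutine runs.
import asyncio
import functools

def ensure_room_id_and_send(func):
    @functools.wraps(func)
    async def ensure_room_id(self, event):
        if not event["target"].startswith("!"):
            event["target"] = self.room_ids[event["target"]]
        return await func(self, event)
    return ensure_room_id

class DemoConnector:
    room_ids = {"main": "!abc123:example.org"}

    @ensure_room_id_and_send
    async def _send_message(self, event):
        return f"sent {event['text']!r} to {event['target']}"

print(asyncio.run(DemoConnector()._send_message({"target": "main", "text": "hi"})))
# -> sent 'hi' to '!abc123:example.org'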
https://github.com/opsdroid/opsdroid/blob/9a48364869ded7cdd2420b43b0c2c153e846439e/opsdroid/connector/matrix/connector.py#L46-L79
import functools import json import logging import re from pathlib import Path from urllib.parse import urlparse import aiohttp import nio import nio.responses import nio.exceptions from opsdroid import const, events from opsdroid.connector import Connector, register_event from voluptuous import Inclusive, Required from . import events as matrixevents from .create_events import MatrixEventCreator from .html_cleaner import clean _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = { Inclusive("mxid", "login"): str, Inclusive("password", "login"): str, "access_token": str, Required("rooms"): dict, "homeserver": str, "nick": str, "room_specific_nicks": bool, "device_name": str, "device_id": str, "store_path": str, "enable_encryption": bool, } __all__ = ["ConnectorMatrix"] class MatrixException(Exception): def __init__(self, nio_error): self.nio_error = nio_error
Apache License 2.0
bihealth/sodar_core
projectroles/templatetags/projectroles_tags.py
get_backend_plugins
python
def get_backend_plugins():
    return get_active_plugins('backend')
Get active backend plugins
https://github.com/bihealth/sodar_core/blob/4176f762b77fae4dfdf24d51328938b94d3a64ce/projectroles/templatetags/projectroles_tags.py#L49-L51
from django import template from django.conf import settings from django.urls import reverse from django.utils import timezone from projectroles.models import ( RoleAssignment, RemoteProject, SODAR_CONSTANTS, PROJECT_TAG_STARRED, ) from projectroles.plugins import get_active_plugins from projectroles.project_tags import get_tag_state from projectroles.templatetags.projectroles_common_tags import get_info_link register = template.Library() PROJECT_TYPE_PROJECT = SODAR_CONSTANTS['PROJECT_TYPE_PROJECT'] PROJECT_TYPE_CATEGORY = SODAR_CONSTANTS['PROJECT_TYPE_CATEGORY'] PROJECT_ROLE_OWNER = SODAR_CONSTANTS['PROJECT_ROLE_OWNER'] REMOTE_LEVEL_NONE = SODAR_CONSTANTS['REMOTE_LEVEL_NONE'] REMOTE_LEVEL_REVOKED = SODAR_CONSTANTS['REMOTE_LEVEL_REVOKED'] INDENT_PX = 25 ACTIVE_LEVEL_TYPES = [ SODAR_CONSTANTS['REMOTE_LEVEL_NONE'], SODAR_CONSTANTS['REMOTE_LEVEL_READ_ROLES'], ] @register.simple_tag def sodar_constant(value): return SODAR_CONSTANTS[value] if value in SODAR_CONSTANTS else None @register.simple_tag
MIT License
doanguyen/lasotuvi
lasotuvi/DiaBan.py
diaBan.nhapDaiHan
python
def nhapDaiHan(self, cucSo, gioiTinh):
    for cung in self.thapNhiCung:
        khoangCach = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh)
        cung.daiHan(cucSo + khoangCach * 10)
    return self
Assign the đại hạn (major luck periods) to the palaces.

Args:
    cucSo (TYPE): Description
    gioiTinh (TYPE): Description

Returns:
    TYPE: Description
https://github.com/doanguyen/lasotuvi/blob/ace8379a3ea033674e8a9096f5b98a9e2041eace/lasotuvi/DiaBan.py#L153-L166
from lasotuvi.AmDuong import diaChi, dichCung, khoangCachCung class cungDiaBan(object): def __init__(self, cungID): hanhCung = [None, "Thủy", "Thổ", "Mộc", "Mộc", "Thổ", "Hỏa", "Hỏa", "Thổ", "Kim", "Kim", "Thổ", "Thủy"] self.cungSo = cungID self.hanhCung = hanhCung[cungID] self.cungSao = [] self.cungAmDuong = -1 if (self.cungSo % 2 == 0) else 1 self.cungTen = diaChi[self.cungSo]['tenChi'] self.cungThan = False def themSao(self, sao): dacTinhSao(self.cungSo, sao) self.cungSao.append(sao.__dict__) return self def cungChu(self, tenCungChu): self.cungChu = tenCungChu return self def daiHan(self, daiHan): self.cungDaiHan = daiHan return self def tieuHan(self, tieuHan): self.cungTieuHan = diaChi[tieuHan + 1]['tenChi'] return self def anCungThan(self): self.cungThan = True def anTuan(self): self.tuanTrung = True def anTriet(self): self.trietLo = True class diaBan(object): def __init__(self, thangSinhAmLich, gioSinhAmLich): super(diaBan, self).__init__() self.thangSinhAmLich = thangSinhAmLich self.gioSinhAmLich = gioSinhAmLich self.thapNhiCung = [cungDiaBan(i) for i in range(13)] self.nhapCungChu() self.nhapCungThan() def cungChu(self, thangSinhAmLich, gioSinhAmLich): self.cungThan = dichCung(3, thangSinhAmLich - 1, gioSinhAmLich - 1) self.cungMenh = dichCung(3, thangSinhAmLich - 1, - (gioSinhAmLich) + 1) cungPhuMau = dichCung(self.cungMenh, 1) cungPhucDuc = dichCung(self.cungMenh, 2) cungDienTrach = dichCung(self.cungMenh, 3) cungQuanLoc = dichCung(self.cungMenh, 4) self.cungNoboc = dichCung(self.cungMenh, 5) cungThienDi = dichCung(self.cungMenh, 6) self.cungTatAch = dichCung(self.cungMenh, 7) cungTaiBach = dichCung(self.cungMenh, 8) cungTuTuc = dichCung(self.cungMenh, 9) cungTheThiep = dichCung(self.cungMenh, 10) cungHuynhDe = dichCung(self.cungMenh, 11) cungChuThapNhiCung = [ { 'cungId': 1, 'tenCung': "Mệnh", 'cungSoDiaBan': self.cungMenh }, { 'cungId': 2, 'tenCung': "Phụ mẫu", 'cungSoDiaBan': cungPhuMau }, { 'cungId': 3, 'tenCung': "Phúc đức", 'cungSoDiaBan': cungPhucDuc }, { 'cungId': 4, 'tenCung': "Điền trạch", 'cungSoDiaBan': cungDienTrach }, { 'cungId': 5, 'tenCung': "Quan lộc", 'cungSoDiaBan': cungQuanLoc }, { 'cungId': 6, 'tenCung': "Nô bộc", 'cungSoDiaBan': self.cungNoboc }, { 'cungId': 7, 'tenCung': "Thiên di", 'cungSoDiaBan': cungThienDi }, { 'cungId': 8, 'tenCung': "Tật Ách", 'cungSoDiaBan': self.cungTatAch }, { 'cungId': 9, 'tenCung': "Tài Bạch", 'cungSoDiaBan': cungTaiBach }, { 'cungId': 10, 'tenCung': "Tử tức", 'cungSoDiaBan': cungTuTuc }, { 'cungId': 11, 'tenCung': "Phu thê", 'cungSoDiaBan': cungTheThiep }, { 'cungId': 12, 'tenCung': "Huynh đệ", 'cungSoDiaBan': cungHuynhDe } ] return cungChuThapNhiCung def nhapCungChu(self): for cung in self.cungChu(self.thangSinhAmLich, self.gioSinhAmLich): self.thapNhiCung[cung['cungSoDiaBan']].cungChu(cung['tenCung']) return self
MIT License
redis-collections/redis-collections
redis_collections/dicts.py
Dict.__init__
python
def __init__(self, *args, **kwargs):
    data = args[0] if args else kwargs.pop('data', None)
    writeback = kwargs.pop('writeback', False)

    super().__init__(**kwargs)

    self.writeback = writeback
    self.cache = {}

    if data:
        self.update(data)
Create a new Dict object.

If the first argument (*data*) is another mapping type, create the new Dict with its items as the initial data. Or, if the first argument is an iterable of ``(key, value)`` pairs, create the new Dict with those items as the initial data.

Unlike Python's built-in :class:`dict` type, initial items cannot be set using keyword arguments. Keyword arguments are passed to the :class:`RedisCollection` constructor.

:param data: Initial data.
:type data: iterable or mapping
:param redis: Redis client instance. If not provided, default Redis connection is used.
:type redis: :class:`redis.StrictRedis`
:param key: Redis key for the collection. Collections with the same key point to the same data. If not provided, a random string is generated.
:type key: str
:param writeback: If ``True``, keep a local cache of changes for storing modifications to mutable values. Changes will be written to Redis after calling the ``sync`` method.
:type writeback: bool
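A short usage sketch based on the parameters documented above; it assumes the redis-collections package is installed and a Redis server is reachable with the default connection, and the key name 'demo-dict' is only an example.

from redis_collections import Dict

d = Dict({'answer': 42}, key='demo-dict')   # initial data from a mapping
d['question'] = 'unknown'                   # written to Redis immediately
print(dict(d))                              # {'answer': 42, 'question': 'unknown'}

d2 = Dict(key='demo-dict')                  # same key -> same underlying data
print(d2['answer'])                         # 42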
https://github.com/redis-collections/redis-collections/blob/04af71c6bc5864ce5a7ce147c147e0bddee665ca/redis_collections/dicts.py#L53-L93
import collections.abc as collections_abc
import collections
import operator

from redis.client import Pipeline

from .base import RedisCollection


class Dict(RedisCollection, collections_abc.MutableMapping):
    _pickle_key = RedisCollection._pickle_3
    _unpickle_key = RedisCollection._unpickle
    _pickle_value = RedisCollection._pickle_3

    class __missing_value:
        def __repr__(self):
            return '<missing value>'
    __marker = __missing_value()
ISC License
docusign/docusign-python-client
docusign_esign/models/bcc_email_archive.py
BccEmailArchive.__ne__
python
def __ne__(self, other):
    if not isinstance(other, BccEmailArchive):
        return True

    return self.to_dict() != other.to_dict()
Returns true if both objects are not equal
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/bcc_email_archive.py#L358-L363
import pprint import re import six from docusign_esign.client.configuration import Configuration class BccEmailArchive(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'account_id': 'str', 'bcc_email_archive_id': 'str', 'created': 'str', 'created_by': 'UserInfo', 'email': 'str', 'email_notification_id': 'str', 'modified': 'str', 'modified_by': 'UserInfo', 'status': 'str', 'uri': 'str' } attribute_map = { 'account_id': 'accountId', 'bcc_email_archive_id': 'bccEmailArchiveId', 'created': 'created', 'created_by': 'createdBy', 'email': 'email', 'email_notification_id': 'emailNotificationId', 'modified': 'modified', 'modified_by': 'modifiedBy', 'status': 'status', 'uri': 'uri' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._account_id = None self._bcc_email_archive_id = None self._created = None self._created_by = None self._email = None self._email_notification_id = None self._modified = None self._modified_by = None self._status = None self._uri = None self.discriminator = None setattr(self, "_{}".format('account_id'), kwargs.get('account_id', None)) setattr(self, "_{}".format('bcc_email_archive_id'), kwargs.get('bcc_email_archive_id', None)) setattr(self, "_{}".format('created'), kwargs.get('created', None)) setattr(self, "_{}".format('created_by'), kwargs.get('created_by', None)) setattr(self, "_{}".format('email'), kwargs.get('email', None)) setattr(self, "_{}".format('email_notification_id'), kwargs.get('email_notification_id', None)) setattr(self, "_{}".format('modified'), kwargs.get('modified', None)) setattr(self, "_{}".format('modified_by'), kwargs.get('modified_by', None)) setattr(self, "_{}".format('status'), kwargs.get('status', None)) setattr(self, "_{}".format('uri'), kwargs.get('uri', None)) @property def account_id(self): return self._account_id @account_id.setter def account_id(self, account_id): self._account_id = account_id @property def bcc_email_archive_id(self): return self._bcc_email_archive_id @bcc_email_archive_id.setter def bcc_email_archive_id(self, bcc_email_archive_id): self._bcc_email_archive_id = bcc_email_archive_id @property def created(self): return self._created @created.setter def created(self, created): self._created = created @property def created_by(self): return self._created_by @created_by.setter def created_by(self, created_by): self._created_by = created_by @property def email(self): return self._email @email.setter def email(self, email): self._email = email @property def email_notification_id(self): return self._email_notification_id @email_notification_id.setter def email_notification_id(self, email_notification_id): self._email_notification_id = email_notification_id @property def modified(self): return self._modified @modified.setter def modified(self, modified): self._modified = modified @property def modified_by(self): return self._modified_by @modified_by.setter def modified_by(self, modified_by): self._modified_by = modified_by @property def status(self): return self._status @status.setter def status(self, status): self._status = status @property def uri(self): return self._uri @uri.setter def uri(self, uri): self._uri = uri def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] 
= list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BccEmailArchive, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, BccEmailArchive): return False return self.to_dict() == other.to_dict()
MIT License
foremast/foremast
src/foremast/version.py
get_version
python
def get_version():
    version = 'Not installed.'

    try:
        version = pkg_resources.get_distribution(__package__).version
    except pkg_resources.DistributionNotFound:
        pass

    return version
Retrieve package version.
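The same pkg_resources pattern works for any installed distribution. The snippet below is a generic sketch rather than foremast's own code; 'setuptools' is used only as an example of a package that is usually present.

import pkg_resources

def get_distribution_version(name):
    """Return the installed version of *name*, or a placeholder if missing."""
    try:
        return pkg_resources.get_distribution(name).version
    except pkg_resources.DistributionNotFound:
        return 'Not installed.'

print(get_distribution_version('setuptools'))   # e.g. '68.0.0'
print(get_distribution_version('no-such-pkg'))  # 'Not installed.'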
https://github.com/foremast/foremast/blob/889f81b3d018301cb5f1640b14c115ae9d602cee/src/foremast/version.py#L5-L14
import pkg_resources
Apache License 2.0
uber-archive/plato-research-dialogue-system
plato/utilities/parser/data_parser.py
DataParser.initialize
python
def initialize(self, **kwargs):
    pass
Initialize the internal structures of the data parser.

:param kwargs:
:return:
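Since initialize() is declared abstract, concrete parsers are expected to override it. The CSVLikeParser below is a hypothetical example, not part of the Plato codebase; the DataParser stand-in mirrors only the abstract-method declaration.

from abc import ABC, abstractmethod

class DataParser(ABC):                 # simplified stand-in for the Plato base class
    @abstractmethod
    def initialize(self, **kwargs):
        pass

class CSVLikeParser(DataParser):       # hypothetical concrete parser
    def initialize(self, **kwargs):
        self.delimiter = kwargs.get('delimiter', ',')
        self.rows = []

parser = CSVLikeParser()
parser.initialize(delimiter=';')
print(parser.delimiter)                # ';'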
https://github.com/uber-archive/plato-research-dialogue-system/blob/1db30be390df6903be89fdf5a515debc7d7defb4/plato/utilities/parser/data_parser.py#L30-L38
__author__ = "Alexandros Papangelis"

from abc import ABC, abstractmethod


class DataParser(ABC):

    @abstractmethod
Apache License 2.0
gtaylor/python-colormath
colormath/chromatic_adaptation.py
apply_chromatic_adaptation_on_color
python
def apply_chromatic_adaptation_on_color(color, targ_illum, adaptation="bradford"):
    xyz_x = color.xyz_x
    xyz_y = color.xyz_y
    xyz_z = color.xyz_z
    orig_illum = color.illuminant
    targ_illum = targ_illum.lower()
    observer = color.observer
    adaptation = adaptation.lower()

    color.xyz_x, color.xyz_y, color.xyz_z = apply_chromatic_adaptation(
        xyz_x,
        xyz_y,
        xyz_z,
        orig_illum,
        targ_illum,
        observer=observer,
        adaptation=adaptation,
    )
    color.set_illuminant(targ_illum)

    return color
Convenience function to apply an adaptation directly to a Color object.
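A usage sketch, assuming the colormath package is installed; the sample XYZ coordinates are arbitrary and the source illuminant 'c' is just an example.

from colormath.color_objects import XYZColor
from colormath.chromatic_adaptation import apply_chromatic_adaptation_on_color

color = XYZColor(0.5, 0.4, 0.1, illuminant='c', observer='2')
adapted = apply_chromatic_adaptation_on_color(color, targ_illum='d65',
                                              adaptation='bradford')
print(adapted.illuminant)         # 'd65' (the same object, mutated in place)
print(adapted.get_value_tuple())  # adapted XYZ coordinates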
https://github.com/gtaylor/python-colormath/blob/4a076831fd5136f685aa7143db81eba27b2cd19a/colormath/chromatic_adaptation.py#L101-L125
import logging import numpy from numpy.linalg import pinv from colormath import color_constants logger = logging.getLogger(__name__) def _get_adaptation_matrix(wp_src, wp_dst, observer, adaptation): m_sharp = color_constants.ADAPTATION_MATRICES[adaptation] if isinstance(wp_src, str): orig_illum = wp_src.lower() wp_src = color_constants.ILLUMINANTS[observer][orig_illum] elif hasattr(wp_src, "__iter__"): wp_src = wp_src if isinstance(wp_dst, str): targ_illum = wp_dst.lower() wp_dst = color_constants.ILLUMINANTS[observer][targ_illum] elif hasattr(wp_dst, "__iter__"): wp_dst = wp_dst rgb_src = numpy.dot(m_sharp, wp_src) rgb_dst = numpy.dot(m_sharp, wp_dst) m_rat = numpy.diag(rgb_dst / rgb_src) m_xfm = numpy.dot(numpy.dot(pinv(m_sharp), m_rat), m_sharp) return m_xfm def apply_chromatic_adaptation( val_x, val_y, val_z, orig_illum, targ_illum, observer="2", adaptation="bradford" ): adaptation = adaptation.lower() if isinstance(orig_illum, str): orig_illum = orig_illum.lower() wp_src = color_constants.ILLUMINANTS[observer][orig_illum] elif hasattr(orig_illum, "__iter__"): wp_src = orig_illum if isinstance(targ_illum, str): targ_illum = targ_illum.lower() wp_dst = color_constants.ILLUMINANTS[observer][targ_illum] elif hasattr(targ_illum, "__iter__"): wp_dst = targ_illum logger.debug(" \\* Applying adaptation matrix: %s", adaptation) transform_matrix = _get_adaptation_matrix(wp_src, wp_dst, observer, adaptation) XYZ_matrix = numpy.array((val_x, val_y, val_z)) result_matrix = numpy.dot(transform_matrix, XYZ_matrix) return result_matrix[0], result_matrix[1], result_matrix[2]
BSD 3-Clause New or Revised License
auroua/insightface_tf
nets/L_Resnet_E_IR_GBN.py
resnetse_v1_block_2
python
def resnetse_v1_block_2(scope, base_depth, num_units, stride, rate=1, unit_fn=None):
    return Block(scope, unit_fn, [{
        'depth': base_depth * 4,
        'depth_bottleneck': base_depth,
        'stride': 1,
        'rate': rate
    }] * (num_units - 1) + [{
        'depth': base_depth * 4,
        'depth_bottleneck': base_depth,
        'stride': stride,
        'rate': rate
    }])
Helper function for creating a resnet_v1 bottleneck block.

Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
        All other units have stride=1.

Returns:
    A resnet_v1 bottleneck block.
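To make the stride-in-last-unit behavior concrete, the standalone sketch below recreates the Block namedtuple from the same module and inspects the args list the helper produces; it does not require TensorFlow, and the block name and depths are arbitrary.

import collections

Block = collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])

def resnetse_v1_block_2(scope, base_depth, num_units, stride, rate=1, unit_fn=None):
    unit = {'depth': base_depth * 4, 'depth_bottleneck': base_depth, 'rate': rate}
    return Block(scope, unit_fn,
                 [dict(unit, stride=1)] * (num_units - 1) + [dict(unit, stride=stride)])

block = resnetse_v1_block_2('block1', base_depth=64, num_units=3, stride=2)
print(len(block.args))                     # 3 units
print([u['stride'] for u in block.args])   # [1, 1, 2] -> stride only in the last unit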
https://github.com/auroua/insightface_tf/blob/6ffe4296460bdfea56f91521db6d6412a89249d9/nets/L_Resnet_E_IR_GBN.py#L265-L288
import tensorflow as tf import tensorlayer as tl from tensorflow.contrib.layers.python.layers import utils import collections from tensorlayer.layers import Layer, list_remove_repeat from tl_layers_modify import GroupNormLayer class ElementwiseLayer(Layer): def __init__( self, layer = [], combine_fn = tf.minimum, name ='elementwise_layer', act = None, ): Layer.__init__(self, name=name) if act: print(" [TL] ElementwiseLayer %s: size:%s fn:%s, act:%s" % ( self.name, layer[0].outputs.get_shape(), combine_fn.__name__, act.__name__)) else: print(" [TL] ElementwiseLayer %s: size:%s fn:%s" % ( self.name, layer[0].outputs.get_shape(), combine_fn.__name__)) self.outputs = layer[0].outputs for l in layer[1:]: self.outputs = combine_fn(self.outputs, l.outputs, name=name) if act: self.outputs = act(self.outputs) self.all_layers = list(layer[0].all_layers) self.all_params = list(layer[0].all_params) self.all_drop = dict(layer[0].all_drop) for i in range(1, len(layer)): self.all_layers.extend(list(layer[i].all_layers)) self.all_params.extend(list(layer[i].all_params)) self.all_drop.update(dict(layer[i].all_drop)) self.all_layers = list_remove_repeat(self.all_layers) self.all_params = list_remove_repeat(self.all_params) def subsample(inputs, factor, scope=None): if factor == 1: return inputs else: return tl.layers.MaxPool2d(inputs, [1, 1], strides=(factor, factor), name=scope) def conv2d_same(inputs, num_outputs, kernel_size, strides, rate=1, w_init=None, scope=None, trainable=None): if strides == 1: if rate == 1: nets = tl.layers.Conv2d(inputs, n_filter=num_outputs, filter_size=(kernel_size, kernel_size), b_init=None, strides=(strides, strides), W_init=w_init, act=None, padding='SAME', name=scope, use_cudnn_on_gpu=True) nets = GroupNormLayer(layer=nets, act=tf.identity, name=scope+'_bn/GroupNorm') else: nets = tl.layers.AtrousConv2dLayer(inputs, n_filter=num_outputs, filter_size=(kernel_size, kernel_size), rate=rate, act=None, W_init=w_init, padding='SAME', name=scope) nets = GroupNormLayer(layer=nets, act=tf.identity, name=scope+'_bn/GroupNorm') return nets else: kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg inputs = tl.layers.PadLayer(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]], name='padding_%s' % scope) if rate == 1: nets = tl.layers.Conv2d(inputs, n_filter=num_outputs, filter_size=(kernel_size, kernel_size), b_init=None, strides=(strides, strides), W_init=w_init, act=None, padding='VALID', name=scope, use_cudnn_on_gpu=True) nets = GroupNormLayer(layer=nets, act=tf.identity, name=scope+'_bn/GroupNorm') else: nets = tl.layers.AtrousConv2dLayer(inputs, n_filter=num_outputs, filter_size=(kernel_size, kernel_size), b_init=None, rate=rate, act=None, W_init=w_init, padding='SAME', name=scope) nets = GroupNormLayer(layer=nets, act=tf.identity, name=scope+'_bn/GroupNorm') return nets def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1, scope=None): with tf.variable_scope(scope, 'bottleneck_v1') as sc: depth_in = utils.last_dimension(inputs.outputs.get_shape(), min_rank=4) if depth == depth_in: shortcut = subsample(inputs, stride, 'shortcut') else: shortcut = tl.layers.Conv2d(inputs, depth, filter_size=(1, 1), strides=(stride, stride), act=None, b_init=None, name='shortcut_conv') shortcut = GroupNormLayer(layer=shortcut, act=tf.identity, name='shortcut_bn/BatchNorm') residual = tl.layers.Conv2d(inputs, depth_bottleneck, filter_size=(1, 1), strides=(1, 1), 
act=None, b_init=None, name='conv1') residual = GroupNormLayer(layer=residual, act=tf.nn.relu, name='conv1_bn/BatchNorm') residual = conv2d_same(residual, depth_bottleneck, kernel_size=3, strides= stride, rate=rate, scope='conv2') residual = tl.layers.Conv2d(residual, depth, filter_size=(1, 1), strides=(1, 1), act=None, b_init=None, name='conv3') residual = GroupNormLayer(layer=residual, act=tf.identity, name='conv3_bn/BatchNorm', scale_init=tf.constant_initializer(0.0)) output = ElementwiseLayer(layer=[shortcut, residual], combine_fn=tf.add, name='combine_layer', act=tf.nn.relu) return output def bottleneck_IR(inputs, depth, depth_bottleneck, stride, rate=1, w_init=None, scope=None, trainable=None): with tf.variable_scope(scope, 'bottleneck_v1') as sc: depth_in = utils.last_dimension(inputs.outputs.get_shape(), min_rank=4) if depth == depth_in: shortcut = subsample(inputs, stride, 'shortcut') else: shortcut = tl.layers.Conv2d(inputs, depth, filter_size=(1, 1), strides=(stride, stride), act=None, W_init=w_init, b_init=None, name='shortcut_conv', use_cudnn_on_gpu=True) shortcut = GroupNormLayer(layer=shortcut, act=tf.identity, name='shortcut_bn/BatchNorm') residual = GroupNormLayer(layer=inputs, act=tf.identity, name='conv1_bn1') residual = tl.layers.Conv2d(residual, depth_bottleneck, filter_size=(3, 3), strides=(1, 1), act=None, b_init=None, W_init=w_init, name='conv1', use_cudnn_on_gpu=True) residual = GroupNormLayer(layer=residual, act=tf.identity, name='conv1_bn2') residual = tl.layers.PReluLayer(residual) residual = conv2d_same(residual, depth, kernel_size=3, strides=stride, rate=rate, w_init=w_init, scope='conv2', trainable=trainable) output = ElementwiseLayer(layer=[shortcut, residual], combine_fn=tf.add, name='combine_layer', act=None) return output def bottleneck_IR_SE(inputs, depth, depth_bottleneck, stride, rate=1, w_init=None, scope=None, trainable=None): with tf.variable_scope(scope, 'bottleneck_v1') as sc: depth_in = utils.last_dimension(inputs.outputs.get_shape(), min_rank=4) if depth == depth_in: shortcut = subsample(inputs, stride, 'shortcut') else: shortcut = tl.layers.Conv2d(inputs, depth, filter_size=(1, 1), strides=(stride, stride), act=None, W_init=w_init, b_init=None, name='shortcut_conv', use_cudnn_on_gpu=True) shortcut = GroupNormLayer(layer=shortcut, act=tf.identity, name='shortcut_bn/BatchNorm') residual = GroupNormLayer(layer=inputs, act=tf.identity, name='conv1_bn1') residual = tl.layers.Conv2d(residual, depth_bottleneck, filter_size=(3, 3), strides=(1, 1), act=None, b_init=None, W_init=w_init, name='conv1', use_cudnn_on_gpu=True) residual = GroupNormLayer(layer=residual, act=tf.identity, name='conv1_bn2') residual = tl.layers.PReluLayer(residual) residual = conv2d_same(residual, depth, kernel_size=3, strides=stride, rate=rate, w_init=w_init, scope='conv2', trainable=trainable) squeeze = tl.layers.InputLayer(tf.reduce_mean(residual.outputs, axis=[1, 2]), name='squeeze_layer') excitation1 = tl.layers.DenseLayer(squeeze, n_units=int(depth/16.0), act=tf.nn.relu, W_init=w_init, name='excitation_1') excitation2 = tl.layers.DenseLayer(excitation1, n_units=depth, act=tf.nn.sigmoid, W_init=w_init, name='excitation_2') scale = tl.layers.ReshapeLayer(excitation2, shape=[tf.shape(excitation2.outputs)[0], 1, 1, depth], name='excitation_reshape') residual_se = ElementwiseLayer(layer=[residual, scale], combine_fn=tf.multiply, name='scale_layer', act=None) output = ElementwiseLayer(layer=[shortcut, residual_se], combine_fn=tf.add, name='combine_layer', act=tf.nn.relu) return 
output def resnet(inputs, bottle_neck, blocks, w_init=None, trainable=None, scope=None): with tf.variable_scope(scope): net_inputs = tl.layers.InputLayer(inputs, name='input_layer') if bottle_neck: net = tl.layers.Conv2d(net_inputs, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=None, W_init=w_init, b_init=None, name='conv1', use_cudnn_on_gpu=True) net = GroupNormLayer(layer=net, act=tf.identity, name='group_norm_0') net = tl.layers.PReluLayer(net, name='prelu0') else: raise ValueError('The standard resnet must support the bottleneck layer') for block in blocks: with tf.variable_scope(block.scope): for i, var in enumerate(block.args): with tf.variable_scope('unit_%d' % (i+1)): net = block.unit_fn(net, depth=var['depth'], depth_bottleneck=var['depth_bottleneck'], w_init=w_init, stride=var['stride'], rate=var['rate'], scope=None, trainable=trainable) net = GroupNormLayer(layer=net, act=tf.identity, name='E_GN_0') net = tl.layers.DropoutLayer(net, keep=0.4, name='E_Dropout') net_shape = net.outputs.get_shape() net = tl.layers.ReshapeLayer(net, shape=[-1, net_shape[1]*net_shape[2]*net_shape[3]], name='E_Reshapelayer') net = tl.layers.DenseLayer(net, n_units=512, W_init=w_init, name='E_DenseLayer') return net class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])): def resnetse_v1_block(scope, base_depth, num_units, stride, rate=1, unit_fn=None): return Block(scope, unit_fn, [{ 'depth': base_depth * 4, 'depth_bottleneck': base_depth, 'stride': stride, 'rate': rate }] + [{ 'depth': base_depth * 4, 'depth_bottleneck': base_depth, 'stride': 1, 'rate': rate }] * (num_units - 1))
MIT License
deepmipt/intent_classifier
intent_model/multiclass.py
KerasMulticlassModel.infer_on_batch
python
def infer_on_batch(self, batch, labels=None):
    texts = batch
    if labels:
        features = self.texts2vec(texts)
        onehot_labels = labels2onehot(labels, classes=self.classes)
        metrics_values = self.model.test_on_batch(features,
                                                  onehot_labels.reshape(-1, self.n_classes))
        return metrics_values
    else:
        features = self.texts2vec(texts)
        predictions = self.model.predict(features)
        return predictions
Method infers the model on the given batch

Args:
    batch - list of texts
    labels - list of labels

Returns:
    loss and metrics values on the given batch, if labels are given
    predictions, otherwise
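A hypothetical inference session for this method. It assumes a KerasMulticlassModel has been constructed from a config dict (called opt here, loaded from a JSON file whose path is an assumption) and that a fastText embedding model is available at opt['fasttext_model']; without those, the snippet will not run.

import json

from intent_model.multiclass import KerasMulticlassModel

with open('config.json') as f:        # config path is an assumption, not a fixed name
    opt = json.load(f)

model = KerasMulticlassModel(opt)
probs = model.infer_on_batch(["book a table for two", "play some jazz"])
print(probs.shape)                    # (2, n_classes) -- per-class probabilities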
https://github.com/deepmipt/intent_classifier/blob/3192644110f0ff1b032eab448ac3197ef52326a7/intent_model/multiclass.py#L142-L162
import json import copy import sys from pathlib import Path import numpy as np import keras.metrics import keras.optimizers import tensorflow as tf from keras.backend.tensorflow_backend import set_session from keras.models import Model from keras.layers import Dense, Input, concatenate, Activation from keras.layers.pooling import GlobalMaxPooling1D, MaxPooling1D from keras.layers.convolutional import Conv1D from keras.layers.core import Dropout from keras.layers.normalization import BatchNormalization from keras.regularizers import l2 from intent_model.embedding_inferable import EmbeddingInferableModel from intent_model import metrics as metrics_file from intent_model.utils import labels2onehot, log_metrics config = tf.ConfigProto() config.gpu_options.allow_growth = True config.gpu_options.visible_device_list = '0' set_session(tf.Session(config=config)) class KerasMulticlassModel(object): def __init__(self, opt, *args, **kwargs): self.opt = copy.deepcopy(opt) self.model_path_ = Path(self.opt["model_path"]) self.confident_threshold = self.opt['confident_threshold'] if 'add_metrics' in self.opt.keys(): self.add_metrics = self.opt['add_metrics'].split(' ') self.add_metrics_values = len(self.add_metrics) * [0.] else: self.add_metrics = None self.opt["module"] = opt.get("module") if self.opt['fasttext_model'] is not None: if Path(self.opt['fasttext_model']).is_file(): self.fasttext_model = EmbeddingInferableModel(embedding_fname=self.opt['fasttext_model'], embedding_dim=self.opt['embedding_size'], module=self.opt["module"]) else: self.fasttext_model = EmbeddingInferableModel(embedding_fname=self.opt['fasttext_model'], embedding_dim=self.opt['embedding_size'], embedding_url='http://lnsigo.mipt.ru/export/embeddings/reddit_fasttext_model.bin', module=self.opt["module"]) else: raise IOError("Error: FastText intent_model file path is not given") if self.opt['model_from_saved']: self.model = self.load(model_name=self.opt['model_name'], fname=self.model_path_, optimizer_name=self.opt['optimizer'], lr=self.opt['lear_rate'], decay=self.opt['lear_rate_decay'], loss_name=self.opt['loss'], metrics_names=self.opt['lear_metrics'], add_metrics_file=metrics_file) else: self.classes = np.array(self.opt['classes'].split(" ")) self.n_classes = self.classes.shape[0] self.model = self.init_model_from_scratch(model_name=self.opt['model_name'], optimizer_name=self.opt['optimizer'], lr=self.opt['lear_rate'], decay=self.opt['lear_rate_decay'], loss_name=self.opt['loss'], metrics_names=self.opt['lear_metrics'], add_metrics_file=metrics_file) self.metrics_names = self.model.metrics_names self.metrics_values = len(self.metrics_names) * [0.] def texts2vec(self, sentences): embeddings_batch = [] for sen in sentences: embeddings = [] tokens = sen.split(' ') tokens = [el for el in tokens if el != ''] if len(tokens) > self.opt['text_size']: tokens = tokens[:self.opt['text_size']] for tok in tokens: embeddings.append(self.fasttext_model.infer(tok)) if len(tokens) < self.opt['text_size']: pads = [np.zeros(self.opt['embedding_size']) for _ in range(self.opt['text_size'] - len(tokens))] embeddings = pads + embeddings embeddings = np.asarray(embeddings) embeddings_batch.append(embeddings) embeddings_batch = np.asarray(embeddings_batch) return embeddings_batch def train_on_batch(self, batch): texts = list(batch[0]) labels = list(batch[1]) features = self.texts2vec(texts) onehot_labels = labels2onehot(labels, classes=self.classes) metrics_values = self.model.train_on_batch(features, onehot_labels) return metrics_values
Apache License 2.0
mrcagney/kml2geojson
kml2geojson/main.py
build_feature_collection
python
def build_feature_collection(node, name=None):
    geojson = {
        'type': 'FeatureCollection',
        'features': [],
    }
    for placemark in get(node, 'Placemark'):
        feature = build_feature(placemark)
        if feature is not None:
            geojson['features'].append(feature)
    if name is not None:
        geojson['name'] = name
    return geojson
Build and return a (decoded) GeoJSON FeatureCollection corresponding to this KML DOM node (typically a KML Folder). If a name is given, store it in the FeatureCollection's ``'name'`` attribute.
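A small end-to-end sketch using the helper from this module; the inline KML string is made up for illustration and the import path assumes the package layout shown above.

import xml.dom.minidom as md

from kml2geojson.main import build_feature_collection

kml = """<kml xmlns="http://www.opengis.net/kml/2.2"><Document>
  <Placemark><name>Depot</name>
    <Point><coordinates>174.76,-36.85</coordinates></Point>
  </Placemark>
</Document></kml>"""

root = md.parseString(kml)
fc = build_feature_collection(root, name='demo')
print(fc['name'], len(fc['features']))        # demo 1
print(fc['features'][0]['geometry']['type'])  # Point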
https://github.com/mrcagney/kml2geojson/blob/ac571420262dd2ed88fbaa24cca22a51f37d6684/kml2geojson/main.py#L465-L486
import shutil import xml.dom.minidom as md import re import pathlib as pl import json GEOTYPES = [ 'Polygon', 'LineString', 'Point', 'Track', 'gx:Track', ] STYLE_TYPES = [ 'svg', 'leaflet', ] SPACE = re.compile(r'\s+') def get(node, name): return node.getElementsByTagName(name) def get1(node, name): s = get(node, name) if s: return s[0] else: return None def attr(node, name): return node.getAttribute(name) def val(node): try: node.normalize() return node.firstChild.wholeText.strip() except AttributeError: return '' def valf(node): try: return float(val(node)) except ValueError: return None def numarray(a): return [float(aa) for aa in a] def coords1(s): return numarray(re.sub(SPACE, '', s).split(',')) def coords(s): s = s.split() return [coords1(ss) for ss in s] def gx_coords1(s): return numarray(s.split(' ')) def gx_coords(node): els = get(node, 'gx:coord') coordinates = [] times = [] coordinates = [gx_coords1(val(el)) for el in els] time_els = get(node, 'when') times = [val(t) for t in time_els] return { 'coordinates': coordinates, 'times': times, } def disambiguate(names, mark='1'): names_seen = set() new_names = [] for name in names: new_name = name while new_name in names_seen: new_name += mark new_names.append(new_name) names_seen.add(new_name) return new_names def to_filename(s): s = re.sub(r'(?u)[^-\w. ]', '', s) s = s.strip().replace(' ', '_') return s def build_rgb_and_opacity(s): color = '000000' opacity = 1 if s.startswith('#'): s = s[1:] if len(s) == 8: color = s[6:8] + s[4:6] + s[2:4] opacity = round(int(s[0:2], 16)/256, 2) elif len(s) == 6: color = s[4:6] + s[2:4] + s[0:2] elif len(s) == 3: color = s[::-1] return '#' + color, opacity def build_svg_style(node): d = {} for item in get(node, 'Style'): style_id = '#' + attr(item, 'id') props = {} for x in get(item, 'PolyStyle'): color = val(get1(x, 'color')) if color: rgb, opacity = build_rgb_and_opacity(color) props['fill'] = rgb props['fill-opacity'] = opacity props['stroke'] = rgb props['stroke-opacity'] = opacity props['stroke-width'] = 1 fill = valf(get1(x, 'fill')) if fill == 0: props['fill-opacity'] = fill elif fill == 1 and 'fill-opacity' not in props: props['fill-opacity'] = fill outline = valf(get1(x, 'outline')) if outline == 0: props['stroke-opacity'] = outline elif outline == 1 and 'stroke-opacity' not in props: props['stroke-opacity'] = outline for x in get(item, 'LineStyle'): color = val(get1(x, 'color')) if color: rgb, opacity = build_rgb_and_opacity(color) props['stroke'] = rgb props['stroke-opacity'] = opacity width = valf(get1(x, 'width')) if width is not None: props['stroke-width'] = width for x in get(item, 'IconStyle'): icon = get1(x, 'Icon') if not icon: continue props = {} props['iconUrl'] = val(get1(icon, 'href')) d[style_id] = props return d def build_leaflet_style(node): d = {} for item in get(node, 'Style'): style_id = '#' + attr(item, 'id') props = {} for x in get(item, 'PolyStyle'): color = val(get1(x, 'color')) if color: rgb, opacity = build_rgb_and_opacity(color) props['fillColor'] = rgb props['fillOpacity'] = opacity props['color'] = rgb props['opacity'] = opacity props['weight'] = 1 fill = valf(get1(x, 'fill')) if fill == 0: props['fillOpacity'] = fill elif fill == 1 and 'fillOpacity' not in props: props['fillOpacity'] = fill outline = valf(get1(x, 'outline')) if outline == 0: props['opacity'] = outline elif outline == 1 and 'opacity' not in props: props['opacity'] = outline for x in get(item, 'LineStyle'): color = val(get1(x, 'color')) if color: rgb, opacity = build_rgb_and_opacity(color) 
props['color'] = rgb props['opacity'] = opacity width = valf(get1(x, 'width')) if width is not None: props['weight'] = width for x in get(item, 'IconStyle'): icon = get1(x, 'Icon') if not icon: continue props = {} props['iconUrl'] = val(get1(icon, 'href')) d[style_id] = props return d def build_geometry(node): geoms = [] times = [] if get1(node, 'MultiGeometry'): return build_geometry(get1(node, 'MultiGeometry')) if get1(node, 'MultiTrack'): return build_geometry(get1(node, 'MultiTrack')) if get1(node, 'gx:MultiTrack'): return build_geometry(get1(node, 'gx:MultiTrack')) for geotype in GEOTYPES: geonodes = get(node, geotype) if not geonodes: continue for geonode in geonodes: if geotype == 'Point': geoms.append({ 'type': 'Point', 'coordinates': coords1(val(get1( geonode, 'coordinates'))) }) elif geotype == 'LineString': geoms.append({ 'type': 'LineString', 'coordinates': coords(val(get1( geonode, 'coordinates'))) }) elif geotype == 'Polygon': rings = get(geonode, 'LinearRing') coordinates = [coords(val(get1(ring, 'coordinates'))) for ring in rings] geoms.append({ 'type': 'Polygon', 'coordinates': coordinates, }) elif geotype in ['Track', 'gx:Track']: track = gx_coords(geonode) geoms.append({ 'type': 'LineString', 'coordinates': track['coordinates'], }) if track['times']: times.append(track['times']) return {'geoms': geoms, 'times': times} def build_feature(node): geoms_and_times = build_geometry(node) if not geoms_and_times['geoms']: return None props = {} for x in get(node, 'name')[:1]: name = val(x) if name: props['name'] = val(x) for x in get(node, 'description')[:1]: desc = val(x) if desc: props['description'] = desc for x in get(node, 'styleUrl')[:1]: style_url = val(x) if style_url[0] != '#': style_url = '#' + style_url props['styleUrl'] = style_url for x in get(node, 'PolyStyle')[:1]: color = val(get1(x, 'color')) if color: rgb, opacity = build_rgb_and_opacity(color) props['fill'] = rgb props['fill-opacity'] = opacity props['stroke'] = rgb props['stroke-opacity'] = opacity props['stroke-width'] = 1 fill = valf(get1(x, 'fill')) if fill == 0: props['fill-opacity'] = fill elif fill == 1 and 'fill-opacity' not in props: props['fill-opacity'] = fill outline = valf(get1(x, 'outline')) if outline == 0: props['stroke-opacity'] = outline elif outline == 1 and 'stroke-opacity' not in props: props['stroke-opacity'] = outline for x in get(node, 'LineStyle')[:1]: color = val(get1(x, 'color')) if color: rgb, opacity = build_rgb_and_opacity(color) props['stroke'] = rgb props['stroke-opacity'] = opacity width = valf(get1(x, 'width')) if width: props['stroke-width'] = width for x in get(node, 'ExtendedData')[:1]: datas = get(x, 'Data') for data in datas: props[attr(data, 'name')] = val(get1(data, 'value')) simple_datas = get(x, 'SimpleData') for simple_data in simple_datas: props[attr(simple_data, 'name')] = val(simple_data) for x in get(node, 'TimeSpan')[:1]: begin = val(get1(x, 'begin')) end = val(get1(x, 'end')) props['timeSpan'] = {'begin': begin, 'end': end} if geoms_and_times['times']: times = geoms_and_times['times'] if len(times) == 1: props['times'] = times[0] else: props['times'] = times feature = { 'type': 'Feature', 'properties': props, } geoms = geoms_and_times['geoms'] if len(geoms) == 1: feature['geometry'] = geoms[0] else: feature['geometry'] = { 'type': 'GeometryCollection', 'geometries': geoms, } if attr(node, 'id'): feature['id'] = attr(node, 'id') return feature
MIT License
jiaweisheng/faan
modules.py
ScaledDotProductAttention.forward
python
def forward(self, q, k, v, scale=None, attn_mask=None):
    attn = torch.bmm(q, k.transpose(1, 2))
    if scale:
        attn = attn * scale
    if attn_mask:
        attn = attn.masked_fill_(attn_mask, -np.inf)
    attn = self.softmax(attn)
    attn = self.dropout(attn)
    output = torch.bmm(attn, v)
    return output, attn
:param attn_mask: [batch, time]
:param scale:
:param q: [batch, time, dim]
:param k: [batch, time, dim]
:param v: [batch, time, dim]
:return:
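A runnable sketch (PyTorch required) that recreates the module around this forward() so the tensor shapes can be checked; the mask branch is omitted for brevity and the random inputs are arbitrary.

import torch
import torch.nn as nn

class ScaledDotProductAttention(nn.Module):   # recreated from modules.py, mask branch omitted
    def __init__(self, attn_dropout=0.0):
        super().__init__()
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, scale=None, attn_mask=None):
        attn = torch.bmm(q, k.transpose(1, 2))     # [batch, time, time]
        if scale:
            attn = attn * scale
        attn = self.dropout(self.softmax(attn))
        return torch.bmm(attn, v), attn            # [batch, time, dim], [batch, time, time]

q = k = v = torch.randn(2, 5, 16)                  # [batch, time, dim]
output, attn = ScaledDotProductAttention()(q, k, v, scale=16 ** -0.5)
print(output.shape, attn.shape)                    # torch.Size([2, 5, 16]) torch.Size([2, 5, 5])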
https://github.com/jiaweisheng/faan/blob/b439b829506c4e2e9044a6b2ab7f3d844f445a95/modules.py#L72-L89
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import math from torch.autograd import Variable class AttentionSelectContext(nn.Module): def __init__(self, dim, dropout=0.0): super(AttentionSelectContext, self).__init__() self.Bilinear = nn.Bilinear(dim, dim, 1, bias=False) self.Linear_tail = nn.Linear(dim, dim, bias=False) self.Linear_head = nn.Linear(dim, dim, bias=False) self.layer_norm = nn.LayerNorm(dim) self.dropout = nn.Dropout(dropout) def intra_attention(self, head, rel, tail, mask): head = head.unsqueeze(1).repeat(1, rel.size(1), 1) score = self.Bilinear(head, rel).squeeze(2) score = score.masked_fill_(mask, -np.inf) att = torch.softmax(score, dim=1).unsqueeze(dim=1) head = torch.bmm(att, tail).squeeze(1) return head def forward(self, left, right, mask_left=None, mask_right=None): head_left, rel_left, tail_left = left head_right, rel_right, tail_right = right weak_rel = head_right - head_left left = self.intra_attention(weak_rel, rel_left, tail_left, mask_left) right = self.intra_attention(weak_rel, rel_right, tail_right, mask_right) left = torch.relu(self.Linear_tail(left) + self.Linear_head(head_left)) right = torch.relu(self.Linear_tail(right) + self.Linear_head(head_right)) left = self.dropout(left) right = self.dropout(right) left = self.layer_norm(left + head_left) right = self.layer_norm(right + head_right) return left, right class ScaledDotProductAttention(nn.Module): def __init__(self, attn_dropout=0.0): super(ScaledDotProductAttention, self).__init__() self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2)
MIT License
chrism2671/pytrendfollow
trading/portfolio.py
Portfolio.instrument_stats
python
def instrument_stats(self):
    with closing(Pool()) as pool:
        df = pd.DataFrame(dict(pool.map(lambda x: (x.name, x.curve().stats_list()),
                                        self.valid_instruments().values()))).transpose()
    return df
Returns individual metrics for every Instrument in the Portfolio. Not used for trading, just for research.
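A hypothetical research session built on the Portfolio constructor shown in the context below; it assumes config/strategy.py and config/settings.py are set up as the package requires and that market data is reachable, and the instrument names are placeholders.

# Hypothetical usage -- requires the pytrendfollow config files and data access.
from trading.portfolio import Portfolio

p = Portfolio(instruments=['corn', 'sp500'])   # instrument names are illustrative
stats = p.instrument_stats()                   # one row of curve metrics per instrument
print(stats)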
https://github.com/chrism2671/pytrendfollow/blob/439232aed4d67ccf339e782b7c8b96f2dd961d43/trading/portfolio.py#L80-L87
import numpy as np import pandas as pd from functools import lru_cache import sys try: import config.strategy except ImportError: print("You need to set up the strategy file at config/strategy.py.") sys.exit() try: import config.settings except ImportError: print("You need to set up the settings file at config/settings.py.") sys.exit() from core.instrument import Instrument from core.utility import draw_sample, sharpe from trading.accountcurve import accountCurve import trading.bootstrap_portfolio as bp import seaborn import pyprind from multiprocessing_on_dill import Pool from contextlib import closing from core.logger import get_logger logger = get_logger('portfolio') class Portfolio(object): def __init__(self, weights=1, instruments=None): self.instruments = Instrument.load(instruments) self.weights = pd.Series(config.strategy.portfolio_weights) self.weights = self.weights[self.weights.index.isin(instruments)] self.inst_blacklist = [] def __repr__(self): return str(len(self.valid_instruments())) + " instruments" @lru_cache(maxsize=8) def curve(self, **kw): kw2={'portfolio_weights': self.valid_weights()} kw2.update(kw) return accountCurve(list(self.valid_instruments().values()), **kw2) def valid_instruments(self): return dict([i for i in self.instruments.items() if i[1].name not in self.inst_blacklist]) def valid_weights(self): return self.weights[~self.weights.index.isin(self.inst_blacklist)] def validate(self): import concurrent.futures bar = pyprind.ProgBar(len(self.instruments.values()), title='Validating instruments') with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: dl = {executor.submit(x.validate): x.name for x in self.instruments.values()} d = {} for fut in concurrent.futures.as_completed(dl): bar.update(item_id=dl[fut]) d[dl[fut]] = fut.result() d = pd.DataFrame(d).transpose() self.inst_blacklist = d[d['is_valid'] == False].index.tolist() return d
MIT License
operator-framework/operator-courier
operatorcourier/verified_manifest.py
VerifiedManifest.get_validation_dict_from_manifests
python
def get_validation_dict_from_manifests(self, manifests, ui_validate_io=False,
                                       repository=None):
    bundle_dict = None
    validation_dict = ValidateCmd(ui_validate_io).validation_json

    for version, manifest_files_info in manifests.items():
        bundle_dict = BuildCmd().build_bundle(manifest_files_info)
        if version != FLAT_KEY:
            logger.info("Parsing version: %s", version)
        _, validation_dict_temp = ValidateCmd(ui_validate_io, self.nested) \
            .validate(bundle_dict, repository)
        for log_level, msg_list in validation_dict_temp.items():
            validation_dict[log_level].extend(msg_list)

    if not self.nested:
        self.bundle_dict = bundle_dict
    return validation_dict
Given a dict of manifest files where the key is the version of the manifest (or FLAT_KEY if the manifest files are not grouped by version), the function returns a dict containing validation info (warnings/errors).

:param manifests: a dict of manifest files where the key is the version of the manifest (or FLAT_KEY if the manifest files are not grouped by version)
:param ui_validate_io: the ui_validate_io flag specified from CLI
:param repository: the repository value specified from CLI
:return: a dict containing validation info (warnings/errors).
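For orientation, a hypothetical call path into this code via the public VerifiedManifest constructor shown in the context below; the directory name is an example, and the operator-courier CLI normally drives this same path through its verify command.

# Hypothetical usage -- 'my-operator-manifests/' stands for a flat or nested
# manifest directory on disk.
from operatorcourier.verified_manifest import VerifiedManifest

vm = VerifiedManifest('my-operator-manifests/', yamls=None,
                      ui_validate_io=False, repository=None)
print(vm.is_valid)                      # False if any validation errors were found
for err in vm.validation_dict['errors']:
    print(err)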
https://github.com/operator-framework/operator-courier/blob/4f1a4ea3db3ba901c0c492f346ffa3cdc1102649/operatorcourier/verified_manifest.py#L105-L133
import os import copy import logging import json from operatorcourier.build import BuildCmd from operatorcourier.validate import ValidateCmd from operatorcourier.errors import OpCourierBadBundle from operatorcourier.format import format_bundle from operatorcourier.manifest_parser import is_manifest_folder, get_csvs_pkg_info_from_root, get_crd_csv_files_info logger = logging.getLogger(__name__) FLAT_KEY = '__flat__' class VerifiedManifest: @property def bundle(self): if self.nested: raise AttributeError('VerifiedManifest does not have the bundle property ' 'in nested cases.') return format_bundle(self.bundle_dict) @property def validation_dict(self): return copy.deepcopy(self.__validation_dict) def __init__(self, source_dir, yamls, ui_validate_io, repository): self.nested = False if yamls: yaml_strings_with_metadata = self._set_empty_filepaths(yamls) manifests = {FLAT_KEY: yaml_strings_with_metadata} else: manifests = self.get_manifests_info(source_dir) self.bundle_dict = None self.__validation_dict = self.get_validation_dict_from_manifests(manifests, ui_validate_io, repository) self.is_valid = False if self.__validation_dict['errors'] else True def _set_empty_filepaths(self, yamls): yaml_strings_with_metadata = [] for yaml_string in yamls: yaml_tuple = ("", yaml_string) yaml_strings_with_metadata.append(yaml_tuple) return yaml_strings_with_metadata def get_manifests_info(self, source_dir): manifests = {} root_path, dir_names, root_dir_files = next(os.walk(source_dir)) csvs_path_and_content, pkg_path_and_content = get_csvs_pkg_info_from_root(source_dir) dir_paths = [os.path.join(source_dir, dir_name) for dir_name in dir_names] manifest_paths = list(filter(lambda x: is_manifest_folder(x), dir_paths)) if manifest_paths: logger.info('The source directory is in nested structure.') self.nested = True for manifest_path in manifest_paths: manifest_dir_name = os.path.basename(manifest_path) crd_files_info, csv_files_info = get_crd_csv_files_info(manifest_path) manifests[manifest_dir_name] = crd_files_info + csv_files_info for manifest_dir_name in manifests: manifests[manifest_dir_name].append(pkg_path_and_content) elif pkg_path_and_content and csvs_path_and_content: logger.info('The source directory is in flat structure.') crd_files_info, csv_files_info = get_crd_csv_files_info(root_path) files_info = [pkg_path_and_content] files_info.extend(crd_files_info + csv_files_info) manifests[FLAT_KEY] = files_info else: msg = 'The source directory structure is not in valid flat or nested format,' 'because no valid CSV file is found in root or manifest directories.' logger.error(msg) raise OpCourierBadBundle(msg, {}) return manifests
Apache License 2.0
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/server/portal/webclient_ajax.py
AjaxWebClientSession.data_out
python
def data_out(self, **kwargs):
    self.sessionhandler.data_out(self, **kwargs)
Data Evennia -> User

Kwargs:
    kwargs (any): Options to the protocol
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/server/portal/webclient_ajax.py#L381-L388
import json import re import time import html from twisted.web import server, resource from twisted.internet.task import LoopingCall from django.utils.functional import Promise from django.conf import settings from evennia.utils.ansi import parse_ansi from evennia.utils import utils from evennia.utils.utils import to_bytes, to_str from evennia.utils.text2html import parse_html from evennia.server import session _CLIENT_SESSIONS = utils.mod_import(settings.SESSION_ENGINE).SessionStore _RE_SCREENREADER_REGEX = re.compile( r"%s" % settings.SCREENREADER_REGEX_STRIP, re.DOTALL + re.MULTILINE ) _SERVERNAME = settings.SERVERNAME _KEEPALIVE = 30 class LazyEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, Promise): return str(obj) return super().default(obj) def jsonify(obj): return to_bytes(json.dumps(obj, ensure_ascii=False, cls=LazyEncoder)) class AjaxWebClient(resource.Resource): isLeaf = True allowedMethods = ("POST",) def __init__(self): self.requests = {} self.databuffer = {} self.last_alive = {} self.keep_alive = None def _responseFailed(self, failure, csessid, request): try: del self.requests[csessid] except KeyError: pass def _keepalive(self): now = time.time() to_remove = [] keep_alives = ( (csessid, remove) for csessid, (t, remove) in self.last_alive.items() if now - t > _KEEPALIVE ) for csessid, remove in keep_alives: if remove: to_remove.append(csessid) else: self.last_alive[csessid] = (now, True) self.lineSend(csessid, ["ajax_keepalive", [], {}]) for csessid in to_remove: sessions = self.sessionhandler.sessions_from_csessid(csessid) for sess in sessions: sess.disconnect() self.last_alive.pop(csessid, None) if not self.last_alive: self.keep_alive.stop() self.keep_alive = None def get_client_sessid(self, request): return html.escape(request.args[b"csessid"][0].decode("utf-8")) def at_login(self): pass def lineSend(self, csessid, data): request = self.requests.get(csessid) if request: request.write(jsonify(data)) request.finish() del self.requests[csessid] else: dataentries = self.databuffer.get(csessid, []) dataentries.append(jsonify(data)) self.databuffer[csessid] = dataentries def client_disconnect(self, csessid): if csessid in self.requests: self.requests[csessid].finish() del self.requests[csessid] if csessid in self.databuffer: del self.databuffer[csessid] def mode_init(self, request): csessid = self.get_client_sessid(request) remote_addr = request.getClientIP() host_string = "%s (%s:%s)" % ( _SERVERNAME, request.getRequestHostname(), request.getHost().port, ) sess = AjaxWebClientSession() sess.client = self sess.init_session("ajax/comet", remote_addr, self.sessionhandler) sess.csessid = csessid csession = _CLIENT_SESSIONS(session_key=sess.csessid) uid = csession and csession.get("webclient_authenticated_uid", False) if uid: sess.uid = uid sess.logged_in = True self.last_alive[csessid] = (time.time(), False) if not self.keep_alive: self.keep_alive = LoopingCall(self._keepalive) self.keep_alive.start(_KEEPALIVE, now=False) sess.sessionhandler.connect(sess) return jsonify({"msg": host_string, "csessid": csessid}) def mode_keepalive(self, request): csessid = self.get_client_sessid(request) self.last_alive[csessid] = (time.time(), False) return b'""' def mode_input(self, request): csessid = self.get_client_sessid(request) self.last_alive[csessid] = (time.time(), False) cmdarray = json.loads(request.args.get(b"data")[0]) for sess in self.sessionhandler.sessions_from_csessid(csessid): sess.data_in(**{cmdarray[0]: [cmdarray[1], cmdarray[2]]}) return b'""' def 
mode_receive(self, request): csessid = html.escape(request.args[b"csessid"][0].decode("utf-8")) self.last_alive[csessid] = (time.time(), False) dataentries = self.databuffer.get(csessid) if dataentries: return dataentries.pop(0) else: request.notifyFinish().addErrback(self._responseFailed, csessid, request) if csessid in self.requests: self.requests[csessid].finish() self.requests[csessid] = request return server.NOT_DONE_YET def mode_close(self, request): csessid = self.get_client_sessid(request) try: sess = self.sessionhandler.sessions_from_csessid(csessid)[0] sess.sessionhandler.disconnect(sess) except IndexError: self.client_disconnect(csessid) return b'""' def render_POST(self, request): dmode = request.args.get(b"mode", [b"None"])[0].decode("utf-8") if dmode == "init": return self.mode_init(request) elif dmode == "input": return self.mode_input(request) elif dmode == "receive": return self.mode_receive(request) elif dmode == "close": return self.mode_close(request) elif dmode == "keepalive": return self.mode_keepalive(request) else: return b'""' class AjaxWebClientSession(session.Session): def __init__(self, *args, **kwargs): self.protocol_key = "webclient/ajax" super().__init__(*args, **kwargs) def get_client_session(self): if self.csessid: return _CLIENT_SESSIONS(session_key=self.csessid) def disconnect(self, reason="Server disconnected."): csession = self.get_client_session() if csession: csession["webclient_authenticated_uid"] = None csession.save() self.logged_in = False self.client.lineSend(self.csessid, ["connection_close", [reason], {}]) self.client.client_disconnect(self.csessid) self.sessionhandler.disconnect(self) def at_login(self): csession = self.get_client_session() if csession: csession["webclient_authenticated_uid"] = self.uid csession.save() def data_in(self, **kwargs): self.sessionhandler.data_in(self, **kwargs)
MIT License
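A framework-free sketch of the buffer-or-deliver pattern that AjaxWebClient.lineSend and mode_receive implement in the record above: answer immediately when a long-poll request is already waiting, otherwise queue the payload until the client polls again. PendingRequest and TinyCometHub are illustrative stand-ins, not Evennia classes.

import json


class PendingRequest:
    """Illustrative stand-in for a held Twisted request object."""

    def __init__(self):
        self.sent = None

    def write(self, data):
        self.sent = data


class TinyCometHub:
    def __init__(self):
        self.requests = {}    # csessid -> request currently long-polling
        self.databuffer = {}  # csessid -> payloads queued while nobody polls

    def line_send(self, csessid, data):
        payload = json.dumps(data)
        request = self.requests.pop(csessid, None)
        if request is not None:
            request.write(payload)  # a poll is waiting: answer it right away
        else:
            self.databuffer.setdefault(csessid, []).append(payload)

    def receive(self, csessid, request):
        queued = self.databuffer.get(csessid)
        if queued:
            request.write(queued.pop(0))      # flush one buffered message
        else:
            self.requests[csessid] = request  # hold the request until data arrives


hub = TinyCometHub()
hub.line_send("abc", ["text", ["Hello"], {}])  # buffered: no poll in flight
poll = PendingRequest()
hub.receive("abc", poll)                       # flushes the buffered message
print(poll.sent)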
compas-dev/compas
src/compas/files/_xml/xml_cli.py
CLRXMLTreeParser._start_element
python
def _start_element(self, reader): attributes = {} name = self._get_expanded_tag(reader) while reader.MoveToNextAttribute(): attributes[reader.Name] = reader.Value reader.MoveToElement() self._target.start(name, attributes) if reader.IsEmptyElement: self._target.end(name)
Notify the tree builder that a start element has been encountered.
https://github.com/compas-dev/compas/blob/d795a8bfe9f21ffa124d09e37e9c0ed2e3520057/src/compas/files/_xml/xml_cli.py#L208-L221
from __future__ import absolute_import from __future__ import division from __future__ import print_function import xml.etree.ElementTree as ET from urllib import addinfourl import compas from compas.files._xml.xml_shared import shared_xml_from_file from compas.files._xml.xml_shared import shared_xml_from_string __all__ = [ 'xml_from_file', 'xml_from_string', 'prettify_string', ] if compas.IPY: import clr clr.AddReference('System.Xml') from System.IO import MemoryStream from System.IO import StreamReader from System.IO import StringReader from System.Text import Encoding from System.Text.RegularExpressions import Regex from System.Text.RegularExpressions import RegexOptions from System.Xml import DtdProcessing from System.Xml import Formatting from System.Xml import ValidationType from System.Xml import XmlDocument from System.Xml import XmlNodeType from System.Xml import XmlReader from System.Xml import XmlReaderSettings from System.Xml import XmlTextWriter CRE_ENCODING = Regex("encoding=['\"](?<enc_name>.*?)['\"]", RegexOptions.Compiled) def prettify_string(rough_string): mStream = MemoryStream() writer = XmlTextWriter(mStream, Encoding.UTF8) document = XmlDocument() document.LoadXml(rough_string) writer.Formatting = Formatting.Indented writer.WriteStartDocument() document.WriteContentTo(writer) writer.Flush() mStream.Flush() mStream.Position = 0 sReader = StreamReader(mStream) formattedXml = sReader.ReadToEnd() return formattedXml def xml_from_file(source, tree_parser=None): tree_parser = tree_parser or CLRXMLTreeParser return shared_xml_from_file(source, tree_parser, addinfourl) def xml_from_string(text, tree_parser=None): tree_parser = tree_parser or CLRXMLTreeParser return shared_xml_from_string(text, tree_parser) class CLRXMLTreeParser(ET.XMLParser): def __init__(self, target=None, validating=False): if not compas.IPY: raise Exception('CLRXMLTreeParser can only be used from IronPython') settings = XmlReaderSettings() settings.IgnoreComments = True settings.IgnoreProcessingInstructions = True settings.IgnoreWhitespace = True if not validating: settings.DtdProcessing = DtdProcessing.Ignore settings.ValidationType = getattr(ValidationType, 'None') else: settings.DtdProcessing = DtdProcessing.Parse settings.ValidationType = ValidationType.DTD self.settings = settings self._target = target or ET.TreeBuilder() self._buffer = [] self._document_encoding = 'UTF-8' def feed(self, data): self._buffer.append(data) def close(self): xml_string = "".join(self._buffer) self._buffer = None reader = XmlReader.Create(StringReader(xml_string), self.settings) while reader.Read(): if reader.IsStartElement(): self._start_element(reader) elif reader.NodeType in [XmlNodeType.Text, XmlNodeType.CDATA]: self._target.data(reader.Value.decode(self._document_encoding)) elif reader.NodeType == XmlNodeType.EndElement: self._target.end(self._get_expanded_tag(reader)) elif reader.NodeType == XmlNodeType.XmlDeclaration: self._parse_xml_declaration(reader.Value) return self._target.close() def _get_expanded_tag(self, reader): if not reader.NamespaceURI: return reader.LocalName return '{{{}}}{}'.format(reader.NamespaceURI, reader.LocalName) def _parse_xml_declaration(self, xml_decl): enc_name = CRE_ENCODING.Match(xml_decl).Groups['enc_name'].Value if enc_name: self._document_encoding = enc_name
MIT License
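CLRXMLTreeParser above only runs under IronPython because it wraps .NET's System.Xml XmlReader. As a point of comparison, the sketch below uses the standard-library ElementTree TreeBuilder to show the same target calls (start, data, end, close) that _start_element and close feed it; the namespace URI and attribute values are made up.

import xml.etree.ElementTree as ET

builder = ET.TreeBuilder()
# Equivalent of _start_element: expanded {namespace}tag plus an attribute dict.
builder.start("{http://example.com/ns}settings", {"version": "1"})
builder.data("hello")
builder.end("{http://example.com/ns}settings")

root = builder.close()
print(root.tag, root.attrib, root.text)  # {http://example.com/ns}settings {'version': '1'} hello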
edinburgh-genome-foundry/dnafeaturesviewer
dna_features_viewer/BiopythonTranslator/BiopythonTranslatorBase.py
BiopythonTranslatorBase.translate_feature
python
def translate_feature(self, feature): properties = dict( label=self.compute_feature_label(feature), color=self.compute_feature_color(feature), html=self.compute_feature_html(feature), fontdict=self.compute_feature_fontdict(feature), box_linewidth=self.compute_feature_box_linewidth(feature), box_color=self.compute_feature_box_color(feature), linewidth=self.compute_feature_linewidth(feature), label_link_color=self.compute_feature_label_link_color(feature), legend_text=self.compute_feature_legend_text(feature), ) if self.features_properties is not None: other_properties = self.features_properties if hasattr(other_properties, "__call__"): other_properties = other_properties(feature) properties.update(other_properties) return GraphicFeature( start=feature.location.start, end=feature.location.end, strand=feature.location.strand, **properties )
Translate a Biopython feature into a DNA Features Viewer GraphicFeature.
https://github.com/edinburgh-genome-foundry/dnafeaturesviewer/blob/1833d9e428713e02cb9c25c1d867d020ad50aec9/dna_features_viewer/BiopythonTranslator/BiopythonTranslatorBase.py#L32-L56
from ..biotools import load_record from ..GraphicRecord import GraphicRecord from ..CircularGraphicRecord import CircularGraphicRecord from ..GraphicFeature import GraphicFeature class BiopythonTranslatorBase: graphic_record_parameters = {} def __init__(self, features_filters=(), features_properties=None): self.features_filters = features_filters self.features_properties = features_properties
MIT License
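A small usage sketch for translate_feature above, assuming Biopython and dna_features_viewer are installed; the coordinates and the "label" qualifier are made up for illustration.

from Bio.SeqFeature import SeqFeature, FeatureLocation
from dna_features_viewer import BiopythonTranslator

feature = SeqFeature(
    FeatureLocation(5, 20, strand=+1),
    type="CDS",
    qualifiers={"label": ["my_gene"]},
)

graphic_feature = BiopythonTranslator().translate_feature(feature)
# The returned GraphicFeature carries the location plus the computed styling.
print(graphic_feature.start, graphic_feature.end, graphic_feature.strand)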
leokarlin/laso
oneshot/setops_models/resnet.py
resnet50_ids_pre_v3
python
def resnet50_ids_pre_v3( num_attributes: int=40, ids_embedding_size: int=512, latent_dim: int=1024, attr_layer_num: int=0, ids_layer_num: int=0, dropout_ratio: float=0, pretrained: bool=False, **kwargs): model = zoo.resnet50(pretrained=pretrained) if pretrained: for param in model.parameters(): param.requires_grad = False model = nn.Sequential(*list(model.children())[:-2]) input_dim = 512 * Bottleneck.expansion classifier_attr = AttrsClassifier( input_dim=512 * Bottleneck.expansion, num_attributes=num_attributes, latent_dim=latent_dim, layers_num=attr_layer_num, dropout_ratio=dropout_ratio, apply_spatial=True, **kwargs) embedding_ids = IDsEmbedding( input_dim=512 * Bottleneck.expansion, embedding_size=ids_embedding_size, latent_dim=latent_dim, layers_num=ids_layer_num, dropout_ratio=dropout_ratio, apply_avgpool=True, ** kwargs) return model, classifier_attr, embedding_ids
Constructs a ResNet-50 model with an attributes classifier and an IDs embedding head.
https://github.com/leokarlin/laso/blob/8941bdc9316361ad03dbc2bcabd4bf9922c0ecc7/oneshot/setops_models/resnet.py#L418-L458
import math import torch.nn as nn from torchvision.models.resnet import BasicBlock, Bottleneck, model_urls import torchvision.models as zoo import torch.utils.model_zoo as model_zoo class ResNet(nn.Module): def __init__(self, block, layers, avgpool_kernel=7, **kwargs): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AvgPool2d(avgpool_kernel, stride=1) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) return x class ResNetClassifier(nn.Module): def __init__(self, block, num_classes=1000, **kwargs): super(ResNetClassifier, self).__init__() self.fc = nn.Linear(512 * block.expansion, num_classes) def forward(self, x): x = x.view(x.size(0), -1) return self.fc(x) def resnet18(pretrained=False, **kwargs): model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) classifier = ResNetClassifier(BasicBlock, **kwargs) if pretrained: state_dict = model_zoo.load_url(model_urls['resnet18']) model.load_state_dict( {k: v for k, v in state_dict.items() if k in model.state_dict()} ) return model, classifier def resnet18_ids(num_attributes, ids_embedding_size, pretrained=False, **kwargs): model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) classifier = ResNetClassifier(BasicBlock, num_classes=num_attributes, **kwargs) classifier_ids = ResNetClassifier(BasicBlock, num_classes=ids_embedding_size, **kwargs) if pretrained: state_dict = model_zoo.load_url(model_urls['resnet18']) model.load_state_dict( {k: v for k, v in state_dict.items() if k in model.state_dict()} ) return model, classifier, classifier_ids def resnet34(pretrained=False, **kwargs): model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) classifier = ResNetClassifier(BasicBlock, **kwargs) if pretrained: state_dict = model_zoo.load_url(model_urls['resnet34']) model.load_state_dict( {k: v for k, v in state_dict.items() if k in model.state_dict()} ) return model, classifier def resnet50(pretrained=False, **kwargs): model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) classifier = ResNetClassifier(Bottleneck, **kwargs) if pretrained: state_dict = model_zoo.load_url(model_urls['resnet50']) model.load_state_dict( {k: v for k, v in state_dict.items() if k in model.state_dict()} ) return model, classifier def 
resnet101(pretrained=False, **kwargs): model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) classifier = ResNetClassifier(Bottleneck, **kwargs) if pretrained: state_dict = model_zoo.load_url(model_urls['resnet101']) model.load_state_dict( {k: v for k, v in state_dict.items() if k in model.state_dict()} ) return model, classifier def resnet152(pretrained=False, **kwargs): model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) classifier = ResNetClassifier(Bottleneck, **kwargs) if pretrained: state_dict = model_zoo.load_url(model_urls['resnet152']) model.load_state_dict( {k: v for k, v in state_dict.items() if k in model.state_dict()} ) return model, classifier def resnet34_ids(num_attributes, ids_embedding_size, pretrained=False, **kwargs): model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) classifier = ResNetClassifier(BasicBlock, num_classes=num_attributes, **kwargs) classifier_ids = ResNetClassifier(BasicBlock, num_classes=ids_embedding_size, **kwargs) if pretrained: raise NotImplemented("pretrained parameter not implemented.") return model, classifier, classifier_ids def resnet34_v2( num_attributes: int=40, latent_dim: int=1024, attr_layer_num: int=0, dropout_ratio: float=0, pretrained: bool=False, **kwargs): model = zoo.resnet34(pretrained=pretrained) if pretrained: for param in model.parameters(): param.requires_grad = False model = nn.Sequential(*list(model.children())[:-1]) input_dim = 512 * BasicBlock.expansion classifier_attr = AttrsClassifier_v2( input_dim=512 * BasicBlock.expansion, num_attributes=num_attributes, latent_dim=latent_dim, layers_num=attr_layer_num, dropout_ratio=dropout_ratio, **kwargs) return model, classifier_attr def resnet34_ids_pre(num_attributes, ids_embedding_size, **kwargs): model = zoo.resnet34(pretrained=True) for param in model.parameters(): param.requires_grad = False model = nn.Sequential(*list(model.children())[:-1]) classifier_attr = ResNetClassifier(BasicBlock, num_classes=num_attributes, **kwargs) classifier_ids = ResNetClassifier(BasicBlock, num_classes=ids_embedding_size, **kwargs) return model, classifier_attr, classifier_ids def resnet34_ids_pre_v2( num_attributes: int, ids_embedding_size: int, latent_dim: int=1024, attr_layer_num: int=0, ids_layer_num: int=0, dropout_ratio: float=0, pretrained: bool=True, **kwargs): model = zoo.resnet34(pretrained=True) if pretrained: for param in model.parameters(): param.requires_grad = False model = nn.Sequential(*list(model.children())[:-1]) input_dim = 512 * BasicBlock.expansion classifier_attr = TopLayer( input_dim=input_dim, latent_dim=latent_dim, output_dim=num_attributes, layers_num=attr_layer_num, dropout_ratio=dropout_ratio, **kwargs) embedding_ids = TopLayer( input_dim=input_dim, latent_dim=latent_dim, output_dim=ids_embedding_size, layers_num=ids_layer_num, dropout_ratio=dropout_ratio, ** kwargs) return model, classifier_attr, embedding_ids def resnet18_ids_pre_v2( num_attributes: int, ids_embedding_size: int, latent_dim: int=1024, attr_layer_num: int=0, ids_layer_num: int=0, dropout_ratio: float=0, **kwargs): model = zoo.resnet18(pretrained=True) for param in model.parameters(): param.requires_grad = False model = nn.Sequential(*list(model.children())[:-1]) input_dim = 512 * BasicBlock.expansion classifier_attr = TopLayer( input_dim=input_dim, latent_dim=latent_dim, output_dim=num_attributes, layers_num=attr_layer_num, dropout_ratio=dropout_ratio, **kwargs) embedding_ids = TopLayer( input_dim=input_dim, latent_dim=latent_dim, output_dim=ids_embedding_size, layers_num=ids_layer_num, 
dropout_ratio=dropout_ratio, ** kwargs) return model, classifier_attr, embedding_ids def resnet18_ids_pre_v3( num_attributes: int=40, ids_embedding_size: int=512, latent_dim: int=1024, attr_layer_num: int=0, ids_layer_num: int=0, dropout_ratio: float=0, pretrained: bool=False, **kwargs): model = zoo.resnet18(pretrained=pretrained) if pretrained: for param in model.parameters(): param.requires_grad = False model = nn.Sequential(*list(model.children())[:-2]) input_dim = 512 * BasicBlock.expansion classifier_attr = AttrsClassifier( input_dim=512 * BasicBlock.expansion, num_attributes=num_attributes, latent_dim=latent_dim, layers_num=attr_layer_num, dropout_ratio=dropout_ratio, apply_spatial=True, **kwargs) embedding_ids = IDsEmbedding( input_dim=512 * BasicBlock.expansion, embedding_size=ids_embedding_size, latent_dim=latent_dim, layers_num=ids_layer_num, dropout_ratio=dropout_ratio, apply_avgpool=True, ** kwargs) return model, classifier_attr, embedding_ids def resnet34_ids_pre_v3( num_attributes: int=40, ids_embedding_size: int=512, latent_dim: int=1024, attr_layer_num: int=0, ids_layer_num: int=0, dropout_ratio: float=0, pretrained: bool=False, **kwargs): model = zoo.resnet34(pretrained=pretrained) if pretrained: for param in model.parameters(): param.requires_grad = False model = nn.Sequential(*list(model.children())[:-2]) input_dim = 512 * BasicBlock.expansion classifier_attr = AttrsClassifier( input_dim=512 * BasicBlock.expansion, num_attributes=num_attributes, latent_dim=latent_dim, layers_num=attr_layer_num, dropout_ratio=dropout_ratio, apply_spatial=True, **kwargs) embedding_ids = IDsEmbedding( input_dim=512 * BasicBlock.expansion, embedding_size=ids_embedding_size, latent_dim=latent_dim, layers_num=ids_layer_num, dropout_ratio=dropout_ratio, apply_avgpool=True, ** kwargs) return model, classifier_attr, embedding_ids
BSD 3-Clause New or Revised License
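A sketch of the backbone-truncation pattern resnet50_ids_pre_v3 relies on, assuming only torch and torchvision are installed: drop the final avgpool/fc layers from a torchvision ResNet-50 so it emits 2048-channel spatial feature maps (512 * Bottleneck.expansion), optionally freezing it as the pretrained branch does. The repo-specific AttrsClassifier and IDsEmbedding heads are not reproduced here.

import torch
import torch.nn as nn
import torchvision.models as zoo

backbone = zoo.resnet50()  # weights omitted here; the factory above may load pretrained ones
for param in backbone.parameters():
    param.requires_grad = False  # frozen, as in the pretrained branch

backbone = nn.Sequential(*list(backbone.children())[:-2])  # keep only the conv stages

with torch.no_grad():
    features = backbone(torch.randn(1, 3, 224, 224))
print(features.shape)  # torch.Size([1, 2048, 7, 7])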
csernazs/pytest-httpserver
pytest_httpserver/httpserver.py
RequestHandler.respond_with_response
python
def respond_with_response(self, response: Response): self.request_handler = lambda request: response
Registers a respond handler function which responds with the specified response object. :param response: the response object which will be sent back to the client
https://github.com/csernazs/pytest-httpserver/blob/e910ebadb2e434c1db74c00d3d0c21dda9910178/pytest_httpserver/httpserver.py#L499-L506
import queue import threading import json import time import re from collections import defaultdict from enum import Enum from contextlib import suppress, contextmanager from copy import copy from typing import Any, Callable, List, Mapping, Optional, Tuple, Union, Pattern from ssl import SSLContext import abc from werkzeug.http import parse_authorization_header from werkzeug.serving import make_server from werkzeug.wrappers import Request from werkzeug.wrappers import Response import werkzeug.urls from werkzeug.datastructures import MultiDict URI_DEFAULT = "" METHOD_ALL = "__ALL" class Undefined: def __repr__(self): return "<UNDEFINED>" UNDEFINED = Undefined() class Error(Exception): pass class NoHandlerError(Error): pass class HTTPServerError(Error): pass class NoMethodFoundForMatchingHeaderValueError(Error): pass class WaitingSettings: def __init__(self, raise_assertions: bool = True, stop_on_nohandler: bool = True, timeout: float = 5): self.raise_assertions = raise_assertions self.stop_on_nohandler = stop_on_nohandler self.timeout = timeout class Waiting: def __init__(self): self._result = None self._start = time.monotonic() self._stop = None def complete(self, result: bool): self._result = result self._stop = time.monotonic() @property def result(self) -> bool: return self._result @property def elapsed_time(self) -> float: return self._stop - self._start class HeaderValueMatcher: DEFAULT_MATCHERS: Mapping[str, Callable[[Optional[str], str], bool]] = {} def __init__(self, matchers: Optional[Mapping[str, Callable[[Optional[str], str], bool]]] = None): self.matchers = self.DEFAULT_MATCHERS if matchers is None else matchers @staticmethod def authorization_header_value_matcher(actual: Optional[str], expected: str) -> bool: return parse_authorization_header(actual) == parse_authorization_header(expected) @staticmethod def default_header_value_matcher(actual: Optional[str], expected: str) -> bool: return actual == expected def __call__(self, header_name: str, actual: Optional[str], expected: str) -> bool: try: matcher = self.matchers[header_name] except KeyError: raise NoMethodFoundForMatchingHeaderValueError( "No method found for matching header value: {}".format(header_name)) return matcher(actual, expected) HeaderValueMatcher.DEFAULT_MATCHERS = defaultdict( lambda: HeaderValueMatcher.default_header_value_matcher, {'Authorization': HeaderValueMatcher.authorization_header_value_matcher} ) class QueryMatcher(abc.ABC): def match(self, request_query_string: bytes) -> bool: values = self.get_comparing_values(request_query_string) return values[0] == values[1] @abc.abstractmethod def get_comparing_values(self, request_query_string: bytes) -> tuple: pass class StringQueryMatcher(QueryMatcher): def __init__(self, query_string: Union[bytes, str]): if not isinstance(query_string, (str, bytes)): raise TypeError("query_string must be a string, or a bytes-like object") self.query_string = query_string def get_comparing_values(self, request_query_string: bytes) -> tuple: if isinstance(self.query_string, str): query_string = self.query_string.encode() elif isinstance(self.query_string, bytes): query_string = self.query_string else: raise TypeError("query_string must be a string, or a bytes-like object") return (request_query_string, query_string) class MappingQueryMatcher(QueryMatcher): def __init__(self, query_dict: Union[Mapping, MultiDict]): self.query_dict = query_dict def get_comparing_values(self, request_query_string: bytes) -> tuple: query = werkzeug.urls.url_decode(request_query_string) if 
isinstance(self.query_dict, MultiDict): return (query, self.query_dict) else: return (query.to_dict(), dict(self.query_dict)) class BooleanQueryMatcher(QueryMatcher): def __init__(self, result: bool): self.result = result def get_comparing_values(self, request_query_string): if self.result: return (True, True) else: return (True, False) def _create_query_matcher(query_string: Union[None, QueryMatcher, str, bytes, Mapping]) -> QueryMatcher: if isinstance(query_string, QueryMatcher): return query_string if query_string is None: return BooleanQueryMatcher(True) if isinstance(query_string, (str, bytes)): return StringQueryMatcher(query_string) if isinstance(query_string, Mapping): return MappingQueryMatcher(query_string) raise TypeError("Unable to cast this type to QueryMatcher: {!r}".format(type(query_string))) class URIPattern(abc.ABC): @abc.abstractmethod def match(self, uri: str) -> bool: pass class RequestMatcher: def __init__( self, uri: Union[str, URIPattern, Pattern[str]], method: str = METHOD_ALL, data: Union[str, bytes, None] = None, data_encoding: str = "utf-8", headers: Optional[Mapping[str, str]] = None, query_string: Union[None, QueryMatcher, str, bytes, Mapping] = None, header_value_matcher: Optional[HeaderValueMatcher] = None, json: Any = UNDEFINED): if json is not UNDEFINED and data is not None: raise ValueError("data and json parameters are mutually exclusive") self.uri = uri self.method = method self.query_string = query_string self.query_matcher = _create_query_matcher(self.query_string) self.json = json self.headers: Mapping[str, str] = {} if headers is not None: self.headers = headers if isinstance(data, str): data = data.encode(data_encoding) self.data = data self.data_encoding = data_encoding self.header_value_matcher = HeaderValueMatcher() if header_value_matcher is None else header_value_matcher def __repr__(self): class_name = self.__class__.__name__ retval = "<{} ".format(class_name) retval += "uri={uri!r} method={method!r} query_string={query_string!r} headers={headers!r} data={data!r} json={json!r}>".format_map( self.__dict__) return retval def match_data(self, request: Request) -> bool: if self.data is None: return True return request.data == self.data def match_uri(self, request: Request) -> bool: path = request.path if isinstance(self.uri, URIPattern): return self.uri.match(path) elif isinstance(self.uri, re.compile("").__class__): return bool(self.uri.match(path)) else: return self.uri == URI_DEFAULT or path == self.uri def match_json(self, request: Request) -> bool: if self.json is UNDEFINED: return True try: json_received = json.loads(request.data.decode(self.data_encoding)) except json.JSONDecodeError: return False except UnicodeDecodeError: return False return json_received == self.json def difference(self, request: Request) -> List[Tuple]: retval: List[Tuple] = [] if not self.match_uri(request): retval.append(("uri", request.path, self.uri)) if self.method != METHOD_ALL and self.method != request.method: retval.append(("method", request.method, self.method)) if not self.query_matcher.match(request.query_string): retval.append(("query_string", request.query_string, self.query_string)) request_headers = {} expected_headers = {} for key, value in self.headers.items(): if not self.header_value_matcher(key, request.headers.get(key), value): request_headers[key] = request.headers.get(key) expected_headers[key] = value if request_headers and expected_headers: retval.append(("headers", request_headers, expected_headers)) if not self.match_data(request): 
retval.append(("data", request.data, self.data)) if not self.match_json(request): retval.append(("json", request.data, self.json)) return retval def match(self, request: Request) -> bool: difference = self.difference(request) return not difference class RequestHandler: def __init__(self, matcher: RequestMatcher): self.matcher = matcher self.request_handler: Optional[Callable[[Request], Response]] = None def respond(self, request: Request) -> Response: if self.request_handler is None: raise NoHandlerError("No handler found for request: {} {}".format(request.method, request.path)) else: return self.request_handler(request) def respond_with_json( self, response_json, status: int = 200, headers: Optional[Mapping[str, str]] = None, content_type: str = "application/json"): response_data = json.dumps(response_json, indent=4) self.respond_with_data(response_data, status, headers, content_type=content_type) def respond_with_data( self, response_data: Union[str, bytes] = "", status: int = 200, headers: Optional[Mapping[str, str]] = None, mimetype: Optional[str] = None, content_type: Optional[str] = None): def handler(request): return Response(response_data, status, headers, mimetype, content_type) self.request_handler = handler
MIT License
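A minimal pytest sketch for respond_with_response above, assuming pytest-httpserver and requests are installed; the /greet path and body are made up. The httpserver fixture is provided by the plugin.

import requests
from werkzeug.wrappers import Response


def test_greet(httpserver):
    # Any matching request is answered with this exact Response object.
    httpserver.expect_request("/greet").respond_with_response(
        Response("hello", status=200, content_type="text/plain")
    )

    reply = requests.get(httpserver.url_for("/greet"))
    assert reply.status_code == 200
    assert reply.text == "hello"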
argoproj-labs/argo-client-python
argo/workflows/client/api/cluster_workflow_template_service_api.py
ClusterWorkflowTemplateServiceApi.list_cluster_workflow_templates
python
def list_cluster_workflow_templates(self, **kwargs): kwargs['_return_http_data_only'] = True return self.list_cluster_workflow_templates_with_http_info(**kwargs)
list_cluster_workflow_templates # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_cluster_workflow_templates(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str list_options_label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. +optional. :param str list_options_field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. +optional. :param bool list_options_watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. +optional. :param bool list_options_allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. +optional. :param str list_options_resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. +optional. :param str list_options_timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. :param str list_options_limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str list_options_continue: The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1alpha1ClusterWorkflowTemplateList If the method is called asynchronously, returns the request thread.
https://github.com/argoproj-labs/argo-client-python/blob/993d684cab39a834770b296e028519cec035c7b5/argo/workflows/client/api/cluster_workflow_template_service_api.py#L531-L560
from __future__ import absolute_import import re import six from argo.workflows.client.api_client import ApiClient from argo.workflows.client.exceptions import ( ApiTypeError, ApiValueError ) class ClusterWorkflowTemplateServiceApi(object): def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_cluster_workflow_template(self, body, **kwargs): kwargs['_return_http_data_only'] = True return self.create_cluster_workflow_template_with_http_info(body, **kwargs) def create_cluster_workflow_template_with_http_info(self, body, **kwargs): local_var_params = locals() all_params = [ 'body' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_cluster_workflow_template" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('body' not in local_var_params or local_var_params['body'] is None): raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_workflow_template`") collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/v1/cluster-workflow-templates', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1alpha1ClusterWorkflowTemplate', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def delete_cluster_workflow_template(self, name, **kwargs): kwargs['_return_http_data_only'] = True return self.delete_cluster_workflow_template_with_http_info(name, **kwargs) def delete_cluster_workflow_template_with_http_info(self, name, **kwargs): local_var_params = locals() all_params = [ 'name', 'delete_options_grace_period_seconds', 'delete_options_preconditions_uid', 'delete_options_preconditions_resource_version', 'delete_options_orphan_dependents', 'delete_options_propagation_policy', 'delete_options_dry_run' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_cluster_workflow_template" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('name' not in local_var_params or local_var_params['name'] is None): raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_workflow_template`") collection_formats = {} path_params = {} if 'name' in local_var_params: path_params['name'] = local_var_params['name'] query_params = [] if 'delete_options_grace_period_seconds' in local_var_params and 
local_var_params['delete_options_grace_period_seconds'] is not None: query_params.append(('deleteOptions.gracePeriodSeconds', local_var_params['delete_options_grace_period_seconds'])) if 'delete_options_preconditions_uid' in local_var_params and local_var_params['delete_options_preconditions_uid'] is not None: query_params.append(('deleteOptions.preconditions.uid', local_var_params['delete_options_preconditions_uid'])) if 'delete_options_preconditions_resource_version' in local_var_params and local_var_params['delete_options_preconditions_resource_version'] is not None: query_params.append(('deleteOptions.preconditions.resourceVersion', local_var_params['delete_options_preconditions_resource_version'])) if 'delete_options_orphan_dependents' in local_var_params and local_var_params['delete_options_orphan_dependents'] is not None: query_params.append(('deleteOptions.orphanDependents', local_var_params['delete_options_orphan_dependents'])) if 'delete_options_propagation_policy' in local_var_params and local_var_params['delete_options_propagation_policy'] is not None: query_params.append(('deleteOptions.propagationPolicy', local_var_params['delete_options_propagation_policy'])) if 'delete_options_dry_run' in local_var_params and local_var_params['delete_options_dry_run'] is not None: query_params.append(('deleteOptions.dryRun', local_var_params['delete_options_dry_run'])) collection_formats['deleteOptions.dryRun'] = 'multi' header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/v1/cluster-workflow-templates/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_cluster_workflow_template(self, name, **kwargs): kwargs['_return_http_data_only'] = True return self.get_cluster_workflow_template_with_http_info(name, **kwargs) def get_cluster_workflow_template_with_http_info(self, name, **kwargs): local_var_params = locals() all_params = [ 'name', 'get_options_resource_version' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_cluster_workflow_template" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('name' not in local_var_params or local_var_params['name'] is None): raise ApiValueError("Missing the required parameter `name` when calling `get_cluster_workflow_template`") collection_formats = {} path_params = {} if 'name' in local_var_params: path_params['name'] = local_var_params['name'] query_params = [] if 'get_options_resource_version' in local_var_params and local_var_params['get_options_resource_version'] is not None: query_params.append(('getOptions.resourceVersion', local_var_params['get_options_resource_version'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = 
self.api_client.select_header_accept( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/v1/cluster-workflow-templates/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1alpha1ClusterWorkflowTemplate', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def lint_cluster_workflow_template(self, body, **kwargs): kwargs['_return_http_data_only'] = True return self.lint_cluster_workflow_template_with_http_info(body, **kwargs) def lint_cluster_workflow_template_with_http_info(self, body, **kwargs): local_var_params = locals() all_params = [ 'body' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method lint_cluster_workflow_template" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('body' not in local_var_params or local_var_params['body'] is None): raise ApiValueError("Missing the required parameter `body` when calling `lint_cluster_workflow_template`") collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/v1/cluster-workflow-templates/lint', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1alpha1ClusterWorkflowTemplate', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
Apache License 2.0
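A usage sketch for list_cluster_workflow_templates above. The host is a placeholder, and the Configuration import assumes the usual OpenAPI-generator layout (argo.workflows.client.configuration); adjust both, plus authentication, for a real Argo Workflows server.

from argo.workflows.client.api_client import ApiClient
from argo.workflows.client.configuration import Configuration  # assumed generator layout
from argo.workflows.client.api.cluster_workflow_template_service_api import (
    ClusterWorkflowTemplateServiceApi,
)

config = Configuration()
config.host = "http://localhost:2746"  # placeholder Argo server address

api = ClusterWorkflowTemplateServiceApi(ApiClient(config))
template_list = api.list_cluster_workflow_templates()
for item in template_list.items or []:  # items follows the Kubernetes list convention
    print(item.metadata.name)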
uwdata/termite-visualizations
web2py/gluon/restricted.py
RestrictedError.__init__
python
def __init__( self, layer='', code='', output='', environment=None, ): if environment is None: environment = {} self.layer = layer self.code = code self.output = output self.environment = environment if layer: try: self.traceback = traceback.format_exc() except: self.traceback = 'no traceback because template parsing error' try: self.snapshot = snapshot(context=10, code=code, environment=self.environment) except: self.snapshot = {} else: self.traceback = '(no error)' self.snapshot = {}
The layer argument is some description of where in the system the exception occurred.
https://github.com/uwdata/termite-visualizations/blob/79da58bc607893bbd5db703f7d87a89b5e97c311/web2py/gluon/restricted.py#L118-L147
import sys import cPickle import traceback import types import os import logging from storage import Storage from http import HTTP from html import BEAUTIFY, XML logger = logging.getLogger("web2py") __all__ = ['RestrictedError', 'restricted', 'TicketStorage', 'compile2'] class TicketStorage(Storage): def __init__( self, db=None, tablename='web2py_ticket' ): Storage.__init__(self) self.db = db self.tablename = tablename def store(self, request, ticket_id, ticket_data): if self.db: self._store_in_db(request, ticket_id, ticket_data) else: self._store_on_disk(request, ticket_id, ticket_data) def _store_in_db(self, request, ticket_id, ticket_data): self.db._adapter.reconnect() try: table = self._get_table(self.db, self.tablename, request.application) id = table.insert(ticket_id=ticket_id, ticket_data=cPickle.dumps(ticket_data), created_datetime=request.now) self.db.commit() message = 'In FILE: %(layer)s\n\n%(traceback)s\n' except Exception, e: self.db.rollback() message =' Unable to store in FILE: %(layer)s\n\n%(traceback)s\n' self.db.close() logger.error(message % ticket_data) def _store_on_disk(self, request, ticket_id, ticket_data): ef = self._error_file(request, ticket_id, 'wb') try: cPickle.dump(ticket_data, ef) finally: ef.close() def _error_file(self, request, ticket_id, mode, app=None): root = request.folder if app: root = os.path.join(os.path.join(root, '..'), app) errors_folder = os.path.abspath( os.path.join(root, 'errors')) return open(os.path.join(errors_folder, ticket_id), mode) def _get_table(self, db, tablename, app): tablename = tablename + '_' + app table = db.get(tablename) if not table: table = db.define_table( tablename, db.Field('ticket_id', length=100), db.Field('ticket_data', 'text'), db.Field('created_datetime', 'datetime')) return table def load( self, request, app, ticket_id, ): if not self.db: try: ef = self._error_file(request, ticket_id, 'rb', app) except IOError: return {} try: return cPickle.load(ef) finally: ef.close() else: table = self._get_table(self.db, self.tablename, app) rows = self.db(table.ticket_id == ticket_id).select() return cPickle.loads(rows[0].ticket_data) if rows else {} class RestrictedError(Exception):
BSD 3-Clause New or Revised License
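A standalone Python 3 sketch (not web2py itself) of the pattern RestrictedError.__init__ uses: capture traceback.format_exc() at construction time, inside the except block that caught the original error, so the ticket can be stored later. TicketedError is a toy stand-in.

import traceback


class TicketedError(Exception):
    """Toy stand-in for RestrictedError."""

    def __init__(self, layer="", code="", output=""):
        self.layer = layer
        self.code = code
        self.output = output
        if layer:
            # Only meaningful when constructed inside an except block.
            self.traceback = traceback.format_exc()
        else:
            self.traceback = "(no error)"


try:
    exec("1 / 0")  # stand-in for running user-supplied application code
except Exception:
    err = TicketedError(layer="controllers/default.py", code="1 / 0")

print(err.traceback.splitlines()[-1])  # ZeroDivisionError: division by zero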
nuitka/nuitka
nuitka/nodes/ExpressionBases.py
ExpressionBase.mayRaiseExceptionImportName
python
def mayRaiseExceptionImportName(exception_type, import_name): return True
Unless we are told otherwise, everything may raise for name import.
https://github.com/nuitka/nuitka/blob/4c5161620ea8f0f1c93a1d6be79e7e6eda7161d4/nuitka/nodes/ExpressionBases.py#L744-L747
from abc import abstractmethod from nuitka import Options from nuitka.__past__ import long from nuitka.Constants import isCompileTimeConstantValue from nuitka.PythonVersions import python_version from .NodeBases import ChildrenHavingMixin, NodeBase from .NodeMakingHelpers import ( getComputationResult, makeConstantReplacementNode, makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue, wrapExpressionWithNodeSideEffects, wrapExpressionWithSideEffects, ) from .shapes.BuiltinTypeShapes import tshape_dict, tshape_str, tshape_unicode from .shapes.StandardShapes import tshape_unknown class ExpressionBase(NodeBase): __slots__ = ("code_generated",) @staticmethod def getTypeShape(): return tshape_unknown def getValueShape(self): return self @staticmethod def isCompileTimeConstant(): return False @staticmethod def getTruthValue(): return None @staticmethod def getComparisonValue(): return None @staticmethod def isKnownToBeIterable(count): return False @staticmethod def isKnownToBeIterableAtMin(count): return False def getIterationLength(self): return None def getIterationMinLength(self): return self.getIterationLength() @staticmethod def getStringValue(): return None def getStrValue(self): string_value = self.getStringValue() if string_value is not None: return makeConstantReplacementNode( node=self, constant=string_value, user_provided=False ) return None def getTypeValue(self): from .TypeNodes import ExpressionBuiltinType1 return ExpressionBuiltinType1( value=self.makeClone(), source_ref=self.source_ref ) def getIterationHandle(self): return None @staticmethod def isKnownToBeHashable(): return None @staticmethod def extractUnhashableNode(): return None def onRelease(self, trace_collection): pass def isKnownToHaveAttribute(self, attribute_name): return None @abstractmethod def computeExpressionRaw(self, trace_collection): def computeExpressionAttribute(self, lookup_node, attribute_name, trace_collection): trace_collection.onControlFlowEscape(self) if not self.isKnownToHaveAttribute(attribute_name): trace_collection.onExceptionRaiseExit(BaseException) return lookup_node, None, None def computeExpressionAttributeSpecial( self, lookup_node, attribute_name, trace_collection ): trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return lookup_node, None, None def computeExpressionImportName(self, import_node, import_name, trace_collection): if self.mayRaiseExceptionImportName(BaseException, import_name): trace_collection.onExceptionRaiseExit(BaseException) trace_collection.onControlFlowEscape(self) return import_node, None, None def computeExpressionSetAttribute( self, set_node, attribute_name, value_node, trace_collection ): trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return set_node, None, None def computeExpressionDelAttribute(self, set_node, attribute_name, trace_collection): trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return set_node, None, None def computeExpressionSubscript(self, lookup_node, subscript, trace_collection): trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return lookup_node, None, None def computeExpressionSetSubscript( self, set_node, subscript, value_node, trace_collection ): trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return set_node, None, None def computeExpressionDelSubscript(self, del_node, subscript, trace_collection): 
trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return del_node, None, None def computeExpressionSlice(self, lookup_node, lower, upper, trace_collection): trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return lookup_node, None, None def computeExpressionSetSlice( self, set_node, lower, upper, value_node, trace_collection ): trace_collection.removeKnowledge(self) trace_collection.removeKnowledge(value_node) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return set_node, None, None def computeExpressionDelSlice(self, set_node, lower, upper, trace_collection): trace_collection.removeKnowledge(self) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return set_node, None, None def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection): self.onContentEscapes(trace_collection) if call_args is not None: call_args.onContentEscapes(trace_collection) if call_kw is not None: call_kw.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return call_node, None, None def computeExpressionLen(self, len_node, trace_collection): shape = self.getValueShape() has_len = shape.hasShapeSlotLen() if has_len is False: return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( template="object of type '%s' has no len()", operation="len", original_node=len_node, value_node=self, ) elif has_len is True: iter_length = self.getIterationLength() if iter_length is not None: from .ConstantRefNodes import makeConstantRefNode result = makeConstantRefNode( constant=int(iter_length), source_ref=len_node.getSourceReference(), ) result = wrapExpressionWithNodeSideEffects( new_node=result, old_node=self ) return ( result, "new_constant", "Predicted 'len' result from value shape.", ) self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return len_node, None, None def computeExpressionAbs(self, abs_node, trace_collection): shape = self.getTypeShape() if shape.hasShapeSlotAbs() is False: return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( template="bad operand type for abs(): '%s'", operation="abs", original_node=abs_node, value_node=self, ) self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return abs_node, None, None def computeExpressionInt(self, int_node, trace_collection): shape = self.getTypeShape() if shape.hasShapeSlotInt() is False: return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( template="int() argument must be a string or a number, not '%s'" if python_version < 0x300 else "int() argument must be a string, a bytes-like object or a number, not '%s'", operation="int", original_node=int_node, value_node=self, ) self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return int_node, None, None def computeExpressionLong(self, long_node, trace_collection): shape = self.getTypeShape() if shape.hasShapeSlotLong() is False: return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( template="long() argument must be a string or a number, not '%s'", operation="long", original_node=long_node, value_node=self, ) self.onContentEscapes(trace_collection) 
trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return long_node, None, None def computeExpressionFloat(self, float_node, trace_collection): shape = self.getTypeShape() if shape.hasShapeSlotFloat() is False: return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( "float() argument must be a string or a number" if Options.is_fullcompat and python_version < 0x300 else "float() argument must be a string or a number, not '%s'", operation="long", original_node=float_node, value_node=self, ) self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return float_node, None, None def computeExpressionBytes(self, bytes_node, trace_collection): shape = self.getTypeShape() if ( shape.hasShapeSlotBytes() is False and shape.hasShapeSlotInt() is False and shape.hasShapeSlotIter() is False ): return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( "'%s' object is not iterable", operation="bytes", original_node=bytes_node, value_node=self, ) self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return bytes_node, None, None def computeExpressionComplex(self, complex_node, trace_collection): shape = self.getTypeShape() if shape.hasShapeSlotComplex() is False: return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( "complex() argument must be a string or a number" if Options.is_fullcompat and python_version < 0x300 else "complex() argument must be a string or a number, not '%s'", operation="complex", original_node=complex_node, value_node=self, ) self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return complex_node, None, None def computeExpressionIter1(self, iter_node, trace_collection): shape = self.getTypeShape() if shape.hasShapeSlotIter() is False: return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( template="'%s' object is not iterable", operation="iter", original_node=iter_node, value_node=self, ) self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return iter_node, None, None def computeExpressionNext1(self, next_node, trace_collection): self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return False, (next_node, None, None) def computeExpressionAsyncIter(self, iter_node, trace_collection): self.onContentEscapes(trace_collection) trace_collection.onControlFlowEscape(self) trace_collection.onExceptionRaiseExit(BaseException) return iter_node, None, None def computeExpressionOperationNot(self, not_node, trace_collection): trace_collection.onControlFlowEscape(not_node) trace_collection.onExceptionRaiseExit(BaseException) return not_node, None, None def computeExpressionOperationRepr(self, repr_node, trace_collection): type_shape = self.getTypeShape() escape_desc = type_shape.getOperationUnaryReprEscape() exception_raise_exit = escape_desc.getExceptionExit() if exception_raise_exit is not None: trace_collection.onExceptionRaiseExit(exception_raise_exit) if escape_desc.isValueEscaping(): trace_collection.removeKnowledge(self) if escape_desc.isControlFlowEscape(): trace_collection.onControlFlowEscape(self) return (repr_node, None, None), escape_desc def computeExpressionComparisonIn(self, in_node, 
value_node, trace_collection): shape = self.getTypeShape() assert shape is not None, self if shape.hasShapeSlotContains() is False: return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue( template="argument of type '%s' object is not iterable", operation="in", original_node=in_node, value_node=self, ) trace_collection.onControlFlowEscape(in_node) trace_collection.onExceptionRaiseExit(BaseException) return in_node, None, None def computeExpressionDrop(self, statement, trace_collection): if not self.mayHaveSideEffects(): return ( None, "new_statements", lambda: "Removed %s without effect." % self.getDescription(), ) return statement, None, None def computeExpressionBool(self, trace_collection): if not self.mayRaiseException(BaseException) and self.mayRaiseExceptionBool( BaseException ): trace_collection.onExceptionRaiseExit(BaseException) @staticmethod def onContentEscapes(trace_collection): pass @staticmethod def mayRaiseExceptionBool(exception_type): return True @staticmethod def mayRaiseExceptionAbs(exception_type): return True @staticmethod def mayRaiseExceptionInt(exception_type): return True @staticmethod def mayRaiseExceptionLong(exception_type): return True @staticmethod def mayRaiseExceptionFloat(exception_type): return True @staticmethod def mayRaiseExceptionBytes(exception_type): return True @staticmethod def mayRaiseExceptionIn(exception_type, checked_value): return True @staticmethod def mayRaiseExceptionAttributeLookup(exception_type, attribute_name): return True @staticmethod def mayRaiseExceptionAttributeLookupSpecial(exception_type, attribute_name): return True @staticmethod def mayRaiseExceptionAttributeLookupObject(exception_type, attribute): return True @staticmethod def mayRaiseExceptionAttributeCheck(exception_type, attribute_name): return True @staticmethod def mayRaiseExceptionAttributeCheckObject(exception_type, attribute): return True @staticmethod
Apache License 2.0
react-native-skia/react-native-skia
tools/clang/scripts/run_tool.py
_CompilerDispatcher.Run
python
def Run(self):
    pool = multiprocessing.Pool()
    result_iterator = pool.imap_unordered(
        functools.partial(_ExecuteTool, self.__toolname, self.__tool_args,
                          self.__build_directory),
        self.__compdb_entries)
    for result in result_iterator:
        self.__ProcessResult(result)
    sys.stderr.write('\n')
Does the grunt work: dispatches the tool over all compilation database entries on a multiprocessing pool and processes each result as it completes.
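For illustration only, here is a self-contained sketch of the same fan-out pattern (imap_unordered over a partially applied worker); the worker function and inputs below are made-up stand-ins, not part of run_tool.py.

import functools
import multiprocessing
import sys

def _execute(prefix, item):
    # Hypothetical stand-in for _ExecuteTool: returns a result dict per entry.
    return {'status': True, 'filename': '%s-%s' % (prefix, item)}

if __name__ == '__main__':
    pool = multiprocessing.Pool()
    # Results arrive as workers finish, not in submission order.
    for result in pool.imap_unordered(functools.partial(_execute, 'entry'), range(8)):
        sys.stderr.write('.' if result['status'] else 'F')
    sys.stderr.write('\n')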
https://github.com/react-native-skia/react-native-skia/blob/91ecc74444b163f128541dbc1a42e27a9c0fb40b/tools/clang/scripts/run_tool.py#L284-L293
from __future__ import print_function import argparse from collections import namedtuple import functools import json import multiprocessing import os import os.path import re import subprocess import shlex import sys script_dir = os.path.dirname(os.path.realpath(__file__)) tool_dir = os.path.abspath(os.path.join(script_dir, '../pylib')) sys.path.insert(0, tool_dir) from clang import compile_db CompDBEntry = namedtuple('CompDBEntry', ['directory', 'filename', 'command']) def _PruneGitFiles(git_files, paths): if not git_files: return [] git_files.sort() pruned_list = [] git_index = 0 for path in sorted(paths): least = git_index most = len(git_files) - 1 while least <= most: middle = (least + most ) / 2 if git_files[middle] == path: least = middle break elif git_files[middle] > path: most = middle - 1 else: least = middle + 1 while least < len(git_files) and git_files[least].startswith(path): pruned_list.append(git_files[least]) least += 1 git_index = least return pruned_list def _GetFilesFromGit(paths=None): partial_paths = [] files = [] for p in paths: real_path = os.path.realpath(p) if os.path.isfile(real_path): files.append(real_path) else: partial_paths.append(real_path) if partial_paths or not files: args = [] if sys.platform == 'win32': args.append('git.bat') else: args.append('git') args.append('ls-files') command = subprocess.Popen(args, stdout=subprocess.PIPE) output, _ = command.communicate() git_files = [os.path.realpath(p) for p in output.splitlines()] if partial_paths: git_files = _PruneGitFiles(git_files, partial_paths) files.extend(git_files) return files def _GetEntriesFromCompileDB(build_directory, source_filenames): filenames_set = None if source_filenames is None else set(source_filenames) return [ CompDBEntry(entry['directory'], entry['file'], entry['command']) for entry in compile_db.Read(build_directory) if filenames_set is None or os.path.realpath( os.path.join(entry['directory'], entry['file'])) in filenames_set ] def _UpdateCompileCommandsIfNeeded(compile_commands, files_list): if sys.platform == 'win32' and files_list: relative_paths = set([os.path.relpath(f) for f in files_list]) filtered_compile_commands = [] for entry in compile_commands: file_path = os.path.relpath( os.path.join(entry['directory'], entry['file'])) if file_path in relative_paths: filtered_compile_commands.append(entry) else: filtered_compile_commands = compile_commands return compile_db.ProcessCompileDatabaseIfNeeded(filtered_compile_commands) def _ExecuteTool(toolname, tool_args, build_directory, compdb_entry): args = [toolname, compdb_entry.filename] if (tool_args): args.extend(tool_args) args.append('--') args.extend([ a for a in shlex.split(compdb_entry.command, posix=(sys.platform != 'win32')) if a != compdb_entry.filename and a != '/showIncludes' and a != '/showIncludes:user' and a != '-MMD' ]) for i, arg in enumerate(args): if arg == '-MF': del args[i:i+2] break if sys.platform == 'win32': args = [a.replace('\\"', '"') for a in args] command = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=build_directory) stdout_text, stderr_text = command.communicate() stderr_text = re.sub( r"^warning: .*'linker' input unused \[-Wunused-command-line-argument\]\n", "", stderr_text, flags=re.MULTILINE) if command.returncode != 0: return { 'status': False, 'filename': compdb_entry.filename, 'stderr_text': stderr_text, } else: return { 'status': True, 'filename': compdb_entry.filename, 'stdout_text': stdout_text, 'stderr_text': stderr_text, } class _CompilerDispatcher(object): 
def __init__(self, toolname, tool_args, build_directory, compdb_entries): self.__toolname = toolname self.__tool_args = tool_args self.__build_directory = build_directory self.__compdb_entries = compdb_entries self.__success_count = 0 self.__failed_count = 0 @property def failed_count(self): return self.__failed_count
MIT License
pfnet/pfrl
pfrl/agent.py
Agent.save
python
def save(self, dirname: str) -> None:
    pass
Save internal states.

Returns:
    None
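Since save() is abstract here, a concrete agent typically serializes its learnable components under dirname. The following is a minimal hypothetical sketch (not pfrl's actual saving mechanism, which goes through its own mixin):

import os
import torch

class TinyAgent:
    """Hypothetical concrete agent with a single model to persist."""

    def __init__(self):
        self.model = torch.nn.Linear(4, 2)

    def save(self, dirname: str) -> None:
        # Create the target directory and write the model weights into it.
        os.makedirs(dirname, exist_ok=True)
        torch.save(self.model.state_dict(), os.path.join(dirname, "model.pt"))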
https://github.com/pfnet/pfrl/blob/edc5a35a0d5ffb86ec41f4ae3a9a10c5ab2c6be6/pfrl/agent.py#L33-L39
import contextlib import os from abc import ABCMeta, abstractmethod, abstractproperty from typing import Any, List, Optional, Sequence, Tuple import torch class Agent(object, metaclass=ABCMeta): training = True @abstractmethod def act(self, obs: Any) -> Any: raise NotImplementedError() @abstractmethod def observe(self, obs: Any, reward: float, done: bool, reset: bool) -> None: raise NotImplementedError() @abstractmethod
MIT License
argoproj-labs/argo-client-python
argo/workflows/client/models/v1alpha1_workflow_template_spec.py
V1alpha1WorkflowTemplateSpec.priority
python
def priority(self, priority):
    self._priority = priority
Sets the priority of this V1alpha1WorkflowTemplateSpec. Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first. # noqa: E501 :param priority: The priority of this V1alpha1WorkflowTemplateSpec. # noqa: E501 :type: int
https://github.com/argoproj-labs/argo-client-python/blob/993d684cab39a834770b296e028519cec035c7b5/argo/workflows/client/models/v1alpha1_workflow_template_spec.py#L717-L726
import pprint import re import six from argo.workflows.client.configuration import Configuration class V1alpha1WorkflowTemplateSpec(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'active_deadline_seconds': 'int', 'affinity': 'V1Affinity', 'arguments': 'V1alpha1Arguments', 'artifact_repository_ref': 'V1alpha1ArtifactRepositoryRef', 'automount_service_account_token': 'bool', 'dns_config': 'V1PodDNSConfig', 'dns_policy': 'str', 'entrypoint': 'str', 'executor': 'V1alpha1ExecutorConfig', 'host_aliases': 'list[V1HostAlias]', 'host_network': 'bool', 'image_pull_secrets': 'list[V1LocalObjectReference]', 'metrics': 'V1alpha1Metrics', 'node_selector': 'dict(str, str)', 'on_exit': 'str', 'parallelism': 'int', 'pod_disruption_budget': 'IoK8sApiPolicyV1beta1PodDisruptionBudgetSpec', 'pod_gc': 'V1alpha1PodGC', 'pod_priority': 'int', 'pod_priority_class_name': 'str', 'pod_spec_patch': 'str', 'priority': 'int', 'retry_strategy': 'V1alpha1RetryStrategy', 'scheduler_name': 'str', 'security_context': 'V1PodSecurityContext', 'service_account_name': 'str', 'shutdown': 'str', 'suspend': 'bool', 'synchronization': 'V1alpha1Synchronization', 'templates': 'list[V1alpha1Template]', 'tolerations': 'list[V1Toleration]', 'ttl_seconds_after_finished': 'int', 'ttl_strategy': 'V1alpha1TTLStrategy', 'volume_claim_gc': 'V1alpha1VolumeClaimGC', 'volume_claim_templates': 'list[V1PersistentVolumeClaim]', 'volumes': 'list[V1Volume]', 'workflow_metadata': 'V1ObjectMeta', 'workflow_template_ref': 'V1alpha1WorkflowTemplateRef' } attribute_map = { 'active_deadline_seconds': 'activeDeadlineSeconds', 'affinity': 'affinity', 'arguments': 'arguments', 'artifact_repository_ref': 'artifactRepositoryRef', 'automount_service_account_token': 'automountServiceAccountToken', 'dns_config': 'dnsConfig', 'dns_policy': 'dnsPolicy', 'entrypoint': 'entrypoint', 'executor': 'executor', 'host_aliases': 'hostAliases', 'host_network': 'hostNetwork', 'image_pull_secrets': 'imagePullSecrets', 'metrics': 'metrics', 'node_selector': 'nodeSelector', 'on_exit': 'onExit', 'parallelism': 'parallelism', 'pod_disruption_budget': 'podDisruptionBudget', 'pod_gc': 'podGC', 'pod_priority': 'podPriority', 'pod_priority_class_name': 'podPriorityClassName', 'pod_spec_patch': 'podSpecPatch', 'priority': 'priority', 'retry_strategy': 'retryStrategy', 'scheduler_name': 'schedulerName', 'security_context': 'securityContext', 'service_account_name': 'serviceAccountName', 'shutdown': 'shutdown', 'suspend': 'suspend', 'synchronization': 'synchronization', 'templates': 'templates', 'tolerations': 'tolerations', 'ttl_seconds_after_finished': 'ttlSecondsAfterFinished', 'ttl_strategy': 'ttlStrategy', 'volume_claim_gc': 'volumeClaimGC', 'volume_claim_templates': 'volumeClaimTemplates', 'volumes': 'volumes', 'workflow_metadata': 'workflowMetadata', 'workflow_template_ref': 'workflowTemplateRef' } def __init__(self, active_deadline_seconds=None, affinity=None, arguments=None, artifact_repository_ref=None, automount_service_account_token=None, dns_config=None, dns_policy=None, entrypoint=None, executor=None, host_aliases=None, host_network=None, image_pull_secrets=None, metrics=None, node_selector=None, on_exit=None, parallelism=None, pod_disruption_budget=None, pod_gc=None, pod_priority=None, pod_priority_class_name=None, pod_spec_patch=None, priority=None, retry_strategy=None, scheduler_name=None, 
security_context=None, service_account_name=None, shutdown=None, suspend=None, synchronization=None, templates=None, tolerations=None, ttl_seconds_after_finished=None, ttl_strategy=None, volume_claim_gc=None, volume_claim_templates=None, volumes=None, workflow_metadata=None, workflow_template_ref=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._active_deadline_seconds = None self._affinity = None self._arguments = None self._artifact_repository_ref = None self._automount_service_account_token = None self._dns_config = None self._dns_policy = None self._entrypoint = None self._executor = None self._host_aliases = None self._host_network = None self._image_pull_secrets = None self._metrics = None self._node_selector = None self._on_exit = None self._parallelism = None self._pod_disruption_budget = None self._pod_gc = None self._pod_priority = None self._pod_priority_class_name = None self._pod_spec_patch = None self._priority = None self._retry_strategy = None self._scheduler_name = None self._security_context = None self._service_account_name = None self._shutdown = None self._suspend = None self._synchronization = None self._templates = None self._tolerations = None self._ttl_seconds_after_finished = None self._ttl_strategy = None self._volume_claim_gc = None self._volume_claim_templates = None self._volumes = None self._workflow_metadata = None self._workflow_template_ref = None self.discriminator = None if active_deadline_seconds is not None: self.active_deadline_seconds = active_deadline_seconds if affinity is not None: self.affinity = affinity if arguments is not None: self.arguments = arguments if artifact_repository_ref is not None: self.artifact_repository_ref = artifact_repository_ref if automount_service_account_token is not None: self.automount_service_account_token = automount_service_account_token if dns_config is not None: self.dns_config = dns_config if dns_policy is not None: self.dns_policy = dns_policy if entrypoint is not None: self.entrypoint = entrypoint if executor is not None: self.executor = executor if host_aliases is not None: self.host_aliases = host_aliases if host_network is not None: self.host_network = host_network if image_pull_secrets is not None: self.image_pull_secrets = image_pull_secrets if metrics is not None: self.metrics = metrics if node_selector is not None: self.node_selector = node_selector if on_exit is not None: self.on_exit = on_exit if parallelism is not None: self.parallelism = parallelism if pod_disruption_budget is not None: self.pod_disruption_budget = pod_disruption_budget if pod_gc is not None: self.pod_gc = pod_gc if pod_priority is not None: self.pod_priority = pod_priority if pod_priority_class_name is not None: self.pod_priority_class_name = pod_priority_class_name if pod_spec_patch is not None: self.pod_spec_patch = pod_spec_patch if priority is not None: self.priority = priority if retry_strategy is not None: self.retry_strategy = retry_strategy if scheduler_name is not None: self.scheduler_name = scheduler_name if security_context is not None: self.security_context = security_context if service_account_name is not None: self.service_account_name = service_account_name if shutdown is not None: self.shutdown = shutdown if suspend is not None: self.suspend = suspend if synchronization is not None: self.synchronization = synchronization if templates is not None: self.templates = templates if tolerations 
is not None: self.tolerations = tolerations if ttl_seconds_after_finished is not None: self.ttl_seconds_after_finished = ttl_seconds_after_finished if ttl_strategy is not None: self.ttl_strategy = ttl_strategy if volume_claim_gc is not None: self.volume_claim_gc = volume_claim_gc if volume_claim_templates is not None: self.volume_claim_templates = volume_claim_templates if volumes is not None: self.volumes = volumes if workflow_metadata is not None: self.workflow_metadata = workflow_metadata if workflow_template_ref is not None: self.workflow_template_ref = workflow_template_ref @property def active_deadline_seconds(self): return self._active_deadline_seconds @active_deadline_seconds.setter def active_deadline_seconds(self, active_deadline_seconds): self._active_deadline_seconds = active_deadline_seconds @property def affinity(self): return self._affinity @affinity.setter def affinity(self, affinity): self._affinity = affinity @property def arguments(self): return self._arguments @arguments.setter def arguments(self, arguments): self._arguments = arguments @property def artifact_repository_ref(self): return self._artifact_repository_ref @artifact_repository_ref.setter def artifact_repository_ref(self, artifact_repository_ref): self._artifact_repository_ref = artifact_repository_ref @property def automount_service_account_token(self): return self._automount_service_account_token @automount_service_account_token.setter def automount_service_account_token(self, automount_service_account_token): self._automount_service_account_token = automount_service_account_token @property def dns_config(self): return self._dns_config @dns_config.setter def dns_config(self, dns_config): self._dns_config = dns_config @property def dns_policy(self): return self._dns_policy @dns_policy.setter def dns_policy(self, dns_policy): self._dns_policy = dns_policy @property def entrypoint(self): return self._entrypoint @entrypoint.setter def entrypoint(self, entrypoint): self._entrypoint = entrypoint @property def executor(self): return self._executor @executor.setter def executor(self, executor): self._executor = executor @property def host_aliases(self): return self._host_aliases @host_aliases.setter def host_aliases(self, host_aliases): self._host_aliases = host_aliases @property def host_network(self): return self._host_network @host_network.setter def host_network(self, host_network): self._host_network = host_network @property def image_pull_secrets(self): return self._image_pull_secrets @image_pull_secrets.setter def image_pull_secrets(self, image_pull_secrets): self._image_pull_secrets = image_pull_secrets @property def metrics(self): return self._metrics @metrics.setter def metrics(self, metrics): self._metrics = metrics @property def node_selector(self): return self._node_selector @node_selector.setter def node_selector(self, node_selector): self._node_selector = node_selector @property def on_exit(self): return self._on_exit @on_exit.setter def on_exit(self, on_exit): self._on_exit = on_exit @property def parallelism(self): return self._parallelism @parallelism.setter def parallelism(self, parallelism): self._parallelism = parallelism @property def pod_disruption_budget(self): return self._pod_disruption_budget @pod_disruption_budget.setter def pod_disruption_budget(self, pod_disruption_budget): self._pod_disruption_budget = pod_disruption_budget @property def pod_gc(self): return self._pod_gc @pod_gc.setter def pod_gc(self, pod_gc): self._pod_gc = pod_gc @property def pod_priority(self): return 
self._pod_priority @pod_priority.setter def pod_priority(self, pod_priority): self._pod_priority = pod_priority @property def pod_priority_class_name(self): return self._pod_priority_class_name @pod_priority_class_name.setter def pod_priority_class_name(self, pod_priority_class_name): self._pod_priority_class_name = pod_priority_class_name @property def pod_spec_patch(self): return self._pod_spec_patch @pod_spec_patch.setter def pod_spec_patch(self, pod_spec_patch): self._pod_spec_patch = pod_spec_patch @property def priority(self): return self._priority @priority.setter
Apache License 2.0
svenito/brew-tools
brew_tools/brew_maths.py
apparent_attenuation
python
def apparent_attenuation(og, fg):
    return 1.0 - to_plato(fg) / to_plato(og)
Calculate the apparent attenuation from the current and original gravity.

via http://realbeer.com/spencer/attenuation.html
AA = 1 - AE / OE

:arg og: The original gravity of the wort (1.0 to 1.2)
:arg fg: The current gravity of the beer
:returns: The apparent attenuation as a decimal
    (multiply by 100 to get percentage value)
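A quick usage sketch of the AA = 1 - AE / OE formula; it assumes the package is importable as brew_tools.brew_maths, matching the path shown above, and the gravities are illustrative values.

from brew_tools.brew_maths import apparent_attenuation

# Typical ale: original gravity 1.050, final gravity 1.010.
aa = apparent_attenuation(og=1.050, fg=1.010)
print("Apparent attenuation: {:.1f}%".format(aa * 100))  # roughly 79%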
https://github.com/svenito/brew-tools/blob/23a2e6b4528b4e8ad5a046e80555c961342f9ea4/brew_tools/brew_maths.py#L253-L266
import math def oz_to_g(oz): return oz * 28.34952 def g_to_oz(g): return g / 28.34952 def lbs_to_oz(lbs): return lbs * 16 def c_to_f(c): return c * 1.8 + 32.0 def f_to_c(f): return (f - 32.0) / 1.8 def l_to_g(liter): return liter * 0.26417 def g_to_l(gallon): return gallon / 0.26417 def l_to_q(liter): return liter * 1.056688 def kg_to_lbs(kg): return kg * 2.204623 def lbs_to_kg(lbs): return lbs / 2.204623 def to_brix(value): brix = ((182.4601 * value - 775.6821) * value + 1262.7794) * value - 669.5622 return brix def to_plato(sg): plato = (-1 * 616.868) + (1111.14 * sg) - (630.272 * sg ** 2) + (135.997 * sg ** 3) return plato def to_sg(plato): return 1 + (plato / (258.6 - ((plato / 258.2) * 227.1))) def ebc_to_srm(ebc): return ebc * 0.508 def ebc_to_l(ebc): return srm_to_l(ebc_to_srm(ebc)) def srm_to_ebc(srm): return srm / 0.508 def srm_to_l(srm): return (srm + 0.76) / 1.3546 def l_to_srm(lovibond): return 1.3546 * lovibond - 0.76 def l_to_ebc(lovibond): return srm_to_ebc(l_to_srm(lovibond)) def adjust_gravity(og, fg): adjusted_fg = (1.0000 - 0.00085683 * to_brix(og)) + 0.0034941 * to_brix(fg) return adjusted_fg def abv(og, fg, adjust): if adjust: fg = adjust_gravity(og, fg) return (og - fg) * 131.25 def keg_psi(temp, co2): henry_coeff = 0.01821 + 0.09011 * math.exp(-(temp - 32) / 43.11) pressure = ((co2 + 0.003342) / henry_coeff) - 14.695 return pressure def priming(temp, beer_vol, co2): return ( 15.195 * beer_vol * (co2 - 3.0378 + (0.050062 * temp) - (0.00026555 * (temp ** 2))) ) def infusion(ratio, curr_temp, new_temp, water_temp, grain): mash_water = grain * ratio return ((new_temp - curr_temp) * (0.2 * grain + mash_water)) / ( water_temp - new_temp ) def pre_boil_dme(points, cur_vol): return lbs_to_oz(points * (1 / (44 / cur_vol)))
MIT License
pyansys/pyaedt
pyaedt/modules/SolveSetup.py
SetupCircuit.p_app
python
def p_app(self):
    return self._app
AEDT app module for setting up the analysis.
https://github.com/pyansys/pyaedt/blob/817c7d706a2d10942470ccac959645e16e9ea971/pyaedt/modules/SolveSetup.py#L493-L495
from __future__ import absolute_import import warnings from collections import OrderedDict import os.path from ..generic.general_methods import aedt_exception_handler, generate_unique_name from .SetupTemplates import SweepHFSS, SweepQ3D, SetupKeys, SweepHFSS3DLayout from ..generic.DataHandlers import tuple2dict, dict2arg class Setup(object): @property def p_app(self): return self._app @p_app.setter def p_app(self, value): self._app = value @property def omodule(self): return self._app.oanalysis def __repr__(self): return "SetupName " + self.name + " with " + str(len(self.sweeps)) + " Sweeps" def __init__(self, app, solutiontype, setupname="MySetupAuto", isnewsetup=True): self._app = None self.p_app = app if isinstance(solutiontype, int): self.setuptype = solutiontype else: self.setuptype = SetupKeys.defaultSetups[solutiontype] self.name = setupname self.props = {} self.sweeps = [] if isnewsetup: setup_template = SetupKeys.SetupTemplates[self.setuptype] for t in setup_template: tuple2dict(t, self.props) else: try: setups_data = self.p_app.design_properties["AnalysisSetup"]["SolveSetups"] if setupname in setups_data: setup_data = setups_data[setupname] if "Sweeps" in setup_data and self.setuptype not in [ 0, 7, ]: if self.setuptype <= 4: app = setup_data["Sweeps"] app.pop("NextUniqueID", None) app.pop("MoveBackForward", None) app.pop("MoveBackwards", None) for el in app: if isinstance(app[el], (OrderedDict, dict)): self.sweeps.append(SweepHFSS(self.omodule, setupname, el, props=app[el])) else: app = setup_data["Sweeps"] for el in app: if isinstance(app[el], (OrderedDict, dict)): self.sweeps.append(SweepQ3D(self.omodule, setupname, el, props=app[el])) setup_data.pop("Sweeps", None) self.props = OrderedDict(setup_data) except: self.props = OrderedDict() @aedt_exception_handler def create(self): soltype = SetupKeys.SetupNames[self.setuptype] arg = ["NAME:" + self.name] dict2arg(self.props, arg) self.omodule.InsertSetup(soltype, arg) return arg @aedt_exception_handler def update(self, update_dictionary=None): if update_dictionary: for el in update_dictionary: self.props[el] = update_dictionary[el] arg = ["NAME:" + self.name] dict2arg(self.props, arg) self.omodule.EditSetup(self.name, arg) return True @aedt_exception_handler def _expression_cache( self, expression_list, report_type_list, intrinsics_list, isconvergence_list, isrelativeconvergence, conv_criteria, ): if isrelativeconvergence: userelative = 1 else: userelative = 0 list_data = ["NAME:ExpressionCache"] if type(expression_list) is list: i = 0 while i < len(expression_list): expression = expression_list[i] name = expression.replace("(", "_") + "1" name = name.replace(")", "_") name = name.replace(" ", "_") name = name.replace(".", "_") name = name.replace("/", "_") name = name.replace("*", "_") name = name.replace("+", "_") name = name.replace("-", "_") if type(report_type_list) is list: report_type = report_type_list[i] else: report_type = report_type_list if type(isconvergence_list) is list: isconvergence = isconvergence_list[i] else: isconvergence = isconvergence_list if type(intrinsics_list) is list: intrinsics = intrinsics_list[i] else: intrinsics = intrinsics_list list_data.append( [ "NAME:CacheItem", "Title:=", name, "Expression:=", expression, "Intrinsics:=", intrinsics, "IsConvergence:=", isconvergence, "UseRelativeConvergence:=", 1, "MaxConvergenceDelta:=", 1, "MaxConvergeValue:=", "0.01", "ReportType:=", report_type, ["NAME:ExpressionContext"], ] ) i += 1 else: name = expression_list.replace("(", "") + "1" name = 
name.replace(")", "") name = name.replace(" ", "") name = name.replace(",", "_") list_data.append( [ "NAME:CacheItem", "Title:=", name, "Expression:=", expression_list, "Intrinsics:=", intrinsics_list, "IsConvergence:=", isconvergence_list, "UseRelativeConvergence:=", userelative, "MaxConvergenceDelta:=", conv_criteria, "MaxConvergeValue:=", str(conv_criteria), "ReportType:=", report_type_list, ["NAME:ExpressionContext"], ] ) return list_data @aedt_exception_handler def enable_expression_cache( self, expressions, report_type="Fields", intrinsics="", isconvergence=True, isrelativeconvergence=True, conv_criteria=1, ): arg = ["NAME:" + self.name] dict2arg(self.props, arg) expression_cache = self._expression_cache( expressions, report_type, intrinsics, isconvergence, isrelativeconvergence, conv_criteria ) arg.append(expression_cache) self.omodule.EditSetup(self.name, arg) return True @aedt_exception_handler def add_derivatives(self, derivative_list): arg = ["NAME:" + self.name] dict2arg(self.props, arg) arg.append("VariablesForDerivatives:=") arg.append(derivative_list) self.omodule.EditSetup(self.name, arg) return True @aedt_exception_handler def enable(self, setup_name=None): if not setup_name: setup_name = self.name self.omodule.EditSetup(setup_name, ["NAME:" + setup_name, "IsEnabled:=", True]) return True @aedt_exception_handler def disable(self, setup_name=None): if not setup_name: setup_name = self.name self.omodule.EditSetup(setup_name, ["NAME:" + setup_name, "IsEnabled:", False]) return True @aedt_exception_handler def add_sweep(self, sweepname=None, sweeptype="Interpolating"): if not sweepname: sweepname = generate_unique_name("Sweep") if self.setuptype <= 4: sweep_n = SweepHFSS(self.omodule, self.name, sweepname, sweeptype) else: sweep_n = SweepQ3D(self.omodule, self.name, sweepname, sweeptype) sweep_n.create() self.sweeps.append(sweep_n) return sweep_n @aedt_exception_handler def add_mesh_link(self, design_name, solution_name, parameters_dict, project_name="This Project*"): meshlinks = self.props["MeshLink"] meshlinks["ImportMesh"] = True meshlinks["Project"] = project_name meshlinks["Product"] = "ElectronicsDesktop" meshlinks["Design"] = design_name meshlinks["Soln"] = solution_name meshlinks["Params"] = OrderedDict({}) for el in parameters_dict: if el in list(self._app.available_variations.nominal_w_values_dict.keys()): meshlinks["Params"][el] = el else: meshlinks["Params"][el] = parameters_dict[el] meshlinks["ForceSourceToSolve"] = True meshlinks["PreservePartnerSoln"] = True meshlinks["PathRelativeTo"] = "TargetProject" meshlinks["ApplyMeshOp"] = True self.update() return True class SetupCircuit(object): def __init__(self, app, solutiontype, setupname="MySetupAuto", isnewsetup=True): self._app = None self.p_app = app if isinstance(solutiontype, int): self.setuptype = solutiontype else: self.setuptype = SetupKeys.defaultSetups[solutiontype] self._Name = "LinearFrequency" self.props = {} if isnewsetup: setup_template = SetupKeys.SetupTemplates[self.setuptype] for t in setup_template: tuple2dict(t, self.props) else: try: setups_data = self.p_app.design_properties["SimSetups"]["SimSetup"] if type(setups_data) is not list: setups_data = [setups_data] for setup in setups_data: if setupname == setup["Name"]: setup_data = setup setup_data.pop("Sweeps", None) self.props = setup_data except: self.props = {} self.name = setupname @property def name(self): return self._Name @name.setter def name(self, name): self._Name = name self.props["Name"] = name @property
MIT License
legrego/homeassistant-elasticsearch
custom_components/elasticsearch/es_gateway.py
ElasticsearchGateway.async_init
python
async def async_init(self):
    LOGGER.debug("Creating Elasticsearch client for %s", self._url)
    self.client = self._create_es_client()
    self.es_version = ElasticsearchVersion(self.client)
    await self.es_version.async_init()

    if not self.es_version.is_supported_version():
        LOGGER.fatal(
            "UNSUPPORTED VERSION OF ELASTICSEARCH DETECTED: %s.",
            self.es_version.to_string(),
        )
        raise UnsupportedVersion()

    LOGGER.debug("Gateway initialized")
I/O-bound part of initialization: creates the Elasticsearch client, detects the server version, and raises UnsupportedVersion if the cluster version is not supported.
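A hedged sketch of driving this initializer from an event loop. It assumes the component package is importable as custom_components.elasticsearch.es_gateway (the path above) and that a plain dict with Home Assistant-style keys ("url", "timeout", "verify_ssl", ...) is acceptable as config; the endpoint is a placeholder and no auth is configured.

import asyncio

from custom_components.elasticsearch.es_gateway import ElasticsearchGateway

async def main():
    # Keys mirror the CONF_* constants read in __init__; values are placeholders.
    gateway = ElasticsearchGateway({
        "url": "http://localhost:9200",
        "timeout": 30,
        "verify_ssl": True,
    })
    await gateway.async_init()   # raises UnsupportedVersion on unsupported clusters
    await gateway.client.close()

asyncio.run(main())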
https://github.com/legrego/homeassistant-elasticsearch/blob/ba7a4a78bd9209993628b99511a3c97108614f63/custom_components/elasticsearch/es_gateway.py#L84-L99
import aiohttp from homeassistant.const import ( CONF_PASSWORD, CONF_TIMEOUT, CONF_URL, CONF_USERNAME, CONF_VERIFY_SSL, ) from homeassistant.helpers.typing import HomeAssistantType from .const import CONF_SSL_CA_PATH from .errors import ( AuthenticationRequired, CannotConnect, ElasticException, InsufficientPrivileges, UnsupportedVersion, UntrustedCertificate, ) from .es_serializer import get_serializer from .es_version import ElasticsearchVersion from .logger import LOGGER class ElasticsearchGateway: def __init__(self, config): self._url = config.get(CONF_URL) self._timeout = config.get(CONF_TIMEOUT) self._username = config.get(CONF_USERNAME) self._password = config.get(CONF_PASSWORD) self._verify_certs = config.get(CONF_VERIFY_SSL, True) self._ca_certs = config.get(CONF_SSL_CA_PATH) self.client = None self.es_version = None async def check_connection(self, hass: HomeAssistantType): from elasticsearch import ( AuthenticationException, AuthorizationException, ConnectionError, ElasticsearchException, SSLError, ) client = None is_supported_version = True try: client = self._create_es_client() es_version = ElasticsearchVersion(client) await es_version.async_init() is_supported_version = es_version.is_supported_version() except SSLError as err: raise UntrustedCertificate(err) except ConnectionError as err: if isinstance( err.info, aiohttp.client_exceptions.ClientConnectorCertificateError ): raise UntrustedCertificate(err) raise CannotConnect(err) except AuthenticationException as err: raise AuthenticationRequired(err) except AuthorizationException as err: raise InsufficientPrivileges(err) except ElasticsearchException as err: raise ElasticException(err) except Exception as err: raise ElasticException(err) finally: if client: await client.close() client = None if not is_supported_version: raise UnsupportedVersion()
MIT License
btaba/yarlp
yarlp/experiment/plotting.py
plot_data
python
def plot_data(data, value, time, run, condition, title='', ax=None, ci=95):
    if isinstance(data, list):
        data = pd.concat(data, ignore_index=True)
    sns.set(style="darkgrid", font_scale=1.5)
    plot = sns.tsplot(
        data=data, time=time, value=value, unit=run, condition=condition,
        ax=ax, ci=ci)
    plt.title(title)
    return plot
Plot time series data using sns.tsplot

Params
----------
data (pd.DataFrame):
value (str): value column
time (str): time column
condition (str): sns.tsplot condition
title (str):
ax (matplotlib axis):
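A minimal usage sketch, assuming yarlp is importable and a seaborn version that still ships tsplot (it is deprecated/removed in newer releases); the DataFrame columns below are invented for illustration.

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from yarlp.experiment.plotting import plot_data

# Two runs of a fake metric over 100 timesteps.
frames = []
for run in range(2):
    frames.append(pd.DataFrame({
        'timestep': np.arange(100),
        'reward': np.cumsum(np.random.randn(100)),
        'run': run,
        'algo': 'baseline',
    }))

plot_data(frames, value='reward', time='timestep', run='run',
          condition='algo', title='Training curve')
plt.show()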
https://github.com/btaba/yarlp/blob/e6bc70afe32f8617f56180d60d6a100c83868119/yarlp/experiment/plotting.py#L8-L28
import pandas as pd from matplotlib import pyplot as plt import seaborn as sns import numpy as np
MIT License
openstack/tempest
tempest/lib/services/identity/v2/users_client.py
UsersClient.show_user
python
def show_user(self, user_id):
    resp, body = self.get("users/%s" % user_id)
    self.expected_success(200, resp.status)
    body = json.loads(body)
    return rest_client.ResponseBody(resp, body)
GET a user. For a full list of available parameters, please refer to the official API reference: https://docs.openstack.org/api-ref/identity/v2-admin/index.html#show-user-details-admin-endpoint
https://github.com/openstack/tempest/blob/d458bf329739ae7b7652d329e6415ad6ba54e490/tempest/lib/services/identity/v2/users_client.py#L49-L59
from urllib import parse as urllib from oslo_serialization import jsonutils as json from tempest.lib.common import rest_client class UsersClient(rest_client.RestClient): api_version = "v2.0" def create_user(self, **kwargs): post_body = json.dumps({'user': kwargs}) resp, body = self.post('users', post_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def update_user(self, user_id, **kwargs): put_body = json.dumps({'user': kwargs}) resp, body = self.put('users/%s' % user_id, put_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body)
Apache License 2.0
stefanvdweide/flask-api-template
flask_api_template.py
remove_old_jwts
python
def remove_old_jwts():
    from app.models import RevokedTokenModel

    delete_date = datetime.utcnow() - relativedelta(days=5)
    old_tokens = (
        db.session.query(RevokedTokenModel)
        .filter(RevokedTokenModel.date_revoked < delete_date)
        .all()
    )
    if old_tokens:
        for token in old_tokens:
            db.session.delete(token)
        db.session.commit()
        print(
            "{} old tokens have been removed from the database".format(len(old_tokens))
        )
    else:
        print("No JWT's older than 5 days have been found")
    return old_tokens
Scan the database for JWT tokens in the Revoked Token table older than 5 days and remove them.
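The interesting part is the five-day cutoff; a standalone sketch of the same window computation, independent of the Flask app and models above:

from datetime import datetime
from dateutil.relativedelta import relativedelta

delete_date = datetime.utcnow() - relativedelta(days=5)

# Any revoked token whose date_revoked is older than this is eligible for deletion,
# e.g. with SQLAlchemy: query.filter(RevokedTokenModel.date_revoked < delete_date)
print("Deleting revoked tokens issued before", delete_date.isoformat())

Because the original function is registered with @app.cli.command(), it would normally be invoked as a Flask CLI command whose name Click derives from the function name (likely `flask remove-old-jwts`).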
https://github.com/stefanvdweide/flask-api-template/blob/68330e797c94cdfffb16bf547a4b83935be9edba/flask_api_template.py#L11-L40
from app import create_app from app import db from datetime import datetime from dateutil.relativedelta import relativedelta app = create_app() @app.cli.command()
MIT License
jtambasco/modesolverpy
modesolverpy/structure_base.py
Slabs.n
python
def n(self):
    try:
        n_mat = self.slabs['0'].n
        for s in range(1, self.slab_count):
            n_mat = np.vstack((self.slabs[str(s)].n, n_mat))
    except KeyError:
        n_mat = None
    return n_mat
np.array: The refractive index profile matrix of the current slab.
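To see how the per-slab profiles get combined (each later slab is stacked on top of the running matrix via np.vstack), a small sketch using the Slabs API from the context below; it assumes modesolverpy is installed, and the dimensions and indices are arbitrary.

from modesolverpy.structure_base import Slabs

slabs = Slabs(wavelength=1.55, y_step=0.02, x_step=0.02, x_max=2.0)
slabs.add_slab(0.5, n_background=1.444)   # slab '0' (bottom cladding)
slabs.add_slab(0.2, n_background=3.476)   # slab '1' (core, stacked on top)
slabs.add_slab(0.5, n_background=1.444)   # slab '2' (top cladding)

n = slabs.n   # combined refractive-index matrix, rows stacked top-to-bottom
print(n.shape)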
https://github.com/jtambasco/modesolverpy/blob/ebdd70087f8e1c46f36dd7c6ae70ad59d7c38185/modesolverpy/structure_base.py#L418-L429
import numpy as np from scipy import interpolate import os import sys import subprocess import abc from six import with_metaclass try: devnull = open(os.devnull, 'w') subprocess.call(['gnuplot', '--version'], stdout=devnull, stderr=devnull) import gnuplotpy as gp MPL = False except: import matplotlib.pylab as plt MPL = True def use_gnuplot(): global gp import gnuplotpy as gp global MPL MPL = False def use_matplotlib(): global plt import matplotlib.pylab as plt global MPL MPL = True class _AbstractStructure(with_metaclass(abc.ABCMeta)): @abc.abstractproperty def n(self): pass @property def x_pts(self): return int((self.x_max - self.x_min) / self.x_step + 1) @property def y_pts(self): return int((self.y_max - self.y_min) / self.y_step) @property def x_ctr(self): return 0.5*(self.x_max + self.x_min) @property def y_ctr(self): return 0.5*(self.y_max + self.y_min) @property def xc(self): return 0.5*(self.x[1:] + self.x[:-1]) @property def yc(self): return 0.5*(self.y[1:] + self.y[:-1]) @property def xc_pts(self): return self.x_pts - 1 @property def yc_pts(self): return self.y_pts - 1 @property def xc_min(self): return self.xc[0] @property def xc_max(self): return self.xc[-1] @property def yc_min(self): return self.yc[0] @property def yc_max(self): return self.yc[-1] @property def x(self): if None not in (self.x_min, self.x_max, self.x_step) and self.x_min != self.x_max: x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step) else: x = np.array([]) return x @property def y(self): if None not in (self.y_min, self.y_max, self.y_step) and self.y_min != self.y_max: y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step) else: y = np.array([]) return y @property def eps(self): return self.n**2 @property def eps_func(self): interp_real = interpolate.interp2d(self.x, self.y, self.eps.real) interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag) interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y) return interp @property def n_func(self): return interpolate.interp2d(self.x, self.y, self.n) def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left, x_top_right, x_bot_left, n_material): angle = np.radians(angle) trap_len = (y_top_right - y_bot_left) / np.tan(angle) num_x_iterations = trap_len / self.x_step y_per_iteration = num_x_iterations / self.y_pts lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5) rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5) running_removal_float = y_per_iteration for i, _ in enumerate(xy_mask): if running_removal_float >= 1: removal_int = int(round(running_removal_float)) lhs_x_start_index -= removal_int rhs_x_stop_index += removal_int running_removal_float -= removal_int running_removal_float += y_per_iteration xy_mask[i][:lhs_x_start_index] = False xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True self.n[xy_mask] = n_material return self.n def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right, n_material, angle=0): x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right) y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right) xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size)) self.n[xy_mask] = n_material if angle: self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left, x_top_right, x_bot_left, n_material) return self.n def write_to_file(self, filename='material_index.dat', plot=True): path = os.path.dirname(sys.modules[__name__].__file__) + '/' with open(filename, 'w') as fs: for n_row in np.abs(self.n[::-1]): n_str = 
','.join([str(v) for v in n_row]) fs.write(n_str+'\n') if plot: filename_image_prefix, _ = os.path.splitext(filename) filename_image = filename_image_prefix + '.png' args = { 'title': 'Refractive Index Profile', 'x_pts': self.x_pts, 'y_pts': self.y_pts, 'x_min': self.x_min, 'x_max': self.x_max, 'y_min': self.y_min, 'y_max': self.y_max, 'filename_data': filename, 'filename_image': filename_image } if MPL: heatmap = np.loadtxt(args['filename_data'], delimiter=',') plt.clf() plt.title(args['title']) plt.xlabel('$x$') plt.ylabel('$y$') plt.imshow(np.flipud(heatmap), extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']), aspect="auto") plt.colorbar() plt.savefig(filename_image) else: gp.gnuplot(path+'structure.gpi', args) def __str__(self): return self.n.__str__() class Structure(_AbstractStructure): def __init__(self, x_step, y_step, x_max, y_max, x_min=0., y_min=0., n_background=1.): self.x_min = x_min self.x_max = x_max self.y_min = y_min self.y_max = y_max self.x_step = x_step self.y_step = y_step self.n_background = n_background self._n = np.ones((self.y.size,self.x.size), 'complex_') * n_background @property def n(self): return self._n class Slabs(_AbstractStructure): def __init__(self, wavelength, y_step, x_step, x_max, x_min=0.): _AbstractStructure.__init__(self) self._wl = wavelength self.x_min = x_min self.x_max = x_max self.x_step = x_step self.y_step = y_step self.y_min = 0 self.slabs = {} self.slab_count = 0 self._next_start = 0. def add_slab(self, height, n_background=1., position='top'): assert position in ('top', 'bottom') name = str(self.slab_count) if not callable(n_background): n_back = lambda wl: n_background else: n_back = n_background height_discretised = self.y_step*((height // self.y_step) + 1) y_min = self._next_start y_max = y_min + height_discretised self.slabs[name] = Slab(name, self.x_step, self.y_step, self.x_max, y_max, self.x_min, y_min, n_back, self._wl) self.y_max = y_max self._next_start = y_min + height_discretised self.slab_count += 1 if position == 'bottom': slabs = {} for k in self.slabs.keys(): slabs[str(int(k)+1)] = self.slabs[k] slabs['0'] = slabs.pop(str(self.slab_count)) self.slabs = slabs return name def change_wavelength(self, wavelength): for name, slab in self.slabs.items(): const_args = slab._const_args mat_args = slab._mat_params const_args[8] = wavelength s = Slab(*const_args) for mat_arg in mat_args: s.add_material(*mat_arg) self.slabs[name] = s self._wl = wavelength @property
MIT License
openstack/openstacksdk
openstack/baremetal/v1/_proxy.py
Proxy.wait_for_nodes_provision_state
python
def wait_for_nodes_provision_state(self, nodes, expected_state,
                                   timeout=None,
                                   abort_on_failed_state=True,
                                   fail=True):
    log_nodes = ', '.join(n.id if isinstance(n, _node.Node) else n
                          for n in nodes)

    finished = []
    failed = []
    remaining = nodes
    try:
        for count in utils.iterate_timeout(
                timeout,
                "Timeout waiting for nodes %(nodes)s to reach "
                "target state '%(state)s'" % {'nodes': log_nodes,
                                              'state': expected_state}):
            nodes = [self.get_node(n) for n in remaining]
            remaining = []
            for n in nodes:
                try:
                    if n._check_state_reached(self, expected_state,
                                              abort_on_failed_state):
                        finished.append(n)
                    else:
                        remaining.append(n)
                except exceptions.ResourceFailure:
                    if fail:
                        raise
                    else:
                        failed.append(n)

            if not remaining:
                if fail:
                    return finished
                else:
                    return _node.WaitResult(finished, failed, [])

            self.log.debug(
                'Still waiting for nodes %(nodes)s to reach state '
                '"%(target)s"',
                {'nodes': ', '.join(n.id for n in remaining),
                 'target': expected_state})
    except exceptions.ResourceTimeout:
        if fail:
            raise
        else:
            return _node.WaitResult(finished, failed, remaining)
Wait for the nodes to reach the expected state.

:param nodes: List of nodes - name, ID or
    :class:`~openstack.baremetal.v1.node.Node` instance.
:param expected_state: The expected provisioning state to reach.
:param timeout: If ``wait`` is set to ``True``, specifies how much (in
    seconds) to wait for the expected state to be reached. The value of
    ``None`` (the default) means no client-side timeout.
:param abort_on_failed_state: If ``True`` (the default), abort waiting
    if any node reaches a failure state which does not match the expected
    one. Note that the failure state for ``enroll`` -> ``manageable``
    transition is ``enroll`` again.
:param fail: If set to ``False`` this call will not raise on timeouts
    and provisioning failures.

:return: If `fail` is ``True`` (the default), the list of
    :class:`~openstack.baremetal.v1.node.Node` instances that reached
    the requested state. If `fail` is ``False``, a
    :class:`~openstack.baremetal.v1.node.WaitResult` named tuple.
:raises: :class:`~openstack.exceptions.ResourceFailure` if a node
    reaches an error state and ``abort_on_failed_state`` is ``True``.
:raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout.
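A hedged usage sketch against an existing connection; the cloud name, node identifiers, and timeout below are placeholders, and it assumes the nodes are in a state from which 'provide' is a valid transition.

import openstack

conn = openstack.connect(cloud='mycloud')   # cloud name is a placeholder
nodes = ['node-1', 'node-2']                # names or IDs

for node in nodes:
    conn.baremetal.set_node_provision_state(node, 'provide')

# Block until every node reaches 'available', raising on failure or timeout.
ready = conn.baremetal.wait_for_nodes_provision_state(
    nodes, 'available', timeout=1800)
print([n.name for n in ready])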
https://github.com/openstack/openstacksdk/blob/b38f16e0e8f47f5bdbfd57506869bb6ee2533005/openstack/baremetal/v1/_proxy.py#L432-L501
from openstack.baremetal.v1 import _common from openstack.baremetal.v1 import allocation as _allocation from openstack.baremetal.v1 import chassis as _chassis from openstack.baremetal.v1 import conductor as _conductor from openstack.baremetal.v1 import deploy_templates as _deploytemplates from openstack.baremetal.v1 import driver as _driver from openstack.baremetal.v1 import node as _node from openstack.baremetal.v1 import port as _port from openstack.baremetal.v1 import port_group as _portgroup from openstack.baremetal.v1 import volume_connector as _volumeconnector from openstack.baremetal.v1 import volume_target as _volumetarget from openstack import exceptions from openstack import proxy from openstack import utils class Proxy(proxy.Proxy): retriable_status_codes = _common.RETRIABLE_STATUS_CODES def _get_with_fields(self, resource_type, value, fields=None): res = self._get_resource(resource_type, value) kwargs = {} if fields: kwargs['fields'] = _common.fields_type(fields, resource_type) return res.fetch( self, error_message="No {resource_type} found for {value}".format( resource_type=resource_type.__name__, value=value), **kwargs) def chassis(self, details=False, **query): return _chassis.Chassis.list(self, details=details, **query) def create_chassis(self, **attrs): return self._create(_chassis.Chassis, **attrs) def find_chassis(self, name_or_id, ignore_missing=True): return self._find(_chassis.Chassis, name_or_id, ignore_missing=ignore_missing) def get_chassis(self, chassis, fields=None): return self._get_with_fields(_chassis.Chassis, chassis, fields=fields) def update_chassis(self, chassis, **attrs): return self._update(_chassis.Chassis, chassis, **attrs) def patch_chassis(self, chassis, patch): return self._get_resource(_chassis.Chassis, chassis).patch(self, patch) def delete_chassis(self, chassis, ignore_missing=True): return self._delete(_chassis.Chassis, chassis, ignore_missing=ignore_missing) def drivers(self, details=False, **query): if details: query['details'] = True return self._list(_driver.Driver, **query) def get_driver(self, driver): return self._get(_driver.Driver, driver) def list_driver_vendor_passthru(self, driver): driver = self.get_driver(driver) return driver.list_vendor_passthru(self) def call_driver_vendor_passthru(self, driver, verb: str, method: str, body=None): driver = self.get_driver(driver) return driver.call_vendor_passthru(self, verb, method, body) def nodes(self, details=False, **query): return _node.Node.list(self, details=details, **query) def create_node(self, **attrs): return self._create(_node.Node, **attrs) def find_node(self, name_or_id, ignore_missing=True): return self._find(_node.Node, name_or_id, ignore_missing=ignore_missing) def get_node(self, node, fields=None): return self._get_with_fields(_node.Node, node, fields=fields) def update_node(self, node, retry_on_conflict=True, **attrs): res = self._get_resource(_node.Node, node, **attrs) return res.commit(self, retry_on_conflict=retry_on_conflict) def patch_node(self, node, patch, reset_interfaces=None, retry_on_conflict=True): res = self._get_resource(_node.Node, node) return res.patch(self, patch, retry_on_conflict=retry_on_conflict, reset_interfaces=reset_interfaces) def set_node_provision_state(self, node, target, config_drive=None, clean_steps=None, rescue_password=None, wait=False, timeout=None, deploy_steps=None): res = self._get_resource(_node.Node, node) return res.set_provision_state(self, target, config_drive=config_drive, clean_steps=clean_steps, rescue_password=rescue_password, 
wait=wait, timeout=timeout, deploy_steps=deploy_steps) def set_node_boot_device(self, node, boot_device, persistent=False): res = self._get_resource(_node.Node, node) return res.set_boot_device(self, boot_device, persistent=persistent) def set_node_boot_mode(self, node, target): res = self._get_resource(_node.Node, node) return res.set_boot_mode(self, target) def set_node_secure_boot(self, node, target): res = self._get_resource(_node.Node, node) return res.set_secure_boot(self, target)
Apache License 2.0
hyperledger/fabric-sdk-py
hfc/fabric/channel/channel.py
Channel._validate_state
python
def _validate_state(self):
    if not self._initialized:
        raise ValueError(
            "Channel {} has not been initialized.".format(self._name))
Validate channel state. :raises ValueError:
https://github.com/hyperledger/fabric-sdk-py/blob/8ee33a8981887e37950dc0f36a7ec63b3a5ba5c3/hfc/fabric/channel/channel.py#L192-L201
import logging import random import sys import re from hashlib import sha256 from hfc.fabric.block_decoder import BlockDecoder from hfc.fabric.transaction.tx_proposal_request import create_tx_prop_req from hfc.protos.common import common_pb2 from hfc.protos.orderer import ab_pb2 from hfc.protos.peer import chaincode_pb2, proposal_pb2 from hfc.protos.discovery import protocol_pb2 from hfc.protos.utils import create_cc_spec, create_seek_info, create_seek_payload, create_envelope from hfc.util import utils from hfc.util.utils import proto_str, current_timestamp, proto_b, build_header, build_channel_header, build_cc_proposal, send_transaction_proposal, pem_to_der from hfc.util.consts import SYSTEM_CHANNEL_NAME, CC_INSTANTIATE, CC_UPGRADE, CC_INVOKE, CC_QUERY, CC_TYPE_GOLANG from .channel_eventhub import ChannelEventHub from hfc.util.collection_config import build_collection_config_proto from hfc.util.policies import build_policy _logger = logging.getLogger(__name__) _logger.setLevel(logging.DEBUG) class Channel(object): def __init__(self, name, client): pat = "^[a-z][a-z0-9.-]*$" if not re.match(pat, name): raise ValueError( "ERROR: Channel name is invalid. It should be a \ string and match {}, but got {}".format(pat, name) ) self._name = name self._client = client self._orderers = {} self._peers = {} self._anchor_peers = [] self._kafka_brokers = [] self._initialized = False self._is_dev_mode = False self._channel_event_hubs = {} def add_orderer(self, orderer): self._orderers[orderer.endpoint] = orderer def remove_orderer(self, orderer): if orderer.endpoint in self._orderers: self._orderers.pop(orderer.endpoint, None) def add_peer(self, peer): self._peers[peer.endpoint] = peer def remove_peer(self, peer): if peer.endpoint in self._peers: self._peers.pop(peer.endpoint, None) @property def orderers(self): return self._orderers @property def peers(self): return self._peers @property def is_dev_mode(self): return self._is_dev_mode @is_dev_mode.setter def is_dev_mode(self, mode): self._is_dev_mode = mode def _get_latest_block(self, tx_context, orderer): seek_info = ab_pb2.SeekInfo() seek_info.start.newest = ab_pb2.SeekNewest() seek_info.stop.newest = ab_pb2.SeekNewest() seek_info.behavior = ab_pb2.SeekInfo.SeekBehavior.Value('BLOCK_UNTIL_READY') seek_info_header = self._build_channel_header( common_pb2.HeaderType.Value('DELIVER_SEEK_INFO'), tx_context.tx_id, self._name, current_timestamp(), tx_context.epoch) signature_header = common_pb2.SignatureHeader() signature_header.creator = tx_context.identity signature_header.nonce = tx_context.nonce seek_payload = common_pb2.Payload() seek_payload.header.signature_header = signature_header.SerializeToString() seek_payload.header.channel_header = seek_info_header.SerializeToString() seek_payload.data = seek_info.SerializeToString() envelope = common_pb2.Envelope() envelope.signature = tx_context.sign(seek_payload.SerializeToString()) envelope.payload = seek_payload.SerializeToString() def _get_random_orderer(self): if sys.version_info < (3, 0): return random.choice(self._orderers.values()) else: return random.choice(list(self._orderers.values())) @property def name(self): return self._name def state_store(self): return self._client.state_store
Apache License 2.0
pyglet/pyglet
pyglet/text/document.py
InlineElement.remove
python
def remove(self, layout):
    raise NotImplementedError('abstract')
Remove this element from a layout.

The counterpart of `place`; called when the element is no longer
visible in the given layout.

:Parameters:
    `layout` : `pyglet.text.layout.TextLayout`
        The layout the element was removed from.
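Since remove() is abstract, a subclass has to pair it with place(). Below is a minimal hypothetical element that only tracks which layouts it currently lives in; it is a sketch, not a pattern pyglet itself ships.

from pyglet.text.document import InlineElement

class TrackedElement(InlineElement):
    """Hypothetical element that records the layouts it is placed in."""

    def __init__(self, ascent=10, descent=0, advance=10):
        super().__init__(ascent, descent, advance)
        self.active_layouts = set()

    def place(self, layout, x, y):
        # Called when the element becomes visible in a layout.
        self.active_layouts.add(layout)

    def remove(self, layout):
        # Counterpart of place(): release anything tied to this layout.
        self.active_layouts.discard(layout)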
https://github.com/pyglet/pyglet/blob/b9a63ea179735c8f252ac31d51751bdf8a741c9d/pyglet/text/document.py#L246-L257
import re import sys from pyglet import event from pyglet.text import runlist _is_pyglet_doc_run = hasattr(sys, "is_pyglet_doc_run") and sys.is_pyglet_doc_run STYLE_INDETERMINATE = 'indeterminate' class InlineElement: def __init__(self, ascent, descent, advance): self.ascent = ascent self.descent = descent self.advance = advance self._position = None position = property(lambda self: self._position, doc="""Position of the element within the document. Read-only. :type: int """) def place(self, layout, x, y): raise NotImplementedError('abstract')
BSD 3-Clause New or Revised License
hectorpulido/deeplearning-based-twitch-bot
src/Class/ChatbotBrain.py
ChatbotBrain.post_process_text
python
def post_process_text(self, ask):
    ask = ask.strip()
    search = re.findall(r"(([A-Z0-9]+\.)+[A-Z0-9]+)", ask, flags=re.IGNORECASE)
    for match in search:
        ask = ask.replace(match[0], "")
    ask = re.sub(r"\W+\?\!\.\,", "", ask)
    return ask[:500]
Post-process the response to avoid links.

Args:
    ask (str): response string

Returns:
    str: post-processed response string
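A quick standalone demonstration of the link-stripping pass on a chat reply containing a domain; the input string is invented and the printed output is approximate.

import re

ask = "  check HTTPS://EXAMPLE.COM/promo for free stuff  "
ask = ask.strip()
# Strip dotted tokens such as domains (example.com, sub.domain.tv, ...).
for match in re.findall(r"(([A-Z0-9]+\.)+[A-Z0-9]+)", ask, flags=re.IGNORECASE):
    ask = ask.replace(match[0], "")
print(ask[:500])  # roughly: "check HTTPS:///promo for free stuff" (domain removed)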
https://github.com/hectorpulido/deeplearning-based-twitch-bot/blob/9482b394bb96453a2cfcdbfccd2681318174e9ef/src/Class/ChatbotBrain.py#L131-L146
import re from transformers import pipeline, set_seed, MarianMTModel, MarianTokenizer from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer class ChatbotBrain: def __init__( self, context, translation_artifacts_english, translation_artifacts_spanish, model="microsoft/DialoGPT-small", tokenizer="microsoft/DialoGPT-small", translate=True, sentiment_analisis=False, seed=44, ): self.generator = pipeline("text-generation", model=model, tokenizer=tokenizer) self.translate = translate self.context = context self.translation_artifacts_english = translation_artifacts_english self.translation_artifacts_spanish = translation_artifacts_spanish self.sentiment_analisis = sentiment_analisis self.parsed_context = self.generator.tokenizer.eos_token.join( context.split("\n") ) self.temporal_context = [] set_seed(seed) if sentiment_analisis: self.sentiment_engine = SentimentIntensityAnalyzer() if translate: self.model_name_en_t_es = "Helsinki-NLP/opus-mt-en-ROMANCE" self.tokenizer_en_t_es = MarianTokenizer.from_pretrained( self.model_name_en_t_es ) self.model_en_t_es = MarianMTModel.from_pretrained(self.model_name_en_t_es) self.model_name_es_t_en = "Helsinki-NLP/opus-mt-ROMANCE-en" self.tokenizer_es_t_en = MarianTokenizer.from_pretrained( self.model_name_es_t_en ) self.model_es_t_en = MarianMTModel.from_pretrained(self.model_name_es_t_en) def english_to_spanish(self, text): src_text = [">>es<< {}".format(text)] translated = self.model_en_t_es.generate( **self.tokenizer_en_t_es.prepare_seq2seq_batch(src_text, return_tensors="pt", padding=True) ) tgt_text = [ self.tokenizer_en_t_es.decode(t, skip_special_tokens=True) for t in translated ] return tgt_text[0] def spanish_to_english(self, text): src_text = [text] translated = self.model_es_t_en.generate( **self.tokenizer_es_t_en.prepare_seq2seq_batch(src_text, return_tensors="pt", padding=True) ) tgt_text = [ self.tokenizer_es_t_en.decode(t, skip_special_tokens=True) for t in translated ] return tgt_text[0] def replace_translation_artifacts_en_sp(self, text): for word, initial in self.translation_artifacts_spanish.items(): text = text.lower().replace(word.lower(), initial.lower()) return text def replace_translation_artifacts_sp_en(self, text): for word, initial in self.translation_artifacts_english.items(): text = text.lower().replace(word.lower(), initial.lower()) return text
MIT License
mbr/tinyrpc
tinyrpc/client.py
RPCProxy.__getattr__
python
def __getattr__(self, name: str) -> Callable:
    proxy_func = lambda *args, **kwargs: self.client.call(
        self.prefix + name, args, kwargs, one_way=self.one_way
    )
    return proxy_func
Returns a proxy function that, when called, will call a function named ``name`` on the client associated with the proxy.
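How the attribute lookup plays out in practice, using a protocol/transport combination that tinyrpc provides; the endpoint URL is a placeholder and the sketch assumes a JSON-RPC server exposing a "math_add" method is listening there.

from tinyrpc import RPCClient
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport

rpc_client = RPCClient(
    JSONRPCProtocol(),
    HttpPostClientTransport('http://127.0.0.1:5000/')  # placeholder endpoint
)

remote = rpc_client.get_proxy(prefix='math_')
# Attribute access builds the call lazily: this invokes the remote "math_add".
print(remote.add(2, 3))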
https://github.com/mbr/tinyrpc/blob/0d359315b629ec7c083c5753f310d874b1c710bd/tinyrpc/client.py#L173-L180
import sys from collections import namedtuple from typing import List, Any, Dict, Callable, Optional from .transports import ClientTransport from .exc import RPCError from .protocols import RPCErrorResponse, RPCProtocol, RPCRequest, RPCResponse, RPCBatchResponse RPCCall = namedtuple('RPCCall', 'method args kwargs') RPCCallTo = namedtuple('RPCCallTo', 'transport method args kwargs') class RPCClient(object): def __init__( self, protocol: RPCProtocol, transport: ClientTransport ) -> None: self.protocol = protocol self.transport = transport def _send_and_handle_reply( self, req: RPCRequest, one_way: bool = False, transport: ClientTransport = None, no_exception: bool = False ) -> Optional[RPCResponse]: tport = self.transport if transport is None else transport reply = tport.send_message(req.serialize(), expect_reply=(not one_way)) if one_way: return response = self.protocol.parse_reply(reply) if not no_exception and isinstance(response, RPCErrorResponse): if hasattr(self.protocol, 'raise_error') and callable( self.protocol.raise_error): response = self.protocol.raise_error(response) else: raise RPCError( 'Error calling remote procedure: %s' % response.error ) return response def call( self, method: str, args: List, kwargs: Dict, one_way: bool = False ) -> Any: req = self.protocol.create_request(method, args, kwargs, one_way) rep = self._send_and_handle_reply(req, one_way) if one_way: return return rep.result def call_all(self, requests: List[RPCCall]) -> List[Any]: threads = [] if 'gevent' in sys.modules: import gevent for r in requests: req = self.protocol.create_request(r.method, r.args, r.kwargs) tr = r.transport.transport if len(r) == 4 else None threads.append( gevent.spawn( self._send_and_handle_reply, req, False, tr, True ) ) gevent.joinall(threads) return [t.value for t in threads] else: for r in requests: req = self.protocol.create_request(r.method, r.args, r.kwargs) tr = r.transport.transport if len(r) == 4 else None threads.append( self._send_and_handle_reply(req, False, tr, True) ) return threads def get_proxy(self, prefix: str = '', one_way: bool = False) -> 'RPCProxy': return RPCProxy(self, prefix, one_way) def batch_call(self, calls: List[RPCCallTo]) -> RPCBatchResponse: req = self.protocol.create_batch_request() for call_args in calls: req.append(self.protocol.create_request(*call_args)) return self._send_and_handle_reply(req) class RPCProxy(object): def __init__( self, client: RPCClient, prefix: str = '', one_way: bool = False ) -> None: self.client = client self.prefix = prefix self.one_way = one_way
MIT License
ibm/mi-prometheus
miprometheus/workers/trainer.py
Trainer.setup_experiment
python
def setup_experiment(self): super(Trainer, self).setup_experiment() if self.flags.config == '': print('Please pass configuration file(s) as --c parameter') exit(-1) if self.flags.use_gpu and (torch.cuda.device_count() == 0): self.logger.error("Cannot use GPU as there are no CUDA-compatible devices present in the system!") exit(-2) configs_to_load = self.recurrent_config_parse(self.flags.config, []) self.recurrent_config_load(configs_to_load) conf_str = 'Loaded (initial) configuration:\n' conf_str += '='*80 + '\n' conf_str += yaml.safe_dump(self.params.to_dict(), default_flow_style=False) conf_str += '='*80 + '\n' print(conf_str) try: training_problem_name = self.params['training']['problem']['name'] except KeyError: print("Error: Couldn't retrieve the problem name from the 'training' section in the loaded configuration") exit(-1) try: _ = self.params['validation']['problem']['name'] except KeyError: print("Error: Couldn't retrieve the problem name from the 'validation' section in the loaded configuration") exit(-1) try: model_name = self.params['model']['name'] except KeyError: print("Error: Couldn't retrieve the model name from the loaded configuration") exit(-1) while True: try: time_str = '{0:%Y%m%d_%H%M%S}'.format(datetime.now()) if self.flags.savetag != '': time_str = time_str + "_" + self.flags.savetag self.log_dir = self.flags.expdir + '/' + training_problem_name + '/' + model_name + '/' + time_str + '/' os.makedirs(self.log_dir, exist_ok=False) except FileExistsError: sleep(1) else: break self.log_file = self.log_dir + 'trainer.log' self.add_file_handler_to_logger(self.log_file) self.model_dir = self.log_dir + 'models/' os.makedirs(self.model_dir, exist_ok=False) self.set_random_seeds(self.params['training'], 'training') self.check_and_set_cuda(self.flags.use_gpu) self.training_problem, self.training_sampler, self.training_dataloader = self.build_problem_sampler_loader(self.params['training'], 'training') if 'curriculum_learning' in self.params['training']: self.training_problem.curriculum_learning_initialize(self.params['training']['curriculum_learning']) self.curric_done = self.training_problem.curriculum_learning_update_params(0) self.params['training']['curriculum_learning'].add_default_params({'must_finish': True}) self.must_finish_curriculum = self.params['training']['curriculum_learning']['must_finish'] self.logger.info("Curriculum Learning activated") else: self.must_finish_curriculum = False self.curric_done = True self.validation_problem, self.validations_sampler, self.validation_dataloader = self.build_problem_sampler_loader(self.params['validation'], 'validation') self.validation_batch = next(iter(self.validation_dataloader)) self.model = ModelFactory.build(self.params['model'], self.training_problem.default_values) try: if self.flags.model != "": model_name = self.flags.model msg = "command line (--m)" elif "load" in self.params['model']: model_name = self.params['model']['load'] msg = "model section of the configuration file" else: model_name = "" if model_name != "": if os.path.isfile(model_name): self.model.load(model_name) else: raise Exception("Couldn't load the checkpoint {} indicated in the {}: file does not exist".format(model_name, msg)) except KeyError: self.logger.error("File {} indicated in the {} seems not to be a valid model checkpoint".format(model_name, msg)) exit(-5) except Exception as e: self.logger.error(e) exit(-6) if self.app_state.use_CUDA: self.model.cuda() self.logger.info(self.model.summarize()) optimizer_conf = 
dict(self.params['training']['optimizer']) optimizer_name = optimizer_conf['name'] del optimizer_conf['name'] self.optimizer = getattr(torch.optim, optimizer_name)(filter(lambda p: p.requires_grad, self.model.parameters()), **optimizer_conf)
Sets up the experiment for all trainers: - Calls the base class setup_experiment to parse the command line arguments, - Loads the config file(s): >>> configs_to_load = self.recurrent_config_parse(flags.config, []) - Sets up the log directory path: >>> os.makedirs(self.log_dir, exist_ok=False) - Adds a ``FileHandler`` to the logger: >>> self.add_file_handler_to_logger(self.log_file) - Sets random seeds: >>> self.set_random_seeds(self.params['training'], 'training') - Creates the training problem and model: >>> self.training_problem = ProblemFactory.build_problem(self.params['training']['problem']) >>> self.model = ModelFactory.build_model(self.params['model'], self.training_problem.default_values) - Creates the DataLoader: >>> self.training_dataloader = DataLoader(dataset=self.training_problem, ...) - Handles curriculum learning if indicated: >>> if 'curriculum_learning' in self.params['training']: >>> ... - Handles the validation of the model: - Creates the validation problem & DataLoader - Sets the optimizer: >>> self.optimizer = getattr(torch.optim, optimizer_name) - Handles TensorBoard writers & files: >>> self.training_writer = SummaryWriter(self.log_dir + '/training')
https://github.com/ibm/mi-prometheus/blob/a8e8a5b339598b0637a251834c560bc24d5a9500/miprometheus/workers/trainer.py#L88-L298
__author__ = "Vincent Marois, Tomasz Kornuta" import os import yaml import torch from time import sleep from random import randrange from datetime import datetime from miprometheus.workers.worker import Worker from miprometheus.models.model_factory import ModelFactory from miprometheus.utils.statistics_collector import StatisticsCollector from miprometheus.utils.statistics_aggregator import StatisticsAggregator class Trainer(Worker): def __init__(self, name="Trainer"): super(Trainer, self).__init__(name) self.parser.add_argument('--tensorboard', action='store', dest='tensorboard', choices=[0, 1, 2], type=int, help="If present, enable logging to TensorBoard. Available log levels:\n" "0: Log the collected statistics.\n" "1: Add the histograms of the model's biases & weights (Warning: Slow).\n" "2: Add the histograms of the model's biases & weights gradients " "(Warning: Even slower).") self.parser.add_argument('--visualize', dest='visualize', default='-1', choices=[-1, 0, 1, 2, 3], type=int, help="Activate dynamic visualization (Warning: will require user interaction):\n" "-1: disabled (DEFAULT)\n" "0: Only during training episodes.\n" "1: During both training and validation episodes.\n" "2: Only during validation episodes.\n" "3: Only during the last validation, after the training is completed.\n")
Apache License 2.0
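A minimal standalone sketch of the optimizer-construction step at the end of setup_experiment, assuming only that PyTorch is installed; the model and the optimizer_conf dict below are hypothetical stand-ins, not mi-prometheus objects.

import torch

model = torch.nn.Linear(4, 2)                   # stand-in for the model built by ModelFactory
optimizer_conf = {'name': 'Adam', 'lr': 1e-3}   # hypothetical ['training']['optimizer'] section
optimizer_name = optimizer_conf.pop('name')     # drop 'name' so the remaining keys are keyword arguments
optimizer = getattr(torch.optim, optimizer_name)(
    filter(lambda p: p.requires_grad, model.parameters()),
    **optimizer_conf)
print(type(optimizer).__name__)                 # -> Adam

Popping 'name' before splatting the remaining keys mirrors the trainer's del optimizer_conf['name'] followed by **optimizer_conf.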
pidgeot/python-lnp
core/mods.py
simplify_pack
python
def simplify_pack(pack): i = baselines.simplify_pack(pack, 'mods') if not i: i = 0 if i > 10: log.w('Reducing mod "{}": assume vanilla files were omitted ' 'deliberately'.format(pack)) i += make_blank_files(pack) i += baselines.remove_vanilla_raws_from_pack(pack, 'mods') i += baselines.remove_empty_dirs(pack, 'mods') return i
Removes unnecessary files from one mod. Params: pack: path segment in './LNP/Mods/pack/' as a string Returns: The sum of files affected by the operations
https://github.com/pidgeot/python-lnp/blob/e738c0003e119e92478c831b94a5fd44a52031c6/core/mods.py#L67-L91
from __future__ import print_function, unicode_literals, absolute_import import sys, os, shutil, glob, time from difflib import ndiff, SequenceMatcher from io import open from . import paths, baselines, log, manifest from .lnp import lnp def _shutil_wrap(fn): def _wrapped_fn(*args, **kwargs): i = 0 while i < 5: try: fn(*args, **kwargs) except: i += 1 time.sleep(0.1) else: break return _wrapped_fn if sys.platform == 'win32': shutil.rmtree = _shutil_wrap(shutil.rmtree) shutil.copytree = _shutil_wrap(shutil.copytree) def toggle_premerge_gfx(): lnp.userconfig['premerge_graphics'] = not lnp.userconfig.get_bool( 'premerge_graphics') lnp.userconfig.save_data() def will_premerge_gfx(): return lnp.userconfig.get_bool('premerge_graphics') def read_mods(): return [os.path.basename(o) for o in glob.glob(paths.get('mods', '*')) if os.path.isdir(o) and manifest.is_compatible('mods', os.path.basename(o))] def get_title(mod): title = manifest.get_cfg('mods', mod).get_string('title') if title: return title return mod def get_tooltip(mod): return manifest.get_cfg('mods', mod).get_string('tooltip') def simplify_mods(): mods, files = 0, 0 for pack in read_mods(): mods += 1 files += simplify_pack(pack) return mods, files
ISC License
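An illustrative sketch of the same return-the-number-of-files-affected convention, not the python-lnp API itself: a cleanup pass that removes empty directories under a root and reports how many it removed. The function name and behaviour here are assumptions for demonstration only.

import os

def remove_empty_dirs(root):
    # Walk bottom-up so children are checked before their parents.
    removed = 0
    for dirpath, _dirnames, _filenames in os.walk(root, topdown=False):
        if dirpath != root and not os.listdir(dirpath):
            os.rmdir(dirpath)
            removed += 1
    return removed

Counts returned by several such passes can then be summed, which is exactly what simplify_pack does with its helpers.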
pathoschild/stewbot
stewbot/components/modules/mechanize/_form.py
MimeWriter.startbody
python
def startbody(self, ctype=None, plist=[], prefix=1, add_to_http_hdrs=0, content_type=1): if content_type and ctype: for name, value in plist: ctype = ctype + ';\r\n %s=%s' % (name, value) self.addheader("Content-Type", ctype, prefix=prefix, add_to_http_hdrs=add_to_http_hdrs) self.flushheaders() if not add_to_http_hdrs: self._fp.write("\r\n") self._first_part = True return self._fp
prefix is ignored if add_to_http_hdrs is true.
https://github.com/pathoschild/stewbot/blob/8cfcb8378684b0083eb11748f21d1bc74636cdb6/stewbot/components/modules/mechanize/_form.py#L305-L318
__all__ = ['AmbiguityError', 'CheckboxControl', 'Control', 'ControlNotFoundError', 'FileControl', 'FormParser', 'HTMLForm', 'HiddenControl', 'IgnoreControl', 'ImageControl', 'IsindexControl', 'Item', 'ItemCountError', 'ItemNotFoundError', 'Label', 'ListControl', 'LocateError', 'Missing', 'ParseError', 'ParseFile', 'ParseFileEx', 'ParseResponse', 'ParseResponseEx','PasswordControl', 'RadioControl', 'ScalarControl', 'SelectControl', 'SubmitButtonControl', 'SubmitControl', 'TextControl', 'TextareaControl', 'XHTMLCompatibleFormParser'] import HTMLParser from cStringIO import StringIO import inspect import logging import random import re import sys import urllib import urlparse import warnings import _beautifulsoup import _request import sgmllib import _sgmllib_copy VERSION = "0.2.11" CHUNK = 1024 DEFAULT_ENCODING = "latin-1" _logger = logging.getLogger("mechanize.forms") OPTIMIZATION_HACK = True def debug(msg, *args, **kwds): if OPTIMIZATION_HACK: return caller_name = inspect.stack()[1][3] extended_msg = '%%s %s' % msg extended_args = (caller_name,)+args _logger.debug(extended_msg, *extended_args, **kwds) def _show_debug_messages(): global OPTIMIZATION_HACK OPTIMIZATION_HACK = False _logger.setLevel(logging.DEBUG) handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.DEBUG) _logger.addHandler(handler) def deprecation(message, stack_offset=0): warnings.warn(message, DeprecationWarning, stacklevel=3+stack_offset) class Missing: pass _compress_re = re.compile(r"\s+") def compress_text(text): return _compress_re.sub(" ", text.strip()) def normalize_line_endings(text): return re.sub(r"(?:(?<!\r)\n)|(?:\r(?!\n))", "\r\n", text) def unescape(data, entities, encoding=DEFAULT_ENCODING): if data is None or "&" not in data: return data def replace_entities(match, entities=entities, encoding=encoding): ent = match.group() if ent[1] == "#": return unescape_charref(ent[2:-1], encoding) repl = entities.get(ent) if repl is not None: if type(repl) != type(""): try: repl = repl.encode(encoding) except UnicodeError: repl = ent else: repl = ent return repl return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data) def unescape_charref(data, encoding): name, base = data, 10 if name.startswith("x"): name, base= name[1:], 16 uc = unichr(int(name, base)) if encoding is None: return uc else: try: repl = uc.encode(encoding) except UnicodeError: repl = "&#%s;" % data return repl def get_entitydefs(): import htmlentitydefs from codecs import latin_1_decode entitydefs = {} try: htmlentitydefs.name2codepoint except AttributeError: entitydefs = {} for name, char in htmlentitydefs.entitydefs.items(): uc = latin_1_decode(char)[0] if uc.startswith("&#") and uc.endswith(";"): uc = unescape_charref(uc[2:-1], None) entitydefs["&%s;" % name] = uc else: for name, codepoint in htmlentitydefs.name2codepoint.items(): entitydefs["&%s;" % name] = unichr(codepoint) return entitydefs def issequence(x): try: x[0] except (TypeError, KeyError): return False except IndexError: pass return True def isstringlike(x): try: x+"" except: return False else: return True def choose_boundary(): nonce = "".join([str(random.randint(0, sys.maxint-1)) for i in 0,1,2]) return "-"*27 + nonce class MimeWriter: def __init__(self, fp, http_hdrs=None): self._http_hdrs = http_hdrs self._fp = fp self._headers = [] self._boundary = [] self._first_part = True def addheader(self, key, value, prefix=0, add_to_http_hdrs=0): lines = value.split("\r\n") while lines and not lines[-1]: del lines[-1] while lines and not lines[0]: del lines[0] if 
add_to_http_hdrs: value = "".join(lines) self._http_hdrs.append((key.capitalize(), value)) else: for i in range(1, len(lines)): lines[i] = " " + lines[i].strip() value = "\r\n".join(lines) + "\r\n" line = key.title() + ": " + value if prefix: self._headers.insert(0, line) else: self._headers.append(line) def flushheaders(self): self._fp.writelines(self._headers) self._headers = []
ISC License
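A standalone sketch of the Content-Type assembly performed by startbody (no mechanize required); the boundary string is a made-up placeholder.

ctype = "multipart/form-data"
plist = [("boundary", "---------------------------1234567890")]
for name, value in plist:
    ctype = ctype + ';\r\n %s=%s' % (name, value)
# Produces a folded header value:
#   multipart/form-data;
#    boundary=---------------------------1234567890
print("Content-Type: " + ctype)

When add_to_http_hdrs is true the header is routed to the HTTP header list instead and the blank separator line is not written, which is why prefix is ignored in that case.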
nuagenetworks/vspk-python
vspk/v6/numacfilterprofile.py
NUMACFilterProfile.customer_id
python
def customer_id(self): return self._customer_id
Get the customer_id value. Notes: The customer ID given to the parent enterprise. This is used by the Netconf/Config manager. This attribute is named `customerID` in the VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v6/numacfilterprofile.py#L328-L338
from .fetchers import NUPermissionsFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from bambou import NURESTObject class NUMACFilterProfile(NURESTObject): __rest_name__ = "macfilterprofile" __resource_name__ = "macfilterprofiles" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): super(NUMACFilterProfile, self).__init__() self._name = None self._last_updated_by = None self._last_updated_date = None self._description = None self._embedded_metadata = None self._entity_scope = None self._creation_date = None self._assoc_entity_type = None self._customer_id = None self._owner = None self._external_id = None self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="customer_id", remote_name="customerID", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def last_updated_by(self): return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): self._last_updated_by = value @property def last_updated_date(self): return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): self._last_updated_date = value @property def description(self): return self._description @description.setter def description(self, value): self._description = value @property def embedded_metadata(self): return self._embedded_metadata @embedded_metadata.setter def embedded_metadata(self, value): self._embedded_metadata = value @property def entity_scope(self): return self._entity_scope @entity_scope.setter def entity_scope(self, value): self._entity_scope = value @property def creation_date(self): return self._creation_date @creation_date.setter def creation_date(self, value): self._creation_date = value 
@property def assoc_entity_type(self): return self._assoc_entity_type @assoc_entity_type.setter def assoc_entity_type(self, value): self._assoc_entity_type = value @property
BSD 3-Clause New or Revised License
rebiocoder/bioforum
venv/Lib/site-packages/django/template/loaders/cached.py
Loader.reset
python
def reset(self): self.template_cache.clear() self.get_template_cache.clear()
Empty the template cache.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/template/loaders/cached.py#L93-L96
import hashlib from django.template import TemplateDoesNotExist from django.template.backends.django import copy_exception from django.utils.encoding import force_bytes from .base import Loader as BaseLoader class Loader(BaseLoader): def __init__(self, engine, loaders): self.template_cache = {} self.get_template_cache = {} self.loaders = engine.get_template_loaders(loaders) super().__init__(engine) def get_contents(self, origin): return origin.loader.get_contents(origin) def get_template(self, template_name, skip=None): key = self.cache_key(template_name, skip) cached = self.get_template_cache.get(key) if cached: if isinstance(cached, type) and issubclass(cached, TemplateDoesNotExist): raise cached(template_name) elif isinstance(cached, TemplateDoesNotExist): raise copy_exception(cached) return cached try: template = super().get_template(template_name, skip) except TemplateDoesNotExist as e: self.get_template_cache[key] = copy_exception(e) if self.engine.debug else TemplateDoesNotExist raise else: self.get_template_cache[key] = template return template def get_template_sources(self, template_name): for loader in self.loaders: yield from loader.get_template_sources(template_name) def cache_key(self, template_name, skip=None): dirs_prefix = '' skip_prefix = '' if skip: matching = [origin.name for origin in skip if origin.template_name == template_name] if matching: skip_prefix = self.generate_hash(matching) return '-'.join(s for s in (str(template_name), skip_prefix, dirs_prefix) if s) def generate_hash(self, values): return hashlib.sha1(force_bytes('|'.join(values))).hexdigest()
MIT License
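A toy sketch of why reset() exists, using a generic two-dict cache rather than Django's actual loader machinery: repeated lookups return the cached object until the caches are cleared. The names here are illustrative only.

class TinyCachedLoader:
    def __init__(self):
        self.template_cache = {}
        self.get_template_cache = {}

    def get_template(self, name):
        if name not in self.get_template_cache:
            self.get_template_cache[name] = "compiled:" + name   # pretend compilation
        return self.get_template_cache[name]

    def reset(self):
        self.template_cache.clear()
        self.get_template_cache.clear()

loader = TinyCachedLoader()
loader.get_template("index.html")   # compiled and cached
loader.reset()                      # e.g. after templates change on disk
loader.get_template("index.html")   # compiled again, not served stale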
roseou/flasky
venv/lib/python2.7/site-packages/coverage/templite.py
Templite.do_dots
python
def do_dots(self, value, *dots): for dot in dots: try: value = getattr(value, dot) except AttributeError: value = value[dot] if hasattr(value, '__call__'): value = value() return value
Evaluate dotted expressions at runtime.
https://github.com/roseou/flasky/blob/0ebf366ddfe9604acfba99756f69a6d63063b3f9/venv/lib/python2.7/site-packages/coverage/templite.py#L199-L208
import re from coverage.backward import set class CodeBuilder(object): def __init__(self, indent=0): self.code = [] self.indent_amount = indent def add_line(self, line): self.code.append(" " * self.indent_amount) self.code.append(line) self.code.append("\n") def add_section(self): sect = CodeBuilder(self.indent_amount) self.code.append(sect) return sect def indent(self): self.indent_amount += 4 def dedent(self): self.indent_amount -= 4 def __str__(self): return "".join([str(c) for c in self.code]) def get_function(self, fn_name): assert self.indent_amount == 0 g = {} code_text = str(self) exec(code_text, g) return g[fn_name] class Templite(object): def __init__(self, text, *contexts): self.text = text self.context = {} for context in contexts: self.context.update(context) code = CodeBuilder() code.add_line("def render(ctx, dot):") code.indent() vars_code = code.add_section() self.all_vars = set() self.loop_vars = set() code.add_line("result = []") code.add_line("a = result.append") code.add_line("e = result.extend") code.add_line("s = str") buffered = [] def flush_output(): if len(buffered) == 1: code.add_line("a(%s)" % buffered[0]) elif len(buffered) > 1: code.add_line("e([%s])" % ",".join(buffered)) del buffered[:] toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text) ops_stack = [] for tok in toks: if tok.startswith('{{'): buffered.append("s(%s)" % self.expr_code(tok[2:-2].strip())) elif tok.startswith('{#'): continue elif tok.startswith('{%'): flush_output() words = tok[2:-2].strip().split() if words[0] == 'if': assert len(words) == 2 ops_stack.append('if') code.add_line("if %s:" % self.expr_code(words[1])) code.indent() elif words[0] == 'for': assert len(words) == 4 and words[2] == 'in' ops_stack.append('for') self.loop_vars.add(words[1]) code.add_line( "for c_%s in %s:" % ( words[1], self.expr_code(words[3]) ) ) code.indent() elif words[0].startswith('end'): end_what = words[0][3:] if ops_stack[-1] != end_what: raise SyntaxError("Mismatched end tag: %r" % end_what) ops_stack.pop() code.dedent() else: raise SyntaxError("Don't understand tag: %r" % words[0]) else: if tok: buffered.append("%r" % tok) flush_output() for var_name in self.all_vars - self.loop_vars: vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name)) if ops_stack: raise SyntaxError("Unmatched action tag: %r" % ops_stack[-1]) code.add_line("return ''.join(result)") code.dedent() self.render_function = code.get_function('render') def expr_code(self, expr): if "|" in expr: pipes = expr.split("|") code = self.expr_code(pipes[0]) for func in pipes[1:]: self.all_vars.add(func) code = "c_%s(%s)" % (func, code) elif "." in expr: dots = expr.split(".") code = self.expr_code(dots[0]) args = [repr(d) for d in dots[1:]] code = "dot(%s, %s)" % (code, ", ".join(args)) else: self.all_vars.add(expr) code = "c_%s" % expr return code def render(self, context=None): ctx = dict(self.context) if context: ctx.update(context) return self.render_function(ctx, self.do_dots)
MIT License
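A standalone re-implementation of the dotted-lookup rule do_dots applies (attribute first, then item lookup, then call if callable), so it can be tried without the rest of the templating engine; the sample data is invented.

def do_dots(value, *dots):
    for dot in dots:
        try:
            value = getattr(value, dot)
        except AttributeError:
            value = value[dot]
        if hasattr(value, '__call__'):
            value = value()
    return value

data = {'user': {'name': 'ada', 'shout': lambda: 'HI'}}
print(do_dots(data, 'user', 'name'))    # 'ada'  (item lookup)
print(do_dots(data, 'user', 'shout'))   # 'HI'   (item lookup, then the callable is invoked)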
citrineinformatics/citrine-python
src/citrine/resources/gemd_resource.py
GEMDResourceCollection.get_type
python
def get_type(cls) -> Type[DataConcepts]: return DataConcepts
Return the resource type in the collection.
https://github.com/citrineinformatics/citrine-python/blob/d164744c99b17f935a09935c2e57d2f056675477/src/citrine/resources/gemd_resource.py#L26-L28
from typing import Type, Union, Optional, List, Tuple from uuid import UUID from gemd.entity.base_entity import BaseEntity from gemd.entity.link_by_uid import LinkByUID from citrine.resources.api_error import ApiError from citrine.resources.data_concepts import DataConcepts, DataConceptsCollection from citrine.resources.delete import _async_gemd_batch_delete from citrine._session import Session class GEMDResourceCollection(DataConceptsCollection[DataConcepts]): _path_template = 'projects/{project_id}/storables' _dataset_agnostic_path_template = 'projects/{project_id}/storables' def __init__(self, project_id: UUID, dataset_id: UUID, session: Session): self.project_id = project_id self.dataset_id = dataset_id self.session = session @classmethod
Apache License 2.0
cleverhans-lab/cleverhans
cleverhans_v3.1.0/cleverhans/attack_bundling.py
basic_max_confidence_recipe
python
def basic_max_confidence_recipe( sess, model, x, y, nb_classes, eps, clip_min, clip_max, eps_iter, nb_iter, report_path, batch_size=BATCH_SIZE, eps_iter_small=None, ): noise_attack = Noise(model, sess) pgd_attack = ProjectedGradientDescent(model, sess) threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max} noise_attack_config = AttackConfig(noise_attack, threat_params) attack_configs = [noise_attack_config] pgd_attack_configs = [] pgd_params = copy.copy(threat_params) pgd_params["eps_iter"] = eps_iter pgd_params["nb_iter"] = nb_iter assert batch_size % num_devices == 0 dev_batch_size = batch_size // num_devices ones = tf.ones(dev_batch_size, tf.int32) expensive_pgd = [] if eps_iter_small is None: eps_iter_small = eps_iter / 25.0 for cls in range(nb_classes): cls_params = copy.copy(pgd_params) cls_params["y_target"] = tf.to_float(tf.one_hot(ones * cls, nb_classes)) cls_attack_config = AttackConfig(pgd_attack, cls_params, "pgd_" + str(cls)) pgd_attack_configs.append(cls_attack_config) expensive_params = copy.copy(cls_params) expensive_params["eps_iter"] = eps_iter_small expensive_params["nb_iter"] *= 25.0 expensive_config = AttackConfig( pgd_attack, expensive_params, "expensive_pgd_" + str(cls) ) expensive_pgd.append(expensive_config) attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd new_work_goal = {config: 5 for config in attack_configs} pgd_work_goal = {config: 5 for config in pgd_attack_configs} goals = [ Misclassify(new_work_goal={noise_attack_config: 50}), Misclassify(new_work_goal=pgd_work_goal), MaxConfidence(t=0.5, new_work_goal=new_work_goal), MaxConfidence(t=0.75, new_work_goal=new_work_goal), MaxConfidence(t=0.875, new_work_goal=new_work_goal), MaxConfidence(t=0.9375, new_work_goal=new_work_goal), MaxConfidence(t=0.96875, new_work_goal=new_work_goal), MaxConfidence(t=0.984375, new_work_goal=new_work_goal), MaxConfidence(t=1.0), ] bundle_attacks(sess, model, x, y, attack_configs, goals, report_path)
A reasonable attack bundling recipe for a max norm threat model and a defender that uses confidence thresholding. References: https://openreview.net/forum?id=H1g0piA9tQ This version runs indefinitely, updating the report on disk continuously. :param sess: tf.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param nb_classes: int, number of classes :param eps: float, maximum size of perturbation (measured by max norm) :param clip_min: float, minimum value of each input feature :param clip_max: float, maximum value of each input feature :param eps_iter: float, step size for one version of PGD attacks (will also run another version with eps_iter_small) :param nb_iter: int, number of iterations for one version of PGD attacks (will also run another version with 25X more iterations) :param report_path: str, the path that the report will be saved to. :param batch_size: int, the total number of examples to run simultaneously :param eps_iter_small: optional, float. The second version of the PGD attack is run with 25 * nb_iter iterations and eps_iter_small step size. If eps_iter_small is not specified it is set to eps_iter / 25.
https://github.com/cleverhans-lab/cleverhans/blob/4aed4be702be5ce13d5017b8a3c6a2cdc4fc0009/cleverhans_v3.1.0/cleverhans/attack_bundling.py#L130-L212
import copy import logging import time import numpy as np import six from six.moves import range import tensorflow as tf from cleverhans.attacks import Noise from cleverhans.attacks import ProjectedGradientDescent from cleverhans.attacks import SPSA from cleverhans.evaluation import correctness_and_confidence from cleverhans.evaluation import batch_eval_multi_worker, run_attack from cleverhans.model import Model from cleverhans import serial from cleverhans.utils import create_logger, deep_copy, safe_zip from cleverhans.utils_tf import infer_devices from cleverhans.confidence_report import ConfidenceReport from cleverhans.confidence_report import ConfidenceReportEntry from cleverhans.confidence_report import print_stats _logger = create_logger("attack_bundling") _logger.setLevel(logging.INFO) devices = infer_devices() num_devices = len(devices) DEFAULT_EXAMPLES_PER_DEVICE = 128 BATCH_SIZE = DEFAULT_EXAMPLES_PER_DEVICE * num_devices REPORT_TIME_INTERVAL = 60 def single_run_max_confidence_recipe( sess, model, x, y, nb_classes, eps, clip_min, clip_max, eps_iter, nb_iter, report_path, batch_size=BATCH_SIZE, eps_iter_small=None, ): noise_attack = Noise(model, sess) pgd_attack = ProjectedGradientDescent(model, sess) threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max} noise_attack_config = AttackConfig(noise_attack, threat_params, "noise") attack_configs = [noise_attack_config] pgd_attack_configs = [] pgd_params = copy.copy(threat_params) pgd_params["eps_iter"] = eps_iter pgd_params["nb_iter"] = nb_iter assert batch_size % num_devices == 0 dev_batch_size = batch_size // num_devices ones = tf.ones(dev_batch_size, tf.int32) expensive_pgd = [] if eps_iter_small is None: eps_iter_small = eps_iter / 25.0 for cls in range(nb_classes): cls_params = copy.copy(pgd_params) cls_params["y_target"] = tf.to_float(tf.one_hot(ones * cls, nb_classes)) cls_attack_config = AttackConfig(pgd_attack, cls_params, "pgd_" + str(cls)) pgd_attack_configs.append(cls_attack_config) expensive_params = copy.copy(cls_params) expensive_params["eps_iter"] = eps_iter_small expensive_params["nb_iter"] *= 25.0 expensive_config = AttackConfig( pgd_attack, expensive_params, "expensive_pgd_" + str(cls) ) expensive_pgd.append(expensive_config) attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd new_work_goal = {config: 1 for config in attack_configs} goals = [MaxConfidence(t=1.0, new_work_goal=new_work_goal)] bundle_attacks( sess, model, x, y, attack_configs, goals, report_path, attack_batch_size=batch_size, eval_batch_size=batch_size, )
MIT License
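The core trick in the recipe is running one targeted PGD configuration per class, each with a one-hot y_target. A small NumPy sketch of how those per-class targets look for a device batch (no TensorFlow or cleverhans needed); the batch size and class count are arbitrary.

import numpy as np

dev_batch_size, nb_classes = 4, 3
attack_targets = []
for cls in range(nb_classes):
    # every example in the device batch is pushed toward class `cls`
    y_target = np.eye(nb_classes, dtype=np.float32)[np.full(dev_batch_size, cls)]
    attack_targets.append(y_target)

print(attack_targets[1])   # four rows of [0. 1. 0.]

This mirrors tf.to_float(tf.one_hot(ones * cls, nb_classes)) in the TF1 code, where ones has one entry per example on the device.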
deepset-ai/haystack
haystack/document_store/base.py
BaseDocumentStore._drop_duplicate_documents
python
def _drop_duplicate_documents(self, documents: List[Document]) -> List[Document]: _hash_ids: list = [] _documents: List[Document] = [] for document in documents: if document.id in _hash_ids: logger.warning(f"Duplicate Documents: Document with id '{document.id}' already exists in index " f"'{self.index}'") continue _documents.append(document) _hash_ids.append(document.id) return _documents
Drop duplicate documents based on the same hash ID :param documents: A list of Haystack Document objects. :return: A list of Haystack Document objects.
https://github.com/deepset-ai/haystack/blob/38652dd4dd3e5f4b66bbf70def84220f723add2c/haystack/document_store/base.py#L264-L282
import logging import collections from abc import abstractmethod from pathlib import Path from typing import Optional, Dict, List, Union import numpy as np from haystack import Document, Label, MultiLabel, BaseComponent from haystack.errors import DuplicateDocumentError from haystack.preprocessor.preprocessor import PreProcessor from haystack.preprocessor.utils import eval_data_from_json, eval_data_from_jsonl, squad_json_to_jsonl logger = logging.getLogger(__name__) class BaseDocumentStore(BaseComponent): index: Optional[str] label_index: Optional[str] similarity: Optional[str] duplicate_documents_options: tuple = ('skip', 'overwrite', 'fail') ids_iterator = None @abstractmethod def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000, duplicate_documents: Optional[str] = None): pass @abstractmethod def get_all_documents( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None ) -> List[Document]: pass def __iter__(self): if not self.ids_iterator: self.ids_iterator = [x.id for x in self.get_all_documents()] return self def __next__(self): if len(self.ids_iterator) == 0: raise StopIteration else: curr_id = self.ids_iterator[0] ret = self.get_document_by_id(curr_id) self.ids_iterator = self.ids_iterator[1:] return ret @abstractmethod def get_all_labels(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Label]: pass def get_all_labels_aggregated(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, open_domain: bool=True, drop_negative_labels: bool=False, drop_no_answers: bool=False, aggregate_by_meta: Optional[Union[str, list]]=None) -> List[MultiLabel]: aggregated_labels = [] all_labels = self.get_all_labels(index=index, filters=filters) question_ans_dict: dict = {} for l in all_labels: group_by_id_list: list = [] if open_domain: group_by_id_list = [l.query] else: group_by_id_list = [l.document.id, l.query] if aggregate_by_meta: if type(aggregate_by_meta) == str: aggregate_by_meta = [aggregate_by_meta] if l.meta is None: l.meta = {} for meta_key in aggregate_by_meta: curr_meta = l.meta.get(meta_key, None) if curr_meta: group_by_id_list.append(curr_meta) group_by_id = tuple(group_by_id_list) if group_by_id in question_ans_dict: question_ans_dict[group_by_id].append(l) else: question_ans_dict[group_by_id] = [l] for q, ls in question_ans_dict.items(): agg_label = MultiLabel(labels=ls, drop_negative_labels=drop_negative_labels, drop_no_answers=drop_no_answers) aggregated_labels.append(agg_label) return aggregated_labels @abstractmethod def get_document_by_id(self, id: str, index: Optional[str] = None) -> Optional[Document]: pass @abstractmethod def get_document_count(self, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None) -> int: pass @abstractmethod def query_by_embedding(self, query_emb: np.ndarray, filters: Optional[Optional[Dict[str, List[str]]]] = None, top_k: int = 10, index: Optional[str] = None, return_embedding: Optional[bool] = None) -> List[Document]: pass @abstractmethod def get_label_count(self, index: Optional[str] = None) -> int: pass @abstractmethod def write_labels(self, labels: Union[List[Label], List[dict]], index: Optional[str] = None): pass def add_eval_data(self, filename: str, doc_index: str = "eval_document", label_index: str = "label", batch_size: Optional[int] = None, preprocessor: Optional[PreProcessor] = None, max_docs: Union[int, bool] = 
None, open_domain: bool = False): if preprocessor is not None: assert preprocessor.split_by != "sentence", f"Split by sentence not supported.\n" f"Please set 'split_by' to either 'word' or 'passage' in the supplied PreProcessor." assert preprocessor.split_respect_sentence_boundary == False, f"split_respect_sentence_boundary not supported yet.\n" f"Please set 'split_respect_sentence_boundary' to False in the supplied PreProcessor." assert preprocessor.split_overlap == 0, f"Overlapping documents are currently not supported when adding eval data.\n" f"Please set 'split_overlap=0' in the supplied PreProcessor." assert preprocessor.clean_empty_lines == False, f"clean_empty_lines currently not supported when adding eval data.\n" f"Please set 'clean_empty_lines=False' in the supplied PreProcessor." assert preprocessor.clean_whitespace == False, f"clean_whitespace is currently not supported when adding eval data.\n" f"Please set 'clean_whitespace=False' in the supplied PreProcessor." assert preprocessor.clean_header_footer == False, f"clean_header_footer is currently not supported when adding eval data.\n" f"Please set 'clean_header_footer=False' in the supplied PreProcessor." file_path = Path(filename) if file_path.suffix == ".json": if batch_size is None: docs, labels = eval_data_from_json(filename, max_docs=max_docs, preprocessor=preprocessor, open_domain=open_domain) self.write_documents(docs, index=doc_index) self.write_labels(labels, index=label_index) else: jsonl_filename = (file_path.parent / (file_path.stem + '.jsonl')).as_posix() logger.info(f"Adding evaluation data batch-wise is not compatible with json-formatted SQuAD files. " f"Converting json to jsonl to: {jsonl_filename}") squad_json_to_jsonl(filename, jsonl_filename) self.add_eval_data(jsonl_filename, doc_index, label_index, batch_size, open_domain=open_domain) elif file_path.suffix == ".jsonl": for docs, labels in eval_data_from_jsonl(filename, batch_size, max_docs=max_docs, preprocessor=preprocessor, open_domain=open_domain): if docs: self.write_documents(docs, index=doc_index) if labels: self.write_labels(labels, index=label_index) else: logger.error("File needs to be in json or jsonl format.") def delete_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None): pass @abstractmethod def delete_documents(self, index: Optional[str] = None, ids: Optional[List[str]] = None, filters: Optional[Dict[str, List[str]]] = None): pass @abstractmethod def delete_labels(self, index: Optional[str] = None, ids: Optional[List[str]] = None, filters: Optional[Dict[str, List[str]]] = None): pass def run(self, documents: List[dict], index: Optional[str] = None): self.write_documents(documents=documents, index=index) return {}, "output_1" @abstractmethod def get_documents_by_id(self, ids: List[str], index: Optional[str] = None, batch_size: int = 10_000) -> List[Document]: pass
Apache License 2.0
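The dedup logic is an order-preserving keep-the-first-occurrence-of-each-id pass. A plain-Python sketch of the same idea over dictionaries rather than Haystack Document objects, with made-up ids:

def drop_duplicates(documents):
    seen_ids = set()
    unique = []
    for doc in documents:
        if doc["id"] in seen_ids:
            print("Duplicate document id %r skipped" % doc["id"])
            continue
        seen_ids.add(doc["id"])
        unique.append(doc)
    return unique

docs = [{"id": "a"}, {"id": "b"}, {"id": "a"}]
print([d["id"] for d in drop_duplicates(docs)])   # ['a', 'b']

Using a set for the seen ids makes the membership check O(1); the original keeps them in a list, which is fine for small batches but quadratic for large ones.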
solid-mechanics/matplotlib-4-abaqus
matplotlib/mlab.py
PCA.__init__
python
def __init__(self, a): n, m = a.shape if n<m: raise RuntimeError('we assume data in a is organized with numrows>numcols') self.numrows, self.numcols = n, m self.mu = a.mean(axis=0) self.sigma = a.std(axis=0) a = self.center(a) self.a = a U, s, Vh = np.linalg.svd(a, full_matrices=False) Y = np.dot(Vh, a.T).T vars = s**2/float(len(s)) self.fracs = vars/vars.sum() self.Wt = Vh self.Y = Y
Compute the SVD of a and store data for PCA. Use project to project the data onto a reduced set of dimensions Inputs: *a*: a numobservations x numdims array Attrs: *a* a centered unit sigma version of input a *numrows*, *numcols*: the dimensions of a *mu* : a numdims array of means of a *sigma* : a numdims array of standard deviations of a *fracs* : the proportion of variance of each of the principal components *Wt* : the weight vector for projecting a numdims point or array into PCA space *Y* : a projected into PCA space The factor loadings are in the Wt factor, i.e. the factor loadings for the 1st principal component are given by Wt[0]
https://github.com/solid-mechanics/matplotlib-4-abaqus/blob/1117070fb824210c217c564ac36e69112ce70501/matplotlib/mlab.py#L815-L867
from __future__ import division, print_function import csv, warnings, copy, os, operator from itertools import izip import numpy as np ma = np.ma from matplotlib import verbose import matplotlib.cbook as cbook from matplotlib import docstring from matplotlib.path import Path def logspace(xmin,xmax,N): return np.exp(np.linspace(np.log(xmin), np.log(xmax), N)) def _norm(x): return np.sqrt(np.dot(x,x)) def window_hanning(x): return np.hanning(len(x))*x def window_none(x): return x def detrend(x, key=None): if key is None or key=='constant': return detrend_mean(x) elif key=='linear': return detrend_linear(x) def demean(x, axis=0): x = np.asarray(x) if axis == 0 or axis is None or x.ndim <= 1: return x - x.mean(axis) ind = [slice(None)] * x.ndim ind[axis] = np.newaxis return x - x.mean(axis)[ind] def detrend_mean(x): return x - x.mean() def detrend_none(x): return x def detrend_linear(y): x = np.arange(len(y), dtype=np.float_) C = np.cov(x, y, bias=1) b = C[0,1]/C[0,0] a = y.mean() - b*x.mean() return y - (b*x + a) def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None): same_data = y is x x = np.asarray(x) if not same_data: y = np.asarray(y) else: y = x if len(x)<NFFT: n = len(x) x = np.resize(x, (NFFT,)) x[n:] = 0 if not same_data and len(y)<NFFT: n = len(y) y = np.resize(y, (NFFT,)) y[n:] = 0 if pad_to is None: pad_to = NFFT if scale_by_freq is None: scale_by_freq = True if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided': numFreqs = pad_to scaling_factor = 1. elif sides in ('default', 'onesided'): numFreqs = pad_to//2 + 1 scaling_factor = 2. else: raise ValueError("sides must be one of: 'default', 'onesided', or " "'twosided'") if cbook.iterable(window): assert(len(window) == NFFT) windowVals = window else: windowVals = window(np.ones((NFFT,), x.dtype)) step = NFFT - noverlap ind = np.arange(0, len(x) - NFFT + 1, step) n = len(ind) Pxy = np.zeros((numFreqs, n), np.complex_) for i in range(n): thisX = x[ind[i]:ind[i]+NFFT] thisX = windowVals * detrend(thisX) fx = np.fft.fft(thisX, n=pad_to) if same_data: fy = fx else: thisY = y[ind[i]:ind[i]+NFFT] thisY = windowVals * detrend(thisY) fy = np.fft.fft(thisY, n=pad_to) Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs] Pxy /= (np.abs(windowVals)**2).sum() Pxy[1:-1] *= scaling_factor if scale_by_freq: Pxy /= Fs t = 1./Fs * (ind + NFFT / 2.) freqs = float(Fs) / pad_to * np.arange(numFreqs) if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided': freqs = np.concatenate((freqs[numFreqs//2:] - Fs, freqs[:numFreqs//2])) Pxy = np.concatenate((Pxy[numFreqs//2:, :], Pxy[:numFreqs//2, :]), 0) return Pxy, freqs, t docstring.interpd.update(PSD=cbook.dedent(""" Keyword arguments: *NFFT*: integer The number of data points used in each block for the FFT. Must be even; a power 2 is most efficient. The default value is 256. This should *NOT* be used to get zero padding, or the scaling of the result will be incorrect. Use *pad_to* for this instead. *Fs*: scalar The sampling frequency (samples per time unit). It is used to calculate the Fourier frequencies, freqs, in cycles per time unit. The default value is 2. *detrend*: callable The function applied to each segment before fft-ing, designed to remove the mean or linear trend. Unlike in MATLAB, where the *detrend* parameter is a vector, in matplotlib is it a function. 
The :mod:`~matplotlib.pylab` module defines :func:`~matplotlib.pylab.detrend_none`, :func:`~matplotlib.pylab.detrend_mean`, and :func:`~matplotlib.pylab.detrend_linear`, but you can use a custom function as well. *window*: callable or ndarray A function or a vector of length *NFFT*. To create window vectors see :func:`window_hanning`, :func:`window_none`, :func:`numpy.blackman`, :func:`numpy.hamming`, :func:`numpy.bartlett`, :func:`scipy.signal`, :func:`scipy.signal.get_window`, etc. The default is :func:`window_hanning`. If a function is passed as the argument, it must take a data segment as an argument and return the windowed version of the segment. *pad_to*: integer The number of points to which the data segment is padded when performing the FFT. This can be different from *NFFT*, which specifies the number of data points used. While not increasing the actual resolution of the psd (the minimum distance between resolvable peaks), this can give more points in the plot, allowing for more detail. This corresponds to the *n* parameter in the call to fft(). The default is None, which sets *pad_to* equal to *NFFT* *sides*: [ 'default' | 'onesided' | 'twosided' ] Specifies which sides of the PSD to return. Default gives the default behavior, which returns one-sided for real data and both for complex data. 'onesided' forces the return of a one-sided PSD, while 'twosided' forces two-sided. *scale_by_freq*: boolean Specifies whether the resulting density values should be scaled by the scaling frequency, which gives density in units of Hz^-1. This allows for integration over the returned frequency values. The default is True for MATLAB compatibility. """)) @docstring.dedent_interpd def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None): Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) return Pxx.real,freqs @docstring.dedent_interpd def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None): Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) if len(Pxy.shape) == 2 and Pxy.shape[1]>1: Pxy = Pxy.mean(axis=1) return Pxy, freqs @docstring.dedent_interpd def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=128, pad_to=None, sides='default', scale_by_freq=None): assert(NFFT > noverlap) Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Pxx = Pxx.real return Pxx, freqs, t _coh_error = """Coherence is calculated by averaging over *NFFT* length segments. Your signal is too short for your choice of *NFFT*. 
""" @docstring.dedent_interpd def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None): if len(x)<2*NFFT: raise ValueError(_coh_error) Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy) Cxy.shape = (len(f),) return Cxy, f def donothing_callback(*args): pass def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, preferSpeedOverMemory=True, progressCallback=donothing_callback, returnPxx=False): numRows, numCols = X.shape if numRows < NFFT: tmp = X X = np.zeros( (NFFT, numCols), X.dtype) X[:numRows,:] = tmp del tmp numRows, numCols = X.shape allColumns = set() for i,j in ij: allColumns.add(i); allColumns.add(j) Ncols = len(allColumns) if np.iscomplexobj(X): numFreqs = NFFT else: numFreqs = NFFT//2+1 if cbook.iterable(window): assert(len(window) == NFFT) windowVals = window else: windowVals = window(np.ones(NFFT, X.dtype)) ind = range(0, numRows-NFFT+1, NFFT-noverlap) numSlices = len(ind) FFTSlices = {} FFTConjSlices = {} Pxx = {} slices = range(numSlices) normVal = np.linalg.norm(windowVals)**2 for iCol in allColumns: progressCallback(i/Ncols, 'Cacheing FFTs') Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_) for iSlice in slices: thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol] thisSlice = windowVals*detrend(thisSlice) Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs] FFTSlices[iCol] = Slices if preferSpeedOverMemory: FFTConjSlices[iCol] = np.conjugate(Slices) Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal) del Slices, ind, windowVals Cxy = {} Phase = {} count = 0 N = len(ij) for i,j in ij: count +=1 if count%10==0: progressCallback(count/N, 'Computing coherences') if preferSpeedOverMemory: Pxy = FFTSlices[i] * FFTConjSlices[j] else: Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j]) if numSlices>1: Pxy = np.mean(Pxy, axis=0) Pxy /= normVal Cxy[i,j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j]) Phase[i,j] = np.arctan2(Pxy.imag, Pxy.real) freqs = Fs/NFFT*np.arange(numFreqs) if returnPxx: return Cxy, Phase, freqs, Pxx else: return Cxy, Phase, freqs def entropy(y, bins): n, bins = np.histogram(y, bins) n = n.astype(np.float_) n = np.take(n, np.nonzero(n)[0]) p = np.divide(n, len(y)) delta = bins[1] - bins[0] S = -1.0 * np.sum(p * np.log(p)) + np.log(delta) return S def normpdf(x, *args): mu, sigma = args return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2) def levypdf(x, gamma, alpha): N = len(x) if N % 2 != 0: raise ValueError('x must be an event length array; try\n' + 'x = np.linspace(minx, maxx, N), where N is even') dx = x[1] - x[0] f = 1/(N*dx)*np.arange(-N / 2, N / 2, np.float_) ind = np.concatenate([np.arange(N / 2, N, int), np.arange(0, N / 2, int)]) df = f[1] - f[0] cfl = np.exp(-gamma * np.absolute(2 * np.pi * f) ** alpha) px = np.fft.fft(np.take(cfl, ind) * df).astype(np.float_) return np.take(px, ind) def find(condition): res, = np.nonzero(np.ravel(condition)) return res def longest_contiguous_ones(x): x = np.ravel(x) if len(x)==0: return np.array([]) ind = (x==0).nonzero()[0] if len(ind)==0: return np.arange(len(x)) if len(ind)==len(x): return np.array([]) y = np.zeros( (len(x)+2,), x.dtype) y[1:-1] = x dif = np.diff(y) up = (dif == 1).nonzero()[0]; dn = (dif == -1).nonzero()[0]; i = 
(dn-up == max(dn - up)).nonzero()[0][0] ind = np.arange(up[i], dn[i]) return ind def longest_ones(x): return longest_contiguous_ones(x) def prepca(P, frac=0): warnings.warn('This function is deprecated -- see class PCA instead') U,s,v = np.linalg.svd(P) varEach = s**2/P.shape[1] totVar = varEach.sum() fracVar = varEach/totVar ind = slice((fracVar>=frac).sum()) Trans = U[:,ind].transpose() Pcomponents = np.dot(Trans,P) return Pcomponents, Trans, fracVar[ind] class PCA:
MIT License
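A self-contained NumPy sketch of the same SVD-based PCA steps (center and scale, SVD, project) on random stand-in data; attribute names follow the class above, and the centering assumes center(a) subtracts mu and divides by sigma, as the stored attributes suggest.

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(100, 3))              # numobservations x numdims, rows >= cols

mu, sigma = a.mean(axis=0), a.std(axis=0)
a_centered = (a - mu) / sigma              # centered, unit-sigma version of a
U, s, Vh = np.linalg.svd(a_centered, full_matrices=False)
Y = np.dot(Vh, a_centered.T).T             # data projected into PCA space
variances = s**2 / float(len(s))
fracs = variances / variances.sum()        # fraction of variance per component

print(fracs.sum(), Y.shape)                # fractions sum to ~1.0; Y has shape (100, 3)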
codeeu/coding-events
api/processors.py
get_created_events
python
def get_created_events( creator, limit=None, order=None, country_code=None, past=False): events = Event.objects.filter(creator=creator) if not past: events = events.filter(end_date__gte=datetime.datetime.now()) if country_code: events = events.filter(country=country_code) if order: events = events.order_by(order) if limit: events = events[:limit] return events
Select all future or past events which were created by the user, and optionally limit and/or order them
https://github.com/codeeu/coding-events/blob/9e34a937c4503f8fb6e406142bd4ef43075ad6dc/api/processors.py#L133-L152
import datetime from django.db.models import Q from models.events import Event from models.events import EventTheme from models.events import EventAudience def get_all_events(): return Event.objects.all() def get_event_by_id(event_id): event = Event.objects.get(id=event_id) return event def get_approved_events(limit=None, order=None, country_code=None, past=False): events = Event.objects.filter(status='APPROVED') if not past: events = events.filter(end_date__gte=datetime.datetime.now()) if country_code: events = events.filter(country=country_code) if order: events = events.order_by(order) if limit: events = events[:limit] return events def get_event_detail(id): events = Event.objects.filter(id=id) return events def get_pending_events(limit=None, order=None, country_code=None, past=False): events = Event.objects.filter(status='PENDING') if not past: events = events.filter(end_date__gte=datetime.datetime.now()) if country_code: events = events.filter(country=country_code) if order: events = events.order_by(order) if limit: events = events[:limit] return events def get_next_or_previous(event, country_code=None, past=False, direction=True): next_event = None events = Event.objects.filter(status='PENDING') if direction: events = events.filter(pk__gt=event.pk).order_by("pk") else: events = events.filter(pk__lt=event.pk).order_by("-pk") if not past: events = events.filter(end_date__gte=datetime.datetime.now()) if country_code: events = events.filter(country=country_code) if events: next_event = events[0] return next_event def get_filtered_events( search_filter=None, country_filter=None, theme_filter=None, audience_filter=None, past_events=None): filter_args = () filter_kwargs = {'status': 'APPROVED'} if not past_events: filter_kwargs['end_date__gte'] = datetime.datetime.now() if search_filter: filter_args = ( Q( title__icontains=search_filter) | Q( description__icontains=search_filter) | Q( tags__name__icontains=search_filter) | Q( organizer__icontains=search_filter) | Q( location__icontains=search_filter),) if country_filter and country_filter not in [custom_country[ 0] for custom_country in Event.CUSTOM_COUNTRY_ENTRIES]: filter_kwargs['country'] = country_filter if theme_filter: filter_kwargs['theme__in'] = theme_filter if audience_filter: audience = EventAudience.objects.filter() filter_kwargs['audience__in'] = audience_filter if len(filter_args) > 0: events = Event.objects.filter(*filter_args, **filter_kwargs).distinct() else: events = Event.objects.filter(**filter_kwargs).distinct() return events
MIT License
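The queryset pattern above (start broad, then narrow step by step only when a filter was supplied) can be tried without Django using plain lists; the event dicts and field names below are invented for illustration, and the order argument is omitted to keep the sketch short.

import datetime

def created_events(events, creator, limit=None, country_code=None, past=False):
    results = [e for e in events if e["creator"] == creator]
    if not past:
        now = datetime.datetime.now()
        results = [e for e in results if e["end_date"] >= now]
    if country_code:
        results = [e for e in results if e["country"] == country_code]
    if limit:
        results = results[:limit]
    return results

events = [
    {"creator": "ada", "country": "SI", "end_date": datetime.datetime(2999, 1, 1)},
    {"creator": "bob", "country": "SI", "end_date": datetime.datetime(2999, 1, 1)},
]
print(len(created_events(events, "ada", country_code="SI")))   # 1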
ooici/pyon
prototype/sci_data/stream_defs.py
ctd_stream_packet
python
def ctd_stream_packet(stream_id = None, c=None, t=None, p=None , lat=None, lon=None, height=None, time=None, create_hdf=True): stream_def = ctd_stream_definition(stream_id=stream_id) psc = PointSupplementConstructor(point_definition=stream_def, stream_id=stream_id) assert time assert lat assert lon def listify(input): if hasattr(input, '__iter__'): return input else: return [input,] length = False if c is not None: c = listify(c) if length: assert length == len(c), 'Conductivity input is the wrong length' else: length = len(c) if t is not None: t = listify(t) if length: assert length == len(t), 'Temperature input is the wrong length' else: length = len(t) if p is not None: p = listify(p) if length: assert length == len(p), 'Pressure input is the wrong length' else: length = len(p) if lat is not None: lat = listify(lat) if length: if 1 == len(lat): lat = lat*length else: length = len(lat) else: raise RuntimeError('Did not specify longitude') if lon is not None: lon = listify(lon) if length: if 1 == len(lon): lon = lon*length else: length = len(lon) else: raise RuntimeError('Did not specify longitude') if height is not None: height = listify(height) if length: if 1 == len(height): height = height*length else: length = len(height) else: height = [0,]*length if time is not None: time = listify(time) if length: if 1 == len(time): time = time*length else: length = len(time) else: raise RuntimeError('Did not specify time') for idx, time_val in enumerate(time): p_id = psc.add_point(time=time_val, location=(lon[idx], lat[idx], height[idx])) if t: psc.add_scalar_point_coverage(point_id=p_id, coverage_id='temperature', value=t[idx]) if p: psc.add_scalar_point_coverage(point_id=p_id, coverage_id='pressure', value=p[idx]) if c: psc.add_scalar_point_coverage(point_id=p_id, coverage_id='conductivity', value=c[idx]) granule = psc.close_stream_granule() if not create_hdf: data_stream = granule.identifiables['data_stream'] data_stream.values = '' return granule
### ### This method is deprecated! ### This is a simple interface for creating a packet of ctd data for a given stream defined by the method above. The string names of content are tightly coupled to the method above. To send actual data you must have hdf5, numpy and h5py installed. @brief build a demo ctd data packet as an ion object. All value arguments are optional, but any argument provided should have the same length. @param stream_id should be the same as the stream_id for the definition - the stream resource ID @param c is a list, tuple or ndarray of conductivity values @param t is a list, tuple or ndarray of temperature values @param p is a list, tuple or ndarray of pressure values @param lat is a list, tuple or ndarray of latitude values @param lon is a list, tuple or ndarray of longitude values @param time is a list, tuple or ndarray of time values
https://github.com/ooici/pyon/blob/122c629290d27f32f2f41dafd5c12469295e8acf/prototype/sci_data/stream_defs.py#L429-L556
from interface.objects import StreamDefinitionContainer, StreamGranuleContainer from interface.objects import DataStream, ElementType, DataRecord, Vector, Coverage, RangeSet, Domain, Mesh, CoordinateAxis, Encoding from interface.objects import UnitReferenceProperty, NilValue, AllowedValues from interface.objects import CategoryElement, CountElement from interface.objects import QuantityRangeElement from prototype.hdf.hdf_codec import HDFEncoder from prototype.sci_data.constructor_apis import StreamDefinitionConstructor, PointSupplementConstructor import hashlib from pyon.util.log import log def SBE37_CDM_stream_definition(): sdc = StreamDefinitionConstructor( description='Parsed conductivity temperature and pressure observations from a Seabird 37 CTD', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name='temperature', field_definition="urn:x-ogc:def:phenomenon:OGC:temperature", field_units_code='Cel', field_range=[-10.0, 100.0] ) sdc.define_coverage( field_name = 'conductivity', field_definition = "urn:x-ogc:def:phenomenon:OGC:conductivity", field_units_code = 'mS/cm', field_range = [0.0, 100.0] ) sdc.define_coverage( field_name = 'pressure', field_definition = "urn:x-ogc:def:phenomenon:OGC:pressure", field_units_code = 'dBar', field_range = [0.0, 1000.0] ) return sdc.close_structure() def USGS_stream_definition(): sdc = StreamDefinitionConstructor( description='CONNECTICUT RIVER AT THOMPSONVILLE CT (01184000) - Daily Value', nil_value=-9999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4326' ) sdc.define_coverage( field_name='water_height', field_definition="urn:x-ogc:def:phenomenon:OGC:water_height", field_units_code='ft_us' ) sdc.define_coverage( field_name = 'water_temperature', field_definition = "urn:x-ogc:def:phenomenon:OGC:water_temperature", field_units_code = 'Cel' ) sdc.define_coverage( field_name = 'streamflow', field_definition = "urn:x-ogc:def:phenomenon:OGC:streamflow", field_units_code = 'cft_i/s' ) return sdc.close_structure() def ctd_stream_definition(stream_id = None): sd = SBE37_CDM_stream_definition() sd.stream_resource_id = stream_id or '' return sd def SBE37_RAW_stream_definition(): stream_definition = StreamDefinitionContainer( data_stream_id='data_stream', ) ident = stream_definition.identifiables ident['data_stream'] = DataStream( description='Raw data from an SBE 37', element_count_id='record_count', element_type_id='element_type', encoding_id='stream_encoding', values=None ) ident['stream_encoding'] = Encoding( encoding_type='raw', compression=None, sha1=None ) ident['record_count'] = CountElement( value=0, optional=False, updatable=True ) ident['element_type'] = ElementType( updatable=False, optional=False, definition='Raw SBE 37 data' ) return stream_definition def L0_conductivity_stream_definition(): sdc = StreamDefinitionConstructor( 
description='L0 Conductivity observations from a Seabird 37 CTD', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name = 'conductivity', field_definition = "urn:x-ogc:def:phenomenon:OGC:conductivity", field_units_code = 'mS/cm', field_range = [0.0, 100.0] ) return sdc.close_structure() def L0_temperature_stream_definition(): sdc = StreamDefinitionConstructor( description='L0 Temperature observations from a Seabird 37 CTD', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name='temperature', field_definition="urn:x-ogc:def:phenomenon:OGC:temperature", field_units_code='Cel', field_range=[-10.0, 100.0] ) return sdc.close_structure() def L0_pressure_stream_definition(): sdc = StreamDefinitionConstructor( description='L0 Pressure observations from a Seabird 37 CTD', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name = 'pressure', field_definition = "urn:x-ogc:def:phenomenon:OGC:pressure", field_units_code = 'dBar', field_range = [0.0, 1000.0] ) return sdc.close_structure() def L1_conductivity_stream_definition(): sdc = StreamDefinitionConstructor( description='L1 Conductivity observations from a Seabird 37 CTD', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name = 'conductivity', field_definition = "urn:x-ogc:def:phenomenon:OGC:conductivity", field_units_code = 'mS/cm', field_range = [0.0, 100.0] ) return sdc.close_structure() def L1_temperature_stream_definition(): sdc = StreamDefinitionConstructor( description='L1 Temperature observations from a Seabird 37 CTD', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name='temperature', 
field_definition="urn:x-ogc:def:phenomenon:OGC:temperature", field_units_code='Cel', field_range=[-10.0, 100.0] ) return sdc.close_structure() def L1_pressure_stream_definition(): sdc = StreamDefinitionConstructor( description='L1 Pressure observations from a Seabird 37 CTD', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name = 'pressure', field_definition = "urn:x-ogc:def:phenomenon:OGC:pressure", field_units_code = 'dBar', field_range = [0.0, 1000.0] ) return sdc.close_structure() def L2_practical_salinity_stream_definition(): sdc = StreamDefinitionConstructor( description='L2 practical salinity observations', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name = 'salinity', field_definition = "urn:x-ogc:def:phenomenon:OGC:practical_salinity", field_units_code = '', field_range = [0.1, 40.0] ) return sdc.close_structure() def L2_density_stream_definition(): sdc = StreamDefinitionConstructor( description='L2 practical salinity observations', nil_value=-999.99, encoding='hdf5' ) sdc.define_temporal_coordinates( reference_frame='http://www.opengis.net/def/trs/OGC/0/GPS', definition='http://www.opengis.net/def/property/OGC/0/SamplingTime', reference_time='1970-01-01T00:00:00Z', unit_code='s' ) sdc.define_geospatial_coordinates( definition="http://www.opengis.net/def/property/OGC/0/PlatformLocation", reference_frame='urn:ogc:def:crs:EPSG::4979' ) sdc.define_coverage( field_name = 'density', field_definition = "urn:x-ogc:def:phenomenon:OGC:density", field_units_code = 'kg/m3', field_range = [1000.0, 1050.0] ) return sdc.close_structure()
BSD 2-Clause Simplified License
alexsavio/hansel
hansel/crumb.py
Crumb.get_first
python
def get_first(self, arg_name: str) -> str:
    return self[arg_name][0]
Return the first existing value of the crumb argument `arg_name`.

Parameters
----------
arg_name: str

Returns
-------
values: str
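Crumb.__getitem__ is not shown in this excerpt, but get_first relies on self[arg_name] returning the existing values for that argument. A minimal usage sketch, assuming a hypothetical on-disk layout and an invented argument name:

# Hypothetical usage of Crumb.get_first; the paths and the 'subject_id' argument are made up.
from hansel import Crumb

# One open crumb argument; matching paths might be /data/raw/subj_001, /data/raw/subj_002, ...
crumb = Crumb('/data/raw/{subject_id}')

# self[arg_name] is expected to return the values found on disk for that argument,
# so get_first simply picks the first one.
first_subject = crumb.get_first('subject_id')
print(first_subject)  # e.g. 'subj_001'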
https://github.com/alexsavio/hansel/blob/dd20150b372488767a35eab42f486780e76fc455/hansel/crumb.py#L797-L807
import os import pathlib import re from collections import OrderedDict from copy import deepcopy from typing import List, Dict, Iterator, Tuple from hansel._utils import ( _first_txt, _build_path, _arg_names, _find_arg_depth, _check, _depth_names, _depth_names_regexes, _has_arg, _is_crumb_arg, _split_exists, _split, _touch, has_crumbs, is_valid, ) from hansel.utils import ( list_subpaths, fnmatch_filter, regex_match_filter, CrumbArgsSequence, CrumbArgsSequences) class Crumb(object): def __init__(self, crumb_path: str, ignore_list: List[str] = None, regex: str = 'fnmatch'): self._path = _check(crumb_path) self._argval = {} self._re_method = regex self._re_args = None if ignore_list is None: ignore_list = [] self._ignore = ignore_list self._update() def _update(self): self._set_match_function() def _set_match_function(self): if self._re_method == 'fnmatch': self._match_filter = fnmatch_filter elif self._re_method == 're': self._match_filter = regex_match_filter elif self._re_method == 're.ignorecase': self._match_filter = regex_match_filter self._re_args = (re.IGNORECASE,) else: raise ValueError('Expected regex method value to be "fnmatch", "re" or "re.ignorecase"' ', got {}.'.format(self._re_method)) def is_valid(self, crumb_path: str = None) -> bool: if crumb_path is None: crumb_path = self.path return is_valid(crumb_path) @property def patterns(self): return {arg: rgx for _, (arg, rgx) in _depth_names_regexes(self._path) if rgx} def set_pattern(self, arg_name: str, arg_regex: str): if not _has_arg(self.path, arg_name): raise KeyError('Crumb argument {} is not present in {}.'.format(arg_name, self)) self._path = _build_path( self._path, arg_values={}, with_regex=True, regexes={arg_name: arg_regex} ) def set_patterns(self, **kwargs): for arg, pat in kwargs.items(): self.set_pattern(arg, pat) def clear_pattern(self, arg_name: str): self.set_pattern(arg_name, '') def clear(self, arg_name: str): del self._argval[arg_name] @property def arg_values(self) -> Dict[str, str]: return self._argval @property def path(self) -> str: return _build_path(self._path, arg_values=self.arg_values, with_regex=True) @path.setter def path(self, value: str): self._path = value self._update() def has_crumbs(self, crumb_path: str = None) -> bool: if crumb_path is None: crumb_path = self.path return has_crumbs(crumb_path) def _open_arg_items(self): for depth, arg_name in _depth_names(self.path): yield depth, arg_name def _last_open_arg(self): open_args = list(self._open_arg_items()) if not open_args: return None, None for dpth, arg in reversed(open_args): return dpth, arg def _first_open_arg(self): for dpth, arg in self._open_arg_items(): return dpth, arg def _is_first_open_arg(self, arg_name: str) -> bool: return arg_name == self._first_open_arg()[1] def has_set(self, arg_name: str) -> bool: return arg_name not in set(self.open_args()) def open_args(self) -> Iterator[str]: for _, arg_name in self._open_arg_items(): yield arg_name def all_args(self) -> Iterator[str]: yield from _arg_names(self._path) def copy(self, crumb: 'Crumb' = None) -> 'Crumb': if crumb is None: crumb = self if isinstance(crumb, Crumb): nucr = Crumb( crumb._path, ignore_list=crumb._ignore, regex=crumb._re_method ) nucr._argval = deepcopy(crumb._argval) return nucr if isinstance(crumb, str): return Crumb.from_path(crumb) raise TypeError("Expected a Crumb or a str to copy, " "got {}.".format(type(crumb))) def isabs(self) -> bool: subp = _first_txt(self.path) return os.path.isabs(subp) def abspath(self, first_is_basedir: bool = False) -> 'Crumb': 
nucr = self.copy() if not nucr.isabs(): nucr._path = self._abspath(first_is_basedir=first_is_basedir) return nucr def _abspath(self, first_is_basedir: bool = False) -> str: if os.path.isabs(self._path): return self._path splits = self._path.split(os.path.sep) basedir = [os.path.abspath(os.path.curdir)] if _is_crumb_arg(splits[0]): if first_is_basedir: splits.pop(0) basedir.extend(splits) return os.path.sep.join(basedir) def split(self) -> Tuple[str, str]: return _split(self.path) @classmethod def from_path(cls, crumb_path: [str, 'Crumb', pathlib.Path]) -> 'Crumb': if isinstance(crumb_path, Crumb): return crumb_path.copy() elif isinstance(crumb_path, pathlib.Path): return cls(str(crumb_path)) elif isinstance(crumb_path, str): return cls(crumb_path) else: raise TypeError("Expected a `val` to be a `str`, got {}.".format(type(crumb_path))) def _arg_values(self, arg_name: str, arg_values: CrumbArgsSequence = None) -> CrumbArgsSequences: if arg_values is None and not self._is_first_open_arg(arg_name): raise ValueError("Cannot get the list of values for {} if" " the previous arguments are not filled" " in `paths`.".format(arg_name)) path = self.path dpth, arg_name, arg_regex = _find_arg_depth(path, arg_name) splt = path.split(os.path.sep) if dpth == len(splt) - 1: just_dirs = False else: just_dirs = True if arg_values is None: vals = self._arg_values_from_base( basedir=os.path.sep.join(splt[:dpth]), arg_name=arg_name, arg_regex=arg_regex, just_dirs=just_dirs ) else: vals = self._extend_arg_values( arg_values=arg_values, arg_name=arg_name, arg_regex=arg_regex, just_dirs=just_dirs ) return vals def _extend_arg_values( self, arg_values: CrumbArgsSequence, arg_name: str, arg_regex: str, just_dirs: bool ) -> CrumbArgsSequences: path = self.path vals = [] for aval in arg_values: nupath = _split(_build_path(path, arg_values=dict(aval)))[0] if not os.path.exists(nupath): continue paths = list_subpaths( nupath, just_dirs=just_dirs, ignore=self._ignore, pattern=arg_regex, filter_func=self._match_filter ) vals.extend([aval + [(arg_name, sp)] for sp in paths]) return vals def _arg_values_from_base(self, basedir: str, arg_name: str, arg_regex: str, just_dirs: bool) -> CrumbArgsSequences: vals = list_subpaths(basedir, just_dirs=just_dirs, ignore=self._ignore, pattern=arg_regex, filter_func=self._match_filter, filter_args=self._re_args) return [[(arg_name, val)] for val in vals] def _check_args(self, arg_names: Iterator[str], self_args: Iterator[str]): anames = set(arg_names) aself = set(self_args) if not anames and not aself: return if not aself or aself is None: raise AttributeError('This Crumb has no remaining arguments: {}.'.format(self.path)) if not anames.issubset(aself): raise KeyError("Expected `arg_names` to be a subset of ({})," " got {}.".format(list(aself), anames)) def _check_open_args(self, arg_names: Iterator[str]): self._check_args(arg_names, self_args=self.open_args()) def update(self, **kwargs) -> 'Crumb': self._check_args(list(kwargs.keys()), self_args=self.all_args()) for k, v in kwargs.items(): if not isinstance(v, str): raise ValueError("Expected a string for the value of argument {}, " "got {}.".format(k, v)) path = _build_path(self.path, arg_values=kwargs, with_regex=True) _check(path) self._argval.update(**kwargs) return self def replace(self, **kwargs) -> 'Crumb': cr = self.copy(self) return cr.update(**kwargs) def _arg_parents(self, arg_name: str) -> Dict[str, int]: if arg_name not in self.arg_values: path = self.path else: path = self._path dpth, _, _ = _find_arg_depth(path, 
arg_name) return OrderedDict([(arg, idx) for idx, arg in self._open_arg_items() if idx <= dpth]) def _args_open_parents(self, arg_names: Iterator[str]) -> Iterator[str]: started = False arg_dads = [] for an in reversed(list(self.open_args())): if an in arg_names: started = True else: if started: arg_dads.append(an) return list(reversed(arg_dads)) def values_map(self, arg_name: str = '', check_exists: bool = False) -> CrumbArgsSequences: if not arg_name: _, arg_name = self._last_open_arg() if arg_name is None: return [list(self.arg_values.items())] arg_deps = self._arg_parents(arg_name) values_map = None if arg_deps: for arg in arg_deps: values_map = self._arg_values(arg, values_map) elif arg_name in self.arg_values: values_map = [[(arg_name, self.arg_values[arg_name])]] else: raise ValueError('Could not build a map of values with ' 'argument {}.'.format(arg_name)) return sorted(self._build_and_check(values_map) if check_exists else values_map) def _build_and_check(self, values_map: CrumbArgsSequences) -> CrumbArgsSequences: paths = list(self.build_paths(values_map, make_crumbs=True)) yield from (args for args, path in zip(values_map, paths) if path.exists()) def build_paths( self, values_map: CrumbArgsSequences, make_crumbs: bool = True ) -> [Iterator[str], Iterator['Crumb']]: if make_crumbs: yield from (self.replace(**dict(val)) for val in values_map) else: yield from (_build_path(self.path, arg_values=dict(val)) for val in values_map) def ls( self, arg_name: str = '', fullpath: bool = True, make_crumbs: bool = True, check_exists: bool = True ) -> [Iterator[str], Iterator['Crumb']]: if not arg_name and not fullpath: raise ValueError('Expecting an `arg_name` if `fullpath` is False.') if not arg_name: _, arg_name = self._last_open_arg() if arg_name is None: arg_name = '' arg_regex = False if arg_name: _, (arg_name, arg_regex) = tuple(_depth_names_regexes('{' + arg_name + '}'))[0] if arg_regex: old_regex = self.patterns.get(arg_name, None) self.set_pattern(arg_name=arg_name, arg_regex=arg_regex) self._check_args([arg_name], self.all_args()) self._check_ls_params(make_crumbs, fullpath) if not fullpath: make_crumbs = fullpath values_map = self.values_map(arg_name, check_exists=check_exists) if fullpath: paths = self.build_paths(values_map, make_crumbs=make_crumbs) else: paths = (dict(val)[arg_name] for val in values_map) if arg_regex: self.clear_pattern(arg_name=arg_name) if old_regex is not None: self.set_pattern(arg_name=arg_name, arg_regex=old_regex) return sorted(paths) def _check_ls_params(self, make_crumbs: bool, fullpath: bool): if not self.isabs() and self.path.startswith('{'): raise NotImplementedError("Cannot list paths that start with an argument. 
" "If this is a relative path, use the `abspath()` " "member function.") def touch(self, exist_ok: bool = True) -> str: return _touch(self.path, exist_ok=exist_ok) def joinpath(self, suffix: str) -> 'Crumb': return Crumb(os.path.join(self.path, suffix)) def exists(self) -> bool: if not has_crumbs(self.path): return os.path.exists(str(self)) or os.path.islink(str(self)) if not os.path.exists(self.split()[0]): return False _, last = self._last_open_arg() paths = self.ls(last, fullpath=True, make_crumbs=False, check_exists=False) return any((_split_exists(lp) for lp in paths)) def has_files(self) -> bool: if not os.path.exists(self.split()[0]): return False _, last = self._last_open_arg() paths = self.ls( last, fullpath=True, make_crumbs=True, check_exists=True ) return any((os.path.isfile(str(lp)) for lp in paths)) def unfold(self) -> [List['Crumb'], Iterator[pathlib.Path]]: if list(self.open_args()): return self.ls( self._last_open_arg()[1], fullpath=True, make_crumbs=True, check_exists=True ) return [self]
Apache License 2.0
lindsayyoung/python-class
lesson-6/restaurant.py
Restaurant.take_order
python
def take_order(self, item, money):
    return money
Take an order for item, given a certain amount of money.

If the item doesn't exist, print "Sorry, we don't have <ITEM>".
If the cost of the item is greater than the money provided, print
"Sorry, <ITEM> costs <COST>" (e.g. "Sorry, PB&J costs 1.99").
If the order is successful, record the order and return the person's
change (money - price) to them.
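The function above is left as a stub for the lesson. The sketch below is one way to implement the behaviour the docstring describes, assuming self.menu maps item names to prices (as the __init__ in the context below suggests), that "record the order" means appending to self.items_ordered, and that the original money is handed back when the order fails:

# Sketch of a possible implementation; not the lesson's official solution.
def take_order(self, item, money):
    if item not in self.menu:                       # unknown item
        print("Sorry, we don't have %s" % item)
        return money
    price = self.menu[item]
    if price > money:                               # not enough money for this item
        print("Sorry, %s costs %s" % (item, price))
        return money
    self.items_ordered.append(item)                 # record the successful order
    return money - price                            # hand back the change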
https://github.com/lindsayyoung/python-class/blob/63821805dab826ab4c8b030c3cbdd6835435a3ce/lesson-6/restaurant.py#L24-L36
class Restaurant(object):

    def __init__(self, name, menu):
        self.name = name
        self.menu = menu
        self.items_ordered = []

    def print_menu(self):
        pass  # body omitted in this excerpt

    def has_item(self, item):
        return False
MIT License
someengineering/cloudkeeper
plugins/gcp/cloudkeeper_plugin_gcp/collector.py
GCPProjectCollector.post_process_machine_type
python
def post_process_machine_type(resource: GCPMachineType, graph: Graph): if ( resource.region(graph).name == "undefined" and resource.zone(graph).name == "undefined" ): log.error( f"Resource {resource.rtdname} has no region or zone" " - removing from graph" ) graph.remove_node(resource) return log.debug( ( f"Looking up pricing for {resource.rtdname}" f" in {resource.location(graph).rtdname}" ) ) skus = [] for sku in graph.searchall( { "kind": "gcp_service_sku", "resource_family": "Compute", "usage_type": "OnDemand", } ): if sku.resource_group not in ( "G1Small", "F1Micro", "N1Standard", "CPU", "RAM", ): continue if ("custom" not in resource.name and "Custom" in sku.name) or ( "custom" in resource.name and "Custom" not in sku.name ): continue if resource.region(graph).name not in sku.geo_taxonomy_regions: continue if resource.name == "g1-small" and sku.resource_group != "G1Small": continue if resource.name == "f1-micro" and sku.resource_group != "F1Micro": continue if ( resource.name.startswith("n2d-") and not sku.name.startswith("N2D AMD ") ) or ( not resource.name.startswith("n2d-") and sku.name.startswith("N2D AMD ") ): continue if (resource.name.startswith("n2-") and not sku.name.startswith("N2 ")) or ( not resource.name.startswith("n2-") and sku.name.startswith("N2 ") ): continue if ( resource.name.startswith("m1-") and not sku.name.startswith("Memory-optimized ") ) or ( not resource.name.startswith("m1-") and sku.name.startswith("Memory-optimized ") ): continue if ( resource.name.startswith("c2-") and not sku.name.startswith("Compute optimized ") ) or ( not resource.name.startswith("c2-") and sku.name.startswith("Compute optimized ") ): continue if resource.name.startswith("n1-") and sku.resource_group != "N1Standard": continue if "custom" not in resource.name: if ( resource.name.startswith("e2-") and not sku.name.startswith("E2 ") ) or ( not resource.name.startswith("e2-") and sku.name.startswith("E2 ") ): continue skus.append(sku) if len(skus) == 1 and resource.name in ("g1-small", "f1-micro"): graph.add_edge(skus[0], resource) resource.ondemand_cost = skus[0].usage_unit_nanos / 1000000000 elif len(skus) == 2 or (len(skus) == 3 and "custom" in resource.name): ondemand_cost = 0 cores = resource.instance_cores ram = resource.instance_memory extended_memory_pricing = False if "custom" in resource.name: extended_memory_pricing = ram / cores > 8 for sku in skus: if "Core" in sku.name: ondemand_cost += sku.usage_unit_nanos * cores elif "Ram" in sku.name: if (extended_memory_pricing and "Extended" not in sku.name) or ( not extended_memory_pricing and "Extended" in sku.name ): continue ondemand_cost += sku.usage_unit_nanos * ram graph.add_edge(sku, resource) if ondemand_cost > 0: resource.ondemand_cost = ondemand_cost / 1000000000 else: log.debug( ( f"Unable to determine SKU(s) for {resource}:" f" {[sku.dname for sku in skus]}" ) )
Adds edges from machine type to SKUs and determines on-demand pricing.

TODO: Implement GPU types
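For the predefined machine types the function ultimately adds a per-core SKU price and a per-GB-of-RAM SKU price and converts from nano-units. A small sketch of that arithmetic with invented prices (the real values come from GCPServiceSKU.usage_unit_nanos):

# Illustration of the on-demand price calculation with made-up SKU prices (nano-units per unit).
core_sku_nanos = 31_611_000   # hypothetical price per vCPU
ram_sku_nanos = 4_237_000     # hypothetical price per GB of RAM

cores = 4.0                   # e.g. a 4-vCPU machine type
ram_gb = 16.0

# Same arithmetic as post_process_machine_type: sum the nano prices, then divide by 1e9.
ondemand_cost = (core_sku_nanos * cores + ram_sku_nanos * ram_gb) / 1_000_000_000
print(ondemand_cost)          # 0.194236 with these made-up numbers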
https://github.com/someengineering/cloudkeeper/blob/2bfbf2d815b80806943fc0a2e749cfaeb1ae7d81/plugins/gcp/cloudkeeper_plugin_gcp/collector.py#L976-L1090
import cklib.logging import socket from pprint import pformat from retrying import retry from typing import Callable, List, Dict, Type, Union from cklib.baseresources import BaseResource from cklib.graph import Graph from cklib.args import ArgumentParser from cklib.utils import except_log_and_pass from prometheus_client import Summary from .resources import ( GCPProject, GCPQuota, GCPRegion, GCPZone, GCPDiskType, GCPDisk, GCPInstance, GCPMachineType, GCPNetwork, GCPSubnetwork, GCPTargetVPNGateway, GCPVPNGateway, GCPVPNTunnel, GCPRouter, GCPRoute, GCPSecurityPolicy, GCPSnapshot, GCPSSLCertificate, GCPNetworkEndpointGroup, GCPGlobalNetworkEndpointGroup, GCPInstanceGroup, GCPInstanceGroupManager, GCPAutoscaler, GCPHealthCheck, GCPHTTPHealthCheck, GCPHTTPSHealthCheck, GCPUrlMap, GCPTargetPool, GCPTargetHttpProxy, GCPTargetHttpsProxy, GCPTargetSslProxy, GCPTargetTcpProxy, GCPTargetGrpcProxy, GCPTargetInstance, GCPBackendService, GCPForwardingRule, GCPGlobalForwardingRule, GCPBucket, GCPDatabase, GCPService, GCPServiceSKU, GCPInstanceTemplate, ) from .utils import ( Credentials, gcp_client, gcp_resource, paginate, iso2datetime, get_result_data, common_resource_kwargs, retry_on_error, ) log = cklib.logging.getLogger("cloudkeeper." + __name__) metrics_collect_regions = Summary( "cloudkeeper_plugin_gcp_collect_regions_seconds", "Time it took the collect_regions() method", ) metrics_collect_zones = Summary( "cloudkeeper_plugin_gcp_collect_zones_seconds", "Time it took the collect_zones() method", ) metrics_collect_disks = Summary( "cloudkeeper_plugin_gcp_collect_disks_seconds", "Time it took the collect_disks() method", ) metrics_collect_instances = Summary( "cloudkeeper_plugin_gcp_collect_instances_seconds", "Time it took the collect_instances() method", ) metrics_collect_disk_types = Summary( "cloudkeeper_plugin_gcp_collect_disk_types_seconds", "Time it took the collect_disk_types() method", ) metrics_collect_networks = Summary( "cloudkeeper_plugin_gcp_collect_networks_seconds", "Time it took the collect_networks() method", ) metrics_collect_subnetworks = Summary( "cloudkeeper_plugin_gcp_collect_subnetworks_seconds", "Time it took the collect_subnetworks() method", ) metrics_collect_vpn_tunnels = Summary( "cloudkeeper_plugin_gcp_collect_vpn_tunnels_seconds", "Time it took the collect_vpn_tunnels() method", ) metrics_collect_vpn_gateways = Summary( "cloudkeeper_plugin_gcp_collect_vpn_gateways_seconds", "Time it took the collect_vpn_gateways() method", ) metrics_collect_target_vpn_gateways = Summary( "cloudkeeper_plugin_gcp_collect_target_vpn_gateways_seconds", "Time it took the collect_target_vpn_gateways() method", ) metrics_collect_routers = Summary( "cloudkeeper_plugin_gcp_collect_routers_seconds", "Time it took the collect_routers() method", ) metrics_collect_routes = Summary( "cloudkeeper_plugin_gcp_collect_routes_seconds", "Time it took the collect_routes() method", ) metrics_collect_security_policies = Summary( "cloudkeeper_plugin_gcp_collect_security_policies_seconds", "Time it took the collect_security_policies() method", ) metrics_collect_snapshots = Summary( "cloudkeeper_plugin_gcp_collect_snapshots_seconds", "Time it took the collect_snapshots() method", ) metrics_collect_ssl_certificates = Summary( "cloudkeeper_plugin_gcp_collect_ssl_certificates_seconds", "Time it took the collect_ssl_certificates() method", ) metrics_collect_machine_types = Summary( "cloudkeeper_plugin_gcp_collect_machine_types_seconds", "Time it took the collect_machine_types() method", ) 
metrics_collect_network_endpoint_groups = Summary( "cloudkeeper_plugin_gcp_collect_network_endpoint_groups_seconds", "Time it took the collect_network_endpoint_groups() method", ) metrics_collect_global_network_endpoint_groups = Summary( "cloudkeeper_plugin_gcp_collect_global_network_endpoint_groups_seconds", "Time it took the collect_global_network_endpoint_groups() method", ) metrics_collect_instance_groups = Summary( "cloudkeeper_plugin_gcp_collect_instance_groups_seconds", "Time it took the collect_instance_groups() method", ) metrics_collect_instance_group_managers = Summary( "cloudkeeper_plugin_gcp_collect_instance_group_managers_seconds", "Time it took the collect_instance_group_managers() method", ) metrics_collect_autoscalers = Summary( "cloudkeeper_plugin_gcp_collect_autoscalers_seconds", "Time it took the collect_autoscalers() method", ) metrics_collect_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_health_checks_seconds", "Time it took the collect_health_checks() method", ) metrics_collect_http_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_http_health_checks_seconds", "Time it took the collect_http_health_checks() method", ) metrics_collect_https_health_checks = Summary( "cloudkeeper_plugin_gcp_collect_https_health_checks_seconds", "Time it took the collect_https_health_checks() method", ) metrics_collect_url_maps = Summary( "cloudkeeper_plugin_gcp_collect_url_maps_seconds", "Time it took the collect_url_maps() method", ) metrics_collect_target_pools = Summary( "cloudkeeper_plugin_gcp_collect_target_pools_seconds", "Time it took the collect_target_pools() method", ) metrics_collect_target_instances = Summary( "cloudkeeper_plugin_gcp_collect_target_instances_seconds", "Time it took the collect_target_instances() method", ) metrics_collect_target_http_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_http_proxies_seconds", "Time it took the collect_target_http_proxies() method", ) metrics_collect_target_https_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_https_proxies_seconds", "Time it took the collect_target_https_proxies() method", ) metrics_collect_target_ssl_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_ssl_proxies_seconds", "Time it took the collect_target_ssl_proxies() method", ) metrics_collect_target_tcp_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_tcp_proxies_seconds", "Time it took the collect_target_tcp_proxies() method", ) metrics_collect_target_grpc_proxies = Summary( "cloudkeeper_plugin_gcp_collect_target_grpc_proxies_seconds", "Time it took the collect_target_grpc_proxies() method", ) metrics_collect_backend_services = Summary( "cloudkeeper_plugin_gcp_collect_backend_services_seconds", "Time it took the collect_backend_services() method", ) metrics_collect_forwarding_rules = Summary( "cloudkeeper_plugin_gcp_collect_forwarding_rules_seconds", "Time it took the collect_forwarding_rules() method", ) metrics_collect_global_forwarding_rules = Summary( "cloudkeeper_plugin_gcp_collect_global_forwarding_rules_seconds", "Time it took the collect_global_forwarding_rules() method", ) metrics_collect_buckets = Summary( "cloudkeeper_plugin_gcp_collect_buckets_seconds", "Time it took the collect_buckets() method", ) metrics_collect_databases = Summary( "cloudkeeper_plugin_gcp_collect_databases_seconds", "Time it took the collect_databases() method", ) metrics_collect_services = Summary( "cloudkeeper_plugin_gcp_collect_services_seconds", "Time it took the collect_services() method", ) 
metrics_collect_instance_templates = Summary( "cloudkeeper_plugin_gcp_collect_instance_templates_seconds", "Time it took the collect_instance_templates() method", ) class GCPProjectCollector: def __init__(self, project: GCPProject) -> None: self.project = project self.credentials = Credentials.get(self.project.id) self.graph = Graph(root=self.project) self.mandatory_collectors = { "regions": self.collect_regions, "zones": self.collect_zones, } self.global_collectors = { "services": self.collect_services, "networks": self.collect_networks, "subnetworks": self.collect_subnetworks, "routers": self.collect_routers, "routes": self.collect_routes, "health_checks": self.collect_health_checks, "http_health_checks": self.collect_http_health_checks, "https_health_checks": self.collect_https_health_checks, "machine_types": self.collect_machine_types, "instances": self.collect_instances, "disk_types": self.collect_disk_types, "disks": self.collect_disks, "target_vpn_gateways": self.collect_target_vpn_gateways, "vpn_gateways": self.collect_vpn_gateways, "vpn_tunnels": self.collect_vpn_tunnels, "security_policies": self.collect_security_policies, "snapshots": self.collect_snapshots, "ssl_certificates": self.collect_ssl_certificates, "network_endpoint_groups": self.collect_network_endpoint_groups, "instance_groups": self.collect_instance_groups, "instance_group_managers": self.collect_instance_group_managers, "autoscalers": self.collect_autoscalers, "backend_services": self.collect_backend_services, "url_maps": self.collect_url_maps, "target_pools": self.collect_target_pools, "target_instances": self.collect_target_instances, "target_http_proxies": self.collect_target_http_proxies, "target_https_proxies": self.collect_target_https_proxies, "target_ssl_proxies": self.collect_target_ssl_proxies, "target_tcp_proxies": self.collect_target_tcp_proxies, "target_grpc_proxies": self.collect_target_grpc_proxies, "forwarding_rules": self.collect_forwarding_rules, "buckets": self.collect_buckets, "databases": self.collect_databases, "instance_templates": self.collect_instance_templates, } self.region_collectors = {} self.zone_collectors = {} self.all_collectors = dict(self.mandatory_collectors) self.all_collectors.update(self.global_collectors) self.all_collectors.update(self.region_collectors) self.all_collectors.update(self.zone_collectors) self.collector_set = set(self.all_collectors.keys()) @retry( stop_max_attempt_number=10, wait_exponential_multiplier=3000, wait_exponential_max=300000, retry_on_exception=retry_on_error, ) def collect(self) -> None: self.graph = Graph(root=self.project) collectors = set(self.collector_set) if len(ArgumentParser.args.gcp_collect) > 0: collectors = set(ArgumentParser.args.gcp_collect).intersection(collectors) if len(ArgumentParser.args.gcp_no_collect) > 0: collectors = collectors - set(ArgumentParser.args.gcp_no_collect) collectors = collectors.union(set(self.mandatory_collectors.keys())) log.debug( ( f"Running the following collectors in {self.project.rtdname}:" f" {', '.join(collectors)}" ) ) for collector_name, collector in self.mandatory_collectors.items(): if collector_name in collectors: log.info(f"Collecting {collector_name} in {self.project.rtdname}") collector() regions = [r for r in self.graph.nodes if isinstance(r, GCPRegion)] zones = [z for z in self.graph.nodes if isinstance(z, GCPZone)] log.debug(f"Found {len(zones)} zones in {len(regions)} regions") for collector_name, collector in self.global_collectors.items(): if collector_name in collectors: 
log.info(f"Collecting {collector_name} in {self.project.rtdname}") collector() for region in regions: for collector_name, collector in self.region_collectors.items(): if collector_name in collectors: log.info( ( f"Collecting {collector_name} in {region.rtdname}" f" {self.project.rtdname}" ) ) collector(region=region) for zone in zones: for collector_name, collector in self.zone_collectors.items(): if collector_name in collectors: log.info( ( f"Collecting {collector_name} in {zone.rtdname}" f" {self.project.rtdname}" ) ) collector(zone=zone) def default_attributes( self, result: Dict, attr_map: Dict = None, search_map: Dict = None ) -> Dict: kwargs = { "id": result.get("id", result.get("name", result.get("selfLink"))), "tags": result.get("labels", {}), "name": result.get("name"), "ctime": iso2datetime(result.get("creationTimestamp")), "link": result.get("selfLink"), "label_fingerprint": result.get("labelFingerprint"), "_account": self.project, } if attr_map is not None: for map_to, map_from in attr_map.items(): data = get_result_data(result, map_from) if data is None: log.debug(f"Attribute {map_from} not in result") continue log.debug(f"Found attribute {map_to}: {pformat(data)}") kwargs[map_to] = data default_search_map = {"_region": ["link", "region"], "_zone": ["link", "zone"]} search_results = {} if search_map is None: search_map = dict(default_search_map) else: updated_search_map = dict(default_search_map) updated_search_map.update(search_map) search_map = updated_search_map for map_to, search_data in search_map.items(): search_attr = search_data[0] search_value_name = search_data[1] search_value = get_result_data(result, search_value_name) if search_value is None: continue if isinstance(search_value, List): search_values = search_value else: search_values = [search_value] for search_value in search_values: search_result = self.graph.search_first(search_attr, search_value) if search_result: if map_to not in search_results: search_results[map_to] = [] search_results[map_to].append(search_result) if ( map_to not in kwargs and map_to in search_results and not str(map_to).startswith("__") ): search_result = search_results[map_to] if len(search_result) == 1: kwargs[map_to] = search_result[0] else: kwargs[map_to] = list(search_result) if ( "_zone" in kwargs and "_region" not in kwargs and isinstance(kwargs["_zone"], BaseResource) ): region = kwargs["_zone"].region(self.graph) if region: kwargs["_region"] = region if "_region" in search_map.keys() and "_region" not in search_results: search_results["_region"] = region return kwargs, search_results @except_log_and_pass(do_raise=socket.timeout) def collect_something( self, resource_class: Type[BaseResource], paginate_method_name: str = "list", paginate_items_name: str = "items", parent_resource: Union[BaseResource, str] = None, attr_map: Dict = None, search_map: Dict = None, successors: List = None, predecessors: List = None, client_kwargs: Dict = None, resource_kwargs: Dict = None, paginate_subitems_name: str = None, post_process: Callable = None, dump_resource: bool = False, ) -> List: client_method_name = resource_class("", {})._client_method default_resource_args = resource_class("", {}).resource_args log.debug(f"Collecting {client_method_name}") if paginate_subitems_name is None: paginate_subitems_name = client_method_name if client_kwargs is None: client_kwargs = {} if resource_kwargs is None: resource_kwargs = {} if successors is None: successors = [] if predecessors is None: predecessors = [] parent_map = {True: predecessors, False: 
successors} if "project" in default_resource_args: resource_kwargs["project"] = self.project.id client = gcp_client( resource_class.client, resource_class.api_version, credentials=self.credentials, **client_kwargs, ) gcp_resource = getattr(client, client_method_name) if not callable(gcp_resource): raise RuntimeError(f"No method {client_method_name} on client {client}") for resource in paginate( gcp_resource=gcp_resource(), method_name=paginate_method_name, items_name=paginate_items_name, subitems_name=paginate_subitems_name, **resource_kwargs, ): kwargs, search_results = self.default_attributes( resource, attr_map=attr_map, search_map=search_map ) r = resource_class(**kwargs) pr = parent_resource log.debug(f"Adding {r.rtdname} to the graph") if dump_resource: log.debug(f"Resource Dump: {pformat(resource)}") if isinstance(pr, str) and pr in search_results: pr = search_results[parent_resource][0] log.debug(f"Parent resource for {r.rtdname} set to {pr.rtdname}") if not isinstance(pr, BaseResource): pr = kwargs.get("_zone", kwargs.get("_region", self.graph.root)) log.debug( f"Parent resource for {r.rtdname} automatically set to {pr.rtdname}" ) self.graph.add_resource(pr, r) for is_parent, sr_names in parent_map.items(): for sr_name in sr_names: if sr_name in search_results: srs = search_results[sr_name] for sr in srs: if is_parent: src = sr dst = r else: src = r dst = sr self.graph.add_edge(src, dst) else: if sr_name in search_map: graph_search = search_map[sr_name] attr = graph_search[0] value_name = graph_search[1] if value_name in resource: value = resource[value_name] if isinstance(value, List): values = value for value in values: r.add_deferred_connection( attr, value, is_parent ) elif isinstance(value, str): r.add_deferred_connection(attr, value, is_parent) else: log.error( ( "Unable to add deferred connection for" f" value {value} of type {type(value)}" ) ) else: log.error(f"Key {sr_name} is missing in search_map") if callable(post_process): post_process(r, self.graph) @metrics_collect_regions.time() def collect_regions(self) -> List: def post_process(resource: GCPRegion, graph: Graph): for quota in resource._quotas: if set(["metric", "limit", "usage"]) == set(quota.keys()): q = GCPQuota( quota["metric"], {}, quota=quota["limit"], usage=quota["usage"], _region=resource.region(), _account=resource.account(), _zone=resource.zone(), ctime=resource.ctime, ) graph.add_resource(resource, q) resource._quotas = None self.collect_something( resource_class=GCPRegion, attr_map={"region_status": "status", "quotas": "quotas"}, post_process=post_process, ) @metrics_collect_zones.time() def collect_zones(self) -> List: self.collect_something( resource_class=GCPZone, ) @metrics_collect_disks.time() def collect_disks(self): def volume_status(result): status = result.get("status") num_users = len(result.get("users", [])) if num_users == 0 and status == "READY": status = "AVAILABLE" return status self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPDisk, search_map={ "volume_type": ["link", "type"], "__users": ["link", "users"], }, attr_map={ "volume_size": (lambda r: int(r.get("sizeGb"))), "volume_status": volume_status, "last_attach_timestamp": ( lambda r: iso2datetime( r.get("lastAttachTimestamp", r["creationTimestamp"]) ) ), "last_detach_timestamp": ( lambda r: iso2datetime( r.get("lastDetachTimestamp", r["creationTimestamp"]) ) ), }, predecessors=["volume_type"], successors=["__users"], ) @metrics_collect_instances.time() def collect_instances(self): def 
post_process(resource: GCPInstance, graph: Graph): if resource.instance_type == "" and "custom" in resource._machine_type_link: log.debug(f"Fetching custom instance type for {resource.rtdname}") machine_type = GCPMachineType( resource._machine_type_link.split("/")[-1], {}, _zone=resource.zone(graph), _account=resource.account(graph), link=resource._machine_type_link, ) resource._machine_type_link = None kwargs = {str(machine_type._get_identifier): machine_type.name} common_kwargs = common_resource_kwargs(machine_type) kwargs.update(common_kwargs) gr = gcp_resource(machine_type) request = gr.get(**kwargs) result = request.execute() machine_type.id = result.get("id") machine_type.instance_cores = float(result.get("guestCpus")) machine_type.instance_memory = float(result.get("memoryMb", 0) / 1024) graph.add_resource(machine_type.zone(graph), machine_type) graph.add_edge(machine_type, resource) self.post_process_machine_type(machine_type, graph) resource._machine_type = machine_type self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPInstance, post_process=post_process, search_map={ "__network": [ "link", ( lambda r: next(iter(r.get("networkInterfaces", [])), {}).get( "network" ) ), ], "__subnetwork": [ "link", ( lambda r: next(iter(r.get("networkInterfaces", [])), {}).get( "subnetwork" ) ), ], "machine_type": ["link", "machineType"], }, attr_map={ "instance_status": "status", "machine_type_link": "machineType", }, predecessors=["__network", "__subnetwork", "machine_type"], ) @metrics_collect_disk_types.time() def collect_disk_types(self): def post_process(resource: GCPDiskType, graph: Graph): if ( resource.region(graph).name == "undefined" and resource.zone(graph).name == "undefined" ): log.error( f"Resource {resource.rtdname} has no region or zone" " - removing from graph" ) graph.remove_node(resource) return log.debug( ( f"Looking up pricing for {resource.rtdname}" f" in {resource.location(graph).rtdname}" ) ) resource_group_map = { "local-ssd": "LocalSSD", "pd-balanced": "SSD", "pd-ssd": "SSD", "pd-standard": "PDStandard", } resource_group = resource_group_map.get(resource.name) skus = [] for sku in graph.searchall( { "kind": "gcp_service_sku", "resource_family": "Storage", "usage_type": "OnDemand", "resource_group": resource_group, } ): try: if resource.region(graph).name not in sku.geo_taxonomy_regions: continue except TypeError: log.exception( f"Problem accessing geo_taxonomy_regions in {sku.rtdname}:" f" {type(sku.geo_taxonomy_regions)}" ) if resource.name == "pd-balanced" and not sku.name.startswith( "Balanced" ): continue if resource.name != "pd-balanced" and sku.name.startswith("Balanced"): continue if resource.zone(graph).name != "undefined" and sku.name.startswith( "Regional" ): continue if ( resource.zone(graph).name == "undefined" and not sku.name.startswith("Regional") and resource.name != "pd-balanced" ): continue skus.append(sku) if len(skus) == 1: graph.add_edge(skus[0], resource) resource.ondemand_cost = skus[0].usage_unit_nanos / 1000000000 else: log.debug(f"Unable to determine SKU for {resource}") self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPDiskType, post_process=post_process, ) @metrics_collect_networks.time() def collect_networks(self): self.collect_something( resource_class=GCPNetwork, ) @metrics_collect_subnetworks.time() def collect_subnetworks(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPSubnetwork, search_map={ "__network": ["link", "network"], }, 
predecessors=["__network"], ) @metrics_collect_vpn_tunnels.time() def collect_vpn_tunnels(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPVPNTunnel, search_map={ "__vpn_gateway": ["link", "vpnGateway"], "__target_vpn_gateway": ["link", "targetVpnGateway"], }, successors=["__target_vpn_gateway", "__vpn_gateway"], ) @metrics_collect_vpn_gateways.time() def collect_vpn_gateways(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPVPNGateway, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_target_vpn_gateways.time() def collect_target_vpn_gateways(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPTargetVPNGateway, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_routers.time() def collect_routers(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPRouter, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_routes.time() def collect_routes(self): self.collect_something( resource_class=GCPRoute, search_map={ "__network": ["link", "network"], }, predecessors=["__network"], ) @metrics_collect_security_policies.time() def collect_security_policies(self): self.collect_something(resource_class=GCPSecurityPolicy) @metrics_collect_snapshots.time() def collect_snapshots(self): self.collect_something( resource_class=GCPSnapshot, search_map={ "volume_id": ["link", "sourceDisk"], }, attr_map={ "volume_size": lambda r: int(r.get("diskSizeGb", -1)), "storage_bytes": lambda r: int(r.get("storageBytes", -1)), }, ) @metrics_collect_ssl_certificates.time() def collect_ssl_certificates(self): self.collect_something( paginate_method_name="aggregatedList", resource_class=GCPSSLCertificate, attr_map={ "ctime": lambda r: iso2datetime(r.get("creationTimestamp")), "expires": lambda r: iso2datetime(r.get("expireTime")), "description": "description", "certificate": "certificate", "certificate_type": "type", "certificate_managed": "managed", "subject_alternative_names": "subjectAlternativeNames", }, search_map={ "__user": ["link", "user"], }, successors=["__user"], ) @staticmethod
Apache License 2.0
codyberenson/pgma-modernized
Fagalicious.bundle/Contents/Libraries/Shared/google_translate/translator.py
GoogleTranslator._try_make_request
python
def _try_make_request(self, request_url):
    current_attempt = 1

    while current_attempt <= self._retries:
        self.logger.info("Attempt no. %s out of %s", current_attempt, self._retries)

        proxy = self._get_proxy()

        try:
            return make_request(request_url, self._get_headers(), proxy, self._timeout, self._simulate)
        except (urllib2.HTTPError, urllib2.URLError, IOError) as error:
            self.logger.error("Error %s", error)

            if proxy is not None and self._proxy_selector is not None:
                self._proxy_selector.remove_proxy(proxy)

        if current_attempt < self._retries:
            self._wait()

        current_attempt += 1

    self.logger.warning("Maximum attempts reached")
    return None
Try to make the request and return the reply or None.
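Stripped of the translator's internals, the retry-and-fall-back pattern in this method looks roughly like the stand-alone sketch below (stand-in names and a plain time.sleep instead of the class's _wait; this is not the library's API):

# Generic sketch of the same retry loop with stand-in dependencies.
import logging
import time

log = logging.getLogger(__name__)

def try_make_request(make_request, request_url, retries=5, wait_time=1.0):
    for attempt in range(1, retries + 1):
        log.info("Attempt no. %s out of %s", attempt, retries)
        try:
            return make_request(request_url)       # success: return the reply immediately
        except IOError as error:                   # HTTPError/URLError are IOError subclasses
            log.error("Error %s", error)
        if attempt < retries:
            time.sleep(wait_time)                  # back off before the next attempt
    log.warning("Maximum attempts reached")
    return None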
https://github.com/codyberenson/pgma-modernized/blob/f8d8771809d6b619f9e356e55bfcbfa7368c4abd/Fagalicious.bundle/Contents/Libraries/Shared/google_translate/translator.py#L595-L619
from __future__ import unicode_literals import re import sys import copy import json import random import os.path import urllib2 import logging from time import sleep try: from twodict import TwoWayOrderedDict except ImportError as error: print error sys.exit(1) from .tk_generator import get_tk from .cache import Cache from .utils import ( display_unicode_item, get_absolute_path, load_from_file, quote_unicode, make_request, parse_reply ) _JSON_REPLACE_PATTERN = re.compile(r",(?=,)|\[,+") class GoogleTranslator(object): LANGUAGES_DB = os.path.join(get_absolute_path(__file__), "data", "languages") WAIT_MIN = 1.0 WAIT_MAX = 20.0 MAX_INPUT_SIZE = 1980 MAX_CACHE_SIZE = 500 CACHE_VALID_PERIOD = 604800.0 DEFAULT_USERAGENT = "Mozilla/5.0" DOMAIN_NAME = "translate.google.ru" REQUEST_URL = "{prot}://{host}/translate_a/single?{params}" def __init__(self, proxy_selector=None, ua_selector=None, simulate=False, https=True, timeout=10.0, retries=5, wait_time=1.0, random_wait=False, encoding="UTF-8"): self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__) self._https = https self._timeout = timeout self._retries = retries self._encoding = encoding self._simulate = simulate self._wait_time = wait_time self._random_wait = random_wait self._ua_selector = ua_selector self._proxy_selector = proxy_selector self.cache = Cache(self.MAX_CACHE_SIZE, self.CACHE_VALID_PERIOD) if https: referer = "https://{0}/".format(self.DOMAIN_NAME) else: referer = "http://{0}/".format(self.DOMAIN_NAME) self._default_headers = { "Accept": "*/*", "Referer": referer, "Connection": "close", "Host": self.DOMAIN_NAME, "Accept-Language": "en-US,en;q=0.8", "Accept-Encoding": "gzip, deflate, sdch", "User-Agent": self.DEFAULT_USERAGENT } self._user_specific_headers = {} self._lang_dict = TwoWayOrderedDict(auto="auto") for line in load_from_file(self.LANGUAGES_DB): lang, code = line.split(':') self._lang_dict[lang.lower()] = code def add_header(self, header): if (not isinstance(header, tuple) or len(header) != 2 or not isinstance(header[0], basestring) or not isinstance(header[1], basestring)): raise ValueError(header) self._user_specific_headers[header[0]] = header[1] def word_exists(self, word, lang="en", output="text"): lang = self._validate_language(lang, allow_auto=False) return self._do_work(self._word_exists, word, lang, output) def romanize(self, word, src_lang="auto", output="text"): src_lang = self._validate_language(src_lang) return self._do_work(self._romanize, word, src_lang, output) def detect(self, word, output="text"): return self._do_work(self._detect, word, output) def translate(self, word, dst_lang, src_lang="auto", additional=False, output="text"): if not isinstance(additional, bool): raise ValueError(additional) src_lang = self._validate_language(src_lang) dst_lang = self._validate_language(dst_lang, allow_auto=False) return self._do_work(self._translate, word, dst_lang, src_lang, additional, output) def get_info_dict(self, word, dst_lang, src_lang="auto", output="text"): src_lang = self._validate_language(src_lang) dst_lang = self._validate_language(dst_lang, allow_auto=False) return self._do_work(self._get_info, word, dst_lang, src_lang, output) def _do_work(self, func, *args): if isinstance(args[0], list): results_list = [] for word in args[0]: self._validate_word(word) results_list.append(func(word, *args[1:-1])) if word != args[0][-1]: self._wait() return self._convert_output(args[0], results_list, args[-1]) self._validate_word(args[0]) return self._convert_output(args[0], func(*args[:-1]), 
args[-1]) def _convert_output(self, word, output, output_type): if output_type not in ["text", "dict", "json"]: raise ValueError(output_type) if output_type == "text": return output if isinstance(word, list): temp_dict = dict(zip(word, output)) else: temp_dict = {word: output} if output_type == "dict": return temp_dict return json.dumps(temp_dict, indent=4, ensure_ascii=False) def _validate_word(self, word): if not isinstance(word, basestring): self.logger.critical("Invalid word: %r", word) raise ValueError(word) quoted_text_length = len(quote_unicode(word, self._encoding)) self.logger.debug("Max input size: (%s)", self.MAX_INPUT_SIZE) self.logger.debug("Unquoted text size: (%s)", len(word)) self.logger.debug("Quoted text size: (%s)", quoted_text_length) if quoted_text_length >= self.MAX_INPUT_SIZE: self.logger.critical("Input size over limit: %r", word) raise ValueError("Input size exceeds the maximum value") def _validate_language(self, language, allow_auto=True): if not isinstance(language, basestring): self.logger.critical("Invalid language: %r", language) raise ValueError(language) if '-' in language: language = language[:2].lower() + language[2:].upper() else: language = language.lower() if language == "auto" and not allow_auto: self.logger.critical("'auto' language is not allowed") raise ValueError(language) if not language in self._lang_dict: self.logger.critical("Could not locate language: %r", language) raise ValueError(language) if language in self._lang_dict.keys(): language = self._lang_dict[language] return language def _word_exists(self, word, src_lang): self.logger.info("Searching for word: %r", word) dst_lang = src_lang for lang_code in self._lang_dict.values(): if lang_code != "auto" and lang_code != src_lang: dst_lang = lang_code break self.logger.debug("src_lang: %r dst_lang: %r", src_lang, dst_lang) data = self._get_info(word, dst_lang, src_lang) if data is not None: if data["original_text"] == data["translation"]: return False return not data["has_typo"] return None def _romanize(self, word, src_lang): self.logger.info("Romanizing word: %r", word) self.logger.debug("src_lang: %r", src_lang) data = self._get_info(word, "en", src_lang) if data is not None: if not data["has_typo"]: return data["romanization"] return None def _detect(self, word): self.logger.info("Detecting language for word: %r", word) data = self._get_info(word, "en", "auto") if data is not None: try: return self._lang_dict[data["src_lang"]] except KeyError: return data["src_lang"] return None def _translate(self, word, dst_lang, src_lang, additional): self.logger.info("Translating word: %r", word) self.logger.debug("src_lang: %r dst_lang: %r", src_lang, dst_lang) if dst_lang == src_lang: return word data = self._get_info(word, dst_lang, src_lang) if data is not None: if not data["has_typo"]: if additional: return data["extra"] return data["translation"] return None def _get_info(self, word, dst_lang, src_lang): cache_key = word + dst_lang + src_lang info_dict = self.cache.get(cache_key) if info_dict is not None: return copy.deepcopy(info_dict) reply = self._try_make_request(self._build_request(word, dst_lang, src_lang)) if reply is not None: self.logger.info("Parsing reply") data = parse_reply(reply, self._encoding) self.logger.debug("Raw data: %s\n", data) json_data = self._string_to_json(data) self.logger.debug("JSON data: %s\n", display_unicode_item(json_data)) info_dict = self._extract_data(json_data) self.logger.debug("Extracted data: %s\n", display_unicode_item(info_dict)) 
self.cache.add(cache_key, info_dict) return copy.deepcopy(info_dict) return None def _build_request(self, word, dst_lang, src_lang): self.logger.info("Building the request") params = [ ["client", "t"], ["sl", src_lang], ["tl", dst_lang], ["dt", "t"], ["dt", "bd"], ["dt", "rm"], ["dt", "qca"], ["ie", self._encoding], ["oe", self._encoding], ["tk", get_tk(word)], ["q", quote_unicode(word, self._encoding)] ] if self._https: protocol = "https" else: protocol = "http" params_str = "" for param in params: params_str += "{key}={value}&".format(key=param[0], value=param[1]) params_str = params_str[:-1] request_url = self.REQUEST_URL.format(prot=protocol, host=self.DOMAIN_NAME, params=params_str) self.logger.debug("Request url: %r", request_url) self.logger.debug("URL size: (%s)", len(request_url)) return request_url
MIT License
aiven/aiven-client
aiven/client/cli.py
AivenCLI.account__authentication_method__list
python
def account__authentication_method__list(self):
    methods = self.client.get_account_authentication_methods(self.args.account_id)
    self.print_response(methods, json=self.args.json, table_layout=AUTHENTICATION_METHOD_COLUMNS)
Lists all current account authentication methods
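Given how the help command in the context below derives command names (double underscores become spaces, single underscores become dashes), this handler presumably backs a CLI command along the lines of avn account authentication-method list. A rough equivalent of the handler body outside the CLI, with placeholder values and default constructor arguments assumed:

# Sketch only: list an account's authentication methods with the underlying client.
from aiven.client.client import AivenClient

aiven = AivenClient(base_url="https://api.aiven.io")
aiven.set_auth_token("<token>")  # a real auth token is required

methods = aiven.get_account_authentication_methods("a3f1example")  # hypothetical account id
for method in methods:
    # keys are assumed to follow AUTHENTICATION_METHOD_COLUMNS above
    print(method.get("authentication_method_id"), method.get("authentication_method_name"))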
https://github.com/aiven/aiven-client/blob/d0514693762cf4279a5ceed2bd0cd8ba64ba94b5/aiven/client/cli.py#L737-L740
from . import argx, client from aiven.client import envdefault from aiven.client.cliarg import arg from aiven.client.common import UNDEFINED from aiven.client.connection_info.common import Store from aiven.client.connection_info.kafka import KafkaCertificateConnectionInfo, KafkaSASLConnectionInfo from aiven.client.connection_info.pg import PGConnectionInfo from aiven.client.speller import suggest from collections import Counter from datetime import datetime, timedelta, timezone from decimal import Decimal from typing import Callable, List, Optional from urllib.parse import urlparse import errno import getpass import json as jsonlib import os import re import requests import subprocess import sys import time AUTHENTICATION_METHOD_COLUMNS = [ "account_id", "authentication_method_enabled", "authentication_method_id", "authentication_method_name", "authentication_method_type", "state", "create_time", "update_time", ] PLUGINS = [] EOL_ADVANCE_WARNING_TIME = timedelta(weeks=26) try: from aiven.admin import plugin as adminplugin PLUGINS.append(adminplugin) except ImportError: pass def convert_str_to_value(schema, str_value): if "string" in schema["type"]: return str_value elif "integer" in schema["type"]: return int(str_value, 0) elif "number" in schema["type"]: return float(str_value) elif "boolean" in schema["type"]: values = { "1": True, "0": False, "true": True, "false": False, } try: return values[str_value] except KeyError as ex: raise argx.UserError( "Invalid boolean value {!r}: expected one of {}".format(str_value, ", ".join(values)) ) from ex elif "array" in schema["type"]: return [convert_str_to_value(schema["items"], val) for val in str_value.split(",")] elif "null" in schema["type"] and str_value is None: return None else: raise argx.UserError("Support for option value type(s) {!r} not implemented".format(schema["type"])) tag_key_re = re.compile(r"[\w\-]+") tag_value_re = re.compile(r"[\w\-,. ]*") def parse_tag_str(kv): k, v = (kv.split(sep='=', maxsplit=1) + [''])[:2] if not tag_key_re.fullmatch(k): raise argx.UserError(f"Tag key '{k}' must consist of alpha-numeric characters, underscores or dashes") if not tag_value_re.fullmatch(v): raise argx.UserError( f"Tag value '{k}={v}' must consist of alpha-numeric characters, underscores, dashes, commas or dots" ) return {"key": k, "value": v} def parse_untag_str(k): if not tag_key_re.match(k): raise argx.UserError(f"Tag key {k} must consist of alpha-numeric characters, underscores or dashes") return k def no_auth(fun): fun.no_auth = True return fun def optional_auth(fun): fun.optional_auth = True return fun def is_truthy(value: str) -> bool: return value.lower() in {"y", "yes", "t", "true", "1", "ok"} def parse_iso8601(value: str) -> datetime: if value[-1] == 'Z': value = value[:-1] + '+0000' return datetime.strptime(value, "%Y-%m-%dT%H:%M:%S%z") def get_current_date() -> datetime: return datetime.now(timezone.utc) if (sys.version_info.major, sys.version_info.minor) >= (3, 8): from typing import Protocol class ClientFactory(Protocol): def __call__(self, base_url: str, show_http: bool, request_timeout: Optional[int]): ... 
else: ClientFactory = Callable[..., client.AivenClient] class AivenCLI(argx.CommandLineTool): def __init__(self, client_factory: ClientFactory = client.AivenClient): argx.CommandLineTool.__init__(self, "avn") self.client_factory = client_factory self.client = None for plugin in PLUGINS: plugincli = plugin.ClientPlugin() self.extend_commands(plugincli) def add_args(self, parser): parser.add_argument( "--auth-ca", help="CA certificate to use [AIVEN_CA_CERT], default %(default)r", default=envdefault.AIVEN_CA_CERT, metavar="FILE", ) parser.add_argument( "--auth-token", help="Client auth token to use [AIVEN_AUTH_TOKEN], [AIVEN_CREDENTIALS_FILE]", default=envdefault.AIVEN_AUTH_TOKEN, ) parser.add_argument("--show-http", help="Show HTTP requests and responses", action="store_true") parser.add_argument( "--url", help="Server base url default %(default)r", default=envdefault.AIVEN_WEB_URL or "https://api.aiven.io", ) parser.add_argument( "--request-timeout", type=int, default=None, help="Wait for up to N seconds for a response to a request (default: infinite)", ) def collect_user_config_options(self, obj_def, prefixes=None): opts = {} for prop, spec in sorted(obj_def.get("properties", {}).items()): full_prop = prefixes + [prop] if prefixes else [prop] full_name = ".".join(full_prop) types = spec["type"] if not isinstance(types, list): types = [types] if "object" in types: opts.update(self.collect_user_config_options(spec, prefixes=full_prop)) if "null" in types: opts[full_name] = { "property_parts": full_prop, "title": "Remove {}".format(prop), "type": "null", } else: opts[full_name] = dict(spec, property_parts=full_prop) for spec in sorted(obj_def.get("patternProperties", {}).values()): full_prop = prefixes + ["KEY"] if prefixes else ["KEY"] full_name = ".".join(full_prop) if spec["type"] == "object": opts.update(self.collect_user_config_options(spec, prefixes=full_prop)) else: title = ': '.join([obj_def["title"], spec["title"]]) if "title" in spec else obj_def["title"] opts[full_name] = dict(spec, property_parts=full_prop, title=title) return opts def create_user_config(self, user_config_schema): user_option_remove = [] if hasattr(self.args, "user_option_remove"): user_option_remove = self.args.user_option_remove if not self.args.user_config and not user_option_remove: return {} options = self.collect_user_config_options(user_config_schema) user_config = {} for key_value in self.args.user_config: try: key, value = key_value.split("=", 1) except ValueError as ex: raise argx.UserError( "Invalid config value: {!r}, expected '<KEY>[.<SUBKEY>]=<JSON_VALUE>'".format(key_value) ) from ex opt_schema = options.get(key) if not opt_schema: generic_key = ".".join(key.split(".")[:-1] + ["KEY"]) opt_schema = options.get(generic_key) if not opt_schema: raise argx.UserError( "Unsupported option {!r}, available options: {}".format(key, ", ".join(options) or "none") ) try: value = convert_str_to_value(opt_schema, value) except ValueError as ex: raise argx.UserError("Invalid value {!r}: {}".format(key_value, ex)) leaf_config, leaf_key = self.get_leaf_config_and_key(config=user_config, key=key, opt_schema=opt_schema) leaf_config[leaf_key] = value for opt in user_option_remove: opt_schema = options.get(opt) if not opt_schema: raise argx.UserError( "Unsupported option {!r}, available options: {}".format(opt, ", ".join(options) or "none") ) if "null" not in opt_schema["type"]: raise argx.UserError("Removing option {!r} is not supported".format(opt)) leaf_config, leaf_key = 
self.get_leaf_config_and_key(config=user_config, key=opt, opt_schema=opt_schema) leaf_config[leaf_key] = None return user_config @classmethod def get_leaf_config_and_key(cls, *, config, key, opt_schema): key_suffix = key for part in opt_schema["property_parts"][:-1]: prefix = "{}.".format(part) if not key_suffix.startswith(prefix): raise argx.UserError("Expected {} to start with {} (full key {})".format(key_suffix, prefix, key)) key_suffix = key_suffix[len(prefix):] config = config.setdefault(part, {}) return config, key_suffix def enter_password(self, prompt, var="AIVEN_PASSWORD", confirm=False): password = os.environ.get(var) if password: return password password = getpass.getpass(prompt) if confirm: again = getpass.getpass("Confirm password again: ") if password != again: raise argx.UserError("Passwords do not match") return password def print_boxed(self, lines: List[str]) -> None: longest = max(len(line) for line in lines) print("*" * longest) for line in lines: print(line) print("*" * longest) def confirm(self, prompt: str = "confirm (y/N)? "): if self.args.force or is_truthy(os.environ.get("AIVEN_FORCE", "no")): return True answer = input(prompt) return is_truthy(answer) def get_project(self): if getattr(self.args, "project", None) and self.args.project: return self.args.project return self.config.get("default_project") @no_auth @arg("pattern", nargs="*", help="command search pattern") def help(self): output = [] patterns = [re.compile(p, re.I) for p in self.args.pattern] for plugin in self._extensions: for prop_name in dir(plugin): if prop_name.startswith("_"): continue prop = getattr(plugin, prop_name) arg_list = getattr(prop, argx.ARG_LIST_PROP, None) if arg_list is not None: cmd = prop_name.replace("__", " ").replace("_", "-") if patterns and not all((p.search(cmd) or p.search(prop.__doc__)) for p in patterns): continue output.append({"command": cmd, "help": " ".join(prop.__doc__.split())}) layout = ["command", "help"] self.print_response(output, json=False, table_layout=layout) @no_auth @arg() def crab(self): output = """ `'+;` `'+;` '@@@#@@@` '@@@#@@@` #@. #@. @@. #@. @: ,@@ @@ @: ,@@ @@ ,@ @@@@@ :@ :@ @@@@@ .@ @ #@@@. #@ @` #@@@` @@ @@ `@# @@ `@# @@#. :@@+ @@#. :@@# `+@@@' `#@@@' ,;:` ,;;. @@@@@@# .+@@@@@@@@@@@@@'. `@@@@@@@ @@@@@# @@@@@@@@@@@@@@@@@@@@@@+ @@@@@@ @@@ ;@@@@@@@@@@@@@@@@@@@@@@@@@@@` `@@; ` `@@@@@@@@@@@ ;@@@@@@@@@@@ `@@@ '@@@@@@@@@@@@@ @@@@@@@@@@@@@` @@@ '@@@` .@@@@@@@@@@@@@@@ `@@@@@@@@@@@@@@@ @@@@` @@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@ '@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@ ,:::; @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ,::: :@ ,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ #@ @@@ +@#+#@@@@@@@@@@@@@@@@@@@@@@@@@#+#@. @@@ @@@@ '@@@@@@@@@@@@@@@@@@@. 
@@@@ @@@ @@@@@@+ @@@@@@@@@@@@@@@@@ @@@@@@; @@@ @@ @@@@@@@@@ @@@@@@@@@@@@@@@ `@@@@@@@@@ @+ @@@@@@@@@@@ :@@@@@@@@@@@@@ @@@@@@@@@@@ ' `@@@@@@@@@@@ ``` ,@@@@@@@@@@@ `@@@@@@ '@ :@: @@@@@@ @@@@@: @@@@@@ @@@@@ @@@@@ @@@@# @@@@' """ print(output) @no_auth @arg("email", nargs="?", help="User email address") @arg("--tenant", help="Login under a different tenant") @arg("--token", action="store_true", help="Provide an access token instead of password") def user__login(self): email = self.args.email if not email: email = input("Username (email): ") if self.args.token: token = self.enter_password(f"{email}'s Aiven access token: ", var="AIVEN_AUTH_TOKEN") else: password = self.enter_password(f"{email}'s Aiven password: ", var="AIVEN_PASSWORD") try: result = self.client.authenticate_user(email=email, password=password, tenant_id=self.args.tenant) except client.Error as ex: if ex.status == 510: otp = input("Two-factor authentication OTP: ") result = self.client.authenticate_user(email=email, password=password, otp=otp) else: raise token = result["token"] self._write_auth_token_file(token=token, email=email) auth_token = self._get_auth_token() if auth_token: self.client.set_auth_token(auth_token) project = self.get_project() projects = self.client.get_projects() if project and any(p["project_name"] == project for p in projects): return if projects: default_project = projects[0]["project_name"] self.config["default_project"] = default_project self.config.save() self.log.info( "Default project set as '%s' (change with 'avn project switch <project>')", default_project, ) else: self.log.info("No projects exists. You should probably create one with 'avn project create <name>'") @arg() def user__logout(self): self.client.access_token_revoke(token_prefix=self._get_auth_token()) self._remove_auth_token_file() @arg.verbose def user__tokens_expire(self): message = self.client.expire_user_tokens()["message"] print(message) @arg("--description", required=True, help="Description of how the token will be used") @arg("--max-age-seconds", type=int, help="Maximum age of the token, if any") @arg( "--extend-when-used", action="store_true", help="Extend token's expiry time when used (only applicable if token is set to expire)", ) @arg.json def user__access_token__create(self): token_info = self.client.access_token_create( description=self.args.description, extend_when_used=self.args.extend_when_used, max_age_seconds=self.args.max_age_seconds, ) layout = [ "expiry_time", "description", "max_age_seconds", "extend_when_used", "full_token", ] self.print_response([token_info], json=self.args.json, table_layout=layout) @arg( "token_prefix", help="The full token or token prefix identifying the token to update", ) @arg("--description", required=True, help="Description of how the token will be used") @arg.json def user__access_token__update(self): token_info = self.client.access_token_update(token_prefix=self.args.token_prefix, description=self.args.description) layout = [ "expiry_time", "token_prefix", "description", "max_age_seconds", "extend_when_used", "last_used_time", "last_ip", "last_user_agent", ] self.print_response([token_info], json=self.args.json, table_layout=layout) @arg( "token_prefix", help="The full token or token prefix identifying the token to revoke", ) def user__access_token__revoke(self): self.client.access_token_revoke(token_prefix=self.args.token_prefix) print("Revoked") @arg.json def user__access_token__list(self): tokens = self.client.access_tokens_list() layout = [ "expiry_time", "token_prefix", 
"description", "max_age_seconds", "extend_when_used", "last_used_time", "last_ip", "last_user_agent", ] self.print_response(tokens, json=self.args.json, table_layout=layout) def _show_logs(self, msgs): if self.args.json: print(jsonlib.dumps(msgs["logs"], indent=4, sort_keys=True)) else: for log_msg in msgs["logs"]: print("{time:<27}{hostname} {unit} {msg}".format(**log_msg)) return msgs["offset"] @arg.project @arg.service_name @arg.json @arg( "-S", "--sort-order", type=str, default="asc", choices=["desc", "asc"], help="Sort direction for log fetching", ) @arg("-n", "--limit", type=int, default=100, help="Get up to N rows of logs") @arg("-f", "--follow", action="store_true", default=False) def service__logs(self): previous_offset = None consecutive_errors = 0 while True: try: msgs = self.client.get_service_logs( project=self.get_project(), limit=self.args.limit, offset=previous_offset, service=self.args.service_name, sort_order=self.args.sort_order, ) except requests.RequestException as ex: if not self.args.follow: raise ex consecutive_errors += 1 if consecutive_errors > 10: raise argx.UserError("Fetching logs failed repeatedly, aborting.") sys.stderr.write("Fetching log messages failed with {}. Retrying after 10s\n".format(ex)) time.sleep(10.0) continue consecutive_errors = 0 new_offset = self._show_logs(msgs) if not msgs["logs"] and previous_offset is not None and self.args.sort_order == "desc": break if not self.args.follow: break if previous_offset == new_offset: time.sleep(10.0) previous_offset = new_offset @arg.project @arg.json @arg("-n", "--limit", type=int, default=100, help="Get up to N rows of logs") def events(self): events = self.client.get_events(project=self.get_project(), limit=self.args.limit) if self.args.json: print(jsonlib.dumps(events, indent=4, sort_keys=True)) return for msg in events: if not msg["service_name"]: msg["service_name"] = "" layout = ["time", "actor", "event_type", "service_name", "event_desc"] self.print_response(events, json=self.args.json, table_layout=layout) @optional_auth @arg.project @arg.json def cloud__list(self): project = self.get_project() if project and not self.client.auth_token: raise argx.UserError("authentication is required to list clouds for a specific project") self.print_response(self.client.get_clouds(project=project), json=self.args.json) @staticmethod def describe_plan(plan, node_count, service_plan): if plan["node_memory_mb"] < 1024: ram_amount = "{} MB".format(plan["node_memory_mb"]) else: ram_amount = "{:.0f} GB".format(plan["node_memory_mb"] / 1024.0) if plan["disk_space_mb"]: if plan.get("disk_space_cap_mb"): disk_desc = ", {:.0f}-{:.0f} GB disk".format( plan["disk_space_mb"] / 1024.0, plan["disk_space_cap_mb"] / 1024.0 ) else: disk_desc = ", {:.0f} GB disk".format(plan["disk_space_mb"] / 1024.0) else: disk_desc = "" if node_count == 2: plan_qual = " high availability pair" elif node_count > 2: plan_qual = " {}-node high availability set".format(node_count) else: plan_qual = "" return "{name} ({cpu_count} CPU, {ram_amount} RAM{disk_desc}){qual}".format( name=service_plan.title(), cpu_count=plan["node_cpu_count"], ram_amount=ram_amount, disk_desc=disk_desc, qual=plan_qual, ) @arg.json @arg("-n", "--name", required=True, help="Name of the account to create") def account__create(self): account = self.client.create_account(self.args.name) self.print_response(account, json=self.args.json, single_item=True) @arg.json @arg.account_id @arg("-n", "--name", required=True, help="New name for the account") def account__update(self): 
account = self.client.update_account(self.args.account_id, self.args.name) self.print_response(account, json=self.args.json, single_item=True) @arg.account_id def account__delete(self): self.client.delete_account(self.args.account_id) print("Deleted") @arg.json def account__list(self): accounts = self.client.get_accounts() self.print_response(accounts, json=self.args.json) @staticmethod def _parse_auth_config_options(config_cmdline, config_file): options = {} for name_and_value in config_cmdline: if "=" not in name_and_value: raise argx.UserError("Invalid custom value, missing '=': {}".format(name_and_value)) name, value = name_and_value.split("=", 1) options[name] = value for name_and_value in config_file: if "=" not in name_and_value: raise argx.UserError("Invalid custom value, missing '=': {}".format(name_and_value)) name, filename = name_and_value.split("=", 1) if not os.path.isfile(filename): raise argx.UserError("No such file {!r}".format(filename)) with open(filename, "rt") as fob: value = fob.read() options[name] = value return options @arg.json @arg.account_id @arg("-n", "--name", required=True, help="Authentication method name") @arg( "-t", "--type", required=True, help="Authentication method type", choices=["saml"], ) @arg.config_cmdline @arg.config_file def account__authentication_method__create(self): options = self._parse_auth_config_options(self.args.config_cmdline, self.args.config_file) method = self.client.create_account_authentication_method( self.args.account_id, method_name=self.args.name, method_type=self.args.type, options=options, ) acs_url = "https://api.aiven.io/v1/sso/saml/account/{}/method/{}/acs".format( self.args.account_id, method["authentication_method_id"] ) metadata_url = "https://api.aiven.io/v1/sso/saml/account/{}/method/{}/metadata".format( self.args.account_id, method["authentication_method_id"] ) acs_key = "ACS (Single Sign On / Recipient) URL" metadata_key = "Metadata URL" method[acs_key] = acs_url method[metadata_key] = metadata_url table_layout = [AUTHENTICATION_METHOD_COLUMNS, acs_key, metadata_key] self.print_response(method, json=self.args.json, single_item=True, table_layout=table_layout) @arg.json @arg.account_id @arg.authentication_id @arg("-n", "--name", help="New name for the authentication method") @arg("--enable", help="Enable the authentication method", action="store_true") @arg("--disable", help="Disable the authentication method", action="store_true") @arg.config_cmdline @arg.config_file def account__authentication_method__update(self): if self.args.enable and self.args.disable: raise argx.UserError("Only set at most one of --enable and --disable") enable = None if self.args.enable: enable = True elif self.args.disable: enable = False options = self._parse_auth_config_options(self.args.config_cmdline, self.args.config_file) account = self.client.update_account_authentication_method( self.args.account_id, self.args.authentication_id, method_enable=enable, method_name=self.args.name, options=options, ) self.print_response( account, json=self.args.json, single_item=True, table_layout=AUTHENTICATION_METHOD_COLUMNS, ) @arg.account_id @arg.authentication_id def account__authentication_method__delete(self): self.client.delete_account_authentication_method(self.args.account_id, self.args.authentication_id) print("Deleted") @arg.json @arg.account_id
Apache License 2.0
thunlp/openprompt
openprompt/prompt_base.py
Template.process_batch
python
def process_batch(self, batch):
    raise NotImplementedError
r"""All template should rewrite this method to process the batch input such as substituting embeddings.
https://github.com/thunlp/openprompt/blob/5d3d5dce8c4babe9a265b69576e63eec2d067fe3/openprompt/prompt_base.py#L164-L168
from abc import abstractmethod import json from openprompt.config import convert_cfg_to_dict from transformers.utils.dummy_pt_objects import PreTrainedModel from openprompt.utils.utils import signature from yacs.config import CfgNode from openprompt.data_utils.data_utils import InputFeatures import torch import torch.nn as nn from openprompt.data_utils import InputExample from typing import * from transformers.tokenization_utils import PreTrainedTokenizer from openprompt.utils.logging import logger import numpy as np import torch.nn.functional as F class Template(nn.Module): registered_inputflag_names = ["loss_ids", "shortenable_ids"] def __init__(self, tokenizer: PreTrainedTokenizer, mask_token: str = '<mask>', placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'}, ): super().__init__() self.mask_token = mask_token self.tokenizer = tokenizer self.placeholder_mapping = placeholder_mapping self._in_on_text_set = False def get_default_loss_ids(self): idx = [ 1 if token==self.mask_token else 0 for token in self.text ] return idx def get_default_shortenable_ids(self) -> List[int]: idx = [ 1 if any([placeholder in token for placeholder in self.placeholder_mapping.keys()]) else 0 for token in self.text ] return idx def get_default_new_token_ids(self) -> List[int]: raise NotImplementedError def get_default_soft_token_ids(self) -> List[int]: raise NotImplementedError def wrap_one_example(self, example: InputExample) -> List[Dict]: not_empty_keys = example.keys() if self.text is None: raise ValueError("template text has not been initialized") if isinstance(example, InputExample): text = self.text.copy() for placeholder_token in self.placeholder_mapping: for i in range(len(text)): text[i] = text[i].replace(placeholder_token, getattr(example, self.placeholder_mapping[placeholder_token])) not_empty_keys.remove(self.placeholder_mapping[placeholder_token]) for key, value in example.meta.items(): for i in range(len(text)): text[i] = text[i].replace("<meta:"+key+">", value) not_empty_keys.remove('meta') keys, values= ['text'], [text] for inputflag_name in self.registered_inputflag_names: keys.append(inputflag_name) v = None if hasattr(self, inputflag_name) and getattr(self, inputflag_name) is not None: v = getattr(self, inputflag_name) elif hasattr(self, "get_default_"+inputflag_name): v = getattr(self, "get_default_"+inputflag_name)() else: raise ValueError(""" Template's inputflag '{}' is registered but not initialize. Try using template.{} = [...] to initialize or create an method get_default_{}(self) in your template. """.format(inputflag_name, inputflag_name, inputflag_name)) if len(v) != len(text): raise ValueError("Template: len({})={} doesn't match len(text)={}." .format(inputflag_name, len(v), len(text))) values.append(v) wrapped_parts_to_tokenize = [] for piece in list(zip(*values)): wrapped_parts_to_tokenize.append(dict(zip(keys, piece))) wrapped_parts_not_tokenize = {key: getattr(example, key) for key in not_empty_keys} return [wrapped_parts_to_tokenize, wrapped_parts_not_tokenize] else: raise TypeError("InputExample") @abstractmethod
Apache License 2.0
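A minimal sketch of how a concrete template might satisfy this interface, assuming OpenPrompt is installed; MyPassthroughTemplate and its no-op body are illustrative assumptions, not OpenPrompt code:

from openprompt.prompt_base import Template

class MyPassthroughTemplate(Template):
    # Hypothetical subclass: a real template would substitute prompt
    # embeddings (e.g. soft-prompt vectors) into the batch here.
    def process_batch(self, batch):
        return batch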
burnash/gspread
gspread/client.py
Client.remove_permission
python
def remove_permission(self, file_id, permission_id):
    url = "{}/{}/permissions/{}".format(
        DRIVE_FILES_API_V2_URL, file_id, permission_id
    )

    params = {"supportsAllDrives": True}
    self.request("delete", url, params=params)
Deletes a permission from a file.

:param str file_id: a spreadsheet ID (aka file ID.)
:param str permission_id: an ID for the permission.
https://github.com/burnash/gspread/blob/90a728fac1c8f6fb38f19da588de0337697854cc/gspread/client.py#L388-L399
from google.auth.transport.requests import AuthorizedSession from .exceptions import APIError, SpreadsheetNotFound from .spreadsheet import Spreadsheet from .urls import ( DRIVE_FILES_API_V2_URL, DRIVE_FILES_API_V3_URL, DRIVE_FILES_UPLOAD_API_V2_URL, ) from .utils import convert_credentials, extract_id_from_url, finditem class Client: def __init__(self, auth, session=None): if auth is not None: self.auth = convert_credentials(auth) self.session = session or AuthorizedSession(self.auth) else: self.session = session def login(self): from google.auth.transport.requests import Request self.auth.refresh(Request(self.session)) self.session.headers.update({"Authorization": "Bearer %s" % self.auth.token}) def request( self, method, endpoint, params=None, data=None, json=None, files=None, headers=None, ): response = getattr(self.session, method)( endpoint, json=json, params=params, data=data, files=files, headers=headers, ) if response.ok: return response else: raise APIError(response) def list_spreadsheet_files(self, title=None): files = [] page_token = "" url = DRIVE_FILES_API_V3_URL q = 'mimeType="application/vnd.google-apps.spreadsheet"' if title: q += ' and name = "{}"'.format(title) params = { "q": q, "pageSize": 1000, "supportsAllDrives": True, "includeItemsFromAllDrives": True, "fields": "kind,nextPageToken,files(id,name,createdTime,modifiedTime)", } while page_token is not None: if page_token: params["pageToken"] = page_token res = self.request("get", url, params=params).json() files.extend(res["files"]) page_token = res.get("nextPageToken", None) return files def open(self, title): try: properties = finditem( lambda x: x["name"] == title, self.list_spreadsheet_files(title), ) properties["title"] = properties["name"] return Spreadsheet(self, properties) except StopIteration: raise SpreadsheetNotFound def open_by_key(self, key): return Spreadsheet(self, {"id": key}) def open_by_url(self, url): return self.open_by_key(extract_id_from_url(url)) def openall(self, title=None): spreadsheet_files = self.list_spreadsheet_files(title) if title: spreadsheet_files = [ spread for spread in spreadsheet_files if title == spread["name"] ] return [ Spreadsheet(self, dict(title=x["name"], **x)) for x in spreadsheet_files ] def create(self, title, folder_id=None): payload = { "name": title, "mimeType": "application/vnd.google-apps.spreadsheet", } params = { "supportsAllDrives": True, } if folder_id is not None: payload["parents"] = [folder_id] r = self.request("post", DRIVE_FILES_API_V3_URL, json=payload, params=params) spreadsheet_id = r.json()["id"] return self.open_by_key(spreadsheet_id) def copy(self, file_id, title=None, copy_permissions=False, folder_id=None): url = "{}/{}/copy".format(DRIVE_FILES_API_V2_URL, file_id) payload = { "title": title, "mimeType": "application/vnd.google-apps.spreadsheet", } if folder_id is not None: payload["parents"] = [{"id": folder_id}] params = {"supportsAllDrives": True} r = self.request("post", url, json=payload, params=params) spreadsheet_id = r.json()["id"] new_spreadsheet = self.open_by_key(spreadsheet_id) if copy_permissions is True: original = self.open_by_key(file_id) permissions = original.list_permissions() for p in permissions: if p.get("deleted"): continue try: new_spreadsheet.share( value=p["emailAddress"], perm_type=p["type"], role=p["role"], notify=False, ) except Exception: pass return new_spreadsheet def del_spreadsheet(self, file_id): url = "{}/{}".format(DRIVE_FILES_API_V3_URL, file_id) params = {"supportsAllDrives": True} self.request("delete", url, 
params=params) def import_csv(self, file_id, data): headers = {"Content-Type": "text/csv"} url = "{}/{}".format(DRIVE_FILES_UPLOAD_API_V2_URL, file_id) self.request( "put", url, data=data, params={ "uploadType": "media", "convert": True, "supportsAllDrives": True, }, headers=headers, ) def list_permissions(self, file_id): url = "{}/{}/permissions".format(DRIVE_FILES_API_V2_URL, file_id) params = {"supportsAllDrives": True} r = self.request("get", url, params=params) return r.json()["items"] def insert_permission( self, file_id, value, perm_type, role, notify=True, email_message=None, with_link=False, ): url = "{}/{}/permissions".format(DRIVE_FILES_API_V2_URL, file_id) payload = { "value": value, "type": perm_type, "role": role, "withLink": with_link, } params = { "sendNotificationEmails": notify, "emailMessage": email_message, "supportsAllDrives": "true", } self.request("post", url, json=payload, params=params)
MIT License
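A short usage sketch for remove_permission, assuming a service-account credentials file at gspread's default location and a placeholder spreadsheet ID:

import gspread

gc = gspread.service_account()          # assumes default service-account credentials
file_id = "placeholder-spreadsheet-id"  # hypothetical spreadsheet (file) ID

permissions = gc.list_permissions(file_id)           # Drive v2 permission objects
gc.remove_permission(file_id, permissions[0]["id"])  # revoke the first listed permission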
cakebread/yolk
yolk/pypi.py
CheeseShop.package_releases
python
def package_releases(self, package_name):
    if self.debug:
        self.logger.debug("DEBUG: querying PyPI for versions of " + package_name)
    return self.xmlrpc.package_releases(package_name)
Query PYPI via XMLRPC interface for a pkg's available versions
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/pypi.py#L245-L250
__docformat__ = 'restructuredtext' import re import platform if platform.python_version().startswith('2'): import xmlrpclib import cPickle import urllib2 else: import xmlrpc.client as xmlrpclib import pickle import urllib.request as urllib2 import os import time import logging import urllib from yolk.utils import get_yolk_dir XML_RPC_SERVER = 'http://pypi.python.org/pypi' class addinfourl(urllib2.addinfourl): def getheader(self, name, default=None): if self.headers is None: raise httplib.ResponseNotReady() return self.headers.getheader(name, default) def getheaders(self): if self.headers is None: raise httplib.ResponseNotReady() return self.headers.items() urllib2.addinfourl = addinfourl class ProxyTransport(xmlrpclib.Transport): def request(self, host, handler, request_body, verbose): self.verbose = verbose url = 'http://' + host + handler request = urllib2.Request(url) request.add_data(request_body) request.add_header('User-Agent', self.user_agent) request.add_header('Content-Type', 'text/xml') proxy_handler = urllib2.ProxyHandler() opener = urllib2.build_opener(proxy_handler) fhandle = opener.open(request) return(self.parse_response(fhandle)) def check_proxy_setting(): try: http_proxy = os.environ['HTTP_PROXY'] except KeyError: return if not http_proxy.startswith('http://'): match = re.match('(http://)?([-_\.A-Za-z]+):(\d+)', http_proxy) os.environ['HTTP_PROXY'] = 'http://%s:%s' % (match.group(2), match.group(3)) return class CheeseShop(object): def __init__(self, debug=False, no_cache=False, yolk_dir=None): self.debug = debug self.no_cache = no_cache if yolk_dir: self.yolk_dir = yolk_dir else: self.yolk_dir = get_yolk_dir() self.xmlrpc = self.get_xmlrpc_server() self.pkg_cache_file = self.get_pkg_cache_file() self.last_sync_file = self.get_last_sync_file() self.pkg_list = None self.logger = logging.getLogger("yolk") self.get_cache() def get_cache(self): if self.no_cache: self.pkg_list = self.list_packages() return if not os.path.exists(self.yolk_dir): os.mkdir(self.yolk_dir) if os.path.exists(self.pkg_cache_file): self.pkg_list = self.query_cached_package_list() else: self.logger.debug("DEBUG: Fetching package list cache from PyPi...") self.fetch_pkg_list() def get_last_sync_file(self): return os.path.abspath(self.yolk_dir + "/last_sync") def get_xmlrpc_server(self): check_proxy_setting() if os.environ.has_key('XMLRPC_DEBUG'): debug = 1 else: debug = 0 try: return xmlrpclib.Server(XML_RPC_SERVER, transport=ProxyTransport(), verbose=debug) except IOError: self.logger("ERROR: Can't connect to XML-RPC server: %s" % XML_RPC_SERVER) def get_pkg_cache_file(self): return os.path.abspath('%s/pkg_list.pkl' % self.yolk_dir) def query_versions_pypi(self, package_name): if not package_name in self.pkg_list: self.logger.debug("Package %s not in cache, querying PyPI..." 
% package_name) self.fetch_pkg_list() versions = [] for pypi_pkg in self.pkg_list: if pypi_pkg.lower() == package_name.lower(): if self.debug: self.logger.debug("DEBUG: %s" % package_name) versions = self.package_releases(pypi_pkg) package_name = pypi_pkg break return (package_name, versions) def query_cached_package_list(self): if self.debug: self.logger.debug("DEBUG: reading pickled cache file") return cPickle.load(open(self.pkg_cache_file, "r")) def fetch_pkg_list(self): self.logger.debug("DEBUG: Fetching package name list from PyPI") package_list = self.list_packages() cPickle.dump(package_list, open(self.pkg_cache_file, "w")) self.pkg_list = package_list def search(self, spec, operator): return self.xmlrpc.search(spec, operator.lower()) def changelog(self, hours): return self.xmlrpc.changelog(get_seconds(hours)) def updated_releases(self, hours): return self.xmlrpc.updated_releases(get_seconds(hours)) def list_packages(self): return self.xmlrpc.list_packages() def release_urls(self, package_name, version): return self.xmlrpc.release_urls(package_name, version) def release_data(self, package_name, version): try: return self.xmlrpc.release_data(package_name, version) except xmlrpclib.Fault: return
BSD 3-Clause New or Revised License
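A small usage sketch for package_releases; note that the XML-RPC endpoint hardcoded above (pypi.python.org) has since been retired, so this is illustrative only:

from yolk.pypi import CheeseShop

cheese_shop = CheeseShop(debug=True)             # builds the XML-RPC proxy and local cache
versions = cheese_shop.package_releases("yolk")  # list of available version strings
print(versions)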
pkgcore/pkgcore
src/pkgcore/scripts/pmerge.py
unmerge
python
def unmerge(out, err, installed_repos, targets, options, formatter, world_set=None):
    vdb = installed_repos.real.combined
    fake_vdb = installed_repos.virtual.combined

    matches = set()
    fake = set()
    unknown = set()
    for token, restriction in targets:
        installed = vdb.match(restriction)
        if not installed:
            fake_pkgs = fake_vdb.match(restriction)
            if fake_pkgs:
                fake.update(fake_pkgs)
            else:
                unknown.add(token)
            continue

        categories = set(pkg.category for pkg in installed)
        if len(categories) > 1:
            raise parserestrict.ParseError(
                "%r is in multiple categories (%s)" % (
                    token, ', '.join(sorted(set(pkg.key for pkg in installed)))))
        matches.update(installed)

    if unknown:
        unknowns = ', '.join(map(repr, unknown))
        if matches:
            err.write(f"Skipping unknown matches: {unknowns}\n")
        else:
            raise Failure(f"no matches found: {unknowns}")

    if fake:
        err.write('Skipping virtual pkg%s: %s' % (
            pluralism(fake_pkgs),
            ', '.join(f'{x.versioned_atom}::{x.repo_id}' for x in fake)))

    if matches:
        out.write(out.bold, 'The following packages are to be unmerged:')
        out.prefix = [out.bold, ' * ', out.reset]
        for pkg in matches:
            out.write(pkg.cpvstr)
        out.prefix = []

        repo_obs = observer.repo_observer(
            observer.formatter_output(out), debug=options.debug)

        if options.pretend:
            return

        if (options.ask and not formatter.ask("Would you like to unmerge these packages?")):
            return
        return do_unmerge(options, out, err, vdb, matches, world_set, repo_obs)
Unmerge tokens. hackish, should be rolled back into the resolver
https://github.com/pkgcore/pkgcore/blob/6c57606c15101590f4eed81636ae583f3f900d6a/src/pkgcore/scripts/pmerge.py#L281-L342
import sys from functools import partial from textwrap import dedent from time import time from snakeoil.cli.exceptions import ExitException from snakeoil.sequences import iflatten_instance, stable_unique from snakeoil.strings import pluralism from ..ebuild import resolver, restricts from ..ebuild.atom import atom from ..ebuild.misc import run_sanity_checks from ..merge import errors as merge_errors from ..operations import format, observer from ..repository.util import get_raw_repos from ..repository.virtual import RestrictionRepo from ..resolver.util import reduce_to_failures from ..restrictions import packages from ..restrictions.boolean import OrRestriction from ..util import commandline, parserestrict argparser = commandline.ArgumentParser( domain=True, description=__doc__, script=(__file__, __name__)) argparser.add_argument( nargs='*', dest='targets', metavar='TARGET', action=commandline.StoreTarget, use_sets='sets', help="extended package matching", docs=commandline.StoreTarget.__doc__.split('\n')[1:]) operation_args = argparser.add_argument_group('operations') operation_options = operation_args.add_mutually_exclusive_group() operation_options.add_argument( '-u', '--upgrade', action='store_true', help='try to upgrade installed pkgs/deps', docs=""" Try to upgrade specified targets to the latest visible version. Note that altered package visibility due to keywording or masking can often hide the latest versions of packages, especially for stable configurations. """) operation_options.add_argument( '-d', '--downgrade', action='store_true', help='try to downgrade installed pkgs/deps', docs=""" Try to downgrade specified targets to a lower visible version compared to what's currently installed. Useful for reverting to the previously installed package version; however, note that the -O/--nodeps option is generally used with this otherwise lots of downgrades will be pulled into the resolved deptree. """) operation_options.add_argument( '-C', '--unmerge', action='store_true', help='unmerge packages', docs=""" Target packages for unmerging from the system. WARNING: This does not ask for user confirmation for any targets so it's possible to quickly break a system. """) operation_options.add_argument( '--clean', action='store_true', help='remove installed packages not referenced by any target pkgs/sets', docs=""" Remove installed packages that aren't referenced by any target packages or sets. This defaults to using the world and system sets if no targets are specified. Use with *caution*, this option used incorrectly can render your system unusable. Note that this implies --deep. """) operation_options.add_argument( '--list-sets', action='store_true', help='display the list of available package sets') resolution_options = argparser.add_argument_group("resolver options") resolution_options.add_argument( '-p', '--pretend', action='store_true', help="only perform the dep resolution", docs=""" Resolve package dependencies and display the results without performing any merges. """) resolution_options.add_argument( '-a', '--ask', action='store_true', help="ask for user confirmation after dep resolution", docs=""" Perform the dependency resolution, but ask for user confirmation before beginning the fetch/build/merge process. The choice defaults to yes so pressing the "Enter" key will trigger acceptance. 
""") resolution_options.add_argument( '-f', '--fetchonly', action='store_true', help="do only the fetch steps of the resolved plan", docs=""" Only perform fetching of all targets from SRC_URI based on the current USE configuration. """) resolution_options.add_argument( '-1', '--oneshot', action='store_true', help="do not record changes in the world file", docs=""" Build and merge packages normally, but do not add any targets to the world file. Note that this is forcibly enabled if a package set is specified. """) resolution_options.add_argument( '-D', '--deep', action='store_true', help='force the resolver to verify installed deps', docs=""" Force dependency resolution across the entire dependency tree for all specified targets. """) resolution_options.add_argument( '-N', '--newuse', action='store_true', help="add installed pkgs with changed useflags to targets", docs=""" Include installed packages with USE flag changes in the list of viable targets for rebuilding. USE flag changes include flags being added, removed, enabled, or disabled with regards to a package. USE flag changes can occur via ebuild alterations, profile updates, or local configuration modifications. Note that this option implies -1/--oneshot. """) resolution_options.add_argument( '-i', '--ignore-cycles', action='store_true', help="ignore unbreakable dep cycles", docs=""" Ignore dependency cycles if they're found to be unbreakable; for example: a depends on b, and b depends on a, with neither built. """) resolution_options.add_argument( '--with-bdeps', action='store_true', help="process build deps for built packages", docs=""" Pull in build time dependencies for built packages during dependency resolution, by default they're ignored. """) resolution_options.add_argument( '-O', '--nodeps', action='store_true', help='disable dependency resolution', docs=""" Build and merge packages without resolving any dependencies. """) resolution_options.add_argument( '-o', '--onlydeps', action='store_true', help='only merge the deps of the specified packages', docs=""" Build and merge only the dependencies for the packages specified. """) resolution_options.add_argument( '-n', '--noreplace', action='store_false', dest='replace', help="don't reinstall target pkgs that are already installed", docs=""" Skip packages that are already installed. By default when running without this option, any specified target packages will be remerged regardless of if they are already installed. """) resolution_options.add_argument( '-b', '--buildpkg', action='store_true', help="build binpkgs", docs=""" Force binary packages to be built for all merged packages. """) resolution_options.add_argument( '-k', '--usepkg', action='store_true', help="prefer to use binpkgs", docs=""" Binary packages are preferred over ebuilds when performing dependency resolution. """) resolution_options.add_argument( '-K', '--usepkgonly', action='store_true', help="use only binpkgs", docs=""" Only binary packages are considered when performing dependency resolution. """) resolution_options.add_argument( '-S', '--source-only', action='store_true', help="use only ebuilds, no binpkgs", docs=""" Only ebuilds are considered when performing dependency resolution. """) resolution_options.add_argument( '-e', '--empty', action='store_true', help="force rebuilding of all involved packages", docs=""" Force all targets and their dependencies to be rebuilt. 
""") resolution_options.add_argument( '-x', '--exclude', dest='excludes', metavar='TARGET[,TARGET,...]', action=commandline.StoreTarget, separator=',', help='inject packages into the installed set', docs=""" Comma-separated list of targets to pretend are installed. This supports extended package globbing, e.g. ``'dev-python/*'`` equates to faking the entire dev-python category is installed. """) resolution_options.add_argument( '--ignore-failures', action='store_true', help='ignore failures while running all types of tasks', docs=""" Skip failures during the following phases: sanity checks (pkg_pretend), fetching, dep resolution, and (un)merging. """) resolution_options.add_argument( '--force', action='store_true', dest='force', help="force changes to a repo, regardless of if it's frozen", docs=""" Force (un)merging on the livefs (vdb), regardless of if it's frozen. """) resolution_options.add_argument( '--preload-vdb-state', action='store_true', help="enable preloading of the installed packages database", docs=""" Preload the installed package database which causes the resolver to work with a complete graph, thus disallowing actions that conflict with installed packages. If disabled, it's possible for the requested action to conflict with already installed dependencies that aren't involved in the graph of the requested operation. """) output_options = argparser.add_argument_group("output options") output_options.add_argument( '--quiet-repo-display', action='store_true', help="use indexes instead of ::repo suffixes in dep resolution output", docs=""" In the package merge list display, suppress ::repo output and instead use index numbers to indicate which repos packages come from. """) output_options.add_argument( '-F', '--formatter', priority=90, metavar='FORMATTER', action=commandline.StoreConfigObject, get_default=True, config_type='pmerge_formatter', help='output formatter to use', docs=""" Select an output formatter to use for text formatting of --pretend or --ask output, currently available formatters include the following: basic, pkgcore, portage, and portage-verbose. The basic formatter is the nearest to simple text output and is intended for scripting while the portage/portage-verbose formatter closely emulates portage output and is used by default. """) class AmbiguousQuery(parserestrict.ParseError): def __init__(self, token, keys): self.token = token self.keys = keys def __str__(self): return f"multiple matches for {self.token!r}: {', '.join(map(str, self.keys))}" class NoMatches(parserestrict.ParseError): def __init__(self, token): super().__init__(f'{token!r}: no matches') class Failure(ValueError):
BSD 3-Clause New or Revised License
apache/cloudstack-ec2stack
ec2stack/providers/cloudstack/instances.py
_reboot_instance_request
python
def _reboot_instance_request(instance_id):
    args = {'command': 'rebootVirtualMachine', 'id': instance_id}

    response = requester.make_request_async(args)

    response = response['virtualmachine']

    return response
Request to reboot an instance.

@param instance_id: Id of instance to be rebooted.
@return: Response.
https://github.com/apache/cloudstack-ec2stack/blob/9773f7ce2df76562f26743078f2f651e7fb71144/ec2stack/providers/cloudstack/instances.py#L138-L149
from flask import current_app from ec2stack.providers import cloudstack from ec2stack.providers.cloudstack import requester, service_offerings, zones, disk_offerings from ec2stack import helpers, errors @helpers.authentication_required def describe_instance_attribute(): instance_id = helpers.get('InstanceId') attribute = helpers.get('Attribute') supported_attribute_map = { 'instanceType': 'serviceofferingname', 'groupSet': 'securitygroup' } if attribute not in supported_attribute_map.iterkeys(): errors.invalid_parameter_value( 'The specified attribute is not valid, please specify a valid ' + 'instance attribute.' ) response = describe_instance_by_id(instance_id) return _describe_instance_attribute_response( response, attribute, supported_attribute_map) def _describe_instance_attribute_response(response, attribute, attr_map): response = { 'template_name_or_list': 'instance_attribute.xml', 'response_type': 'DescribeInstanceAttributeResponse', 'attribute': attribute, 'response': response[attr_map[attribute]], 'id': response['id'] } return response @helpers.authentication_required def describe_instances(): args = {'command': 'listVirtualMachines'} response = cloudstack.describe_item( args, 'virtualmachine', errors.invalid_instance_id, 'InstanceId' ) return _describe_instances_response( response ) def describe_instance_by_id(instance_id): args = {'id': instance_id, 'command': 'listVirtualMachines'} response = cloudstack.describe_item_request( args, 'virtualmachine', errors.invalid_instance_id ) return response def _describe_instances_response(response): return { 'template_name_or_list': 'instances.xml', 'response_type': 'DescribeInstancesResponse', 'response': response } @helpers.authentication_required def reboot_instance(): helpers.require_parameters(['InstanceId.1']) instance_id = helpers.get('InstanceId.1') _reboot_instance_request(instance_id) return _reboot_instance_response()
Apache License 2.0
matchms/matchms
matchms/similarity/CosineGreedy.py
CosineGreedy.__init__
python
def __init__(self, tolerance: float = 0.1, mz_power: float = 0.0,
             intensity_power: float = 1.0):
    self.tolerance = tolerance
    self.mz_power = mz_power
    self.intensity_power = intensity_power
Parameters
----------
tolerance:
    Peaks will be considered a match when <= tolerance apart. Default is 0.1.
mz_power:
    The power to raise m/z to in the cosine function. The default is 0, in which
    case the peak intensity products will not depend on the m/z ratios.
intensity_power:
    The power to raise intensity to in the cosine function. The default is 1.
https://github.com/matchms/matchms/blob/d5c6cd79c891325815099c87bf6dfe2ff2db562b/matchms/similarity/CosineGreedy.py#L54-L69
from typing import Tuple
import numpy
from matchms.typing import SpectrumType
from .BaseSimilarity import BaseSimilarity
from .spectrum_similarity_functions import collect_peak_pairs
from .spectrum_similarity_functions import score_best_matches


class CosineGreedy(BaseSimilarity):

    is_commutative = True
    score_datatype = [("score", numpy.float64), ("matches", "int")]
Apache License 2.0
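A usage sketch pairing two toy spectra with CosineGreedy; the peak values are made up, and the exact Spectrum constructor arguments may vary slightly between matchms versions:

import numpy
from matchms import Spectrum
from matchms.similarity import CosineGreedy

reference = Spectrum(mz=numpy.array([100.0, 150.0, 200.0]),
                     intensities=numpy.array([0.7, 0.2, 0.1]))
query = Spectrum(mz=numpy.array([100.0, 140.0, 200.0]),
                 intensities=numpy.array([0.4, 0.2, 0.1]))

similarity = CosineGreedy(tolerance=0.2)
score = similarity.pair(reference, query)
print(score["score"], score["matches"])  # cosine score and number of matched peaks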
artefactory-global/streamlit_prophet
streamlit_prophet/lib/utils/load.py
write_bytesio_to_file
python
def write_bytesio_to_file(filename: str, bytesio: io.BytesIO) -> None:
    with open(filename, "wb") as outfile:
        outfile.write(bytesio.getbuffer())
Write the contents of the given BytesIO to a file.

Parameters
----------
filename
    Uploaded toml config file.
bytesio
    BytesIO object.
https://github.com/artefactory-global/streamlit_prophet/blob/20ec5e94214fcf1c253e79a9d014d74018b26a02/streamlit_prophet/lib/utils/load.py#L118-L130
from typing import Any, Dict, Tuple import io from pathlib import Path import pandas as pd import requests import streamlit as st import toml from PIL import Image def get_project_root() -> str: return str(Path(__file__).parent.parent.parent) @st.cache(suppress_st_warning=True, ttl=300) def load_dataset(file: str, load_options: Dict[Any, Any]) -> pd.DataFrame: try: return pd.read_csv(file, sep=load_options["separator"]) except: st.error( "This file can't be converted into a dataframe. Please import a csv file with a valid separator." ) st.stop() @st.cache(allow_output_mutation=True, ttl=300) def load_config( config_streamlit_filename: str, config_instructions_filename: str, config_readme_filename: str ) -> Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]: config_streamlit = toml.load(Path(get_project_root()) / f"config/{config_streamlit_filename}") config_instructions = toml.load( Path(get_project_root()) / f"config/{config_instructions_filename}" ) config_readme = toml.load(Path(get_project_root()) / f"config/{config_readme_filename}") return dict(config_streamlit), dict(config_instructions), dict(config_readme) @st.cache(ttl=300) def download_toy_dataset(url: str) -> pd.DataFrame: download = requests.get(url).content df = pd.read_csv(io.StringIO(download.decode("utf-8"))) return df @st.cache(ttl=300) def load_custom_config(config_file: io.BytesIO) -> Dict[Any, Any]: toml_file = Path(get_project_root()) / f"config/custom_{config_file.name}" write_bytesio_to_file(str(toml_file), config_file) config = toml.load(toml_file) return dict(config)
MIT License
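A minimal usage sketch for write_bytesio_to_file, with an in-memory buffer standing in for a Streamlit upload and a placeholder output filename:

import io

from streamlit_prophet.lib.utils.load import write_bytesio_to_file

uploaded = io.BytesIO(b"[global]\nkey = 'value'\n")  # stand-in for st.file_uploader output
write_bytesio_to_file("custom_config.toml", uploaded)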
gd-zhang/acktr
ops/fisher_blocks.py
FullyConnectedDiagonalFB.__init__
python
def __init__(self, layer_collection, has_bias=False):
    self._inputs = []
    self._outputs = []
    self._has_bias = has_bias

    super(FullyConnectedDiagonalFB, self).__init__(layer_collection)
Creates a FullyConnectedDiagonalFB block.

Args:
  layer_collection: The collection of all layers in the K-FAC approximate
    Fisher information matrix to which this FisherBlock belongs.
  has_bias: Whether the component Kronecker factors have an additive bias.
    (Default: False)
https://github.com/gd-zhang/acktr/blob/9d61318117672262c78c06a976abf3cd47a54bd6/ops/fisher_blocks.py#L284-L296
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import enum import six from ops import fisher_factors from ops import utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops NORMALIZE_DAMPING_POWER = 1.0 PI_OFF_NAME = "off" PI_TRACENORM_NAME = "tracenorm" PI_TYPE = PI_TRACENORM_NAME def set_global_constants(normalize_damping_power=None, pi_type=None): global NORMALIZE_DAMPING_POWER global PI_TYPE if normalize_damping_power is not None: NORMALIZE_DAMPING_POWER = normalize_damping_power if pi_type is not None: PI_TYPE = pi_type def _compute_pi_tracenorm(left_cov, right_cov): left_norm = math_ops.trace(left_cov) * right_cov.shape.as_list()[0] right_norm = math_ops.trace(right_cov) * left_cov.shape.as_list()[0] return math_ops.sqrt(left_norm / right_norm) def _compute_pi_adjusted_damping(left_cov, right_cov, damping): if PI_TYPE == PI_TRACENORM_NAME: pi = _compute_pi_tracenorm(left_cov, right_cov) return (damping * pi, damping / pi) elif PI_TYPE == PI_OFF_NAME: return (damping, damping) @six.add_metaclass(abc.ABCMeta) class FisherBlock(object): def __init__(self, layer_collection): self._layer_collection = layer_collection @abc.abstractmethod def instantiate_factors(self, grads_list, damping): pass @abc.abstractmethod def multiply_inverse(self, vector): pass @abc.abstractmethod def multiply(self, vector): pass @abc.abstractmethod def tensors_to_compute_grads(self): pass @abc.abstractproperty def num_registered_minibatches(self): pass class FullFB(FisherBlock): def __init__(self, layer_collection, params): self._batch_sizes = [] self._params = params super(FullFB, self).__init__(layer_collection) def instantiate_factors(self, grads_list, damping): self._damping = damping self._factor = self._layer_collection.make_or_get_factor( fisher_factors.FullFactor, (grads_list, self._batch_size)) self._factor.register_damped_inverse(damping) def multiply_inverse(self, vector): inverse = self._factor.get_damped_inverse(self._damping) out_flat = math_ops.matmul(inverse, utils.tensors_to_column(vector)) return utils.column_to_tensors(vector, out_flat) def multiply(self, vector): vector_flat = utils.tensors_to_column(vector) out_flat = ( math_ops.matmul(self._factor.get_cov(), vector_flat) + self._damping * vector_flat) return utils.column_to_tensors(vector, out_flat) def full_fisher_block(self): return self._factor.get_cov() def tensors_to_compute_grads(self): return self._params def register_additional_minibatch(self, batch_size): self._batch_sizes.append(batch_size) @property def num_registered_minibatches(self): return len(self._batch_sizes) @property def _batch_size(self): return math_ops.reduce_sum(self._batch_sizes) class NaiveDiagonalFB(FisherBlock): def __init__(self, layer_collection, params): self._params = params self._batch_sizes = [] super(NaiveDiagonalFB, self).__init__(layer_collection) def instantiate_factors(self, grads_list, damping): self._damping = damping self._factor = self._layer_collection.make_or_get_factor( fisher_factors.NaiveDiagonalFactor, (grads_list, self._batch_size)) def multiply_inverse(self, vector): vector_flat = utils.tensors_to_column(vector) out_flat = vector_flat / (self._factor.get_cov() + self._damping) return utils.column_to_tensors(vector, out_flat) def multiply(self, vector): vector_flat = utils.tensors_to_column(vector) out_flat = vector_flat * (self._factor.get_cov() + self._damping) return utils.column_to_tensors(vector, out_flat) def 
full_fisher_block(self): return array_ops.diag(array_ops.reshape(self._factor.get_cov(), (-1,))) def tensors_to_compute_grads(self): return self._params def register_additional_minibatch(self, batch_size): self._batch_sizes.append(batch_size) @property def num_registered_minibatches(self): return len(self._batch_sizes) @property def _batch_size(self): return math_ops.reduce_sum(self._batch_sizes) class FullyConnectedDiagonalFB(FisherBlock):
MIT License
yelp/pgctl
tests/spec/examples.py
DescribePgctlLog.it_distinguishes_multiple_services
python
def it_distinguishes_multiple_services(self):
There's some indication of which output came from which services. A (colorized?) [servicename] prefix.
https://github.com/yelp/pgctl/blob/0a921abfa6e400e9c663feca3078acc5596a59ac/tests/spec/examples.py#L152-L156
import json import os import pytest from testing import norm from testing import pty from testing.assertions import assert_svstat from testing.assertions import wait_for from testing.norm import norm_trailing_whitespace_json from testing.subprocess import assert_command from testing.subprocess import ctrl_c from testing.subprocess import run from pgctl.daemontools import SvStat from pgctl.subprocess import check_call from pgctl.subprocess import PIPE from pgctl.subprocess import Popen class ANY_INTEGER: def __eq__(self, other): return isinstance(other, int) class DescribePgctlLog: @pytest.yield_fixture def service_name(self): yield 'output' def it_is_empty_before_anything_starts(self, in_example_dir): assert_command( ('pgctl', 'log'), '''\ ==> playground/ohhi/logs/current <== ==> playground/sweet/logs/current <== ''', '', 0, ) def it_shows_stdout_and_stderr(self, in_example_dir): check_call(('pgctl', 'start', 'sweet')) assert_command( ('pgctl', 'log'), '''\ ==> playground/ohhi/logs/current <== ==> playground/sweet/logs/current <== {TIMESTAMP} sweet {TIMESTAMP} sweet_error ''', '', 0, norm=norm.pgctl, ) check_call(('pgctl', 'restart', 'sweet')) assert_command( ('pgctl', 'log'), '''\ ==> playground/ohhi/logs/current <== ==> playground/sweet/logs/current <== {TIMESTAMP} sweet {TIMESTAMP} sweet_error {TIMESTAMP} sweet {TIMESTAMP} sweet_error ''', '', 0, norm=norm.pgctl, ) def it_logs_continuously_when_run_interactively(self, in_example_dir): check_call(('pgctl', 'start')) read, write = os.openpty() pty.normalize_newlines(read) p = Popen(('pgctl', 'log'), stdout=write, stderr=write) os.close(write) import fcntl fl = fcntl.fcntl(read, fcntl.F_GETFL) fcntl.fcntl(read, fcntl.F_SETFL, fl | os.O_NONBLOCK) assert p.poll() is None limit = 3.0 wait = .1 buf = b'' while True: try: block = os.read(read, 1024) print('BLOCK:', block) except OSError as error: print('ERROR:', error) if error.errno == 11: if limit > 0: import time time.sleep(wait) limit -= wait continue else: break else: raise buf += block from testfixtures import StringComparison as S buf = norm.pgctl(buf.decode('UTF-8')) print('NORMED:') print(buf) assert buf == S('''(?s)\ ==> playground/ohhi/logs/current <== {TIMESTAMP} [oe].* ==> playground/sweet/logs/current <== {TIMESTAMP} sweet {TIMESTAMP} sweet_error ==> playground/ohhi/logs/current <== .*{TIMESTAMP} .*$''') assert p.poll() is None p.terminate() assert p.wait() == -15 def it_fails_for_nonexistent_services(self, in_example_dir): assert_command( ('pgctl', 'log', 'i-dont-exist'), '', '''\ [pgctl] ERROR: No such service: 'playground/i-dont-exist' ''', 1, ) def it_is_line_buffered(self):
MIT License
gussand/anubis
api/anubis/views/admin/autograde.py
admin_autograde_assignment_assignment_id
python
def admin_autograde_assignment_assignment_id(assignment_id):
    limit = get_number_arg("limit", 10)
    offset = get_number_arg("offset", 0)

    assignment = Assignment.query.filter(
        Assignment.id == assignment_id
    ).first()

    req_assert(assignment is not None, message='assignment does not exist')

    assert_course_context(assignment)

    bests = bulk_autograde(assignment_id, limit=limit, offset=offset)
    total = User.query.join(InCourse).filter(
        InCourse.course_id == assignment.course_id,
    ).count()

    return success_response({"stats": bests, "total": total})
Calculate result statistics for an assignment.

This endpoint is potentially very IO and computationally expensive. We basically
need to load the entire submission history out of the database for the given
assignment, then do calculations per user. For this reason, much of the
individual computations here are quite heavily cached.

* Due to how heavily cached the stats calculations are, once cached they will
  not be updated until there is a cache bust after the timeout. *

:param assignment_id:
:return:
https://github.com/gussand/anubis/blob/5ff4e293b84049af92b53b3bcc264c7782ffb9e6/api/anubis/views/admin/autograde.py#L59-L98
from flask import Blueprint, request from sqlalchemy.sql import or_ from anubis.models import Submission, Assignment, User, InCourse from anubis.utils.auth.http import require_admin from anubis.utils.data import req_assert from anubis.utils.http.decorators import json_response from anubis.utils.http import success_response, get_number_arg from anubis.lms.autograde import bulk_autograde, autograde, autograde_submission_result_wrapper from anubis.lms.courses import assert_course_context from anubis.lms.questions import get_assigned_questions from anubis.utils.cache import cache from anubis.utils.visuals.assignments import ( get_admin_assignment_visual_data, get_assignment_history, get_assignment_sundial, ) autograde_ = Blueprint("admin-autograde", __name__, url_prefix="/admin/autograde") @autograde_.route('/cache-reset/<string:assignment_id>') @require_admin() @cache.memoize(timeout=60) @json_response def admin_autograde_cache_reset(assignment_id: str): assignment = Assignment.query.filter( Assignment.id == assignment_id ).first() req_assert(assignment is not None, message='assignment does not exist') assert_course_context(assignment) cache.delete_memoized(bulk_autograde) cache.delete_memoized(autograde) cache.delete_memoized(get_assignment_history) cache.delete_memoized(get_admin_assignment_visual_data) cache.delete_memoized(get_assignment_sundial) return success_response({ 'message': 'success' }) @autograde_.route("/assignment/<string:assignment_id>") @require_admin() @cache.memoize(timeout=60) @json_response
MIT License
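Because this is a Flask view behind require_admin, it is consumed over HTTP; a hedged sketch using the limit/offset query arguments the view reads, with a hypothetical base URL and auth cookie (the real mount prefix and auth mechanism depend on the deployment):

import requests

base_url = "https://anubis.example.com/api/admin/autograde"  # hypothetical prefix
response = requests.get(
    f"{base_url}/assignment/some-assignment-id",  # placeholder assignment id
    params={"limit": 10, "offset": 0},
    cookies={"token": "admin-session-token"},     # hypothetical admin session
)
print(response.json())  # expected to carry the "stats" list and "total" count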
twisted/txaws
txaws/server/call.py
Call.get_raw_params
python
def get_raw_params(self):
    return self._raw_params.copy()
Return a C{dict} holding the raw API call parameters.

The format of the dictionary is C{{'ParamName': param_value}}.
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/call.py#L54-L59
from uuid import uuid4

from txaws.version import ec2_api as ec2_api_version
from txaws.server.exception import APIError


class Call(object):

    def __init__(self, raw_params=None, principal=None, action=None,
                 version=None, id=None):
        if id is None:
            id = str(uuid4())
        self.id = id
        self._raw_params = {}
        if raw_params is not None:
            self._raw_params.update(raw_params)
        self.action = action
        if version is None:
            version = ec2_api_version
        self.version = version
        self.principal = principal

    def parse(self, schema, strict=True):
        self.args, self.rest = schema.extract(self._raw_params)
        if strict and self.rest:
            raise APIError(400, "UnknownParameter", "The parameter %s is not "
                           "recognized" % self.rest.keys()[0])
MIT License
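A small self-contained sketch constructing a Call and reading back its raw parameters; the parameter names are arbitrary EC2-style examples:

from txaws.server.call import Call

call = Call(
    raw_params={"InstanceId.1": "i-12345678", "MaxResults": "10"},
    action="DescribeInstances",
)
params = call.get_raw_params()
print(params)           # {'InstanceId.1': 'i-12345678', 'MaxResults': '10'}
params["Extra"] = "x"   # mutating the returned copy leaves the Call's own params untouched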