repository_name       stringlengths   7 - 107
function_path         stringlengths   4 - 190
function_identifier   stringlengths   1 - 236
language              stringclasses   1 value
function              stringlengths   9 - 647k
docstring             stringlengths   5 - 488k
function_url          stringlengths   71 - 285
context               stringlengths   0 - 2.51M
license               stringclasses   5 values
linuxforhealth/connect
connect/support/encoding.py
ConnectEncoder.default
python
def default(self, o: Any) -> Any:
    if isinstance(o, (datetime.date, datetime.datetime, datetime.time)):
        return o.isoformat()
    elif isinstance(o, uuid.UUID):
        return str(o)
    elif isinstance(o, bytes):
        return base64.b64encode(o).decode()
    elif isinstance(o, decimal.Decimal):
        return float(o)
    else:
        return super().default(o)
Overridden to customize the encoding process. :param o: The current object to encode
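A minimal usage sketch of the encoder above (not part of the original row; the import path is assumed from the row's function_path):

import datetime
import decimal
import json
import uuid

from connect.support.encoding import ConnectEncoder  # path assumed from function_path

payload = {
    "id": uuid.uuid4(),                     # serialized via str()
    "created": datetime.datetime.utcnow(),  # serialized as ISO 8601
    "raw": b"\x00\x01",                     # serialized as base64 text
    "amount": decimal.Decimal("9.99"),      # serialized as float
}
print(json.dumps(payload, cls=ConnectEncoder))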
https://github.com/linuxforhealth/connect/blob/b7ab836c8eb580bdf7baab94140be0fbb92c502b/connect/support/encoding.py#L25-L39
import base64
import decimal
import json
from json import JSONEncoder
import datetime
from typing import Any, Optional
import uuid


class ConnectEncoder(JSONEncoder):
Apache License 2.0
mozilla/firefox-flicks
vendor-local/lib/python/celery/worker/consumer.py
Consumer.on_task
python
def on_task(self, task, task_reserved=task_reserved,
            to_system_tz=timezone.to_system):
    if task.revoked():
        return

    if self._does_info:
        info('Got task from broker: %s', task)

    if self.event_dispatcher.enabled:
        self.event_dispatcher.send(
            'task-received',
            uuid=task.id, name=task.name,
            args=safe_repr(task.args), kwargs=safe_repr(task.kwargs),
            retries=task.request_dict.get('retries', 0),
            eta=task.eta and task.eta.isoformat(),
            expires=task.expires and task.expires.isoformat(),
        )

    if task.eta:
        try:
            if task.utc:
                eta = to_timestamp(to_system_tz(task.eta))
            else:
                eta = to_timestamp(task.eta, timezone.local)
        except OverflowError, exc:
            error("Couldn't convert eta %s to timestamp: %r. Task: %r",
                  task.eta, exc, task.info(safe=True), exc_info=True)
            task.acknowledge()
        else:
            self.qos.increment_eventually()
            self.timer.apply_at(
                eta, self.apply_eta_task, (task, ), priority=6,
            )
    else:
        task_reserved(task)
        self._quick_put(task)
Handle received task. If the task has an `eta` we enter it into the ETA schedule, otherwise we move it to the ready queue for immediate processing.
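A standalone sketch of the routing the docstring describes, reduced to its core (this is not the celery API; the names here are illustrative only):

import time
from datetime import datetime, timedelta

def dispatch(eta, run_now, schedule_at):
    # Tasks with an ETA go onto a timer schedule; everything else is queued immediately.
    if eta is not None:
        schedule_at(eta.timestamp())  # datetime -> POSIX timestamp, as on_task does via to_timestamp()
    else:
        run_now()

dispatch(datetime.now() + timedelta(seconds=30),
         run_now=lambda: print("-> ready queue"),
         schedule_at=lambda ts: print("-> ETA schedule at", time.ctime(ts)))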
https://github.com/mozilla/firefox-flicks/blob/ad19ed59aac682744badae6d19a149327037f293/vendor-local/lib/python/celery/worker/consumer.py#L500-L541
from __future__ import absolute_import from __future__ import with_statement import logging import socket import threading from time import sleep from Queue import Empty from kombu.syn import _detect_environment from kombu.utils.encoding import safe_repr from kombu.utils.eventio import READ, WRITE, ERR from celery.app import app_or_default from celery.datastructures import AttributeDict from celery.exceptions import InvalidTaskError, SystemTerminate from celery.task.trace import build_tracer from celery.utils import text from celery.utils import timer2 from celery.utils.functional import noop from celery.utils.log import get_logger from celery.utils.timer2 import to_timestamp from celery.utils.timeutils import humanize_seconds, timezone from . import state from .bootsteps import StartStopComponent from .control import Panel from .heartbeat import Heart RUN = 0x1 CLOSE = 0x2 AMQHEARTBEAT_RATE = 2.0 PREFETCH_COUNT_MAX = 0xFFFF UNKNOWN_FORMAT = """\ Received and deleted unknown message. Wrong destination?!? The full contents of the message body was: %s """ UNKNOWN_TASK_ERROR = """\ Received unregistered task of type %s. The message has been ignored and discarded. Did you remember to import the module containing this task? Or maybe you are using relative imports? More: http://docs.celeryq.org/en/latest/userguide/tasks.html#names The full contents of the message body was: %s """ INVALID_TASK_ERROR = """\ Received invalid task message: %s The message has been ignored and discarded. Please ensure your message conforms to the task message format: http://docs.celeryq.org/en/latest/internals/protocol.html The full contents of the message body was: %s """ MESSAGE_REPORT = """\ body: %s {content_type:%s content_encoding:%s delivery_info:%s}\ """ RETRY_CONNECTION = """\ consumer: Connection to broker lost. \ Trying to re-establish the connection...\ """ CONNECTION_ERROR = """\ consumer: Cannot connect to %s: %s. 
%s """ CONNECTION_RETRY = """\ Trying again %(when)s...\ """ CONNECTION_FAILOVER = """\ Will retry using next failover.\ """ task_reserved = state.task_reserved logger = get_logger(__name__) info, warn, error, crit = (logger.info, logger.warn, logger.error, logger.critical) def debug(msg, *args, **kwargs): logger.debug('consumer: %s' % (msg, ), *args, **kwargs) def dump_body(m, body): return "%s (%sb)" % (text.truncate(safe_repr(body), 1024), len(m.body)) class Component(StartStopComponent): name = 'worker.consumer' last = True def Consumer(self, w): return (w.consumer_cls or Consumer if w.hub else BlockingConsumer) def create(self, w): prefetch_count = w.concurrency * w.prefetch_multiplier c = w.consumer = self.instantiate( self.Consumer(w), w.ready_queue, hostname=w.hostname, send_events=w.send_events, init_callback=w.ready_callback, initial_prefetch_count=prefetch_count, pool=w.pool, timer=w.timer, app=w.app, controller=w, hub=w.hub, ) return c class QoS(object): prev = None def __init__(self, consumer, initial_value): self.consumer = consumer self._mutex = threading.RLock() self.value = initial_value or 0 def increment_eventually(self, n=1): with self._mutex: if self.value: self.value = self.value + max(n, 0) return self.value def decrement_eventually(self, n=1): with self._mutex: if self.value: self.value -= n return self.value def set(self, pcount): if pcount != self.prev: new_value = pcount if pcount > PREFETCH_COUNT_MAX: warn('QoS: Disabled: prefetch_count exceeds %r', PREFETCH_COUNT_MAX) new_value = 0 debug('basic.qos: prefetch_count->%s', new_value) self.consumer.qos(prefetch_count=new_value) self.prev = pcount return pcount def update(self): with self._mutex: return self.set(self.value) class Consumer(object): ready_queue = None send_events = False init_callback = None hostname = None initial_prefetch_count = 0 event_dispatcher = None heart = None connection = None task_consumer = None broadcast_consumer = None pidbox_node = None _pidbox_node_shutdown = None _pidbox_node_stopped = None pool = None timer = None _state = None def __init__(self, ready_queue, init_callback=noop, send_events=False, hostname=None, initial_prefetch_count=2, pool=None, app=None, timer=None, controller=None, hub=None, amqheartbeat=None, **kwargs): self.app = app_or_default(app) self.connection = None self.task_consumer = None self.controller = controller self.broadcast_consumer = None self.ready_queue = ready_queue self.send_events = send_events self.init_callback = init_callback self.hostname = hostname or socket.gethostname() self.initial_prefetch_count = initial_prefetch_count self.event_dispatcher = None self.heart = None self.pool = pool self.timer = timer or timer2.default_timer pidbox_state = AttributeDict(app=self.app, hostname=self.hostname, listener=self, consumer=self) self.pidbox_node = self.app.control.mailbox.Node(self.hostname, state=pidbox_state, handlers=Panel.data) conninfo = self.app.connection() self.connection_errors = conninfo.connection_errors self.channel_errors = conninfo.channel_errors self._does_info = logger.isEnabledFor(logging.INFO) self.strategies = {} if hub: hub.on_init.append(self.on_poll_init) self.hub = hub self._quick_put = self.ready_queue.put self.amqheartbeat = amqheartbeat if self.amqheartbeat is None: self.amqheartbeat = self.app.conf.BROKER_HEARTBEAT if not hub: self.amqheartbeat = 0 if _detect_environment() == 'gevent': self.app.conf.BROKER_CONNECTION_TIMEOUT = None def update_strategies(self): S = self.strategies app = self.app loader = app.loader hostname = 
self.hostname for name, task in self.app.tasks.iteritems(): S[name] = task.start_strategy(app, self) task.__trace__ = build_tracer(name, task, loader, hostname) def start(self): self.init_callback(self) while self._state != CLOSE: self.maybe_shutdown() try: self.reset_connection() self.consume_messages() except self.connection_errors + self.channel_errors: error(RETRY_CONNECTION, exc_info=True) def on_poll_init(self, hub): hub.update_readers(self.connection.eventmap) self.connection.transport.on_poll_init(hub.poller) def consume_messages(self, sleep=sleep, min=min, Empty=Empty, hbrate=AMQHEARTBEAT_RATE): with self.hub as hub: qos = self.qos update_qos = qos.update update_readers = hub.update_readers readers, writers = hub.readers, hub.writers poll = hub.poller.poll fire_timers = hub.fire_timers scheduled = hub.timer._queue connection = self.connection hb = self.amqheartbeat hbtick = connection.heartbeat_check on_poll_start = connection.transport.on_poll_start on_poll_empty = connection.transport.on_poll_empty strategies = self.strategies drain_nowait = connection.drain_nowait on_task_callbacks = hub.on_task keep_draining = connection.transport.nb_keep_draining if hb and connection.supports_heartbeats: hub.timer.apply_interval( hb * 1000.0 / hbrate, hbtick, (hbrate, )) def on_task_received(body, message): if on_task_callbacks: [callback() for callback in on_task_callbacks] try: name = body['task'] except (KeyError, TypeError): return self.handle_unknown_message(body, message) try: strategies[name](message, body, message.ack_log_error) except KeyError, exc: self.handle_unknown_task(body, message, exc) except InvalidTaskError, exc: self.handle_invalid_task(body, message, exc) self.task_consumer.callbacks = [on_task_received] self.task_consumer.consume() debug('Ready to accept tasks!') while self._state != CLOSE and self.connection: if state.should_stop: raise SystemExit() elif state.should_terminate: raise SystemTerminate() poll_timeout = fire_timers() if scheduled else 1 if qos.prev != qos.value: update_qos() update_readers(on_poll_start()) if readers or writers: connection.more_to_read = True while connection.more_to_read: try: events = poll(poll_timeout) except ValueError: return if not events: on_poll_empty() for fileno, event in events or (): try: if event & READ: readers[fileno](fileno, event) if event & WRITE: writers[fileno](fileno, event) if event & ERR: for handlermap in readers, writers: try: handlermap[fileno](fileno, event) except KeyError: pass except (KeyError, Empty): continue except socket.error: if self._state != CLOSE: raise if keep_draining: drain_nowait() poll_timeout = 0 else: connection.more_to_read = False else: sleep(min(poll_timeout, 0.1))
BSD 3-Clause New or Revised License
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/typeclasses/tags.py
TagHandler.batch_add
python
def batch_add(self, *args):
    keys = defaultdict(list)
    data = {}
    for tup in args:
        tup = make_iter(tup)
        nlen = len(tup)
        if nlen == 1:
            keys[None].append(tup[0])
        elif nlen == 2:
            keys[tup[1]].append(tup[0])
        else:
            keys[tup[1]].append(tup[0])
            data[tup[1]] = tup[2]
    for category, key in keys.items():
        self.add(tag=key, category=category, data=data.get(category, None))
Batch-add tags from a list of tuples. Args: tuples (tuple or str): Any number of `tagstr` keys, `(keystr, category)` or `(keystr, category, data)` tuples. Notes: This will generate a minimal number of self.add calls, based on the number of categories involved (including `None`) (data is not unique and may be overwritten by the content of a later tuple with the same category).
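A usage sketch of the accepted tuple forms (the handler and object names are hypothetical):

# Bare keys, (key, category) pairs and (key, category, data) triples may be
# mixed in one call; keys sharing a category collapse into a single self.add().
obj.tags.batch_add(
    "red",                           # bare tagstr, category None
    ("fast", "movement"),            # (keystr, category)
    ("heavy", "load", "over 10 kg"), # (keystr, category, data)
)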
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/typeclasses/tags.py#L428-L456
from collections import defaultdict from django.conf import settings from django.db import models from evennia.utils.utils import to_str, make_iter _TYPECLASS_AGGRESSIVE_CACHE = settings.TYPECLASS_AGGRESSIVE_CACHE class Tag(models.Model): db_key = models.CharField( "key", max_length=255, null=True, help_text="tag identifier", db_index=True ) db_category = models.CharField( "category", max_length=64, null=True, help_text="tag category", db_index=True ) db_data = models.TextField( "data", null=True, blank=True, help_text="optional data field with extra information. This is not searched for.", ) db_model = models.CharField( "model", max_length=32, null=True, help_text="database model to Tag", db_index=True ) db_tagtype = models.CharField( "tagtype", max_length=16, null=True, help_text="overall type of Tag", db_index=True ) class Meta(object): verbose_name = "Tag" unique_together = (("db_key", "db_category", "db_tagtype", "db_model"),) index_together = (("db_key", "db_category", "db_tagtype", "db_model"),) def __lt__(self, other): return str(self) < str(other) def __str__(self): return str( "<Tag: %s%s>" % (self.db_key, "(category:%s)" % self.db_category if self.db_category else "") ) class TagHandler(object): _m2m_fieldname = "db_tags" _tagtype = None def __init__(self, obj): self.obj = obj self._objid = obj.id self._model = obj.__dbclass__.__name__.lower() self._cache = {} self._catcache = {} self._cache_complete = False def _fullcache(self): query = { "%s__id" % self._model: self._objid, "tag__db_model": self._model, "tag__db_tagtype": self._tagtype, } tags = [ conn.tag for conn in getattr(self.obj, self._m2m_fieldname).through.objects.filter(**query) ] self._cache = dict( ( "%s-%s" % ( to_str(tag.db_key).lower(), tag.db_category.lower() if tag.db_category else None, ), tag, ) for tag in tags ) self._cache_complete = True def _getcache(self, key=None, category=None): key = key.strip().lower() if key else None category = category.strip().lower() if category else None if key: cachekey = "%s-%s" % (key, category) tag = _TYPECLASS_AGGRESSIVE_CACHE and self._cache.get(cachekey, None) if tag and (not hasattr(tag, "pk") and tag.pk is None): tag = None del self._cache[cachekey] if tag: return [tag] else: query = { "%s__id" % self._model: self._objid, "tag__db_model": self._model, "tag__db_tagtype": self._tagtype, "tag__db_key__iexact": key.lower(), "tag__db_category__iexact": category.lower() if category else None, } conn = getattr(self.obj, self._m2m_fieldname).through.objects.filter(**query) if conn: tag = conn[0].tag self._cache[cachekey] = tag return [tag] else: catkey = "-%s" % category if _TYPECLASS_AGGRESSIVE_CACHE and catkey in self._catcache: return [tag for key, tag in self._cache.items() if key.endswith(catkey)] else: query = { "%s__id" % self._model: self._objid, "tag__db_model": self._model, "tag__db_tagtype": self._tagtype, "tag__db_category__iexact": category.lower() if category else None, } tags = [ conn.tag for conn in getattr(self.obj, self._m2m_fieldname).through.objects.filter( **query ) ] for tag in tags: cachekey = "%s-%s" % (tag.db_key, category) self._cache[cachekey] = tag self._catcache[catkey] = True return tags return [] def _setcache(self, key, category, tag_obj): if not key: return key, category = key.strip().lower(), category.strip().lower() if category else category cachekey = "%s-%s" % (key, category) catkey = "-%s" % category self._cache[cachekey] = tag_obj self._catcache.pop(catkey, None) self._cache_complete = False def _delcache(self, key, category): key, 
category = key.strip().lower(), category.strip().lower() if category else category catkey = "-%s" % category if key: cachekey = "%s-%s" % (key, category) self._cache.pop(cachekey, None) else: [self._cache.pop(key, None) for key in self._cache if key.endswith(catkey)] self._catcache.pop(catkey, None) self._cache_complete = False def reset_cache(self): self._cache_complete = False self._cache = {} self._catcache = {} def add(self, tag=None, category=None, data=None): if not tag: return if not self._cache_complete: self._fullcache() for tagstr in make_iter(tag): if not tagstr: continue tagstr = str(tagstr).strip().lower() category = str(category).strip().lower() if category else category data = str(data) if data is not None else None tagobj = self.obj.__class__.objects.create_tag( key=tagstr, category=category, data=data, tagtype=self._tagtype ) getattr(self.obj, self._m2m_fieldname).add(tagobj) self._setcache(tagstr, category, tagobj) def get(self, key=None, default=None, category=None, return_tagobj=False, return_list=False): ret = [] for keystr in make_iter(key): ret.extend( [ tag if return_tagobj else to_str(tag.db_key) for tag in self._getcache(keystr, category) ] ) if return_list: return ret if ret else [default] if default is not None else [] return ret[0] if len(ret) == 1 else (ret if ret else default) def remove(self, key=None, category=None): if not key: self.clear(category=category) return for key in make_iter(key): if not (key or key.strip()): continue tagstr = key.strip().lower() category = category.strip().lower() if category else category tagobj = getattr(self.obj, self._m2m_fieldname).filter( db_key=tagstr, db_category=category, db_model=self._model, db_tagtype=self._tagtype ) if tagobj: getattr(self.obj, self._m2m_fieldname).remove(tagobj[0]) self._delcache(key, category) def clear(self, category=None): if not self._cache_complete: self._fullcache() query = { "%s__id" % self._model: self._objid, "tag__db_model": self._model, "tag__db_tagtype": self._tagtype, } if category: query["tag__db_category"] = category.strip().lower() getattr(self.obj, self._m2m_fieldname).through.objects.filter(**query).delete() self._cache = {} self._catcache = {} self._cache_complete = False def all(self, return_key_and_category=False, return_objs=False): if not self._cache_complete: self._fullcache() tags = sorted(self._cache.values()) if return_key_and_category: return [(to_str(tag.db_key), tag.db_category) for tag in tags] elif return_objs: return tags else: return [to_str(tag.db_key) for tag in tags]
MIT License
basho/riak-python-client
riak/transports/transport.py
Transport.get_search_schema
python
def get_search_schema(self, schema):
    raise NotImplementedError
Returns a yokozuna search schema.
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L238-L242
import base64 import random import threading import os import json import platform from six import PY2 from riak.transports.feature_detect import FeatureDetection class Transport(FeatureDetection): def _get_client_id(self): return self._client_id def _set_client_id(self, value): self._client_id = value client_id = property(_get_client_id, _set_client_id, doc="""the client ID for this connection""") @classmethod def make_random_client_id(self): if PY2: return ('py_%s' % base64.b64encode(str(random.randint(1, 0x40000000)))) else: return ('py_%s' % base64.b64encode(bytes(str(random.randint(1, 0x40000000)), 'ascii'))) @classmethod def make_fixed_client_id(self): machine = platform.node() process = os.getpid() thread = threading.currentThread().getName() return base64.b64encode('%s|%s|%s' % (machine, process, thread)) def ping(self): raise NotImplementedError def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None, notfound_ok=None, head_only=False): raise NotImplementedError def put(self, robj, w=None, dw=None, pw=None, return_body=None, if_none_match=None, timeout=None): raise NotImplementedError def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, timeout=None): raise NotImplementedError def ts_describe(self, table): raise NotImplementedError def ts_get(self, table, key): raise NotImplementedError def ts_put(self, tsobj): raise NotImplementedError def ts_delete(self, table, key): raise NotImplementedError def ts_query(self, table, query, interpolations=None): raise NotImplementedError def ts_stream_keys(self, table, timeout=None): raise NotImplementedError def get_buckets(self, bucket_type=None, timeout=None): raise NotImplementedError def stream_buckets(self, bucket_type=None, timeout=None): raise NotImplementedError def get_bucket_props(self, bucket): raise NotImplementedError def set_bucket_props(self, bucket, props): raise NotImplementedError def get_bucket_type_props(self, bucket_type): raise NotImplementedError def set_bucket_type_props(self, bucket_type, props): raise NotImplementedError def clear_bucket_props(self, bucket): raise NotImplementedError def get_keys(self, bucket, timeout=None): raise NotImplementedError def stream_keys(self, bucket, timeout=None): raise NotImplementedError def mapred(self, inputs, query, timeout=None): raise NotImplementedError def stream_mapred(self, inputs, query, timeout=None): raise NotImplementedError def set_client_id(self, client_id): raise NotImplementedError def get_client_id(self): raise NotImplementedError def create_search_index(self, index, schema=None, n_val=None, timeout=None): raise NotImplementedError def get_search_index(self, index): raise NotImplementedError def list_search_indexes(self): raise NotImplementedError def delete_search_index(self, index): raise NotImplementedError def create_search_schema(self, schema, content): raise NotImplementedError
Apache License 2.0
byceps/byceps
byceps/util/checkdigit.py
calculate_check_digit
python
def calculate_check_digit(chars: str) -> int:
    chars = chars.strip().upper()
    total_weight = calculate_total_weight(chars)
    return (10 - (total_weight % 10)) % 10
Calculate the check digit for the given value, using a modified Luhn algorithm to support not only digits in the value but also letters. Based on https://wiki.openmrs.org/display/docs/Check+Digit+Algorithm
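A small worked example of the final step (the total weight is a hypothetical value; calculate_total_weight is defined elsewhere in the module):

total_weight = 37                                # suppose calculate_total_weight(chars) returned 37
check_digit = (10 - (total_weight % 10)) % 10    # (10 - 7) % 10 == 3
assert (total_weight + check_digit) % 10 == 0    # appending the digit makes the weighted sum a multiple of 10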
https://github.com/byceps/byceps/blob/138f928e98fd1e3d79943e1a8744ea04cef465b5/byceps/util/checkdigit.py#L17-L30
import math
from string import ascii_uppercase, digits
from typing import Iterator

VALID_CHARS = frozenset(ascii_uppercase + digits)
BSD 3-Clause New or Revised License
bububa/pytop
pyTOP/api.py
TOP.create
python
def create(self, data, fields=[], models={}):
    if not fields:
        fields = self.fields
    if not models and hasattr(self, 'models'):
        models = self.models
    for field in fields:
        setattr(self, field, None)
    if not data:
        return None
    for k, v in data.iteritems():
        if type(v) in (str, unicode):
            v = v.strip()
        if models and k in models:
            if type(v) == dict:
                lists = []
                for k2, v2 in v.iteritems():
                    if type(v2) == list:
                        for d in v2:
                            model = models[k]()
                            lists.append(model.create(d))
                if not lists:
                    model = models[k]()
                    v = model.create(v)
                else:
                    v = lists
            else:
                model = models[k]()
                v = model.create(v)
        setattr(self, k, v)
    return self
Create model attributes
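A hedged usage sketch in the same Python 2 style as the code above (the model class and payload are hypothetical, not from the repository):

class User(TOP):
    fields = ['nick', 'created']
    models = {'created': TOPDate}      # nested field parsed by TOPDate.create()

user = User()
user.create({'nick': 'alice', 'created': '2012-01-01 10:00:00'})
print(user.nick)     # 'alice'
print(user.created)  # datetime parsed by dateutil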
https://github.com/bububa/pytop/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/api.py#L182-L211
from os import getenv import time import datetime from dateutil.parser import parse as date_parse try : import json except ImportError: import simplejson as json import urllib from hashlib import md5 import base64 from errors import TOPException import requests class TOPDate: def create(self, date_str): try: return date_parse(date_str) except: return date_str class TOPRequest : def __init__(self, method_name) : self.method_name = method_name self.api_params = {} def get_api_params(self) : return self.api_params def get_method_name(self) : return self.method_name def __setitem__(self, param_name, param_value) : self.api_params[param_name] = param_value class TOP(object): def __init__(self, API_KEY=None, APP_SECRET=None, ENVIRONMENT=None): self.PRODUCT_API_KEY = getenv('TOP_PRODUCT_API_KEY') self.PRODUCT_APP_SECRET = getenv('TOP_PRODUCT_APP_SECRET') self.PRODUCT_API_URL = getenv('TOP_PRODUCT_API_URL') if getenv('TOP_PRODUCT_API_URL') else 'http://gw.api.taobao.com/router/rest' self.PRODUCT_LOGIN_URL = 'http://container.api.taobao.com/container?appkey=' self.LOGOUT_URL = 'http://container.api.taobao.com/container/logoff' self.TaobaoID_URL = 'http://container.api.taobao.com/container/identify' self.SANDBOX_API_KEY = getenv('TOP_SANDBOX_API_KEY') self.SANDBOX_APP_SECRET = getenv('TOP_SANDBOX_APP_SECRET') self.SANDBOX_API_URL = getenv('TOP_SANDBOX_API_URL') if getenv('TOP_SANDBOX_API_URL') else 'http://gw.api.tbsandbox.com/router/rest' self.SANDBOX_LOGIN_URL = 'http://container.api.tbsandbox.com/container?appkey=' self.SANDBOX_USER_REGISTER_WITHOUT_VALIDATE = 'http://mini.tbsandbox.com/minisandbox/user/register.htm' self.REFRESH_TOKEN_URL = 'http://container.open.taobao.com/container/refresh' self.ENVIRONMENT = ENVIRONMENT if ENVIRONMENT else (getenv('TOP_ENVIRONMENT') if getenv('TOP_ENVIRONMENT') else 'sandbox') self.FORMAT = 'json' self.SIGN_METHOD = 'md5' self.API_VERSION = '2.0' self.SDK_VERSON = 'top-sdk-php-20111202' if self.ENVIRONMENT == 'sandbox': if API_KEY: self.SANDBOX_API_KEY = API_KEY if APP_SECRET: self.SANDBOX_APP_SECRET = APP_SECRET self.GATEWAY = self.SANDBOX_API_URL self.LOGIN_URL = self.SANDBOX_LOGIN_URL self.API_KEY = self.SANDBOX_API_KEY self.APP_SECRET = self.SANDBOX_APP_SECRET elif self.ENVIRONMENT == 'product': if API_KEY: self.PRODUCT_API_KEY = API_KEY if APP_SECRET: self.PRODUCT_APP_SECRET = APP_SECRET self.GATEWAY = self.PRODUCT_API_URL self.LOGIN_URL = self.PRODUCT_LOGIN_URL self.API_KEY = self.PRODUCT_API_KEY self.APP_SECRET = self.PRODUCT_APP_SECRET else: raise TOPException(0); self.AUTH_URL = 'http://container.api.taobao.com/container?appkey=%s' % self.API_KEY def set_format(self, format): if format in ('json','xml'): self.FORMAT = format def _sign(self,params): for k, v in params.iteritems(): if type(v) == int: v = str(v) elif type(v) == float: v = '%.2f'%v elif type(v) in (list, set): v = ','.join([str(i) for i in v]) elif type(v) == bool: v = 'true' if v else 'false' elif type(v) == datetime.datetime: v = v.strftime('%Y-%m-%d %X') if type(v) == unicode: params[k] = v.encode('utf-8') else: params[k] = v src = self.APP_SECRET + ''.join(["%s%s" % (k, v) for k, v in sorted(params.iteritems())]) return md5(src).hexdigest().upper() def decode_params(top_parameters) : params = {} param_string = base64.b64decode(top_parameters) for p in param_string.split('&') : key, value = p.split('=') params[key] = value return params def _get_timestamp(self): if(time.timezone == 0): gmtimefix = 28800 stime = time.gmtime(gmtimefix + time.time()) else: stime = time.localtime() 
strtime = time.strftime('%Y-%m-%d %X', stime) return strtime def execute(self, request, session=None, method='post'): params = { 'app_key' : self.API_KEY, 'v' : self.API_VERSION, 'format' : self.FORMAT, 'partner_id' : self.SDK_VERSON } api_params = request.get_api_params() params['timestamp'] = self._get_timestamp() params['method'] = request.get_method_name() if session is not None : params['session'] = session params.update(api_params) params['sign'] = self._sign(params) method = method.lower() if method == 'get': form_data = urllib.urlencode(params) rsp = requests.get('%s?%s'%(self.GATEWAY, form_data)) elif method == 'post': rsp = requests.post(self.GATEWAY, data=params) rsp = json.loads(rsp.content) if rsp.has_key('error_response'): error_code = rsp['error_response']['code'] if 'sub_msg' in rsp['error_response']: msg = '%s [%s]'%(rsp['error_response']['sub_msg'], rsp['error_response']['msg']) else: msg = rsp['error_response']['msg'] raise TOPException(error_code, msg) else: rsp = rsp[request.get_method_name().replace('.','_')[7:] + '_response'] if not rsp: return None return rsp
ISC License
danielfrg/jupyterhub-kubernetes_spawner
kubernetes_spawner/swagger_client/models/v1_rbd_volume_source.py
V1RBDVolumeSource.read_only
python
def read_only(self):
    return self._read_only
Gets the read_only of this V1RBDVolumeSource. ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it :return: The read_only of this V1RBDVolumeSource. :rtype: bool
https://github.com/danielfrg/jupyterhub-kubernetes_spawner/blob/15a2b63ef719f8c3ff83221333f7de69c1495512/kubernetes_spawner/swagger_client/models/v1_rbd_volume_source.py#L225-L233
from pprint import pformat from six import iteritems class V1RBDVolumeSource(object): def __init__(self): self.swagger_types = { 'monitors': 'list[str]', 'image': 'str', 'fs_type': 'str', 'pool': 'str', 'user': 'str', 'keyring': 'str', 'secret_ref': 'V1LocalObjectReference', 'read_only': 'bool' } self.attribute_map = { 'monitors': 'monitors', 'image': 'image', 'fs_type': 'fsType', 'pool': 'pool', 'user': 'user', 'keyring': 'keyring', 'secret_ref': 'secretRef', 'read_only': 'readOnly' } self._monitors = None self._image = None self._fs_type = None self._pool = None self._user = None self._keyring = None self._secret_ref = None self._read_only = None @property def monitors(self): return self._monitors @monitors.setter def monitors(self, monitors): self._monitors = monitors @property def image(self): return self._image @image.setter def image(self, image): self._image = image @property def fs_type(self): return self._fs_type @fs_type.setter def fs_type(self, fs_type): self._fs_type = fs_type @property def pool(self): return self._pool @pool.setter def pool(self, pool): self._pool = pool @property def user(self): return self._user @user.setter def user(self, user): self._user = user @property def keyring(self): return self._keyring @keyring.setter def keyring(self, keyring): self._keyring = keyring @property def secret_ref(self): return self._secret_ref @secret_ref.setter def secret_ref(self, secret_ref): self._secret_ref = secret_ref @property
Apache License 2.0
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_8/api/syslog_api.py
SyslogApi.api28_syslog_servers_settings_get_with_http_info
python
def api28_syslog_servers_settings_get_with_http_info(
    self,
    authorization=None,
    x_request_id=None,
    filter=None,
    limit=None,
    offset=None,
    sort=None,
    total_item_count=None,
    async_req=False,
    _return_http_data_only=False,
    _preload_content=True,
    _request_timeout=None,
):
    if sort is not None:
        if not isinstance(sort, list):
            sort = [sort]
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]
    if 'limit' in params and params['limit'] < 1:
        raise ValueError("Invalid value for parameter `limit` when calling `api28_syslog_servers_settings_get`, must be a value greater than or equal to `1`")
    if 'offset' in params and params['offset'] < 0:
        raise ValueError("Invalid value for parameter `offset` when calling `api28_syslog_servers_settings_get`, must be a value greater than or equal to `0`")
    collection_formats = {}
    path_params = {}
    query_params = []
    if 'filter' in params:
        query_params.append(('filter', params['filter']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'offset' in params:
        query_params.append(('offset', params['offset']))
    if 'sort' in params:
        query_params.append(('sort', params['sort']))
        collection_formats['sort'] = 'csv'
    if 'total_item_count' in params:
        query_params.append(('total_item_count', params['total_item_count']))
    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']
    form_params = []
    local_var_files = {}
    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])
    auth_settings = []
    return self.api_client.call_api(
        '/api/2.8/syslog-servers/settings', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SyslogServerSettingsGetResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
List syslog settings Displays syslog settings. Values include `continuation_token`, `items`, `more_items_remaining`, and `total_item_count`. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api28_syslog_servers_settings_get_with_http_info(async_req=True) >>> result = thread.get() :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria. :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size. :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned. :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values. :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: SyslogServerSettingsGetResponse If the method is called asynchronously, returns the request thread.
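The calling patterns from the docstring (api is assumed to be an already constructed SyslogApi instance):

# Synchronous (default): returns the response data directly.
settings = api.api28_syslog_servers_settings_get_with_http_info(limit=10)

# Asynchronous: returns a thread-like handle, resolved with .get().
thread = api.api28_syslog_servers_settings_get_with_http_info(async_req=True)
result = thread.get()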
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_8/api/syslog_api.py#L427-L533
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class SyslogApi(object): def __init__(self, api_client): self.api_client = api_client def api28_syslog_servers_delete_with_http_info( self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.8/syslog-servers', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api28_syslog_servers_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api28_syslog_servers_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api28_syslog_servers_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: 
header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.8/syslog-servers', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SyslogServerGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api28_syslog_servers_patch_with_http_info( self, syslog_server=None, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if syslog_server is None: raise TypeError("Missing the required parameter `syslog_server` when calling `api28_syslog_servers_patch`") collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if 'syslog_server' in params: body_params = params['syslog_server'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.8/syslog-servers', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SyslogServerResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api28_syslog_servers_post_with_http_info( self, syslog_server=None, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if syslog_server is None: raise TypeError("Missing the required parameter `syslog_server` when calling `api28_syslog_servers_post`") collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None if 'syslog_server' in params: 
body_params = params['syslog_server'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.8/syslog-servers', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SyslogServerResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
BSD 2-Clause Simplified License
pyansys/pymapdl
ansys/mapdl/core/_commands/solution/twod_to_3d_analysis.py
TwoDTo3DAnalysis.map2dto3d
python
def map2dto3d(self, action="", ldstep="", sbstep="", option="", **kwargs):
    command = f"MAP2DTO3D,{action},{ldstep},{sbstep},{option}"
    return self.run(command, **kwargs)
Initiates a 2-D to 3-D analysis and maps variables.

APDL Command: MAP2DTO3D

Parameters
----------
action
    The 2-D to 3-D action to perform:

    START - Start the analysis process by rebuilding the 2-D analysis
    database (.db) based on the specified load step and substep
    information, and update nodes to their deformed positions in the
    2-D mesh.

    FINISH - Maps solution variables from the 2-D mesh to the extruded
    3-D mesh.

ldstep
    The load step number at which 2-D to 3-D analysis should occur. The
    default value is the highest load step number found in the
    Jobname.Rnnn files (for the current jobname and in the current
    directory).

sbstep
    The substep number of the specified load step (LDSTEP) at which the
    2-D to 3-D analysis should occur. The default value is the highest
    substep number found in the specified load step in the Jobname.Rnnn
    files (for the current jobname and in the current directory).

option
    Mapping option:

    (Blank) - Transfer and map all applied boundary conditions, nodal
    temperatures, loads, and surface pressures from the 2-D mesh to the
    extruded 3-D mesh. This behavior is the default.

    NOBC - No applied boundary conditions or loads are transferred from
    the 2-D mesh to the extruded 3-D mesh. Nodal temperatures (defined
    via the BF,TEMP command) are transferred.

Notes
-----
The MAP2DTO3D command initiates the 2-D to 3-D analysis process, sets
analysis options, rebuilds the database, and maps the solution variables
from the 2-D mesh to the 3-D mesh. Before issuing this command, clear the
database (/CLEAR). The LDSTEP and SBSTEP values apply only when
Action = START. For more information, see 2-D to 3-D Analysis in the
Advanced Analysis Guide.

Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
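A hedged PyMAPDL usage sketch of the two-phase flow described in the docstring (the load step and substep numbers are hypothetical):

mapdl.clear()                                  # the docstring asks for /CLEAR before starting
mapdl.map2dto3d("START", ldstep=2, sbstep=3)   # rebuild the 2-D database at the chosen step
# ... extrude the 2-D mesh into the 3-D mesh here ...
mapdl.map2dto3d("FINISH")                      # map the 2-D solution onto the extruded mesh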
https://github.com/pyansys/pymapdl/blob/e5cc21471c3a8fcef1f7b88359e38aa89cd63f73/ansys/mapdl/core/_commands/solution/twod_to_3d_analysis.py#L2-L58
class TwoDTo3DAnalysis:
MIT License
buwantaiji/dominantsparseeigenad
DominantSparseEigenAD/CG.py
setCGSubspaceSparse
python
def setCGSubspaceSparse(A, Aadjoint_to_gadjoint):
    global CGSubspaceSparse

    @staticmethod
    def forward(ctx, g, E0, b, alpha):
        Aprime = lambda v: A(v) - E0 * v
        initialx = torch.randn(b.shape[0], device=b.device, dtype=b.dtype)
        initialx = initialx - torch.matmul(alpha, initialx) * alpha
        x = CG_torch(Aprime, b, initialx, sparse=True)
        ctx.g = g
        ctx.save_for_backward(E0, alpha, x)
        return x

    @staticmethod
    def backward(ctx, grad_x):
        g = ctx.g
        E0, alpha, x = ctx.saved_tensors
        CG = CGSubspaceSparse.apply
        b = grad_x - torch.matmul(alpha, grad_x) * alpha
        grad_b = CG(g, E0, b, alpha)
        v1, v2 = -grad_b, x
        grad_alpha = -x * torch.matmul(alpha, grad_x)
        grad_E0 = -torch.matmul(v1, v2)
        grad_g = Aadjoint_to_gadjoint(v1, v2)
        return grad_g, grad_E0, grad_b, grad_alpha

    CGSubspaceSparse = type("CGSubspaceSparse", (torch.autograd.Function, ),
                            {"forward": forward, "backward": backward})
Function primitive of the low-rank CG linear system solver, where the matrix is "sparse" and represented as a function.

As a workaround for the fact that PyTorch doesn't support taking gradients of objects other than torch.Tensor, the computation graph of this primitive is wrapped compared to CGSubspace, which is the version in which the matrix A is represented as an ordinary torch.Tensor. In particular, this wrapped version is mainly used to make the back-propagation of the dominant sparse eigensolver primitive -- i.e., DominantSparseSymeig -- work properly.

(Schematic computation graph: the parameters g build A; together with E_0 this forms A - E_0 I, which is solved against b, orthogonally to alpha, to produce x.)

input: g -- The parameter(s) of interest of the matrix A, whose gradients are requested. In the current version, g must be a torch.Tensor of arbitrary shape. E0, alpha are the smallest eigenvalue and the corresponding (non-degenerate) eigenvector, respectively.

output: x. The computation solves, via the CG algorithm, a low-rank linear system of the form (A - E_0 I) x = b, alpha^T x = 0. For more details of this part, cf. https://buwantaiji.github.io/2019/10/CG-backward/

USER NOTE: The mechanism of wrapping relies on the user providing two quantities:
A -- The "sparse" representation of the matrix A, as a function.
Aadjoint_to_gadjoint -- A function that receives the adjoint of the matrix A as input and returns the adjoint of the parameters (g) as output. The input is two vectors represented as torch.Tensors, say v1 and v2, with the adjoint of A = v1 * v2^T (outer product). The user may do whatever they want with these two vectors to obtain the adjoint of g.
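A sketch of the wiring described in the USER NOTE (the diagonal operator and the E0/alpha placeholders are toy values for illustration, not the repository's eigensolver output; the module path is taken from the function_url above):

import torch
import DominantSparseEigenAD.CG as cg

n = 5
g = torch.rand(n, dtype=torch.float64) + 1.0   # parameters of A; here A = diag(g)
g.requires_grad_(True)

def A(v):                                      # "sparse" matrix as a function
    return g * v

def Aadjoint_to_gadjoint(v1, v2):              # adjoint of A is v1 v2^T; its diagonal is the adjoint of g
    return v1 * v2

cg.setCGSubspaceSparse(A, Aadjoint_to_gadjoint)

E0 = torch.tensor(0.5, dtype=torch.float64)    # placeholder eigenvalue
alpha = torch.randn(n, dtype=torch.float64)
alpha = alpha / torch.norm(alpha)              # placeholder unit eigenvector
b = torch.randn(n, dtype=torch.float64)
b = b - torch.dot(alpha, b) * alpha            # keep b orthogonal to alpha

x = cg.CGSubspaceSparse.apply(g, E0, b, alpha)  # solves (A - E0*I) x = b with alpha^T x = 0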
https://github.com/buwantaiji/dominantsparseeigenad/blob/36d534b6713ba256309b07116ebc542bee01cd51/DominantSparseEigenAD/CG.py#L73-L140
import torch def CG_torch(A, b, initialx, sparse=False): if sparse: Amap = A else: Amap = lambda v: torch.matmul(A, v) n = b.shape[0] eps = 1e-7 x = initialx r = b - Amap(x) if(torch.norm(r).item() < eps): return x d = r alpha = torch.matmul(r, r) / Amap(d).matmul(d) for i in range(n): x = x + alpha * d r_next = r - alpha * Amap(d) if(torch.norm(r_next).item() < eps): break beta = torch.matmul(r_next, r_next) / torch.matmul(r, r) r = r_next d = r + beta * d alpha = torch.matmul(r, r) / Amap(d).matmul(d) return x class CGSubspace(torch.autograd.Function): @staticmethod def forward(ctx, A, b, alpha): initialx = torch.randn(b.shape[0], device=b.device, dtype=b.dtype) initialx = initialx - torch.matmul(alpha, initialx) * alpha x = CG_torch(A, b, initialx) ctx.save_for_backward(A, alpha, x) return x @staticmethod def backward(ctx, grad_x): A, alpha, x = ctx.saved_tensors CG = CGSubspace.apply b = grad_x - torch.matmul(alpha, grad_x) * alpha grad_b = CG(A, b, alpha) grad_A = - grad_b[:, None] * x grad_alpha = - x * torch.matmul(alpha, grad_x) return grad_A, grad_b, grad_alpha
Apache License 2.0
sdispater/eloquent
eloquent/orm/mixins/soft_deletes.py
SoftDeletes._do_perform_delete_on_model
python
def _do_perform_delete_on_model(self):
    if self._force_deleting:
        return self.with_trashed().where(
            self.get_key_name(), self.get_key()
        ).force_delete()

    return self._run_soft_delete()
Perform the actual delete query on this model instance.
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/mixins/soft_deletes.py#L27-L34
from ..scopes import SoftDeletingScope


class SoftDeletes(object):

    _force_deleting = False

    @classmethod
    def boot_soft_deletes(cls, klass):
        klass.add_global_scope(SoftDeletingScope())

    def force_delete(self):
        self._force_deleting = True
        self.delete()
        self._force_deleting = False
MIT License
snudler6/time-travel
src/time_travel/patchers/poll_patcher.py
MockPollObject._get_earliest_events_for_waited_fds
python
def _get_earliest_events_for_waited_fds(self, timeout=None):
    if timeout is None or timeout < 0:
        timeout = float('inf')
    else:
        timeout = timeout / 1000.0

    timeout_timestamp = self.clock.time + timeout

    def _is_relevant_fd_event(fd, evt):
        return fd in self.poll_events and self.poll_events[fd] & evt

    ts, fd_events = self.event_pool.get_next_event(_is_relevant_fd_event)

    if ts is None or ts > timeout_timestamp:
        return timeout_timestamp, []
    else:
        return ts, fd_events
Return a list of [(fd, set(events)), ...].
https://github.com/snudler6/time-travel/blob/cf1d71bee6ef0f087cc5eee88ec08f524c2c17fc/src/time_travel/patchers/poll_patcher.py#L67-L85
from .base_patcher import BasePatcher import select as select_lib class MockPollObject(object): def __init__(self, clock, event_pool): self.clock = clock self.event_pool = event_pool self.poll_events = {} def register(self, fd, eventmask=None): if eventmask is None: eventmask = (select_lib.POLLIN | select_lib.POLLOUT | select_lib.POLLPRI) self.poll_events[fd] = eventmask def modify(self, fd, eventmask): if fd not in self.poll_events: raise IOError() self.poll_events[fd] = eventmask def unregister(self, fd): if fd not in self.poll_events: raise KeyError(fd) self.poll_events.pop(fd) def poll(self, timeout=None): timestamp, fd_events = self._get_earliest_events_for_waited_fds(timeout) if timestamp == float('inf'): raise ValueError('No relevant future events were set for infinite ' 'timout') for fd, events in fd_events: self.event_pool.remove_events_from_fds( timestamp, [(fd, event) for event in events]) self.clock.time = timestamp def _crunch_events(_event_set): out = 0 for _event in _event_set: out |= _event return out return [(fd, _crunch_events(events)) for fd, events in fd_events]
MIT License
thingsboard/python_tb_rest_client
tb_rest_client/models/models_pe/home_dashboard.py
HomeDashboard.configuration
python
def configuration(self, configuration):
    self._configuration = configuration
Sets the configuration of this HomeDashboard. :param configuration: The configuration of this HomeDashboard. # noqa: E501 :type: str
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/home_dashboard.py#L132-L140
import pprint import re import six class HomeDashboard(object): swagger_types = { 'assigned_customers': 'list[ShortCustomerInfo]', 'configuration': 'str', 'created_time': 'int', 'customer_id': 'CustomerId', 'hide_dashboard_toolbar': 'bool', 'image': 'str', 'mobile_hide': 'bool', 'mobile_order': 'int', 'name': 'str', 'owner_id': 'EntityId', 'tenant_id': 'TenantId', 'title': 'str' } attribute_map = { 'assigned_customers': 'assignedCustomers', 'configuration': 'configuration', 'created_time': 'createdTime', 'customer_id': 'customerId', 'hide_dashboard_toolbar': 'hideDashboardToolbar', 'image': 'image', 'mobile_hide': 'mobileHide', 'mobile_order': 'mobileOrder', 'name': 'name', 'owner_id': 'ownerId', 'tenant_id': 'tenantId', 'title': 'title' } def __init__(self, assigned_customers=None, configuration=None, created_time=None, customer_id=None, hide_dashboard_toolbar=None, image=None, mobile_hide=None, mobile_order=None, name=None, owner_id=None, tenant_id=None, title=None): self._assigned_customers = None self._configuration = None self._created_time = None self._customer_id = None self._hide_dashboard_toolbar = None self._image = None self._mobile_hide = None self._mobile_order = None self._name = None self._owner_id = None self._tenant_id = None self._title = None self.discriminator = None if assigned_customers is not None: self.assigned_customers = assigned_customers if configuration is not None: self.configuration = configuration if created_time is not None: self.created_time = created_time if customer_id is not None: self.customer_id = customer_id if hide_dashboard_toolbar is not None: self.hide_dashboard_toolbar = hide_dashboard_toolbar if image is not None: self.image = image if mobile_hide is not None: self.mobile_hide = mobile_hide if mobile_order is not None: self.mobile_order = mobile_order if name is not None: self.name = name if owner_id is not None: self.owner_id = owner_id if tenant_id is not None: self.tenant_id = tenant_id if title is not None: self.title = title @property def assigned_customers(self): return self._assigned_customers @assigned_customers.setter def assigned_customers(self, assigned_customers): self._assigned_customers = assigned_customers @property def configuration(self): return self._configuration @configuration.setter
Apache License 2.0
scqubits/scqubits
scqubits/utils/misc.py
process_which
python
def process_which(which: Union[int, Iterable[int]], max_index: int) -> List[int]:
    if isinstance(which, int):
        if which == -1:
            return list(range(max_index))
        return [which]
    return list(which)
Processes different ways of specifying the selection of wanted eigenvalues/eigenstates.

Parameters
----------
which:
    single index or tuple/list of integers indexing the eigenobjects.
    If which is -1, all indices up to the max_index limit are included.
max_index:
    maximum index value

Returns
-------
indices
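Worked examples that follow directly from the code above:

process_which(-1, 4)      # [0, 1, 2, 3]  (all indices up to max_index)
process_which(2, 4)       # [2]
process_which((0, 3), 4)  # [0, 3]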
https://github.com/scqubits/scqubits/blob/d8532a3b614e37b1e65b75000493ea2c25c05682/scqubits/utils/misc.py#L34-L54
import ast
import functools
import platform
import warnings
from collections.abc import Sequence
from io import StringIO
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union

import numpy as np
import qutip as qt
import scipy as sp

from scqubits.settings import IN_IPYTHON

if IN_IPYTHON:
    from tqdm.notebook import tqdm
else:
    from tqdm import tqdm
BSD 3-Clause New or Revised License
kwent/pypertrail
pypertrail/cli.py
delete
python
def delete(ctx, saved_search_id):
    r = SavedSearch(ctx.obj['TOKEN'], ctx.obj['DEBUG']).delete(saved_search_id)
    click.echo(json_dumps(r, ctx.obj['PRETTY']))
Delete a saved search.
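The same operation outside the CLI wrapper, using the client class directly (the token and id are placeholders):

from pypertrail.saved_searches import SavedSearch

client = SavedSearch('MY_PAPERTRAIL_TOKEN')
client.delete(42)   # id of the saved search to remove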
https://github.com/kwent/pypertrail/blob/9cd8dd64821433487ea93e0eb3ce6b54f79fc237/pypertrail/cli.py#L198-L201
import click import json import os import yaml from .accounts import Account from .archives import Archive from .groups import Group from .saved_searches import SavedSearch from .search import Search from .systems import System from .users import User from .utils import Utils from os.path import expanduser from time import sleep try: bool(type(unicode)) UNICODE_TYPE = unicode except NameError: UNICODE_TYPE = str CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) def json_dumps(dict, pretty=False): if pretty: return json.dumps(dict, indent=2) else: return json.dumps(dict) def parse_payload(ctx, payload): if payload: try: json_object = json.loads(payload) except: ctx.fail("Payload is not in a JSON format.") return json_object def parse_yml(ctx, path): with open(path, 'r') as stream: try: content = yaml.load(stream) except yaml.YAMLError as e: ctx.fail(e) return content @click.version_option('1.0.0') @click.group(context_settings=CONTEXT_SETTINGS) @click.option('--debug', '-d', flag_value=True, default=False, help='Debug mode.') @click.option('--pretty', '-p', flag_value=True, default=False, help='Prettify JSON output.') @click.option('--token', '-t', type=UNICODE_TYPE, envvar='PAPERTRAIL_API_TOKEN', help='Papertrail API token.') @click.option('--conf', '-c', type=UNICODE_TYPE, default="{0}/{1}".format(expanduser("~"), '.pypertrail.yml'), help='Path to config (~/.pypertrail.yml).') @click.pass_context def main(ctx, debug, pretty, token, conf): if not token: if os.path.isfile(conf): token = parse_yml(ctx, conf)['token'] else: ctx.fail("""Token not found. Token not found in PAPERTRAIL_API_TOKEN environment variable. Token not found in -t/--token option. Token not found in configuration file: {0}.""".format(conf)) ctx.obj = {} ctx.obj['PRETTY'] = pretty if pretty else None ctx.obj['DEBUG'] = debug if debug else None ctx.obj['TOKEN'] = token if token else None @main.group() @click.pass_context def accounts(ctx): pass @accounts.command() @click.pass_context def list(ctx): r = Account(ctx.obj['TOKEN'], ctx.obj['DEBUG']).list() click.echo(json_dumps(r, ctx.obj['PRETTY'])) @main.group() @click.pass_context def archives(ctx): pass @archives.command() @click.pass_context def list(ctx): r = Archive(ctx.obj['TOKEN'], ctx.obj['DEBUG']).list() click.echo(json_dumps(r, ctx.obj['PRETTY'])) @main.group() @click.pass_context def groups(ctx): pass @groups.command() @click.pass_context def list(ctx): r = Group(ctx.obj['TOKEN'], ctx.obj['DEBUG']).list() click.echo(json_dumps(r, ctx.obj['PRETTY'])) @groups.command() @click.pass_context @click.argument('group_id') def show(ctx, group_id): r = Group(ctx.obj['TOKEN'], ctx.obj['DEBUG']).show(group_id) click.echo(json_dumps(r, ctx.obj['PRETTY'])) @groups.command() @click.pass_context @click.argument('group_id') def update(ctx, group_id): r = Group(ctx.obj['TOKEN'], ctx.obj['DEBUG']).update(group_id) click.echo(json_dumps(r, ctx.obj['PRETTY'])) @groups.command() @click.pass_context @click.argument('group_id') def delete(ctx, group_id): r = Group(ctx.obj['TOKEN'], ctx.obj['DEBUG']).delete(group_id) click.echo(json_dumps(r, ctx.obj['PRETTY'])) @main.group() @click.pass_context def saved_searches(ctx): pass @saved_searches.command() @click.pass_context def list(ctx): r = SavedSearch(ctx.obj['TOKEN'], ctx.obj['DEBUG']).list() click.echo(json_dumps(r, ctx.obj['PRETTY'])) @saved_searches.command() @click.pass_context @click.argument('saved_search_id') def show(ctx, saved_search_id): r = SavedSearch(ctx.obj['TOKEN'], ctx.obj['DEBUG']).show(saved_search_id) 
click.echo(json_dumps(r, ctx.obj['PRETTY'])) @saved_searches.command() @click.pass_context @click.option('--payload', '-p', help='Parameters required by the API.') def create(ctx, payload): payload = parse_payload(ctx, payload) r = SavedSearch(ctx.obj['TOKEN'], ctx.obj['DEBUG']).create(payload) click.echo(json_dumps(r, ctx.obj['PRETTY'])) @saved_searches.command() @click.pass_context @click.argument('saved_search_id') @click.option('--payload', '-p', help='Parameters required by the API.') def update(ctx, saved_search_id, payload): payload = parse_payload(ctx, payload) r = SavedSearch(ctx.obj['TOKEN'], ctx.obj['DEBUG']).update(payload) click.echo(json_dumps(r, ctx.obj['PRETTY'])) @saved_searches.command() @click.pass_context @click.argument('saved_search_id')
MIT License
databand-ai/dbnd
modules/dbnd/src/dbnd/_vendor_package/google/protobuf/descriptor_pool.py
DescriptorPool.FindFieldByName
python
def FindFieldByName(self, full_name):
    full_name = _NormalizeFullyQualifiedName(full_name)
    message_name, _, field_name = full_name.rpartition('.')
    message_descriptor = self.FindMessageTypeByName(message_name)
    return message_descriptor.fields_by_name[field_name]
Loads the named field descriptor from the pool.

Args:
    full_name (str): The full name of the field descriptor to load.

Returns:
    FieldDescriptor: The field descriptor for the named field.

Raises:
    KeyError: if the field cannot be found in the pool.
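A minimal usage sketch, assuming a hypothetical generated module person_pb2 defining a message example.Person with a field name; importing it registers its descriptors in the default pool:

from google.protobuf import descriptor_pool
import person_pb2  # hypothetical generated module; registers example.Person in the default pool

pool = descriptor_pool.Default()
field = pool.FindFieldByName("example.Person.name")
print(field.name, field.number)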
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd/src/dbnd/_vendor_package/google/protobuf/descriptor_pool.py#L532-L547
__author__ = 'matthewtoia@google.com (Matt Toia)' import collections import warnings from google.protobuf import descriptor from google.protobuf import descriptor_database from google.protobuf import text_encoding _USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS def _Deprecated(func): def NewFunc(*args, **kwargs): warnings.warn( 'Call to deprecated function %s(). Note: Do add unlinked descriptors ' 'to descriptor_pool is wrong. Use Add() or AddSerializedFile() ' 'instead.' % func.__name__, category=DeprecationWarning) return func(*args, **kwargs) NewFunc.__name__ = func.__name__ NewFunc.__doc__ = func.__doc__ NewFunc.__dict__.update(func.__dict__) return NewFunc def _NormalizeFullyQualifiedName(name): return name.lstrip('.') def _OptionsOrNone(descriptor_proto): if descriptor_proto.HasField('options'): return descriptor_proto.options else: return None def _IsMessageSetExtension(field): return (field.is_extension and field.containing_type.has_options and field.containing_type.GetOptions().message_set_wire_format and field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL) class DescriptorPool(object): if _USE_C_DESCRIPTORS: def __new__(cls, descriptor_db=None): return descriptor._message.DescriptorPool(descriptor_db) def __init__(self, descriptor_db=None): self._internal_db = descriptor_database.DescriptorDatabase() self._descriptor_db = descriptor_db self._descriptors = {} self._enum_descriptors = {} self._service_descriptors = {} self._file_descriptors = {} self._toplevel_extensions = {} self._file_desc_by_toplevel_extension = {} self._top_enum_values = {} self._extensions_by_name = collections.defaultdict(dict) self._extensions_by_number = collections.defaultdict(dict) def _CheckConflictRegister(self, desc, desc_name, file_name): for register, descriptor_type in [ (self._descriptors, descriptor.Descriptor), (self._enum_descriptors, descriptor.EnumDescriptor), (self._service_descriptors, descriptor.ServiceDescriptor), (self._toplevel_extensions, descriptor.FieldDescriptor), (self._top_enum_values, descriptor.EnumValueDescriptor)]: if desc_name in register: old_desc = register[desc_name] if isinstance(old_desc, descriptor.EnumValueDescriptor): old_file = old_desc.type.file.name else: old_file = old_desc.file.name if not isinstance(desc, descriptor_type) or ( old_file != file_name): error_msg = ('Conflict register for file "' + file_name + '": ' + desc_name + ' is already defined in file "' + old_file + '". 
Please fix the conflict by adding ' 'package name on the proto file, or use different ' 'name for the duplication.') if isinstance(desc, descriptor.EnumValueDescriptor): error_msg += ('\nNote: enum values appear as ' 'siblings of the enum type instead of ' 'children of it.') raise TypeError(error_msg) return def Add(self, file_desc_proto): self._internal_db.Add(file_desc_proto) def AddSerializedFile(self, serialized_file_desc_proto): from google.protobuf import descriptor_pb2 file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString( serialized_file_desc_proto) self.Add(file_desc_proto) @_Deprecated def AddDescriptor(self, desc): self._AddDescriptor(desc) def _AddDescriptor(self, desc): if not isinstance(desc, descriptor.Descriptor): raise TypeError('Expected instance of descriptor.Descriptor.') self._CheckConflictRegister(desc, desc.full_name, desc.file.name) self._descriptors[desc.full_name] = desc self._AddFileDescriptor(desc.file) @_Deprecated def AddEnumDescriptor(self, enum_desc): self._AddEnumDescriptor(enum_desc) def _AddEnumDescriptor(self, enum_desc): if not isinstance(enum_desc, descriptor.EnumDescriptor): raise TypeError('Expected instance of descriptor.EnumDescriptor.') file_name = enum_desc.file.name self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name) self._enum_descriptors[enum_desc.full_name] = enum_desc if enum_desc.file.package: top_level = (enum_desc.full_name.count('.') - enum_desc.file.package.count('.') == 1) else: top_level = enum_desc.full_name.count('.') == 0 if top_level: file_name = enum_desc.file.name package = enum_desc.file.package for enum_value in enum_desc.values: full_name = _NormalizeFullyQualifiedName( '.'.join((package, enum_value.name))) self._CheckConflictRegister(enum_value, full_name, file_name) self._top_enum_values[full_name] = enum_value self._AddFileDescriptor(enum_desc.file) @_Deprecated def AddServiceDescriptor(self, service_desc): self._AddServiceDescriptor(service_desc) def _AddServiceDescriptor(self, service_desc): if not isinstance(service_desc, descriptor.ServiceDescriptor): raise TypeError('Expected instance of descriptor.ServiceDescriptor.') self._CheckConflictRegister(service_desc, service_desc.full_name, service_desc.file.name) self._service_descriptors[service_desc.full_name] = service_desc @_Deprecated def AddExtensionDescriptor(self, extension): self._AddExtensionDescriptor(extension) def _AddExtensionDescriptor(self, extension): if not (isinstance(extension, descriptor.FieldDescriptor) and extension.is_extension): raise TypeError('Expected an extension descriptor.') if extension.extension_scope is None: self._toplevel_extensions[extension.full_name] = extension try: existing_desc = self._extensions_by_number[ extension.containing_type][extension.number] except KeyError: pass else: if extension is not existing_desc: raise AssertionError( 'Extensions "%s" and "%s" both try to extend message type "%s" ' 'with field number %d.' 
% (extension.full_name, existing_desc.full_name, extension.containing_type.full_name, extension.number)) self._extensions_by_number[extension.containing_type][ extension.number] = extension self._extensions_by_name[extension.containing_type][ extension.full_name] = extension if _IsMessageSetExtension(extension): self._extensions_by_name[extension.containing_type][ extension.message_type.full_name] = extension @_Deprecated def AddFileDescriptor(self, file_desc): self._InternalAddFileDescriptor(file_desc) def _InternalAddFileDescriptor(self, file_desc): self._AddFileDescriptor(file_desc) for extension in file_desc.extensions_by_name.values(): self._file_desc_by_toplevel_extension[ extension.full_name] = file_desc def _AddFileDescriptor(self, file_desc): if not isinstance(file_desc, descriptor.FileDescriptor): raise TypeError('Expected instance of descriptor.FileDescriptor.') self._file_descriptors[file_desc.name] = file_desc def FindFileByName(self, file_name): try: return self._file_descriptors[file_name] except KeyError: pass try: file_proto = self._internal_db.FindFileByName(file_name) except KeyError as error: if self._descriptor_db: file_proto = self._descriptor_db.FindFileByName(file_name) else: raise error if not file_proto: raise KeyError('Cannot find a file named %s' % file_name) return self._ConvertFileProtoToFileDescriptor(file_proto) def FindFileContainingSymbol(self, symbol): symbol = _NormalizeFullyQualifiedName(symbol) try: return self._InternalFindFileContainingSymbol(symbol) except KeyError: pass try: self._FindFileContainingSymbolInDb(symbol) return self._InternalFindFileContainingSymbol(symbol) except KeyError: raise KeyError('Cannot find a file containing %s' % symbol) def _InternalFindFileContainingSymbol(self, symbol): try: return self._descriptors[symbol].file except KeyError: pass try: return self._enum_descriptors[symbol].file except KeyError: pass try: return self._service_descriptors[symbol].file except KeyError: pass try: return self._top_enum_values[symbol].type.file except KeyError: pass try: return self._file_desc_by_toplevel_extension[symbol] except KeyError: pass top_name, _, sub_name = symbol.rpartition('.') try: message = self.FindMessageTypeByName(top_name) assert (sub_name in message.extensions_by_name or sub_name in message.fields_by_name or sub_name in message.enum_values_by_name) return message.file except (KeyError, AssertionError): raise KeyError('Cannot find a file containing %s' % symbol) def FindMessageTypeByName(self, full_name): full_name = _NormalizeFullyQualifiedName(full_name) if full_name not in self._descriptors: self._FindFileContainingSymbolInDb(full_name) return self._descriptors[full_name] def FindEnumTypeByName(self, full_name): full_name = _NormalizeFullyQualifiedName(full_name) if full_name not in self._enum_descriptors: self._FindFileContainingSymbolInDb(full_name) return self._enum_descriptors[full_name]
Apache License 2.0
sally20921/conssl
ConSSL/callbacks/verification/batch_gradient.py
default_output_mapping
python
def default_output_mapping(data: Any) -> torch.Tensor:
    if isinstance(data, torch.Tensor):
        return data

    batches = default_input_mapping(data)
    batches = [batch.view(batch.size(0), -1).float() for batch in batches]
    combined = torch.cat(batches, 1)
    return combined
Pulls out all tensors in an output collection and combines them into one big batch for verification.

Args:
    data: a tensor or a (nested) collection of tensors (tuple, list, dict, etc.).

Returns:
    A float tensor with shape (B, N) where B is the batch size and N is the sum of (flattened)
    dimensions of all tensors in the collection. If the input was already a tensor, the tensor
    itself is returned.

Example:
    >>> data = (torch.rand(3, 5), "foo", torch.rand(3, 2, 4))
    >>> result = default_output_mapping(data)
    >>> result.shape
    torch.Size([3, 13])

    >>> data = {"one": torch.rand(3, 5), "two": torch.rand(3, 2, 1)}
    >>> result = default_output_mapping(data)
    >>> result.shape
    torch.Size([3, 7])
https://github.com/sally20921/conssl/blob/0bd8cb4b418d6bba9d8f8b7b2b3f3d5d8e26866e/ConSSL/callbacks/verification/batch_gradient.py#L150-L180
from typing import Any, Callable, List, Optional import torch from pytorch_lightning import LightningModule, Trainer from pytorch_lightning.utilities.apply_func import apply_to_collection from pytorch_lightning.utilities.exceptions import MisconfigurationException from ConSSL.callbacks.verification.base import VerificationBase, VerificationCallbackBase class BatchGradientVerification(VerificationBase): def check( self, input_array: Any, input_mapping: Optional[Callable] = None, output_mapping: Optional[Callable] = None, sample_idx: int = 0, ) -> bool: input_mapping = input_mapping or default_input_mapping output_mapping = output_mapping or default_output_mapping input_array = self._get_input_array_copy(input_array) input_batches = input_mapping(input_array) if input_batches[0].size(0) < 2: raise MisconfigurationException("Batch size must be greater than 1 to run verification.") for input_batch in input_batches: input_batch.requires_grad = True self.model.zero_grad() output = self._model_forward(input_array) output_mapping(output)[sample_idx].sum().backward() zero_grad_inds = list(range(len(input_batches[0]))) zero_grad_inds.pop(sample_idx) has_grad_outside_sample = [input_batch.grad[zero_grad_inds].abs().sum().item() for input_batch in input_batches] has_grad_inside_sample = [input_batch.grad[sample_idx].abs().sum().item() for input_batch in input_batches] return not any(has_grad_outside_sample) and all(has_grad_inside_sample) class BatchGradientVerificationCallback(VerificationCallbackBase): def __init__( self, input_mapping: Optional[Callable] = None, output_mapping: Optional[Callable] = None, sample_idx: int = 0, **kwargs: Any, ): super().__init__(**kwargs) self._input_mapping = input_mapping self._output_mapping = output_mapping self._sample_idx = sample_idx def message(self, *args: Any, **kwargs: Any) -> str: message = ( "Your model is mixing data across the batch dimension." " This can lead to wrong gradient updates in the optimizer." " Check the operations that reshape and permute tensor dimensions in your model." ) return message def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None: verification = BatchGradientVerification(pl_module) result = verification.check( input_array=pl_module.example_input_array, input_mapping=self._input_mapping, output_mapping=self._output_mapping, sample_idx=self._sample_idx, ) if not result: self._raise() def default_input_mapping(data: Any) -> List[torch.Tensor]: tensors = collect_tensors(data) batches: List[torch.Tensor] = [] for tensor in tensors: if tensor.ndim > 0 and (not batches or tensor.size(0) == batches[0].size(0)): batches.append(tensor) return batches
MIT License
gtaylor/python-route53
route53/xml_parsers/common_hosted_zone.py
parse_delegation_set
python
def parse_delegation_set(zone, e_delegation_set):
    e_nameservers = e_delegation_set.find('./{*}NameServers')

    nameservers = []
    for e_nameserver in e_nameservers:
        nameservers.append(e_nameserver.text)

    zone._nameservers = nameservers
Parses a DelegationSet tag. These often accompany HostedZone tags in
responses like CreateHostedZone and GetHostedZone.

:param HostedZone zone: An existing HostedZone instance to populate.
:param lxml.etree._Element e_delegation_set: A DelegationSet element.
https://github.com/gtaylor/python-route53/blob/b9fc7e258a79551c9ed61e4a71668b7f06f9e774/route53/xml_parsers/common_hosted_zone.py#L57-L72
from route53.hosted_zone import HostedZone

HOSTED_ZONE_TAG_TO_KWARG_MAP = {
    'Id': 'id',
    'Name': 'name',
    'CallerReference': 'caller_reference',
    'ResourceRecordSetCount': 'resource_record_set_count',
}

def parse_hosted_zone(e_zone, connection):
    kwargs = {}

    for e_field in e_zone:
        tag_name = e_field.tag.split('}')[1]
        field_text = e_field.text

        if tag_name == 'Config':
            e_comment = e_field.find('./{*}Comment')
            kwargs['comment'] = e_comment.text if e_comment is not None else None
            continue
        elif tag_name == 'Id':
            field_text = field_text.strip('/hostedzone/')

        kw_name = HOSTED_ZONE_TAG_TO_KWARG_MAP[tag_name]
        kwargs[kw_name] = field_text

    return HostedZone(connection, **kwargs)
MIT License
anthonyk1225/robinhood-to-xlsx
sql/operations/instruments.py
create_instruments
python
def create_instruments(simple_name, symbol, url):
    conn = sqlite3.connect('robinhood.db')
    command = "INSERT INTO instruments\
               (simple_name, symbol, url)\
               VALUES ('{}', '{}', '{}');".format(simple_name, symbol, url)
    cursor = conn.execute(command)
    conn.commit()
    conn.close()
    return cursor.lastrowid
returns INTEGER - last id of row created
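An illustrative call, assuming robinhood.db already contains the instruments table (all values below are hypothetical):

row_id = create_instruments(
    "Apple",
    "AAPL",
    "https://api.robinhood.com/instruments/example/",  # hypothetical instrument URL
)
print(row_id)  # integer id of the inserted row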
https://github.com/anthonyk1225/robinhood-to-xlsx/blob/9426d89e7e8854ece820154c3fe3f80b46914019/sql/operations/instruments.py#L5-L19
import sqlite3
MIT License
rapid7/vm-console-client-python
rapid7vmconsole/models/created_reference_scan_id_link.py
CreatedReferenceScanIDLink.links
python
def links(self):
    return self._links
Gets the links of this CreatedReferenceScanIDLink.  # noqa: E501

Hypermedia links to corresponding or related resources.  # noqa: E501

:return: The links of this CreatedReferenceScanIDLink.  # noqa: E501
:rtype: list[Link]
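A brief usage sketch with the model class from the surrounding module (values are illustrative):

ref = CreatedReferenceScanIDLink(id=42, links=[])
print(ref.links)  # -> []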
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/created_reference_scan_id_link.py#L77-L85
import pprint import re import six class CreatedReferenceScanIDLink(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'int', 'links': 'list[Link]' } attribute_map = { 'id': 'id', 'links': 'links' } def __init__(self, id=None, links=None): self._id = None self._links = None self.discriminator = None if id is not None: self.id = id if links is not None: self.links = links @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property
MIT License
googleapis/python-aiplatform
google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py
FeaturestoreServiceGrpcTransport.search_features
python
def search_features(
    self,
) -> Callable[
    [featurestore_service.SearchFeaturesRequest],
    featurestore_service.SearchFeaturesResponse,
]:
    if "search_features" not in self._stubs:
        self._stubs["search_features"] = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1.FeaturestoreService/SearchFeatures",
            request_serializer=featurestore_service.SearchFeaturesRequest.serialize,
            response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize,
        )
    return self._stubs["search_features"]
Return a callable for the search features method over gRPC.

Searches Features matching a query in a given project.

Returns:
    Callable[[~.SearchFeaturesRequest],
             ~.SearchFeaturesResponse]:
        A function that, when called, will call the underlying RPC on the server.
https://github.com/googleapis/python-aiplatform/blob/c1c2326b2342ab1b6f4c4ce3852e63376eae740d/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py#L812-L839
import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers from google.api_core import operations_v1 from google.api_core import gapic_v1 import google.auth from google.auth import credentials as ga_credentials from google.auth.transport.grpc import SslCredentials import grpc from google.cloud.aiplatform_v1.types import entity_type from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type from google.cloud.aiplatform_v1.types import feature from google.cloud.aiplatform_v1.types import feature as gca_feature from google.cloud.aiplatform_v1.types import featurestore from google.cloud.aiplatform_v1.types import featurestore_service from google.longrunning import operations_pb2 from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport): _stubs: Dict[str, Callable] def __init__( self, *, host: str = "aiplatform.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: credentials = False self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._prep_wrapped_messages(client_info) @classmethod def create_channel( cls, host: str = "aiplatform.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> grpc.Channel: return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) 
@property def grpc_channel(self) -> grpc.Channel: return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsClient: if self._operations_client is None: self._operations_client = operations_v1.OperationsClient(self.grpc_channel) return self._operations_client @property def create_featurestore( self, ) -> Callable[ [featurestore_service.CreateFeaturestoreRequest], operations_pb2.Operation ]: if "create_featurestore" not in self._stubs: self._stubs["create_featurestore"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeaturestore", request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_featurestore"] @property def get_featurestore( self, ) -> Callable[ [featurestore_service.GetFeaturestoreRequest], featurestore.Featurestore ]: if "get_featurestore" not in self._stubs: self._stubs["get_featurestore"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/GetFeaturestore", request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, response_deserializer=featurestore.Featurestore.deserialize, ) return self._stubs["get_featurestore"] @property def list_featurestores( self, ) -> Callable[ [featurestore_service.ListFeaturestoresRequest], featurestore_service.ListFeaturestoresResponse, ]: if "list_featurestores" not in self._stubs: self._stubs["list_featurestores"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/ListFeaturestores", request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, ) return self._stubs["list_featurestores"] @property def update_featurestore( self, ) -> Callable[ [featurestore_service.UpdateFeaturestoreRequest], operations_pb2.Operation ]: if "update_featurestore" not in self._stubs: self._stubs["update_featurestore"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeaturestore", request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_featurestore"] @property def delete_featurestore( self, ) -> Callable[ [featurestore_service.DeleteFeaturestoreRequest], operations_pb2.Operation ]: if "delete_featurestore" not in self._stubs: self._stubs["delete_featurestore"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeaturestore", request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["delete_featurestore"] @property def create_entity_type( self, ) -> Callable[ [featurestore_service.CreateEntityTypeRequest], operations_pb2.Operation ]: if "create_entity_type" not in self._stubs: self._stubs["create_entity_type"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/CreateEntityType", request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_entity_type"] @property def get_entity_type( self, ) -> Callable[[featurestore_service.GetEntityTypeRequest], entity_type.EntityType]: if "get_entity_type" not in self._stubs: self._stubs["get_entity_type"] = self.grpc_channel.unary_unary( 
"/google.cloud.aiplatform.v1.FeaturestoreService/GetEntityType", request_serializer=featurestore_service.GetEntityTypeRequest.serialize, response_deserializer=entity_type.EntityType.deserialize, ) return self._stubs["get_entity_type"] @property def list_entity_types( self, ) -> Callable[ [featurestore_service.ListEntityTypesRequest], featurestore_service.ListEntityTypesResponse, ]: if "list_entity_types" not in self._stubs: self._stubs["list_entity_types"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/ListEntityTypes", request_serializer=featurestore_service.ListEntityTypesRequest.serialize, response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, ) return self._stubs["list_entity_types"] @property def update_entity_type( self, ) -> Callable[ [featurestore_service.UpdateEntityTypeRequest], gca_entity_type.EntityType ]: if "update_entity_type" not in self._stubs: self._stubs["update_entity_type"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateEntityType", request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, response_deserializer=gca_entity_type.EntityType.deserialize, ) return self._stubs["update_entity_type"] @property def delete_entity_type( self, ) -> Callable[ [featurestore_service.DeleteEntityTypeRequest], operations_pb2.Operation ]: if "delete_entity_type" not in self._stubs: self._stubs["delete_entity_type"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteEntityType", request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["delete_entity_type"] @property def create_feature( self, ) -> Callable[ [featurestore_service.CreateFeatureRequest], operations_pb2.Operation ]: if "create_feature" not in self._stubs: self._stubs["create_feature"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeature", request_serializer=featurestore_service.CreateFeatureRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_feature"] @property def batch_create_features( self, ) -> Callable[ [featurestore_service.BatchCreateFeaturesRequest], operations_pb2.Operation ]: if "batch_create_features" not in self._stubs: self._stubs["batch_create_features"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/BatchCreateFeatures", request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["batch_create_features"] @property def get_feature( self, ) -> Callable[[featurestore_service.GetFeatureRequest], feature.Feature]: if "get_feature" not in self._stubs: self._stubs["get_feature"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/GetFeature", request_serializer=featurestore_service.GetFeatureRequest.serialize, response_deserializer=feature.Feature.deserialize, ) return self._stubs["get_feature"] @property def list_features( self, ) -> Callable[ [featurestore_service.ListFeaturesRequest], featurestore_service.ListFeaturesResponse, ]: if "list_features" not in self._stubs: self._stubs["list_features"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/ListFeatures", request_serializer=featurestore_service.ListFeaturesRequest.serialize, 
response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, ) return self._stubs["list_features"] @property def update_feature( self, ) -> Callable[[featurestore_service.UpdateFeatureRequest], gca_feature.Feature]: if "update_feature" not in self._stubs: self._stubs["update_feature"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeature", request_serializer=featurestore_service.UpdateFeatureRequest.serialize, response_deserializer=gca_feature.Feature.deserialize, ) return self._stubs["update_feature"] @property def delete_feature( self, ) -> Callable[ [featurestore_service.DeleteFeatureRequest], operations_pb2.Operation ]: if "delete_feature" not in self._stubs: self._stubs["delete_feature"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeature", request_serializer=featurestore_service.DeleteFeatureRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["delete_feature"] @property def import_feature_values( self, ) -> Callable[ [featurestore_service.ImportFeatureValuesRequest], operations_pb2.Operation ]: if "import_feature_values" not in self._stubs: self._stubs["import_feature_values"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/ImportFeatureValues", request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["import_feature_values"] @property def batch_read_feature_values( self, ) -> Callable[ [featurestore_service.BatchReadFeatureValuesRequest], operations_pb2.Operation ]: if "batch_read_feature_values" not in self._stubs: self._stubs["batch_read_feature_values"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/BatchReadFeatureValues", request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["batch_read_feature_values"] @property def export_feature_values( self, ) -> Callable[ [featurestore_service.ExportFeatureValuesRequest], operations_pb2.Operation ]: if "export_feature_values" not in self._stubs: self._stubs["export_feature_values"] = self.grpc_channel.unary_unary( "/google.cloud.aiplatform.v1.FeaturestoreService/ExportFeatureValues", request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["export_feature_values"] @property
Apache License 2.0
xxao/rebrick
rebrick/api_users.py
get_partlists
python
def get_partlists(page=None, page_size=None, user_token=None, api_key=None):
    parameters = {
        'page': page,
        'page_size': page_size,
        'key': api_key}

    user_token = assert_user_token(user_token)
    path = config.API_USERS_URL + "%s/partlists/" % user_token

    return request(path, parameters)
Gets details for all user's parts lists.

Args:
    page: int or None
        A page number within the paginated result set.

    page_size: int or None
        Number of results to return per page.

    user_token: str or None
        Rebrickable user token. If set to None the one set by rebrick.init() is used.

    api_key: str or None
        Rebrickable API access key. If set to None the one set by rebrick.init() is used.

Returns:
    http.client.HTTPResponse
        Server response.
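An illustrative call, assuming rebrick.init() has already been given a valid API key and user token:

import json

response = get_partlists(page=1, page_size=50)
data = json.loads(response.read())  # response is an http.client.HTTPResponse
print(data)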
https://github.com/xxao/rebrick/blob/43c803a9a77563885f1305043324ad1d78367cdc/rebrick/api_users.py#L195-L227
from . import config from .request import request, assert_user_token def get_badges(page=None, page_size=None, ordering=None, api_key=None): parameters = { 'page': page, 'page_size': page_size, 'ordering': ordering, 'key': api_key} path = config.API_USERS_URL + "badges" return request(path, parameters) def get_badge(badge_id, api_key=None): parameters = {'key': api_key} path = config.API_USERS_URL + "badges/%s/" % badge_id return request(path, parameters) def get_build(set_id, user_token=None, api_key=None): if '-' not in str(set_id): set_id = "%s-1" % set_id parameters = {'key': api_key} user_token = assert_user_token(user_token) path = config.API_USERS_URL + "%s/build/%s/" % (user_token, set_id) return request(path, parameters) def get_elements(part_id=None, part_cat_id=None, color_id=None, part_details=False, page=None, page_size=None, user_token=None, api_key=None): parameters = { 'part_num': part_id, 'part_cat_id': part_cat_id, 'color_id': color_id, 'inc_part_details': int(part_details), 'page': page, 'page_size': page_size, 'key': api_key} user_token = assert_user_token(user_token) path = config.API_USERS_URL + "%s/allparts/" % user_token return request(path, parameters) def get_lost_elements(part_details=False, page=None, page_size=None, ordering=None, user_token=None, api_key=None): parameters = { 'inc_part_details': int(part_details), 'page': page, 'page_size': page_size, 'ordering': ordering, 'key': api_key} user_token = assert_user_token(user_token) path = config.API_USERS_URL + "%s/lost_parts/" % user_token return request(path, parameters)
MIT License
jizongfox/deep-clustering-toolbox
deepclustering/decorator/decorator.py
threaded
python
def threaded(_func=None, *, name="meter", daemon=False):
    def decorator_thread(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            new_thread = Thread(target=f, args=args, kwargs=kwargs, name=name)
            new_thread.daemon = daemon
            new_thread.start()
            return new_thread

        return wrapper

    if _func is None:
        return decorator_thread
    else:
        return decorator_thread(_func)
Decorator to run the process in an extra thread.
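A short usage sketch showing both decorator forms; the wrapped call returns the started Thread:

@threaded
def poll_metrics():
    ...  # runs in a background thread

@threaded(name="logger", daemon=True)
def write_logs():
    ...

t = poll_metrics()  # returns the threading.Thread object
t.join()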
https://github.com/jizongfox/deep-clustering-toolbox/blob/0721cbbb278af027409ed4c115ccc743b6daed1b/deepclustering/decorator/decorator.py#L123-L139
import _thread import contextlib import random import sys import threading import time from functools import wraps from threading import Thread import numpy as np from torch.multiprocessing import Process from torch.nn.modules.batchnorm import _BatchNorm def export(fn): mod = sys.modules[fn.__module__] if hasattr(mod, "__all__"): mod.__all__.append(fn.__name__) else: mod.__all__ = [fn.__name__] return fn def _extract_bn_modules(model): return [m for m in model.modules() if isinstance(m, _BatchNorm)] @contextlib.contextmanager def _disable_tracking_bn_stats(model): def switch_attr(m): if hasattr(m, "track_running_stats"): m.track_running_stats ^= True model.apply(switch_attr) yield model.apply(switch_attr) class _disable_tracking_bn_stats_pytoch_el_1_1_0: def __init__(self, model): self.bn_modules = _extract_bn_modules(model) self.moments = [m.momentum for m in self.bn_modules] def __enter__(self): for m in self.bn_modules: m.momentum = 0 def __exit__(self, exc_type, exc_val, exc_tb): for module, momentum in zip(self.bn_modules, self.moments): module.momentum = momentum class TimeBlock: def __init__(self, start=None): self.start = start if start is not None else time.time() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop = time.time() self.cost = self.stop - self.start return exc_type is None def timethis(func): @wraps(func) def wrapper(*args, **kwargs): start = time.time() result = func(*args, **kwargs) end = time.time() print(func.__name__, end - start) return result return wrapper def convert_params(f): @wraps(f) def wrapper(self, *args, **kwargs): return self.process_params(f, *args, **kwargs) return wrapper def threaded_(f): @wraps(f) def wrapper(*args, **kwargs): return _thread.start_new(f, args, kwargs) return wrapper
MIT License
tanghaibao/jcvi
jcvi/formats/fastq.py
some
python
def some(args):
    p = OptionParser(some.__doc__)
    opts, args = p.parse_args(args)

    if len(args) not in (2, 3):
        sys.exit(not p.print_help())

    (
        idsfile,
        afastq,
    ) = args[:2]
    bfastq = args[2] if len(args) == 3 else None

    ids = DictFile(idsfile, valuepos=None)

    ai = iter_fastq(open(afastq))
    arec = next(ai)
    if bfastq:
        bi = iter_fastq(open(bfastq))
        brec = next(bi)

    while arec:
        if arec.name[1:] in ids:
            print(arec)
            if bfastq:
                print(brec)

        arec = next(ai)
        if bfastq:
            brec = next(bi)
%prog some idsfile afastq [bfastq]

Select a subset of the reads with ids present in the idsfile.
`bfastq` is optional (only if reads are paired)
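An illustrative programmatic call mirroring the %prog usage string (file names are hypothetical):

from jcvi.formats.fastq import some

some(["ids.txt", "sample_R1.fastq", "sample_R2.fastq"])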
https://github.com/tanghaibao/jcvi/blob/3b161796234670ce1c4894974eaeb590d35cf2a2/jcvi/formats/fastq.py#L742-L777
import os.path as op import sys import re import logging import json from itertools import islice from Bio import SeqIO from Bio.SeqIO.QualityIO import FastqGeneralIterator from jcvi.formats.fasta import must_open, rc from jcvi.formats.base import DictFile from jcvi.utils.cbook import percentage from jcvi.apps.base import OptionParser, ActionDispatcher, sh, which, mkdir, need_update qual_offset = lambda x: 33 if x == "sanger" else 64 allowed_dialect_conversions = { ">=1.8": "<1.8", "sra": "<1.8", } class FastqLite(object): def __init__(self, name, seq, qual): self.name = name self.seq = seq self.qual = qual def __str__(self): return "\n".join((self.name, self.seq, "+", self.qual)) def rc(self): self.seq = rc(self.seq) self.qual = self.qual[::-1] class FastqRecord(object): def __init__(self, fh, offset=0, key=None): self.name = self.header = fh.readline() if not self.name: return self.name = self.name.split()[0] self.seq = fh.readline().rstrip() self.l3 = fh.readline().rstrip() self.qual = fh.readline().rstrip() if offset != 0: self.qual = "".join(chr(ord(x) + offset) for x in self.qual) self.length = len(self.seq) assert self.length == len( self.qual ), "length mismatch: seq(%s) and qual(%s)" % (self.seq, self.qual) if key: self.name = key(self.name) def __str__(self): return "\n".join((self.name, self.seq, "+", self.qual)) def __len__(self): return self.length @property def quality(self): return [ord(x) for x in self.qual] class FastqHeader(object): def __init__(self, row): header = row.strip().split(" ") self.readId, self.readLen, self.readNum = None, None, None self.multiplexId = 0 self.paired = False if len(header) == 3 and "length" in header[2]: self.dialect = "sra" self.readId = header[0].lstrip("@") m = re.search(r"length=(\d+)", header[2]) if m: self.readLen = m.group(1) h = header[1].split(":") self.instrument = h[0] if len(h) == 7: self.runId, self.flowcellId = int(h[1]), h[2] self.laneNum, self.tileNum = int(h[3]), int(h[4]) self.xPos, self.yPos = h[5], h[6] else: self.runId, self.flowcellId = None, None self.laneNum, self.tileNum = int(h[1]), int(h[2]) self.xPos, self.yPos = h[3], h[4] else: h = header[0].split(":") self.instrument = h[0].lstrip("@") if len(header) == 2 and header[1].find(":"): self.dialect = ">=1.8" self.runId = int(h[1]) self.flowcellId = h[2] self.laneNum = int(h[3]) self.tileNum = int(h[4]) self.xPos = int(h[5]) self.yPos = h[6] if re.search("/", self.yPos): self.paired = True self.yPos, self.readNum = self.yPos.split("/") a = header[1].split(":") self.readNum = int(a[0]) self.isFiltered = a[1] self.controlNum = int(a[2]) self.barcode = a[3] else: self.dialect = "<1.8" self.laneNum = int(h[1]) self.tileNum = int(h[2]) self.xPos = int(h[3]) self.yPos = h[4] m = re.search(r"(\d+)(#\S+)/(\d+)", self.yPos) if m: self.paired = True self.yPos, self.multiplexId, self.readNum = ( m.group(1), m.group(2), m.group(3), ) def __str__(self): if self.dialect == "sra": h0 = self.readId if self.readNum: h0 += "/{0}".format(self.readNum) h1elems = [ self.instrument, self.laneNum, self.tileNum, self.xPos, self.yPos, ] if self.runId and self.flowcellId: h1elems[1:1] = [self.runId, self.flowcellId] h1 = ":".join(str(x) for x in h1elems) h2 = "length={0}".format(self.readLen) return "@{0} {1} {2}".format(h0, h1, h2) elif self.dialect == ">=1.8": yPos = ( "{0}/{1}".format(self.yPos, self.readNum) if self.paired else self.yPos ) h0 = ":".join( str(x) for x in ( self.instrument, self.runId, self.flowcellId, self.laneNum, self.tileNum, self.xPos, yPos, ) ) h1 = ":".join( str(x) for 
x in (self.readNum, self.isFiltered, self.controlNum, self.barcode) ) return "@{0} {1}".format(h0, h1) else: yPos = ( "{0}#{1}/{2}".format(self.yPos, self.multiplexId, self.readNum) if self.paired else self.yPos ) h0 = ":".join( str(x) for x in (self.instrument, self.laneNum, self.tileNum, self.xPos, yPos) ) return "@{0}".format(h0) def format_header(self, dialect=None, tag=None): if dialect: if self.dialect == dialect: logging.error("Input and output dialect are the same") elif dialect not in allowed_dialect_conversions[self.dialect]: logging.error( "Error: Cannot convert from `{0}` to `{1}` dialect".format( self.dialect, dialect ) ) logging.error( "Allowed conversions: {0}".format( json.dumps(allowed_dialect_conversions, indent=4) ) ) sys.exit() else: self.dialect = dialect if tag: readNum = tag.split("/")[1] self.readNum = readNum self.paired = True return str(self) def pairspf(pp, commonprefix=True): if commonprefix: pf = op.commonprefix(pp).rstrip("._-") else: pf = min(pp) pf = op.basename(pf) if not pf.strip(): pf = op.basename(pp[0]) return pf def iter_fastq(filename, offset=0, key=None): if isinstance(filename, str): logging.debug("Read file `{0}`".format(filename)) fh = must_open(filename) else: fh = filename while True: rec = FastqRecord(fh, offset=offset, key=key) if not rec.name: break yield rec yield None def main(): actions = ( ("size", "total base pairs in the fastq files"), ("shuffle", "shuffle paired reads into the same file interleaved"), ("split", "split paired reads into two files"), ("splitread", "split appended reads (from JGI)"), ("catread", "cat pairs together (reverse of splitread)"), ("pairinplace", "collect pairs by checking adjacent ids"), ("convert", "convert between illumina and sanger offset"), ("first", "get first N reads from file"), ("filter", "filter to get high qv reads"), ("suffix", "filter reads based on suffix"), ("trim", "trim reads using fastx_trimmer"), ("some", "select a subset of fastq reads"), ("guessoffset", "guess the quality offset of the fastq records"), ("readlen", "calculate read length"), ( "format", "format fastq file, convert header from casava 1.8+ to older format", ), ("fasta", "convert fastq to fasta and qual file"), ("fromsra", "convert sra to fastq using `fastq-dump`"), ("uniq", "retain only first instance of duplicate (by name) reads"), ) p = ActionDispatcher(actions) p.dispatch(globals()) def uniq(args): p = OptionParser(uniq.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (fastqfile,) = args fw = must_open(opts.outfile, "w") nduplicates = nreads = 0 seen = set() for rec in iter_fastq(fastqfile): nreads += 1 if rec is None: break name = rec.name if name in seen: nduplicates += 1 continue seen.add(name) print(rec, file=fw) logging.debug("Removed duplicate reads: {}".format(percentage(nduplicates, nreads))) def suffix(args): p = OptionParser(suffix.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastqfile, sf = args fw = must_open(opts.outfile, "w") nreads = nselected = 0 for rec in iter_fastq(fastqfile): nreads += 1 if rec is None: break if rec.seq.endswith(sf): print(rec, file=fw) nselected += 1 logging.debug( "Selected reads with suffix {0}: {1}".format(sf, percentage(nselected, nreads)) ) def calc_readlen(f, first): from jcvi.utils.cbook import SummaryStats L = [] ai = iter_fastq(f) rec = next(ai) while rec: L.append(rec.length) if len(L) > first: break rec = next(ai) s = SummaryStats(L) return s def is_fastq(f): fq 
= f.replace(".gz", "") if f.endswith(".gz") else f return fq.endswith((".fastq", ".fq")) def readlen(args): p = OptionParser(readlen.__doc__) p.set_firstN() p.add_option( "--silent", default=False, action="store_true", help="Do not print read length stats", ) p.add_option( "--nocheck", default=False, action="store_true", help="Do not check file type suffix", ) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (f,) = args if (not opts.nocheck) and (not is_fastq(f)): logging.debug("File `{}` does not endswith .fastq or .fq".format(f)) return 0 s = calc_readlen(f, opts.firstN) if not opts.silent: print("\t".join(str(x) for x in (f, s.min, s.max, s.mean, s.median))) return int(s.max) def fasta(args): p = OptionParser(fasta.__doc__) p.add_option( "--seqtk", default=False, action="store_true", help="Use seqtk to convert" ) p.set_outdir() p.set_outfile(outfile=None) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) fastqfiles = args outdir = opts.outdir if outdir and outdir != ".": mkdir(outdir) fastqfile = fastqfiles[0] pf = op.basename(fastqfile) gzinput = pf.endswith(".gz") if gzinput: pf = pf.rsplit(".", 1)[0] pf, sf = pf.rsplit(".", 1) if sf not in ("fq", "fastq"): logging.debug("Assumed FASTA: suffix not `fq` or `fastq`") return fastqfile, None fastafile, qualfile = pf + ".fasta", pf + ".qual" outfile = opts.outfile or fastafile outfile = op.join(outdir, outfile) if opts.seqtk: if need_update(fastqfiles, outfile): for i, fastqfile in enumerate(fastqfiles): cmd = "seqtk seq -A {0} -L 30 -l 70".format(fastqfile) sh(cmd, outfile=outfile, append=i) else: logging.debug("Outfile `{0}` already exists.".format(outfile)) return outfile, None for fastqfile in fastqfiles: SeqIO.convert(fastqfile, "fastq", fastafile, "fasta") SeqIO.convert(fastqfile, "fastq", qualfile, "qual") return fastafile, qualfile def first(args): from jcvi.apps.base import need_update p = OptionParser(first.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) N = int(args[0]) nlines = N * 4 fastqfiles = args[1:] fastqfile = fastqfiles[0] outfile = opts.outfile if not need_update(fastqfiles, outfile): logging.debug("File `{0}` exists. 
Will not overwrite.".format(outfile)) return gz = fastqfile.endswith(".gz") for fastqfile in fastqfiles: if gz: cmd = "zcat {0} | head -n {1}".format(fastqfile, nlines) else: cmd = "head -n {0} {1}".format(nlines, fastqfile) sh(cmd, outfile=opts.outfile, append=True) def FastqPairedIterator(read1, read2): if read1 == read2: p1fp = p2fp = must_open(read1) else: p1fp = must_open(read1) p2fp = must_open(read2) return p1fp, p2fp def isHighQv(qs, qvchar, pct=90): cutoff = len(qs) * pct / 100 highs = sum(1 for x in qs if x >= qvchar) return highs >= cutoff def filter(args): p = OptionParser(filter.__doc__) p.add_option( "-q", dest="qv", default=20, type="int", help="Minimum quality score to keep", ) p.add_option( "-p", dest="pct", default=95, type="int", help="Minimum percent of bases that have [-q] quality", ) opts, args = p.parse_args(args) if len(args) not in (1, 2): sys.exit(not p.print_help()) if len(args) == 1: r1 = r2 = args[0] else: r1, r2 = args qv = opts.qv pct = opts.pct offset = guessoffset([r1]) qvchar = chr(offset + qv) logging.debug("Call base qv >= {0} as good.".format(qvchar)) outfile = r1.rsplit(".", 1)[0] + ".q{0}.paired.fastq".format(qv) fw = open(outfile, "w") p1fp, p2fp = FastqPairedIterator(r1, r2) while True: a = list(islice(p1fp, 4)) if not a: break b = list(islice(p2fp, 4)) q1 = a[-1].rstrip() q2 = b[-1].rstrip() if isHighQv(q1, qvchar, pct=pct) and isHighQv(q2, qvchar, pct=pct): fw.writelines(a) fw.writelines(b) def checkShuffleSizes(p1, p2, pairsfastq, extra=0): from jcvi.apps.base import getfilesize pairssize = getfilesize(pairsfastq) p1size = getfilesize(p1) p2size = getfilesize(p2) assert ( pairssize == p1size + p2size + extra ), "The sizes do not add up: {0} + {1} + {2} != {3}".format( p1size, p2size, extra, pairssize ) def shuffle(args): p = OptionParser(shuffle.__doc__) p.set_tag() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) p1, p2 = args pairsfastq = pairspf((p1, p2)) + ".fastq" tag = opts.tag p1fp = must_open(p1) p2fp = must_open(p2) pairsfw = must_open(pairsfastq, "w") nreads = 0 while True: a = list(islice(p1fp, 4)) if not a: break b = list(islice(p2fp, 4)) if tag: name = a[0].rstrip() a[0] = name + "/1\n" b[0] = name + "/2\n" pairsfw.writelines(a) pairsfw.writelines(b) nreads += 2 pairsfw.close() extra = nreads * 2 if tag else 0 checkShuffleSizes(p1, p2, pairsfastq, extra=extra) logging.debug( "File `{0}` verified after writing {1} reads.".format(pairsfastq, nreads) ) return pairsfastq def split(args): from jcvi.apps.grid import Jobs p = OptionParser(split.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (pairsfastq,) = args gz = pairsfastq.endswith(".gz") pf = pairsfastq.replace(".gz", "").rsplit(".", 1)[0] p1 = pf + ".1.fastq" p2 = pf + ".2.fastq" cmd = "zcat" if gz else "cat" p1cmd = cmd + " {0} | sed -ne '1~8{{N;N;N;p}}'".format(pairsfastq) p2cmd = cmd + " {0} | sed -ne '5~8{{N;N;N;p}}'".format(pairsfastq) if gz: p1cmd += " | gzip" p2cmd += " | gzip" p1 += ".gz" p2 += ".gz" p1cmd += " > " + p1 p2cmd += " > " + p2 args = [(p1cmd,), (p2cmd,)] m = Jobs(target=sh, args=args) m.run() checkShuffleSizes(p1, p2, pairsfastq) def guessoffset(args): p = OptionParser(guessoffset.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (fastqfile,) = args ai = iter_fastq(fastqfile) rec = next(ai) offset = 64 while rec: quality = rec.quality lowcounts = len([x for x in quality if x < 59]) highcounts = len([x for x in quality if x > 74]) diff = highcounts - 
lowcounts if diff > 10: break elif diff < -10: offset = 33 break rec = next(ai) if offset == 33: print("Sanger encoding (offset=33)", file=sys.stderr) elif offset == 64: print("Illumina encoding (offset=64)", file=sys.stderr) return offset def format(args): p = OptionParser(format.__doc__) p.add_option( "--convert", default=None, choices=[">=1.8", "<1.8", "sra"], help="Convert fastq header to a different format", ) p.set_tag(specify_tag=True) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (fastqfile,) = args ai = iter_fastq(fastqfile) rec = next(ai) dialect = None while rec: h = FastqHeader(rec.header) if not dialect: dialect = h.dialect logging.debug("Input fastq dialect: `{0}`".format(dialect)) if opts.convert: logging.debug("Output fastq dialect: `{0}`".format(opts.convert)) rec.name = h.format_header(dialect=opts.convert, tag=opts.tag) print(rec) rec = next(ai)
BSD 2-Clause Simplified License
oslandia/deeposlandia
deeposlandia/utils.py
read_config
python
def read_config(filename):
    with open(filename) as fobj:
        return json.load(fobj)
Read the JSON configuration file.

Parameters
----------
filename : str
    Path of the configuration file

Returns
-------
dict
    Dataset glossary
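A minimal usage sketch (the path is hypothetical):

glossary = read_config("config/dataset-glossary.json")
print(glossary.keys())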
https://github.com/oslandia/deeposlandia/blob/5e8324cb67e2adb5ec1fba56b26d4a92f2c8e4ba/deeposlandia/utils.py#L18-L32
import json
import os

import daiquiri
import numpy as np
import pandas as pd
from PIL import Image

from deeposlandia.datasets import GEOGRAPHIC_DATASETS

logger = daiquiri.getLogger(__name__)
MIT License
alesgenova/pbcpy
tests/test_field.py
TestField.setUpClass
python
def setUpClass(cls):
    N = 8
    A, B, C = 5, 10, 6
    nr = np.array([A*20, B*20, C*20])
    grid = make_orthorombic_cell(A=A, B=B, C=C, CellClass=DirectGrid, nr=nr, units="Angstrom")
    d = N/grid.volume
    initial_vals = np.ones(nr)*d
    cls.constant_field = DirectField(grid=grid, griddata_3d=initial_vals)
    cls.N = N
This setUp is common to all the test cases below, and it's only executed once
https://github.com/alesgenova/pbcpy/blob/77d6886dd58e859c8da1b23adf5992f6aacf5095/tests/test_field.py#L14-L26
import unittest
import numpy as np

from pbcpy.base import DirectCell, ReciprocalCell, Coord
from pbcpy.grid import DirectGrid, ReciprocalGrid
from pbcpy.field import DirectField, ReciprocalField
from pbcpy.constants import LEN_CONV

from tests.common import run_test_orthorombic, run_test_triclinic, make_orthorombic_cell, make_triclinic_cell

class TestField(unittest.TestCase):

    @classmethod
MIT License
scidash/sciunit
sciunit/scores/base.py
Score.color
python
def color(self, value: Union[float, "Score"] = None) -> tuple:
    if value is None:
        value = self.norm_score
    rgb = Score.value_color(value)
    return rgb
Turn the score into an RGB color tuple of three 8-bit integers.

Args:
    value (Union[float, "Score"], optional): The score that will be turned to an RGB color.
        Defaults to None.

Returns:
    tuple: A tuple of three 8-bit integers that represents an RGB color.
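An illustrative call, assuming a concrete Score subclass such as sciunit.scores.BooleanScore (not shown in this excerpt):

from sciunit.scores import BooleanScore

score = BooleanScore(True)
print(score.color())  # an (R, G, B) tuple of 8-bit integers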
https://github.com/scidash/sciunit/blob/68401d88b8e47d29807f8b4f9d265a23174143d9/sciunit/scores/base.py#L177-L189
import imp import logging import math import sys from copy import copy from typing import Tuple, Union import numpy as np from quantities import Quantity from sciunit.base import SciUnit, config, ipy, log from sciunit.errors import InvalidScoreError score_logger = logging.getLogger("sciunit_scores") if ipy: imp.reload(logging) sl_handler = logging.StreamHandler(sys.stdout) score_logger.addHandler(sl_handler) score_log_level = config.get("score_log_level", 1) score_logger.setLevel(score_log_level) class Score(SciUnit): def __init__( self, score: Union["Score", float, int, Quantity], related_data: dict = None ): self.check_score(score) if related_data is None: related_data = {} self.score, self.related_data = score, related_data if isinstance(score, Exception): self.__class__ = ErrorScore super(Score, self).__init__() score = None _best = None _worst = None _allowed_types = None _allowed_types_message = ( "Score of type %s is not an instance " "of one of the allowed types: %s" ) _description = "" description = "" _raw = None related_data = None test = None model = None observation_schema = None state_hide = ["related_data"] @classmethod def observation_preprocess(cls, observation: dict) -> dict: return observation @classmethod def observation_postprocess(cls, observation: dict) -> dict: return observation def check_score(self, score: "Score") -> None: if self._allowed_types and not isinstance( score, self._allowed_types + (Exception,) ): raise InvalidScoreError( self._allowed_types_message % (type(score), self._allowed_types) ) self._check_score(score) def _check_score(self, score: "Score") -> None: @classmethod def compute(cls, observation: dict, prediction: dict): return NotImplementedError("") @property def norm_score(self) -> "Score": return self.score @property def log_norm_score(self) -> float: return math.log(self.norm_score) if self.norm_score is not None else None @property def log2_norm_score(self) -> float: return math.log2(self.norm_score) if self.norm_score is not None else None @property def log10_norm_score(self) -> float: return math.log10(self.norm_score) if self.norm_score is not None else None
MIT License
aidanlister/django-facebook
django_facebook/middleware.py
FacebookMiddleware.get_fb_user_cookie
python
def get_fb_user_cookie(self, request):
    fb_user = facebook.get_user_from_cookie(request.COOKIES,
                                            settings.FACEBOOK_APP_ID,
                                            settings.FACEBOOK_SECRET_KEY)
    if fb_user:
        fb_user['method'] = 'cookie'
    return fb_user
Attempt to find a facebook user using a cookie.
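A rough usage sketch, assuming FACEBOOK_APP_ID and FACEBOOK_SECRET_KEY are configured and the incoming Django request carries the fbs_<app id> cookie:

middleware = FacebookMiddleware()
fb_user = middleware.get_fb_user_cookie(request)  # request is a Django HttpRequest
if fb_user:
    print(fb_user['method'])  # 'cookie'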
https://github.com/aidanlister/django-facebook/blob/4be2048f3642a87d4a07145bc33186c30c0478b2/django_facebook/middleware.py#L72-L78
from django.conf import settings from django.contrib import auth import facebook import datetime class DjangoFacebook(object): def __init__(self, user): self.user = user self.uid = user['uid'] self.graph = facebook.GraphAPI(user['access_token']) class FacebookDebugCanvasMiddleware(object): def process_request(self, request): cp = request.POST.copy() request.POST = cp request.POST['signed_request'] = settings.FACEBOOK_DEBUG_SIGNEDREQ return None class FacebookDebugCookieMiddleware(object): def process_request(self, request): cookie_name = "fbs_" + settings.FACEBOOK_APP_ID request.COOKIES[cookie_name] = settings.FACEBOOK_DEBUG_COOKIE return None class FacebookDebugTokenMiddleware(object): def process_request(self, request): user = { 'uid':settings.FACEBOOK_DEBUG_UID, 'access_token':settings.FACEBOOK_DEBUG_TOKEN, } request.facebook = DjangoFacebook(user) return None class FacebookMiddleware(object):
MIT License
sectorlabs/django-localized-fields
localized_fields/value.py
LocalizedValue.deconstruct
python
def deconstruct(self) -> dict:
    path = "localized_fields.value.%s" % self.__class__.__name__
    return path, [self.__dict__], {}
Deconstructs this value into a primitive type. Returns: A dictionary with all the localized values contained in this instance.
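A short usage sketch (language keys are illustrative):

value = LocalizedValue({"en": "hello", "nl": "hallo"})
path, args, kwargs = value.deconstruct()
print(path)  # 'localized_fields.value.LocalizedValue'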
https://github.com/sectorlabs/django-localized-fields/blob/f024e4feb50a3aac2cdfd965243f48dcf1628aa5/localized_fields/value.py#L60-L69
from collections.abc import Iterable from typing import Optional import deprecation from django.conf import settings from django.utils import translation class LocalizedValue(dict): default_value = None def __init__(self, keys: dict = None): super().__init__({}) self._interpret_value(keys) def get(self, language: str = None, default: str = None) -> str: language = language or settings.LANGUAGE_CODE value = super().get(language, default) return value if value is not None else default def set(self, language: str, value: str): self[language] = value self.__dict__.update(self) return self
MIT License
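A minimal sketch of the LocalizedValue.deconstruct record above, showing the round trip a Django migration would perform. Note that despite the docstring and return annotation, the implementation shown returns the usual (path, args, kwargs) triple; Django settings (LANGUAGE_CODE) must be configured for the constructor to work.

from localized_fields.value import LocalizedValue

value = LocalizedValue({'en': 'hello', 'nl': 'hallo'})
path, args, kwargs = value.deconstruct()
print(path)                                # 'localized_fields.value.LocalizedValue'
rebuilt = LocalizedValue(*args, **kwargs)  # how a migration would recreate the value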
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/climate/mqtt.py
MqttClimate.__init__
python
def __init__(self, hass, name, topic, value_templates, qos, retain, mode_list, fan_mode_list, swing_mode_list, target_temperature, away, hold, current_fan_mode, current_swing_mode, current_operation, aux, send_if_off, payload_on, payload_off, availability_topic, payload_available, payload_not_available): super().__init__(availability_topic, qos, payload_available, payload_not_available) self.hass = hass self._name = name self._topic = topic self._value_templates = value_templates self._qos = qos self._retain = retain self._target_temperature = target_temperature self._unit_of_measurement = hass.config.units.temperature_unit self._away = away self._hold = hold self._current_temperature = None self._current_fan_mode = current_fan_mode self._current_operation = current_operation self._aux = aux self._current_swing_mode = current_swing_mode self._fan_list = fan_mode_list self._operation_list = mode_list self._swing_list = swing_mode_list self._target_temperature_step = 1 self._send_if_off = send_if_off self._payload_on = payload_on self._payload_off = payload_off
Initialize the climate device.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/climate/mqtt.py#L191-L221
import asyncio import logging import voluptuous as vol from homeassistant.core import callback import homeassistant.components.mqtt as mqtt from homeassistant.components.climate import ( STATE_HEAT, STATE_COOL, STATE_DRY, STATE_FAN_ONLY, ClimateDevice, PLATFORM_SCHEMA as CLIMATE_PLATFORM_SCHEMA, STATE_AUTO, ATTR_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE, SUPPORT_SWING_MODE, SUPPORT_FAN_MODE, SUPPORT_AWAY_MODE, SUPPORT_HOLD_MODE, SUPPORT_AUX_HEAT) from homeassistant.const import ( STATE_ON, STATE_OFF, ATTR_TEMPERATURE, CONF_NAME, CONF_VALUE_TEMPLATE) from homeassistant.components.mqtt import ( CONF_AVAILABILITY_TOPIC, CONF_QOS, CONF_RETAIN, CONF_PAYLOAD_AVAILABLE, CONF_PAYLOAD_NOT_AVAILABLE, MQTT_BASE_PLATFORM_SCHEMA, MqttAvailability) import homeassistant.helpers.config_validation as cv from homeassistant.components.fan import (SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH) _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['mqtt'] DEFAULT_NAME = 'MQTT HVAC' CONF_POWER_COMMAND_TOPIC = 'power_command_topic' CONF_POWER_STATE_TOPIC = 'power_state_topic' CONF_POWER_STATE_TEMPLATE = 'power_state_template' CONF_MODE_COMMAND_TOPIC = 'mode_command_topic' CONF_MODE_STATE_TOPIC = 'mode_state_topic' CONF_MODE_STATE_TEMPLATE = 'mode_state_template' CONF_TEMPERATURE_COMMAND_TOPIC = 'temperature_command_topic' CONF_TEMPERATURE_STATE_TOPIC = 'temperature_state_topic' CONF_TEMPERATURE_STATE_TEMPLATE = 'temperature_state_template' CONF_FAN_MODE_COMMAND_TOPIC = 'fan_mode_command_topic' CONF_FAN_MODE_STATE_TOPIC = 'fan_mode_state_topic' CONF_FAN_MODE_STATE_TEMPLATE = 'fan_mode_state_template' CONF_SWING_MODE_COMMAND_TOPIC = 'swing_mode_command_topic' CONF_SWING_MODE_STATE_TOPIC = 'swing_mode_state_topic' CONF_SWING_MODE_STATE_TEMPLATE = 'swing_mode_state_template' CONF_AWAY_MODE_COMMAND_TOPIC = 'away_mode_command_topic' CONF_AWAY_MODE_STATE_TOPIC = 'away_mode_state_topic' CONF_AWAY_MODE_STATE_TEMPLATE = 'away_mode_state_template' CONF_HOLD_COMMAND_TOPIC = 'hold_command_topic' CONF_HOLD_STATE_TOPIC = 'hold_state_topic' CONF_HOLD_STATE_TEMPLATE = 'hold_state_template' CONF_AUX_COMMAND_TOPIC = 'aux_command_topic' CONF_AUX_STATE_TOPIC = 'aux_state_topic' CONF_AUX_STATE_TEMPLATE = 'aux_state_template' CONF_CURRENT_TEMPERATURE_TEMPLATE = 'current_temperature_template' CONF_CURRENT_TEMPERATURE_TOPIC = 'current_temperature_topic' CONF_PAYLOAD_ON = 'payload_on' CONF_PAYLOAD_OFF = 'payload_off' CONF_FAN_MODE_LIST = 'fan_modes' CONF_MODE_LIST = 'modes' CONF_SWING_MODE_LIST = 'swing_modes' CONF_INITIAL = 'initial' CONF_SEND_IF_OFF = 'send_if_off' SCHEMA_BASE = CLIMATE_PLATFORM_SCHEMA.extend(MQTT_BASE_PLATFORM_SCHEMA.schema) PLATFORM_SCHEMA = SCHEMA_BASE.extend({ vol.Optional(CONF_POWER_COMMAND_TOPIC): mqtt.valid_publish_topic, vol.Optional(CONF_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic, vol.Optional(CONF_TEMPERATURE_COMMAND_TOPIC): mqtt.valid_publish_topic, vol.Optional(CONF_FAN_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic, vol.Optional(CONF_SWING_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic, vol.Optional(CONF_AWAY_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic, vol.Optional(CONF_HOLD_COMMAND_TOPIC): mqtt.valid_publish_topic, vol.Optional(CONF_AUX_COMMAND_TOPIC): mqtt.valid_publish_topic, vol.Optional(CONF_POWER_STATE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_TEMPERATURE_STATE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_FAN_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_SWING_MODE_STATE_TOPIC): 
mqtt.valid_subscribe_topic, vol.Optional(CONF_AWAY_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_HOLD_STATE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_AUX_STATE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, vol.Optional(CONF_POWER_STATE_TEMPLATE): cv.template, vol.Optional(CONF_MODE_STATE_TEMPLATE): cv.template, vol.Optional(CONF_TEMPERATURE_STATE_TEMPLATE): cv.template, vol.Optional(CONF_FAN_MODE_STATE_TEMPLATE): cv.template, vol.Optional(CONF_SWING_MODE_STATE_TEMPLATE): cv.template, vol.Optional(CONF_AWAY_MODE_STATE_TEMPLATE): cv.template, vol.Optional(CONF_HOLD_STATE_TEMPLATE): cv.template, vol.Optional(CONF_AUX_STATE_TEMPLATE): cv.template, vol.Optional(CONF_CURRENT_TEMPERATURE_TEMPLATE): cv.template, vol.Optional(CONF_CURRENT_TEMPERATURE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_FAN_MODE_LIST, default=[STATE_AUTO, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]): cv.ensure_list, vol.Optional(CONF_SWING_MODE_LIST, default=[STATE_ON, STATE_OFF]): cv.ensure_list, vol.Optional(CONF_MODE_LIST, default=[STATE_AUTO, STATE_OFF, STATE_COOL, STATE_HEAT, STATE_DRY, STATE_FAN_ONLY]): cv.ensure_list, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_INITIAL, default=21): cv.positive_int, vol.Optional(CONF_SEND_IF_OFF, default=True): cv.boolean, vol.Optional(CONF_PAYLOAD_ON, default="ON"): cv.string, vol.Optional(CONF_PAYLOAD_OFF, default="OFF"): cv.string, }).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema) @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): template_keys = ( CONF_POWER_STATE_TEMPLATE, CONF_MODE_STATE_TEMPLATE, CONF_TEMPERATURE_STATE_TEMPLATE, CONF_FAN_MODE_STATE_TEMPLATE, CONF_SWING_MODE_STATE_TEMPLATE, CONF_AWAY_MODE_STATE_TEMPLATE, CONF_HOLD_STATE_TEMPLATE, CONF_AUX_STATE_TEMPLATE, CONF_CURRENT_TEMPERATURE_TEMPLATE ) value_templates = {} if CONF_VALUE_TEMPLATE in config: value_template = config.get(CONF_VALUE_TEMPLATE) value_template.hass = hass value_templates = {key: value_template for key in template_keys} for key in template_keys & config.keys(): value_templates[key] = config.get(key) value_templates[key].hass = hass async_add_devices([ MqttClimate( hass, config.get(CONF_NAME), { key: config.get(key) for key in ( CONF_POWER_COMMAND_TOPIC, CONF_MODE_COMMAND_TOPIC, CONF_TEMPERATURE_COMMAND_TOPIC, CONF_FAN_MODE_COMMAND_TOPIC, CONF_SWING_MODE_COMMAND_TOPIC, CONF_AWAY_MODE_COMMAND_TOPIC, CONF_HOLD_COMMAND_TOPIC, CONF_AUX_COMMAND_TOPIC, CONF_POWER_STATE_TOPIC, CONF_MODE_STATE_TOPIC, CONF_TEMPERATURE_STATE_TOPIC, CONF_FAN_MODE_STATE_TOPIC, CONF_SWING_MODE_STATE_TOPIC, CONF_AWAY_MODE_STATE_TOPIC, CONF_HOLD_STATE_TOPIC, CONF_AUX_STATE_TOPIC, CONF_CURRENT_TEMPERATURE_TOPIC ) }, value_templates, config.get(CONF_QOS), config.get(CONF_RETAIN), config.get(CONF_MODE_LIST), config.get(CONF_FAN_MODE_LIST), config.get(CONF_SWING_MODE_LIST), config.get(CONF_INITIAL), False, None, SPEED_LOW, STATE_OFF, STATE_OFF, False, config.get(CONF_SEND_IF_OFF), config.get(CONF_PAYLOAD_ON), config.get(CONF_PAYLOAD_OFF), config.get(CONF_AVAILABILITY_TOPIC), config.get(CONF_PAYLOAD_AVAILABLE), config.get(CONF_PAYLOAD_NOT_AVAILABLE)) ]) class MqttClimate(MqttAvailability, ClimateDevice):
MIT License
soon/codeforcesapi
codeforces/api/codeforces_api.py
CodeforcesAPI.user_rated_list
python
def user_rated_list(self, active_only=False): assert isinstance(active_only, bool) data = self._data_retriever.get_data('user.ratedList', activeOnly=active_only) return map(User, data)
Returns the list of all rated users. :param active_only: If true, then only users who participated in a rated contest during the last month are returned. Otherwise, all users with at least one rated contest are returned. :type active_only: bool :return: Returns an iterator of User objects, sorted in decreasing order of rating. :rtype: iterator of User
https://github.com/soon/codeforcesapi/blob/23275464a41c6886461af94929b35ef1808a33bd/codeforces/api/codeforces_api.py#L435-L449
import hashlib import json import operator import random import time from collections import OrderedDict from enum import Enum from urllib.error import HTTPError from urllib.request import urlopen from .json_objects import Contest from .json_objects import Hack from .json_objects import Problem from .json_objects import ProblemStatistics from .json_objects import RanklistRow from .json_objects import RatingChange from .json_objects import Submission from .json_objects import User __all__ = ['CodeforcesAPI', 'CodeforcesLanguage'] class CodeforcesLanguage(Enum): en = 'en' ru = 'ru' class CodeforcesDataRetriever: def __init__(self, lang=CodeforcesLanguage.en, key=None, secret=None): self._key = None self._secret = None if key is not None and secret is not None: self.key = key self.secret = secret self._base_from_language = { CodeforcesLanguage.en: 'http://codeforces.com/api/', CodeforcesLanguage.ru: 'http://codeforces.ru/api/' } self._language = lang def get_data(self, method, **kwargs): return self.__get_data(self.__generate_url(method, **kwargs)) def __get_data(self, url): try: with urlopen(url) as req: return self.__check_json(req.read().decode('utf-8')) except HTTPError as http_e: try: return self.__check_json(http_e.read().decode('utf-8')) except Exception as e: raise e from http_e def __generate_url(self, method, **kwargs): url = self.base + method if self.key is not None and self.secret is not None: kwargs['apiKey'] = self.key kwargs['time'] = int(time.time()) if kwargs: args = self.__get_valid_args(**kwargs) url += '?' + '&'.join(map(self.__key_value_to_http_parameter, args.items())) if self.key is not None and self.secret is not None: url += '&apiSig=' + self.__generate_api_sig(method, args) return url def __generate_api_sig(self, method, params): rand = str(random.randint(100000, 999999)) s = '{}/{}?'.format(rand, method) ordered_params = OrderedDict(sorted(params.items(), key=operator.itemgetter(0))) s += '&'.join(map(self.__key_value_to_http_parameter, ordered_params.items())) s += '#' + self.secret return rand + hashlib.sha512(s.encode()).hexdigest() @staticmethod def __get_valid_args(**kwargs): return {k: v for k, v in kwargs.items() if v is not None} @staticmethod def __key_value_to_http_parameter(key_value): key, value = key_value if isinstance(value, list): value = ';'.join(sorted(map(str, value))) else: value = str(value) return '{0}={1}'.format(key, value) @staticmethod def __check_json(answer): values = json.loads(answer) try: if values['status'] == 'OK': return values['result'] else: raise ValueError(values['comment']) except KeyError as e: raise ValueError('Missed required field', e.args[0]) @property def base(self): return self._base_from_language[self.language] @property def language(self): return self._language @language.setter def language(self, value): assert isinstance(value, (CodeforcesLanguage, str)) self._language = CodeforcesLanguage(value) @property def key(self): return self._key @key.setter def key(self, value): assert isinstance(value, str) or value is None self._key = value @property def secret(self): return self._secret @secret.setter def secret(self, value): assert isinstance(value, str) or value is None self._secret = value class CodeforcesAPI: def __init__(self, lang='en', key=None, secret=None): self._data_retriever = CodeforcesDataRetriever(CodeforcesLanguage(lang), key, secret) def contest_hacks(self, contest_id): assert isinstance(contest_id, int) data = self._data_retriever.get_data('contest.hacks', contestId=contest_id) return map(Hack, data) def 
contest_list(self, gym=False): data = self._data_retriever.get_data('contest.list', gym=gym) return map(Contest, data) def contest_rating_changes(self, contest_id): data = self._data_retriever.get_data('contest.ratingChanges', contestId=contest_id) return map(RatingChange, data) def contest_standings(self, contest_id, from_=1, count=None, handles=None, show_unofficial=False): assert isinstance(contest_id, int), 'contest_id should be of type int, not {}'.format(type(contest_id)) assert isinstance(from_, int), 'from_ should be of type int, not {}'.format(type(from_)) assert isinstance(count, int) or count is None, 'count should be of type int, not {}'.format(type(count)) assert isinstance(handles, list) or handles is None, 'handles should be of type list of str, not {}'.format(type(handles)) assert handles is None or len(handles) <= 10000, 'No more than 10000 handles is accepted' assert isinstance(show_unofficial, bool), 'show_unofficial should be of type bool, not {}'.format(type(show_unofficial)) data = self._data_retriever.get_data('contest.standings', contestId=contest_id, count=count, handles=handles, showUnofficial=show_unofficial, **{'from': from_}) return {'contest': Contest(data['contest']), 'problems': map(Problem, data['problems']), 'rows': map(RanklistRow, data['rows'])} def contest_status(self, contest_id, handle=None, from_=1, count=None): assert isinstance(contest_id, int) assert isinstance(handle, str) or handle is None assert isinstance(from_, int) assert isinstance(count, int) or count is None data = self._data_retriever.get_data('contest.status', contestId=contest_id, handle=handle, count=count, **{'from': from_}) return map(Submission, data) def problemset_problems(self, tags=None): data = self._data_retriever.get_data('problemset.problems', tags=tags) return {'problems': map(Problem, data['problems']), 'problemStatistics': map(ProblemStatistics, data['problemStatistics'])} def problemset_recent_status(self, count): assert isinstance(count, int) assert 0 < count <= 1000 data = self._data_retriever.get_data('problemset.recentStatus', count=count) return map(Submission, data) def user_info(self, handles): assert isinstance(handles, list) data = self._data_retriever.get_data('user.info', handles=handles) return map(User, data)
MIT License
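Hedged sketch for the CodeforcesAPI.user_rated_list record above. It needs network access to codeforces.com, and the User attribute names used in the loop (handle, rating) mirror the public API fields rather than anything shown in this record.

from codeforces.api.codeforces_api import CodeforcesAPI

api = CodeforcesAPI()                          # anonymous access, English responses
users = api.user_rated_list(active_only=True)  # iterator of User, rating-descending
for user in list(users)[:5]:
    print(user.handle, user.rating)            # attribute names assumed from the API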
pypa/pip
src/pip/_internal/cache.py
_hash_dict
python
def _hash_dict(d: Dict[str, str]) -> str: s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True) return hashlib.sha224(s.encode("ascii")).hexdigest()
Return a stable sha224 of a dictionary.
https://github.com/pypa/pip/blob/0442875a68f19b0118b0b88c747bdaf6b24853ba/src/pip/_internal/cache.py#L23-L26
import hashlib import json import logging import os from typing import Any, Dict, List, Optional, Set from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version from pip._vendor.packaging.utils import canonicalize_name from pip._internal.exceptions import InvalidWheelFilename from pip._internal.models.format_control import FormatControl from pip._internal.models.link import Link from pip._internal.models.wheel import Wheel from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds from pip._internal.utils.urls import path_to_url logger = logging.getLogger(__name__)
MIT License
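_hash_dict is private to pip, but the record above is pure stdlib, so a standalone copy of the recipe runs anywhere; the function name below is our own, not pip's.

import hashlib
import json

def stable_dict_hash(d):
    # Canonical JSON (sorted keys, no whitespace, ASCII only) -> sha224 hex digest.
    s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
    return hashlib.sha224(s.encode("ascii")).hexdigest()

a = stable_dict_hash({"x": "1", "y": "2"})
b = stable_dict_hash({"y": "2", "x": "1"})   # insertion order does not change the hash
assert a == b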
linjianma/autohoot
autohoot/source.py
SourceToSource._assign_grad_variable
python
def _assign_grad_variable(self, node): forward_node = self.grad_to_forward_map[node] ret_str = f'_grad{forward_node.name} = {node.s2s_expr(node.inputs)}' node.name = f'_grad{forward_node.name}' return ret_str
Assign a gradient variable. e.g. _grad_a = T.dot(_grad_b, _g)
https://github.com/linjianma/autohoot/blob/5f9b790afb15cf4ae97cd0929a1db65d7c0347b3/autohoot/source.py#L89-L96
from autohoot import autodiff as ad from autohoot.utils import find_topo_sort, inner_product def invert_dict(d): return dict((v, k) for k, v in d.items()) INDENT = " " def indent_line(line): return f'{INDENT}{line}\n' def new_line(line): return f'{line}\n' class SourceToSource(): def __init__(self): self.mid_name = '_a' self.input_index = 0 self.forward_to_grad_map = {} self.grad_to_forward_map = {} self.forward_to_hvp_map = {} self.hvp_to_forward_map = {} def _assign_next_midname(self): if self.mid_name[-1] < 'z': self.mid_name = self.mid_name[:-1] + chr(ord(self.mid_name[-1]) + 1) else: self.mid_name = self.mid_name + 'a' def _assign_mid_variable(self, node): ret_str = f'{self.mid_name} = {node.s2s_expr(node.inputs)}' node.name = f'{self.mid_name}' self._assign_next_midname() return ret_str def _assign_init_variable(self, node): ret_str = f'{node.name} = inputs[{self.input_index}]' self.input_index += 1 return ret_str
Apache License 2.0
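Illustration of the string SourceToSource._assign_grad_variable emits, using a stub in place of a real autohoot.autodiff node (real nodes supply name, inputs and s2s_expr themselves); it assumes the autohoot package is importable.

from autohoot.source import SourceToSource

class StubNode(object):
    # Minimal stand-in exposing only what _assign_grad_variable touches.
    def __init__(self, name, inputs=()):
        self.name = name
        self.inputs = list(inputs)
    def s2s_expr(self, inputs):
        return 'T.dot({0})'.format(', '.join(n.name for n in inputs))

s2s = SourceToSource()
forward = StubNode('b')
grad = StubNode('tmp', inputs=[StubNode('_gradc'), StubNode('a')])
s2s.grad_to_forward_map[grad] = forward
print(s2s._assign_grad_variable(grad))   # -> '_gradb = T.dot(_gradc, a)'
print(grad.name)                         # the node is renamed to '_gradb'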
unofficial-memsource/memsource-cli-client
memsource_cli/models/file_import_settings_create_dto.py
FileImportSettingsCreateDto.mif
python
def mif(self, mif): self._mif = mif
Sets the mif of this FileImportSettingsCreateDto. :param mif: The mif of this FileImportSettingsCreateDto. # noqa: E501 :type: MifSettingsDto
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/file_import_settings_create_dto.py#L723-L731
import pprint import re import six from memsource_cli.models.android_settings_dto import AndroidSettingsDto from memsource_cli.models.csv_settings_dto import CsvSettingsDto from memsource_cli.models.dita_settings_dto import DitaSettingsDto from memsource_cli.models.doc_book_settings_dto import DocBookSettingsDto from memsource_cli.models.doc_settings_dto import DocSettingsDto from memsource_cli.models.html_settings_dto import HtmlSettingsDto from memsource_cli.models.idml_settings_dto import IdmlSettingsDto from memsource_cli.models.json_settings_dto import JsonSettingsDto from memsource_cli.models.mac_settings_dto import MacSettingsDto from memsource_cli.models.md_settings_dto import MdSettingsDto from memsource_cli.models.mif_settings_dto import MifSettingsDto from memsource_cli.models.multilingual_xls_settings_dto import MultilingualXlsSettingsDto from memsource_cli.models.multilingual_xml_settings_dto import MultilingualXmlSettingsDto from memsource_cli.models.pdf_settings_dto import PdfSettingsDto from memsource_cli.models.php_settings_dto import PhpSettingsDto from memsource_cli.models.po_settings_dto import PoSettingsDto from memsource_cli.models.ppt_settings_dto import PptSettingsDto from memsource_cli.models.properties_settings_dto import PropertiesSettingsDto from memsource_cli.models.psd_settings_dto import PsdSettingsDto from memsource_cli.models.quark_tag_settings_dto import QuarkTagSettingsDto from memsource_cli.models.resx_settings_dto import ResxSettingsDto from memsource_cli.models.sdl_xlf_settings_dto import SdlXlfSettingsDto from memsource_cli.models.tm_match_settings_dto import TMMatchSettingsDto from memsource_cli.models.ttx_settings_dto import TtxSettingsDto from memsource_cli.models.txt_settings_dto import TxtSettingsDto from memsource_cli.models.xlf2_settings_dto import Xlf2SettingsDto from memsource_cli.models.xlf_settings_dto import XlfSettingsDto from memsource_cli.models.xls_settings_dto import XlsSettingsDto from memsource_cli.models.xml_settings_dto import XmlSettingsDto from memsource_cli.models.yaml_settings_dto import YamlSettingsDto class FileImportSettingsCreateDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'input_charset': 'str', 'output_charset': 'str', 'zip_charset': 'str', 'file_format': 'str', 'target_length': 'bool', 'target_length_max': 'int', 'target_length_percent': 'bool', 'target_length_percent_value': 'float', 'segmentation_rule_id': 'int', 'target_segmentation_rule_id': 'int', 'android': 'AndroidSettingsDto', 'csv': 'CsvSettingsDto', 'dita': 'DitaSettingsDto', 'doc_book': 'DocBookSettingsDto', 'doc': 'DocSettingsDto', 'html': 'HtmlSettingsDto', 'idml': 'IdmlSettingsDto', 'json': 'JsonSettingsDto', 'mac': 'MacSettingsDto', 'md': 'MdSettingsDto', 'mif': 'MifSettingsDto', 'multilingual_xls': 'MultilingualXlsSettingsDto', 'multilingual_xml': 'MultilingualXmlSettingsDto', 'pdf': 'PdfSettingsDto', 'php': 'PhpSettingsDto', 'po': 'PoSettingsDto', 'ppt': 'PptSettingsDto', 'properties': 'PropertiesSettingsDto', 'psd': 'PsdSettingsDto', 'quark_tag': 'QuarkTagSettingsDto', 'resx': 'ResxSettingsDto', 'sdl_xlf': 'SdlXlfSettingsDto', 'tm_match': 'TMMatchSettingsDto', 'ttx': 'TtxSettingsDto', 'txt': 'TxtSettingsDto', 'xlf2': 'Xlf2SettingsDto', 'xlf': 'XlfSettingsDto', 'xls': 'XlsSettingsDto', 'xml': 'XmlSettingsDto', 'yaml': 'YamlSettingsDto' } attribute_map = { 'input_charset': 'inputCharset', 'output_charset': 'outputCharset', 'zip_charset': 'zipCharset', 'file_format': 'fileFormat', 'target_length': 'targetLength', 'target_length_max': 'targetLengthMax', 'target_length_percent': 'targetLengthPercent', 'target_length_percent_value': 'targetLengthPercentValue', 'segmentation_rule_id': 'segmentationRuleId', 'target_segmentation_rule_id': 'targetSegmentationRuleId', 'android': 'android', 'csv': 'csv', 'dita': 'dita', 'doc_book': 'docBook', 'doc': 'doc', 'html': 'html', 'idml': 'idml', 'json': 'json', 'mac': 'mac', 'md': 'md', 'mif': 'mif', 'multilingual_xls': 'multilingualXls', 'multilingual_xml': 'multilingualXml', 'pdf': 'pdf', 'php': 'php', 'po': 'po', 'ppt': 'ppt', 'properties': 'properties', 'psd': 'psd', 'quark_tag': 'quarkTag', 'resx': 'resx', 'sdl_xlf': 'sdlXlf', 'tm_match': 'tmMatch', 'ttx': 'ttx', 'txt': 'txt', 'xlf2': 'xlf2', 'xlf': 'xlf', 'xls': 'xls', 'xml': 'xml', 'yaml': 'yaml' } def __init__(self, input_charset=None, output_charset=None, zip_charset=None, file_format=None, target_length=None, target_length_max=None, target_length_percent=None, target_length_percent_value=None, segmentation_rule_id=None, target_segmentation_rule_id=None, android=None, csv=None, dita=None, doc_book=None, doc=None, html=None, idml=None, json=None, mac=None, md=None, mif=None, multilingual_xls=None, multilingual_xml=None, pdf=None, php=None, po=None, ppt=None, properties=None, psd=None, quark_tag=None, resx=None, sdl_xlf=None, tm_match=None, ttx=None, txt=None, xlf2=None, xlf=None, xls=None, xml=None, yaml=None): self._input_charset = None self._output_charset = None self._zip_charset = None self._file_format = None self._target_length = None self._target_length_max = None self._target_length_percent = None self._target_length_percent_value = None self._segmentation_rule_id = None self._target_segmentation_rule_id = None self._android = None self._csv = None self._dita = None self._doc_book = None self._doc = None self._html = None self._idml = None self._json = None self._mac = None self._md = None self._mif = None self._multilingual_xls = None self._multilingual_xml = None self._pdf = None self._php = None self._po = None self._ppt = None self._properties = None self._psd = None self._quark_tag = None self._resx = None self._sdl_xlf = None self._tm_match = None self._ttx = None 
self._txt = None self._xlf2 = None self._xlf = None self._xls = None self._xml = None self._yaml = None self.discriminator = None if input_charset is not None: self.input_charset = input_charset if output_charset is not None: self.output_charset = output_charset if zip_charset is not None: self.zip_charset = zip_charset if file_format is not None: self.file_format = file_format if target_length is not None: self.target_length = target_length if target_length_max is not None: self.target_length_max = target_length_max if target_length_percent is not None: self.target_length_percent = target_length_percent if target_length_percent_value is not None: self.target_length_percent_value = target_length_percent_value if segmentation_rule_id is not None: self.segmentation_rule_id = segmentation_rule_id if target_segmentation_rule_id is not None: self.target_segmentation_rule_id = target_segmentation_rule_id if android is not None: self.android = android if csv is not None: self.csv = csv if dita is not None: self.dita = dita if doc_book is not None: self.doc_book = doc_book if doc is not None: self.doc = doc if html is not None: self.html = html if idml is not None: self.idml = idml if json is not None: self.json = json if mac is not None: self.mac = mac if md is not None: self.md = md if mif is not None: self.mif = mif if multilingual_xls is not None: self.multilingual_xls = multilingual_xls if multilingual_xml is not None: self.multilingual_xml = multilingual_xml if pdf is not None: self.pdf = pdf if php is not None: self.php = php if po is not None: self.po = po if ppt is not None: self.ppt = ppt if properties is not None: self.properties = properties if psd is not None: self.psd = psd if quark_tag is not None: self.quark_tag = quark_tag if resx is not None: self.resx = resx if sdl_xlf is not None: self.sdl_xlf = sdl_xlf if tm_match is not None: self.tm_match = tm_match if ttx is not None: self.ttx = ttx if txt is not None: self.txt = txt if xlf2 is not None: self.xlf2 = xlf2 if xlf is not None: self.xlf = xlf if xls is not None: self.xls = xls if xml is not None: self.xml = xml if yaml is not None: self.yaml = yaml @property def input_charset(self): return self._input_charset @input_charset.setter def input_charset(self, input_charset): self._input_charset = input_charset @property def output_charset(self): return self._output_charset @output_charset.setter def output_charset(self, output_charset): self._output_charset = output_charset @property def zip_charset(self): return self._zip_charset @zip_charset.setter def zip_charset(self, zip_charset): self._zip_charset = zip_charset @property def file_format(self): return self._file_format @file_format.setter def file_format(self, file_format): allowed_values = ["doc", "ppt", "xls", "xlf", "xlf2", "sdlxliff", "ttx", "html", "xml", "mif", "tmx", "idml", "dita", "json", "po", "ts", "icml", "yaml", "properties", "csv", "android_string", "desktop_entry", "mac_strings", "pdf", "windows_rc", "xml_properties", "joomla_ini", "magento_csv", "dtd", "mozilla_properties", "plist", "plain_text", "srt", "sub", "sbv", "wiki", "resx", "resjson", "chrome_json", "epub", "svg", "docbook", "wpxliff", "multiling_xml", "multiling_xls", "mqxliff", "php", "psd", "tag", "md", "vtt"] if file_format not in allowed_values: raise ValueError( "Invalid value for `file_format` ({0}), must be one of {1}" .format(file_format, allowed_values) ) self._file_format = file_format @property def target_length(self): return self._target_length @target_length.setter def target_length(self, 
target_length): self._target_length = target_length @property def target_length_max(self): return self._target_length_max @target_length_max.setter def target_length_max(self, target_length_max): self._target_length_max = target_length_max @property def target_length_percent(self): return self._target_length_percent @target_length_percent.setter def target_length_percent(self, target_length_percent): self._target_length_percent = target_length_percent @property def target_length_percent_value(self): return self._target_length_percent_value @target_length_percent_value.setter def target_length_percent_value(self, target_length_percent_value): self._target_length_percent_value = target_length_percent_value @property def segmentation_rule_id(self): return self._segmentation_rule_id @segmentation_rule_id.setter def segmentation_rule_id(self, segmentation_rule_id): self._segmentation_rule_id = segmentation_rule_id @property def target_segmentation_rule_id(self): return self._target_segmentation_rule_id @target_segmentation_rule_id.setter def target_segmentation_rule_id(self, target_segmentation_rule_id): self._target_segmentation_rule_id = target_segmentation_rule_id @property def android(self): return self._android @android.setter def android(self, android): self._android = android @property def csv(self): return self._csv @csv.setter def csv(self, csv): self._csv = csv @property def dita(self): return self._dita @dita.setter def dita(self, dita): self._dita = dita @property def doc_book(self): return self._doc_book @doc_book.setter def doc_book(self, doc_book): self._doc_book = doc_book @property def doc(self): return self._doc @doc.setter def doc(self, doc): self._doc = doc @property def html(self): return self._html @html.setter def html(self, html): self._html = html @property def idml(self): return self._idml @idml.setter def idml(self, idml): self._idml = idml @property def json(self): return self._json @json.setter def json(self, json): self._json = json @property def mac(self): return self._mac @mac.setter def mac(self, mac): self._mac = mac @property def md(self): return self._md @md.setter def md(self, md): self._md = md @property def mif(self): return self._mif @mif.setter
Apache License 2.0
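Hedged sketch for the FileImportSettingsCreateDto.mif setter above. MifSettingsDto's own fields are not shown in this record, so it is constructed empty here; whether that is meaningful depends on the generated model.

from memsource_cli.models.file_import_settings_create_dto import FileImportSettingsCreateDto
from memsource_cli.models.mif_settings_dto import MifSettingsDto

settings = FileImportSettingsCreateDto(file_format='mif', input_charset='UTF-8')
settings.mif = MifSettingsDto()    # the setter simply stores the DTO on the instance
print(settings.file_format)        # 'mif' passes the allowed_values check shown above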
maier/cadvisor-collectd
src/cadvisor/python/cadvisor.py
CAdvisor.dispatch_metric
python
def dispatch_metric(self, container_name, container_id, plugin, plugin_instance, metric_type, type_instance, metric_value): pass
Send metrics to the target output. Intended to be overridden, e.g. by an abstraction over collectd's Values.dispatch().
https://github.com/maier/cadvisor-collectd/blob/7cc8a77161d8d1e413ee548c3e19b6c737c282c9/src/cadvisor/python/cadvisor.py#L134-L139
from __future__ import print_function import sys from abc import ABCMeta, abstractmethod import json import yaml import urllib2 import socket import docker import re class CAdvisor(object): __metaclass__ = ABCMeta def __init__(self, config): super(CAdvisor, self).__init__() self.name = self.__class__.__name__ self.doc_url = 'https://github.com/maier/cadvisor-collectd/wiki/Configuring-CAdvisor' self.config_host = config.get('host', 'cadvisor/docker') self.config_port = config.get('port', 8080) self.config_file = config.get('config_file', '/etc/collectd/cadvisor.yaml') self.host = None self.port = None self.config = {} try: f = open(self.config_file, 'r') self.config = yaml.load(f) except Exception, e: self.log_error('Unable to load configuration "{}": {}'.format(self.config_file, e)) sys.exit(1) self.docker_socket = self.config.get('docker_socket', '/var/run/docker.sock') self.active_metrics = self.get_active_metrics() self.system_enabled = self.config.get('system_enabled', False) self.system_fs_metrics = self.config.get('system_fs_metrics', False) self.system_services = self.config.get('system_services', { 'options': { 'include_mounts': False, 'include_sockets': False, 'include_docker_scopes': False, 'include_system_slice': False, 'include_user_slice': False, 'include_other_slices': False }, 'include': [], 'exclude': ['*'] }) self.service_filter = None if type(self.system_services['include']).__name__ != 'list': self.system_services['include'] = [] if type(self.system_services['exclude']).__name__ != 'list': self.system_services['exclude'] = [] if not self.system_services['include'] and not self.system_services['exclude']: self.service_filter = 'all' elif '*' in self.system_services['exclude'] and '*' not in self.system_services['include']: self.service_filter = 'include' elif '*' in self.system_services['include'] and '*' not in self.system_services['exclude']: self.service_filter = 'exclude' elif'*' in self.system_services['include'] and '*' in self.system_services['exclude']: self.log_error('Conflicting service filter configuration, cannot be include and exclude simultaneously. See documentation: {}'.format(self.doc_url)) sys.exit(1) else: self.log_error('No service filter configuration identified. See documentation: {}'.format(self.doc_url)) sys.exit(1) self.docker_enabled = self.config.get('docker_enabled', True) self.docker_container_config = self.config.get('docker_containers', []) if type(self.docker_container_config).__name__ != 'list': self.docker_container_config = [] self.host_namespec = self.config.get('ns_host', '{hn}') self.plugin_namespec = self.config.get('ns_plugin', '{cn}.') def log(self, message, level='INFO'): msg = '{level} -- {msg}'.format(level=level, msg=message) if level == 'ERR': print(msg, file=sys.stderr) else: print(msg) @abstractmethod def log_error(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message), 'ERR') @abstractmethod def log_warning(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message), 'ERR') @abstractmethod def log_notice(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message)) @abstractmethod def log_info(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message)) @abstractmethod def log_debug(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message)) @abstractmethod
Apache License 2.0
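dispatch_metric is the hook concrete collectors override. A full CAdvisor subclass would also have to implement the abstract logging methods, so the sketch below is a free-standing function with the same parameters that simply prints, purely to illustrate the call shape.

def dispatch_metric(container_name, container_id, plugin, plugin_instance,
                    metric_type, type_instance, metric_value):
    # Stand-in for an override that would hand values to collectd.
    instance = '{0}.{1}'.format(plugin, plugin_instance) if plugin_instance else plugin
    short_id = container_id[:12] if container_id else '-'
    print('{0}[{1}] {2}/{3} {4}={5}'.format(
        container_name, short_id, instance, metric_type, type_instance, metric_value))

dispatch_metric('web-1', 'a1b2c3d4e5f6a7b8', 'cpu', 'usage', 'gauge', 'total', 12.5)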
leonjza/hogar
hogar/Plugins/sample.py
commands
python
def commands (): return ['command1', 'trigger2']
Commands In the case of text plugins, returns the commands that this plugin should trigger for. For other message types, a empty list should be returned. If your plugin applies to any command (in the case of text messages), simply supply the a wildcard in the list ie. ['*'] -- @return list
https://github.com/leonjza/hogar/blob/a8cf4b6a6b508e5e86d26dd5cbd55add560d26a5/hogar/Plugins/sample.py#L52-L68
def enabled (): return True def applicable_types (): return ['text', 'audio', 'document', 'photo', 'sticker', 'video', 'contact', 'location']
MIT License
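A minimal text-plugin sketch modelled on the sample above. Only enabled(), applicable_types() and commands() appear in the sample itself; the run() handler is an assumption about what a complete hogar plugin would add.

def enabled():
    return True

def applicable_types():
    return ['text']        # only react to plain text messages

def commands():
    return ['ping']        # trigger on the 'ping' command

def run(message):
    # Assumed entry point, not shown in the sample plugin.
    return 'pong'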
masoniteframework/core
masonite/drivers/cache/CacheRedisDriver.py
CacheRedisDriver.delete
python
def delete(self, key): self.connection.delete('{0}_cache_{1}'.format(self.app_name, key))
Delete a cache entry from Redis.
https://github.com/masoniteframework/core/blob/235ee98a20f7359b0201aa4d2b2e7cf7b6c36f8b/masonite/drivers/cache/CacheRedisDriver.py#L93-L95
import os from masonite.contracts import CacheContract from masonite.drivers import BaseCacheDriver from masonite.exceptions import DriverLibraryNotFound class CacheRedisDriver(CacheContract, BaseCacheDriver): def __init__(self): from config import application, cache self.appconfig = application self.cache_forever = None self.app_name = os.getenv('APP_NAME', 'masonite') config = cache.DRIVERS['redis'] try: import redis self.redis = redis except ImportError: raise DriverLibraryNotFound( "Could not find the 'redis' library. Run pip install redis to fix this.") self.connection = redis.StrictRedis( host=config['host'], port=config['port'], password=config['password'], decode_responses=True) def store(self, key, value): self.cache_forever = True self.connection.set('{0}_cache_{1}'.format(self.app_name, key), value) return key def store_for(self, key, value, cache_time, cache_type): self.cache_forever = False cache_for_time = self.calculate_time(cache_type, cache_time) self.connection.set('{0}_cache_{1}'.format(self.app_name, key), value, ex=cache_for_time) return key def get(self, key): return self.connection.get('{0}_cache_{1}'.format(self.app_name, key))
MIT License
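Hedged sketch of the store/get/delete cycle around CacheRedisDriver.delete. It needs a Masonite project (config.application and config.cache importable) plus a reachable Redis server, so it only runs inside such an app.

from masonite.drivers.cache.CacheRedisDriver import CacheRedisDriver

cache = CacheRedisDriver()
cache.store('greeting', 'hello')    # stored under '<APP_NAME>_cache_greeting'
print(cache.get('greeting'))        # -> 'hello'
cache.delete('greeting')            # removes the namespaced key from Redis
print(cache.get('greeting'))        # -> None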
openstack/senlin
senlin/common/utils.py
format_node_name
python
def format_node_name(fmt, cluster, index): if not fmt: fmt = "node-$8R" result = "" last = 0 pattern = re.compile("(\$\d{0,8}[rRI])") for m in pattern.finditer(fmt): group = m.group() t = group[-1] width = group[1:-1] if t == "R" or t == "r": if width != "": sub = random_name(int(width)) else: sub = random_name(8) if t == "r": sub = sub.lower() elif t == "I": if width != "": str_index = str(index) sub = str_index.zfill(int(width)) else: sub = str(index) result += fmt[last:m.start()] + sub last = m.end() result += fmt[last:] return result
Generates a node name using the given format. :param fmt: A string containing format directives. Currently we only support the following keys: - "$nR": a random string with at most 'n' characters where 'n' defaults to 8. - "$nI": a string representation of the node index where 'n' instructs the number of digits generated with 0s padded to the left. :param cluster: The DB object for the cluster to which the node belongs. This parameter is provided for future extension. :param index: The index for the node in the target cluster. :returns: A string containing the generated node name.
https://github.com/openstack/senlin/blob/390779ca1e08f819683e79993696f945f1c0393e/senlin/common/utils.py#L146-L189
import random import re import string from jsonpath_rw import parse from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import timeutils import requests import urllib from senlin.common import consts from senlin.common import exception from senlin.common.i18n import _ from senlin.objects import service as service_obj cfg.CONF.import_opt('max_response_size', 'senlin.conf') cfg.CONF.import_opt('periodic_interval', 'senlin.conf') LOG = logging.getLogger(__name__) _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' class URLFetchError(exception.Error, IOError): pass def get_positive_int(v): if strutils.is_int_like(v): count = int(v) if count > 0: return True, count return False, 0 def parse_level_values(values): if not isinstance(values, list): values = [values] result = [] for v in values: if v in consts.EVENT_LEVELS: result.append(consts.EVENT_LEVELS[v]) elif isinstance(v, int): result.append(v) if result == []: return None return result def level_from_number(value): n = int(value) levels = {value: key for key, value in consts.EVENT_LEVELS.items()} return levels.get(n, None) def url_fetch(url, timeout=1, allowed_schemes=('http', 'https'), verify=True): components = urllib.parse.urlparse(url) if components.scheme not in allowed_schemes: raise URLFetchError(_('Invalid URL scheme %s') % components.scheme) if components.scheme == 'file': try: return urllib.request.urlopen(url, timeout=timeout).read() except urllib.error.URLError as uex: raise URLFetchError(_('Failed to retrieve data: %s') % uex) try: resp = requests.get(url, stream=True, verify=verify, timeout=timeout) resp.raise_for_status() reader = resp.iter_content(chunk_size=1000) result = "" for chunk in reader: if isinstance(chunk, bytes): chunk = chunk.decode('utf-8') result += chunk if len(result) > cfg.CONF.max_response_size: raise URLFetchError("Data exceeds maximum allowed size (%s" " bytes)" % cfg.CONF.max_response_size) return result except requests.exceptions.RequestException as ex: raise URLFetchError(_('Failed to retrieve data: %s') % ex) def random_name(length=8): if length <= 0: return '' lead = random.choice(string.ascii_letters) tail = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length - 1)) return lead + tail
Apache License 2.0
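Hedged call sketch for format_node_name; the $R outputs are random, so the values in the comments are only representative. Importing senlin.common.utils pulls in the oslo configuration machinery, so a senlin environment is assumed.

from senlin.common.utils import format_node_name

print(format_node_name('node-$3I', cluster=None, index=7))    # -> 'node-007'
print(format_node_name('node-$I', cluster=None, index=7))     # -> 'node-7'
print(format_node_name('$4R-worker', cluster=None, index=0))  # e.g. 'Ab3Z-worker'
print(format_node_name(None, cluster=None, index=0))          # falls back to 'node-$8R'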
academysoftwarefoundation/opencue
rqd/rqd/rqnimby.py
Nimby.signalHandler
python
def signalHandler(self, sig, frame): del sig del frame self.stop()
If a signal is detected, call .stop()
https://github.com/academysoftwarefoundation/opencue/blob/da28ae905b81e7d1125db2073a369fdc0ae9acd4/rqd/rqd/rqnimby.py#L59-L63
from __future__ import absolute_import from __future__ import print_function from __future__ import division import os import select import time import signal import threading import logging as log import rqd.rqconstants import rqd.rqutil class Nimby(threading.Thread): def __init__(self, rqCore): threading.Thread.__init__(self) self.rqCore = rqCore self.locked = False self.active = False self.fileObjList = [] self.results = [[]] self.thread = None signal.signal(signal.SIGINT, self.signalHandler)
Apache License 2.0
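A standalone illustration of the pattern Nimby.signalHandler implements: a SIGINT handler that discards its (sig, frame) arguments and asks the worker to stop. The Event-based scaffolding below is our own, not part of rqd.

import signal
import threading

stop_event = threading.Event()

def signal_handler(sig, frame):
    del sig, frame           # unused, mirroring Nimby.signalHandler
    stop_event.set()         # the equivalent of calling .stop()

signal.signal(signal.SIGINT, signal_handler)
print('Running; press Ctrl+C to stop.')
stop_event.wait()            # returns once the handler fires
print('Stopped cleanly.')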
huangshiyu13/deepfake_detection
timm/models/gluon_xception.py
gluon_xception65
python
def gluon_xception65(pretrained=False, num_classes=1000, in_chans=3, **kwargs): default_cfg = default_cfgs['gluon_xception65'] model = Xception65(num_classes=num_classes, in_chans=in_chans, **kwargs) model.default_cfg = default_cfg if pretrained: load_pretrained(model, default_cfg, num_classes, in_chans) return model
Modified Aligned Xception-65
https://github.com/huangshiyu13/deepfake_detection/blob/0aa16b8ee46698c3380c5c35c12fd0e935219767/timm/models/gluon_xception.py#L446-L454
import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict from .registry import register_model from .helpers import load_pretrained from .layers import SelectAdaptivePool2d from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD __all__ = ['Xception65', 'Xception71'] default_cfgs = { 'gluon_xception65': { 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth', 'input_size': (3, 299, 299), 'crop_pct': 0.875, 'pool_size': (10, 10), 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'num_classes': 1000, 'first_conv': 'conv1', 'classifier': 'fc' }, 'gluon_xception71': { 'url': '', 'input_size': (3, 299, 299), 'crop_pct': 0.875, 'pool_size': (10, 10), 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'num_classes': 1000, 'first_conv': 'conv1', 'classifier': 'fc' } } _USE_FIXED_PAD = False def _pytorch_padding(kernel_size, stride=1, dilation=1, **_): if _USE_FIXED_PAD: return 0 else: padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 fp = _fixed_padding(kernel_size, dilation) assert all(padding == p for p in fp) return padding def _fixed_padding(kernel_size, dilation): kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg return [pad_beg, pad_end, pad_beg, pad_end] class SeparableConv2d(nn.Module): def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None, norm_kwargs=None): super(SeparableConv2d, self).__init__() norm_kwargs = norm_kwargs if norm_kwargs is not None else {} self.kernel_size = kernel_size self.dilation = dilation padding = _fixed_padding(self.kernel_size, self.dilation) if _USE_FIXED_PAD and any(p > 0 for p in padding): self.fixed_padding = nn.ZeroPad2d(padding) else: self.fixed_padding = None self.conv_dw = nn.Conv2d( inplanes, inplanes, kernel_size, stride=stride, padding=_pytorch_padding(kernel_size, stride, dilation), dilation=dilation, groups=inplanes, bias=bias) self.bn = norm_layer(num_features=inplanes, **norm_kwargs) self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias) def forward(self, x): if self.fixed_padding is not None: x = self.fixed_padding(x) x = self.conv_dw(x) x = self.bn(x) x = self.conv_pw(x) return x class Block(nn.Module): def __init__(self, inplanes, planes, num_reps, stride=1, dilation=1, norm_layer=None, norm_kwargs=None, start_with_relu=True, grow_first=True, is_last=False): super(Block, self).__init__() norm_kwargs = norm_kwargs if norm_kwargs is not None else {} if planes != inplanes or stride != 1: self.skip = nn.Sequential() self.skip.add_module('conv1', nn.Conv2d( inplanes, planes, 1, stride=stride, bias=False)), self.skip.add_module('bn1', norm_layer(num_features=planes, **norm_kwargs)) else: self.skip = None rep = OrderedDict() l = 1 filters = inplanes if grow_first: if start_with_relu: rep['act%d' % l] = nn.ReLU(inplace=False) rep['conv%d' % l] = SeparableConv2d( inplanes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs) rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs) filters = planes l += 1 for _ in range(num_reps - 1): if grow_first or start_with_relu: rep['act%d' % l] = nn.ReLU(inplace=grow_first or not start_with_relu) rep['conv%d' % l] = SeparableConv2d( filters, filters, 3, 1, dilation, norm_layer=norm_layer, 
norm_kwargs=norm_kwargs) rep['bn%d' % l] = norm_layer(num_features=filters, **norm_kwargs) l += 1 if not grow_first: rep['act%d' % l] = nn.ReLU(inplace=True) rep['conv%d' % l] = SeparableConv2d( inplanes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs) rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs) l += 1 if stride != 1: rep['act%d' % l] = nn.ReLU(inplace=True) rep['conv%d' % l] = SeparableConv2d( planes, planes, 3, stride, norm_layer=norm_layer, norm_kwargs=norm_kwargs) rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs) l += 1 elif is_last: rep['act%d' % l] = nn.ReLU(inplace=True) rep['conv%d' % l] = SeparableConv2d( planes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs) rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs) l += 1 self.rep = nn.Sequential(rep) def forward(self, x): skip = x if self.skip is not None: skip = self.skip(skip) x = self.rep(x) + skip return x class Xception65(nn.Module): def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_rate=0., global_pool='avg'): super(Xception65, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate norm_kwargs = norm_kwargs if norm_kwargs is not None else {} if output_stride == 32: entry_block3_stride = 2 exit_block20_stride = 2 middle_block_dilation = 1 exit_block_dilations = (1, 1) elif output_stride == 16: entry_block3_stride = 2 exit_block20_stride = 1 middle_block_dilation = 1 exit_block_dilations = (1, 2) elif output_stride == 8: entry_block3_stride = 1 exit_block20_stride = 1 middle_block_dilation = 2 exit_block_dilations = (2, 4) else: raise NotImplementedError self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = norm_layer(num_features=32, **norm_kwargs) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = norm_layer(num_features=64) self.block1 = Block( 64, 128, num_reps=2, stride=2, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False) self.block2 = Block( 128, 256, num_reps=2, stride=2, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True) self.block3 = Block( 256, 728, num_reps=2, stride=entry_block3_stride, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True, is_last=True) self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block( 728, 728, num_reps=3, stride=1, dilation=middle_block_dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True)) for i in range(4, 20)])) self.block20 = Block( 728, 1024, num_reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0], norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=False, is_last=True) self.conv3 = SeparableConv2d( 1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer, norm_kwargs=norm_kwargs) self.bn3 = norm_layer(num_features=1536, **norm_kwargs) self.conv4 = SeparableConv2d( 1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer, norm_kwargs=norm_kwargs) self.bn4 = norm_layer(num_features=1536, **norm_kwargs) self.num_features = 2048 self.conv5 = SeparableConv2d( 1536, self.num_features, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer, norm_kwargs=norm_kwargs) self.bn5 = norm_layer(num_features=self.num_features, **norm_kwargs) self.global_pool = 
SelectAdaptivePool2d(pool_type=global_pool) self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes) def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes) if num_classes else None def forward_features(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.relu(x) x = self.block1(x) x = self.relu(x) x = self.block2(x) x = self.block3(x) x = self.mid(x) x = self.block20(x) x = self.relu(x) x = self.conv3(x) x = self.bn3(x) x = self.relu(x) x = self.conv4(x) x = self.bn4(x) x = self.relu(x) x = self.conv5(x) x = self.bn5(x) x = self.relu(x) return x def forward(self, x): x = self.forward_features(x) x = self.global_pool(x).flatten(1) if self.drop_rate: F.dropout(x, self.drop_rate, training=self.training) x = self.fc(x) return x class Xception71(nn.Module): def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_rate=0., global_pool='avg'): super(Xception71, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate norm_kwargs = norm_kwargs if norm_kwargs is not None else {} if output_stride == 32: entry_block3_stride = 2 exit_block20_stride = 2 middle_block_dilation = 1 exit_block_dilations = (1, 1) elif output_stride == 16: entry_block3_stride = 2 exit_block20_stride = 1 middle_block_dilation = 1 exit_block_dilations = (1, 2) elif output_stride == 8: entry_block3_stride = 1 exit_block20_stride = 1 middle_block_dilation = 2 exit_block_dilations = (2, 4) else: raise NotImplementedError self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = norm_layer(num_features=32, **norm_kwargs) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = norm_layer(num_features=64) self.block1 = Block( 64, 128, num_reps=2, stride=2, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False) self.block2 = nn.Sequential(*[ Block( 128, 256, num_reps=2, stride=1, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True), Block( 256, 256, num_reps=2, stride=2, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True), Block( 256, 728, num_reps=2, stride=2, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=False, grow_first=True)]) self.block3 = Block( 728, 728, num_reps=2, stride=entry_block3_stride, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True, is_last=True) self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block( 728, 728, num_reps=3, stride=1, dilation=middle_block_dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=True)) for i in range(4, 20)])) self.block20 = Block( 728, 1024, num_reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0], norm_layer=norm_layer, norm_kwargs=norm_kwargs, start_with_relu=True, grow_first=False, is_last=True) self.conv3 = SeparableConv2d( 1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer, norm_kwargs=norm_kwargs) self.bn3 = norm_layer(num_features=1536, **norm_kwargs) self.conv4 = SeparableConv2d( 1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer, 
norm_kwargs=norm_kwargs) self.bn4 = norm_layer(num_features=1536, **norm_kwargs) self.num_features = 2048 self.conv5 = SeparableConv2d( 1536, self.num_features, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer, norm_kwargs=norm_kwargs) self.bn5 = norm_layer(num_features=self.num_features, **norm_kwargs) self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes) def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.fc = nn.Linear(self.num_features * self.global_pool.feat_mult(), num_classes) if num_classes else None def forward_features(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.relu(x) x = self.block1(x) x = self.relu(x) x = self.block2(x) x = self.block3(x) x = self.mid(x) x = self.block20(x) x = self.relu(x) x = self.conv3(x) x = self.bn3(x) x = self.relu(x) x = self.conv4(x) x = self.bn4(x) x = self.relu(x) x = self.conv5(x) x = self.bn5(x) x = self.relu(x) return x def forward(self, x): x = self.forward_features(x) x = self.global_pool(x).flatten(1) if self.drop_rate: F.dropout(x, self.drop_rate, training=self.training) x = self.fc(x) return x @register_model
MIT License
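Hedged sketch for the gluon_xception65 factory above: build the network without pretrained weights and push a dummy batch through it. Requires torch plus this repository's timm package on the import path; 299x299 matches the default_cfg input_size.

import torch
from timm.models.gluon_xception import gluon_xception65

model = gluon_xception65(pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 299, 299))
print(logits.shape)    # torch.Size([1, 10])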
msmbuilder/osprey
osprey/config.py
Config.fromdict
python
def fromdict(cls, config, check_fields=True): m = super(Config, cls).__new__(cls) m.path = '.' m.verbose = False m.config = m._merge_defaults(config) if check_fields: m._check_fields() return m
Create a Config object directly from a config dict.
https://github.com/msmbuilder/osprey/blob/ea09da24e45820e1300e24a52fefa6c849f7a986/osprey/config.py#L117-L125
from __future__ import print_function, absolute_import, division import sys import six import hashlib import traceback import importlib import contextlib from os.path import join, isfile, dirname, abspath import yaml import sklearn.base from six.moves import cPickle from six import iteritems from six.moves import reduce from pkg_resources import resource_filename from .entry_point import load_entry_point from .utils import (dict_merge, in_directory, prepend_syspath, num_samples, trials_to_dict) from .search_space import SearchSpace from .strategies import BaseStrategy from .dataset_loaders import BaseDatasetLoader from .cross_validators import BaseCVFactory from .trials import Trial, make_session from .subclass_factory import init_subclass_by_name from . import eval_scopes FIELDS = { 'estimator': ['pickle', 'eval', 'eval_scope', 'entry_point', 'params', 'module'], 'dataset_loader': ['name', 'params'], 'trials': ['uri', 'project_name'], 'search_space': dict, 'strategy': ['name', 'params'], 'cv': (int, dict), 'scoring': (str, type(None)), 'random_seed': (int, type(None)), 'max_param_suggestion_retries': (int, type(None)), } class Config(object): def __init__(self, path, verbose=True): self.path = path self.verbose = verbose if not isfile(self.path): raise RuntimeError('%s does not exist' % self.path) with open(self.path, 'rb') as f: config = parse(f) self.config = self._merge_defaults(config) self._check_fields() if self.verbose: print('Loading config file: %s...' % path) def _merge_defaults(self, config): fn = resource_filename('osprey', join('data', 'default_config.yaml')) with open(fn) as f: default = parse(f) return reduce(dict_merge, [default, config]) def _check_fields(self): for section, submeta in iteritems(self.config): if section not in FIELDS: raise RuntimeError("unknown section: %s" % section) if isinstance(FIELDS[section], type): if not isinstance(submeta, FIELDS[section]): raise RuntimeError("%s should be a %s, but is a %s." % ( section, FIELDS[section].__name__, type(submeta).__name__)) elif isinstance(FIELDS[section], tuple): if not any(isinstance(submeta, t) for t in FIELDS[section]): raise RuntimeError( "The %s field should be one of %s, not %s" % ( section, [e.__name__ for e in FIELDS[section]], type(submeta).__name__)) else: for key in submeta: if key not in FIELDS[section]: raise RuntimeError("in section %r: unknown key %r" % ( section, key)) missing_fields = set(FIELDS.keys()).difference(self.config.keys()) if len(missing_fields) > 0: raise RuntimeError('The following required fields are missing from' ' the config file (%s): %s' % ( ', '.join(missing_fields), self.path)) @classmethod
Apache License 2.0
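Hedged sketch for Config.fromdict. With check_fields=False a partial dict is accepted and merged over osprey's bundled default_config.yaml; the estimator and search_space values below are placeholders, not taken from this record.

from osprey.config import Config

cfg = Config.fromdict(
    {
        'estimator': {'eval': 'Ridge()', 'eval_scope': 'sklearn'},
        'search_space': {'alpha': {'min': 0.1, 'max': 10.0, 'type': 'float'}},
    },
    check_fields=False,
)
print(cfg.config['estimator'])    # merged on top of the packaged defaults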
jwcook/naturtag
naturtag/widgets/autocomplete.py
AutocompleteSearch.reset
python
def reset(self, *args): self.text_input.text = '' self.text_input.suggestion_text = '' self.dropdown_view.data = []
Reset inputs and autocomplete results
https://github.com/jwcook/naturtag/blob/34701c5be880baf5a1c4b1a6e43e28168de8c437/naturtag/widgets/autocomplete.py#L145-L149
import asyncio from collections.abc import Mapping from logging import getLogger from kivy.clock import Clock from kivy.core.window import Window from kivy.metrics import dp from kivy.properties import ( BooleanProperty, DictProperty, NumericProperty, ObjectProperty, StringProperty, ) from kivy.uix.behaviors import FocusBehavior from kivy.uix.recycleboxlayout import RecycleBoxLayout from kivy.uix.recycleview.layout import LayoutSelectionBehavior from kivy.uix.recycleview.views import RecycleDataViewBehavior from kivymd.uix.boxlayout import MDBoxLayout from kivymd.uix.card import MDCard from kivymd.uix.label import MDLabel from kivymd.uix.textfield import MDTextField from naturtag.app.screens import load_kv from naturtag.constants import AUTOCOMPLETE_DELAY, AUTOCOMPLETE_MIN_CHARS from naturtag.widgets import TextFieldWrapper PADDING = dp(50) ROW_SIZE = dp(22) logger = getLogger().getChild(__name__) load_kv('autocomplete') class AutocompleteSearch(MDBoxLayout, TextFieldWrapper): def __init__(self, text_input_kwargs=None, **kwargs): super().__init__(**kwargs) self.register_event_type('on_selection') self.trigger = Clock.create_trigger(self.callback, AUTOCOMPLETE_DELAY) Clock.schedule_once(lambda *x: self.post_init(text_input_kwargs or {})) def post_init(self, text_input_kwargs): self.text_input = self.ids.text_input self.text_input.bind( text=lambda *x: self.trigger(), focus=self.on_text_focus, ) self.ids.clear_input_button.bind(on_release=self.reset) if text_input_kwargs: logger.debug(f'Overriding text input settings: {text_input_kwargs}') for k, v in text_input_kwargs.items(): setattr(self.text_input, k, v) self.dropdown_container = self.ids.dropdown_container self.dropdown_view = self.ids.dropdown_view self.ids.dropdown_layout.bind(on_selection=lambda *x: self.update_selection(*x)) def on_text_focus(self, instance, *args): if instance.focus: logger.debug('Opening dropdown on text focus') self.dropdown_container.open() def callback(self, *args): logger.debug('Populating autocomplete results') search_str = self.text_input.text if len(search_str) < AUTOCOMPLETE_MIN_CHARS: return def get_row(item): if isinstance(item, Mapping): return item return {'text': item, 'suggestion_text': item, 'metadata': {}} matches = asyncio.run(self.get_autocomplete(search_str)) logger.info(f'Found {len(matches)} matches for search string "{search_str}"') self.dropdown_view.data = [get_row(i) for i in matches] self.dropdown_container.open() def update_selection(self, instance, suggestion_text, metadata): self.text_input.suggestion_text = ' ' + suggestion_text self.dispatch('on_selection', metadata) Clock.schedule_once(self.dropdown_container.dismiss, 0.2) def on_selection(self, metadata): async def get_autocomplete(self, search_str): return [{'text': f'Text: {search_str}'}] + [{'text': f'Text: {i}'} for i in range(9)]
MIT License
diefenbach/django-lfc
lfc/models.py
BaseContent.get_context
python
def get_context(self, request):
    obj_template = self.get_template()

    children_cache_key = "%s-children-%s-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX,
        self.content_type, self.id, request.user.id)
    sub_objects = cache.get(children_cache_key)
    if sub_objects is None:
        if obj_template.children_columns == 0:
            sub_objects = self.get_children(request)
        else:
            sub_objects = lfc.utils.get_lol(self.get_children(request), obj_template.children_columns)
        cache.set(children_cache_key, sub_objects)

    images_cache_key = "%s-images-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX,
        self.content_type, self.id)
    cached_images = cache.get(images_cache_key)
    if cached_images:
        image = cached_images["image"]
        images = cached_images["images"]
        subimages = cached_images["subimages"]
    else:
        temp_images = list(self.images.all())
        if temp_images:
            if obj_template.images_columns == 0:
                images = temp_images
                image = images[0]
                subimages = temp_images[1:]
            else:
                images = lfc.utils.get_lol(temp_images, obj_template.images_columns)
                subimages = lfc.utils.get_lol(temp_images[1:], obj_template.images_columns)
                image = images[0][0]
        else:
            image = None
            images = []
            subimages = []
        cache.set(images_cache_key, {
            "image": image,
            "images": images,
            "subimages": subimages
        })

    self.context = RequestContext(request, {
        "lfc_context": self,
        "self": self,
        "images": images,
        "image": image,
        "subimages": subimages,
        "files": self.files.all(),
        "sub_objects": sub_objects,
        "portal": lfc.utils.get_portal(),
    })

    return self.context
Calculates and returns the request context.
https://github.com/diefenbach/django-lfc/blob/75c900d672b4d36705fb8fa4833c446bbb78efea/lfc/models.py#L903-L963
import datetime import re from django import template from django.conf import settings from django.contrib.auth.models import User from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.core.cache import cache from django.core.urlresolvers import reverse from django.db import models from django.template import RequestContext from django.template.loader import render_to_string from django.utils import translation from django.utils.translation import ugettext_lazy as _ from tagging import fields from tagging.models import Tag from tagging.models import TaggedItem from portlets.models import PortletAssignment from portlets.models import PortletBlocking import workflows.utils from workflows import WorkflowBase from workflows.models import Workflow from workflows.models import State from workflows.models import StateObjectRelation from permissions import PermissionBase from permissions.exceptions import Unauthorized from permissions.models import Role from permissions.models import ObjectPermission from permissions.models import ObjectPermissionInheritanceBlock import lfc.utils from lfc.fields.thumbs import ImageWithThumbsField from lfc.managers import BaseContentManager from lfc.settings import ALLOW_COMMENTS_CHOICES from lfc.settings import ALLOW_COMMENTS_DEFAULT from lfc.settings import ALLOW_COMMENTS_TRUE from lfc.settings import LANGUAGE_CHOICES from lfc.settings import ORDER_BY_CHOICES from lfc.settings import IMAGE_SIZES from lfc.settings import UPLOAD_FOLDER class Application(models.Model): name = models.CharField(max_length=100, unique=True) class WorkflowStatesInformation(models.Model): state = models.ForeignKey(State) public = models.BooleanField(default=False) review = models.BooleanField(default=False) def __unicode__(self): result = self.state.name if self.public: result += u" " + u"Public" if self.review: result += u" " + "Review" return result class Template(models.Model): name = models.CharField(max_length=50, unique=True) path = models.CharField(max_length=100) children_columns = models.IntegerField(verbose_name=_(u"Subpages columns"), default=1) images_columns = models.IntegerField(verbose_name=_(u"Images columns"), default=1) class Meta: ordering = ("name", ) def __unicode__(self): return self.name class ContentTypeRegistration(models.Model): type = models.CharField(_(u"Type"), blank=True, max_length=100, unique=True) name = models.CharField(_(u"Name"), blank=True, max_length=100, unique=True) display_select_standard = models.BooleanField(_(u"Display select standard"), default=True) display_position = models.BooleanField(_(u"Display position"), default=True) global_addable = models.BooleanField(_(u"Global addable"), default=True) subtypes = models.ManyToManyField("self", verbose_name=_(u"Allowed sub types"), symmetrical=False, blank=True) templates = models.ManyToManyField("Template", verbose_name=_(u"Templates"), related_name="content_type_registrations") default_template = models.ForeignKey("Template", verbose_name=_(u"Default template"), blank=True, null=True) workflow = models.ForeignKey(Workflow, verbose_name=_(u"Workflow"), blank=True, null=True) class Meta: ordering = ("name", ) def __unicode__(self): return self.name def get_subtypes(self): return self.subtypes.all() def get_templates(self): return self.templates.all() class Portal(models.Model, PermissionBase): title = models.CharField(_(u"Title"), blank=True, max_length=100) standard = models.ForeignKey("BaseContent", verbose_name=_(u"Standard"), 
blank=True, null=True, on_delete=models.SET_NULL) from_email = models.EmailField(_(u"From e-mail address")) notification_emails = models.TextField(_(u"Notification email addresses")) allow_comments = models.BooleanField(_(u"Allow comments"), default=False) images = generic.GenericRelation("Image", verbose_name=_(u"Images"), object_id_field="content_id", content_type_field="content_type") files = generic.GenericRelation("File", verbose_name=_(u"Files"), object_id_field="content_id", content_type_field="content_type") def __unicode__(self): return self.title @property def content_type(self): return u"portal" def get_content_type(self): return u"Portal" def get_absolute_url(self): language = translation.get_language() if language == settings.LANGUAGE_CODE: return reverse("lfc_base_view") else: return reverse("lfc_base_view", kwargs={"language": language}) def get_notification_emails(self): adresses = re.split("[\s,]+", self.notification_emails) return adresses def are_comments_allowed(self): return self.allow_comments def get_parent_for_permissions(self): return None def get_parent_for_portlets(self): return None def get_template(self): return Template.objects.get(name="Article") def get_children(self, request=None, *args, **kwargs): return lfc.utils.get_content_objects(request, parent=None, **kwargs) def has_permission(self, user, codename): try: roles = [lfc.utils.get_cached_object(Role, name="Anonymous").id] except Role.DoesNotExist: roles = [] try: if user == self.creator: roles.append(Role.objects.get(name="Owner").id) except (AttributeError, Role.DoesNotExist): pass return super(Portal, self).has_permission(user, codename, roles) def check_permission(self, user, codename): if not self.has_permission(user, codename): raise Unauthorized("'%s' doesn't have permission '%s' for portal." 
% (user, codename)) class AbstractBaseContent(models.Model, WorkflowBase, PermissionBase): objects = BaseContentManager() class Meta: abstract = True class BaseContent(AbstractBaseContent): content_type = models.CharField(_(u"Content type"), max_length=100, blank=True) title = models.CharField(_(u"Title"), max_length=100) display_title = models.BooleanField(_(u"Display title"), default=True) slug = models.SlugField(_(u"Slug"), max_length=100) description = models.TextField(_(u"Description"), blank=True) position = models.PositiveSmallIntegerField(_(u"Position"), default=1) language = models.CharField(_(u"Language"), max_length=10, choices=LANGUAGE_CHOICES, default="0") canonical = models.ForeignKey("self", verbose_name=_(u"Canonical"), related_name="translations", blank=True, null=True, on_delete=models.SET_NULL) tags = fields.TagField(_(u"Tags")) parent = models.ForeignKey("self", verbose_name=_(u"Parent"), blank=True, null=True, related_name="children") template = models.ForeignKey("Template", verbose_name=_(u"Template"), blank=True, null=True) standard = models.ForeignKey("self", verbose_name=_(u"Standard"), blank=True, null=True, on_delete=models.SET_NULL) order_by = models.CharField(_(u"Order by"), max_length=20, default="position", choices=ORDER_BY_CHOICES) exclude_from_navigation = models.BooleanField(_(u"Exclude from navigation"), default=False) exclude_from_search = models.BooleanField(_(u"Exclude from search results"), default=False) creator = models.ForeignKey(User, verbose_name=_(u"Creator"), null=True) creation_date = models.DateTimeField(_(u"Creation date"), auto_now_add=True) modification_date = models.DateTimeField(_(u"Modification date"), auto_now=True) publication_date = models.DateTimeField(_(u"Publication date"), null=True, blank=True) start_date = models.DateTimeField(_(u"Start date"), null=True, blank=True) end_date = models.DateTimeField(_(u"End date"), null=True, blank=True) meta_title = models.CharField(_(u"Meta title"), max_length=100, default="<portal_title> - <title>") meta_keywords = models.TextField(_(u"Meta keywords"), blank=True, default="<tags>") meta_description = models.TextField(_(u"Meta description"), blank=True, default="<description>") images = generic.GenericRelation("Image", verbose_name=_(u"Images"), object_id_field="content_id", content_type_field="content_type") files = generic.GenericRelation("File", verbose_name=_(u"Files"), object_id_field="content_id", content_type_field="content_type") allow_comments = models.PositiveSmallIntegerField(_(u"Commentable"), choices=ALLOW_COMMENTS_CHOICES, default=ALLOW_COMMENTS_DEFAULT) searchable_text = models.TextField(blank=True) working_copy_base = models.ForeignKey("self", verbose_name=_(u"Working copy base"), related_name="working_copies", blank=True, null=True, on_delete=models.SET_NULL) version = models.PositiveSmallIntegerField(blank=True, null=True) class Meta: ordering = ["position"] unique_together = ["parent", "slug", "language"] def __unicode__(self): return unicode(self.title) def has_meta_data_tab(self): return getattr(settings, "LFC_MANAGE_META_DATA", True) def has_children_tab(self): return getattr(settings, "LFC_MANAGE_CHILDREN", True) def has_images_tab(self): return getattr(settings, "LFC_MANAGE_IMAGES", True) def has_files_tab(self): return getattr(settings, "LFC_MANAGE_FILES", True) def has_portlets_tab(self): return getattr(settings, "LFC_MANAGE_PORTLETS", True) def has_comments_tab(self): return getattr(settings, "LFC_MANAGE_COMMENTS", True) def has_seo_tab(self): return 
getattr(settings, "LFC_MANAGE_SEO", True) def has_history_tab(self): return getattr(settings, "LFC_MANAGE_HISTORY", True) def has_permissions_tab(self): return getattr(settings, "LFC_MANAGE_PERMISSIONS", True) def get_tabs(self, request): return [] def save(self, *args, **kwargs): self.searchable_text = self.get_searchable_text() if self.content_type == "": self.content_type = self.__class__.__name__.lower() super(BaseContent, self).save(*args, **kwargs) co = self.get_content_object() if workflows.utils.get_state(co) is None: workflows.utils.set_initial_state(co) lfc.utils.clear_cache() def delete(self, *args, **kwargs): ctype = ContentType.objects.get_for_model(self) TaggedItem.objects.filter(object_id=self.id, content_type=ctype).delete() Tag.objects.annotate(item_count=models.Count('items')).filter(item_count=0).delete() for image in self.images.all(): try: image.image.delete() except AttributeError: pass try: image.delete() except AssertionError: pass for myfile in self.files.all(): try: myfile.file.delete() except AttributeError: pass try: myfile.delete() except AssertionError: pass StateObjectRelation.objects.filter(content_id=self.id, content_type=ctype).delete() ObjectPermission.objects.filter(content_id=self.id, content_type=ctype).delete() ObjectPermissionInheritanceBlock.objects.filter(content_id=self.id, content_type=ctype).delete() for pa in PortletAssignment.objects.filter(content_id=self.id, content_type=ctype): pa.portlet.delete() pa.delete() PortletBlocking.objects.filter(content_id=self.id, content_type=ctype).delete() super(BaseContent, self).delete(*args, **kwargs) def get_absolute_url(self): page = self.standard or self obj = page slugs = [] while obj is not None: slugs.append(obj.slug) obj = obj.parent slugs.reverse() slug = "/".join(slugs) if page.language == settings.LANGUAGE_CODE: return ("lfc_base_view", (), {"slug": slug}) elif page.language == "0": if page.parent: language = page.parent.language if language == "0": return ("lfc_base_view", (), {"slug": slug}) else: language = translation.get_language() if language == settings.LANGUAGE_CODE: return ("lfc_base_view", (), {"slug": slug}) else: return ("lfc_base_view", (), {"slug": slug, "language": language}) else: return ("lfc_base_view", (), {"slug": slug, "language": page.language}) get_absolute_url = models.permalink(get_absolute_url) def add_history(self, request, action): History.objects.create(obj=self.get_base_object(), action=action, user=request.user) def get_content_object(self): if self.__class__.__name__.lower() == "basecontent": return getattr(self, self.content_type) else: return self def get_base_object(self): if self.__class__.__name__.lower() == "basecontent": return self else: return self.basecontent_ptr def get_searchable_text(self): result = self.title + " " + self.description return result.strip() def edit_form(self, **kwargs): raise(NotImplementedError, "form has to be implemented by sub classed") def add_form(self, **kwargs): from lfc.manage.forms import AddForm return AddForm(**kwargs) def get_ancestors(self): ancestors = [] obj = self while obj and obj.parent is not None: temp = obj.parent.get_content_object() ancestors.append(temp) obj = obj.parent return ancestors def get_ancestors_reverse(self): ancestors = self.get_ancestors() ancestors.reverse() return ancestors def get_content_type(self): info = lfc.utils.registration.get_info(self.content_type) return info.name def get_descendants(self, request=None, result=None): if result is None: result = [] for child in 
self.get_children(request): result.append(child) child.get_descendants(request, result) return result def has_children(self, request=None, *args, **kwargs): return len(lfc.utils.get_content_objects(request, parent=self, **kwargs)) > 0 def get_children(self, request=None, *args, **kwargs): return lfc.utils.get_content_objects(request, parent=self, **kwargs) def get_image(self): images = self.images.all() try: return images[0] except IndexError: return None def get_meta_title(self): title = self.meta_title.replace("<title>", self.title) title = title.replace("<portal_title>", lfc.utils.get_portal().title) return title def get_meta_keywords(self): keywords = self.meta_keywords.replace("<title>", self.title) keywords = keywords.replace("<description>", self.description) keywords = keywords.replace("<tags>", self.tags) return keywords def get_meta_description(self): description = self.meta_description.replace("<title>", self.title) description = description.replace("<description>", self.description) description = description.replace("<tags>", self.tags) return description def get_template(self): if self.template is not None: return self.template else: default_template = lfc.utils.registration.get_default_template(self) if default_template is not None: return default_template else: return lfc.utils.get_portal().get_template() def get_title(self): return self.display_title and self.title or "" def is_canonical(self): return self.language in (settings.LANGUAGE_CODE, "0") def get_canonical(self, request): if self.is_canonical(): return self else: if self.canonical: obj = BaseContent.objects.get(pk=self.canonical.id) if self.has_permission(request.user, "view"): return obj.get_content_object() else: return None def is_translation(self): return not self.is_canonical() def has_language(self, request, language): if self.language == "0": return True if self.language == language: return True if self.is_translation(): canonical = self.get_canonical(request) if canonical and canonical.language == language: return True if canonical and canonical.get_translation(request, language): return True if self.is_canonical(): if self.get_translation(request, language): return True return False def get_translation(self, request, language): if self.is_translation(): return None try: translation = self.translations.get(language=language).get_content_object() if translation.has_permission(request.user, "view"): return translation else: return None except BaseContent.DoesNotExist: return None def are_comments_allowed(self): if self.allow_comments == ALLOW_COMMENTS_DEFAULT: if self.parent: return self.parent.are_comments_allowed() else: return lfc.utils.get_portal().are_comments_allowed() else: if self.allow_comments == ALLOW_COMMENTS_TRUE: return True else: return False def get_parent_for_portlets(self): return self.parent and self.parent.get_content_object() or lfc.utils.get_portal() def set_context(self, request): self.context = self.get_context(request)
BSD 3-Clause New or Revised License
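A hedged sketch of how a Django view might consume the get_context method above. The view name, object lookup, and render helper arguments are illustrative assumptions and are not taken from the django-lfc sources.

# Hypothetical view-level usage of BaseContent.get_context; "page" lookup is assumed.
from django.template.loader import render_to_string

def render_page(request, page):
    context = page.get_context(request)  # RequestContext with images, children, portal
    return render_to_string(page.get_template().path, context_instance=context)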
datamllab/rlcard
rlcard/utils/logger.py
Logger.__init__
python
def __init__(self, log_dir):
    self.log_dir = log_dir
Initialize the labels, legend and paths of the plot and log file. Args: log_dir (str): The path to the log files
https://github.com/datamllab/rlcard/blob/f256657dd13039bd707e6d95ea3c31795b573c76/rlcard/utils/logger.py#L8-L14
import os
import csv


class Logger(object):
MIT License
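A minimal usage sketch of the Logger constructor above; the directory path is an illustrative assumption.

logger = Logger(log_dir='./experiments/run_1')
print(logger.log_dir)  # './experiments/run_1'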
hirofumi0810/tensorflow_end2end_speech_recognition
models/recurrent/layers/bn_basic_lstm.py
BatchNormBasicLSTMCell.__call__
python
def __call__(self, inputs, state, scope=None):
    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
        raise ValueError(
            "Could not infer input size from inputs.get_shape()[-1]")
    with tf.variable_scope(scope or "batch_norm_lstm_cell", reuse=self._reuse):
        if self._state_is_tuple:
            c_prev, h_prev = state
        else:
            c_prev, h_prev = tf.split(
                value=state, num_or_size_splits=2, axis=1)

        W_xh = tf.get_variable('W_xh',
                               shape=[input_size, 4 * self._num_units],
                               initializer=orthogonal_initializer())
        W_hh = tf.get_variable('W_hh',
                               shape=[self._num_units, 4 * self._num_units],
                               initializer=orthogonal_initializer())
        bias = tf.get_variable('b', [4 * self._num_units])

        xh = tf.matmul(inputs, W_xh)
        hh = tf.matmul(h_prev, W_hh)

        bn_xh = batch_norm(xh, 'xh', self._is_training)
        bn_hh = batch_norm(hh, 'hh', self._is_training)

        lstm_matrix = tf.nn.bias_add(tf.add(bn_xh, bn_hh), bias)
        i, g, f, o = tf.split(
            value=lstm_matrix, num_or_size_splits=4, axis=1)

        c = (c_prev * tf.sigmoid(f + self._forget_bias) +
             tf.sigmoid(i) * tf.tanh(g))
        bn_c = batch_norm(c, 'bn_c', self._is_training)
        h = tf.tanh(bn_c) * tf.sigmoid(o)

        if self._state_is_tuple:
            new_state = LSTMStateTuple(c, h)
        else:
            new_state = tf.concat(values=[c, h], axis=1)

        return h, new_state
Long short-term memory cell (LSTM) with Recurrent Batch Normalization.
https://github.com/hirofumi0810/tensorflow_end2end_speech_recognition/blob/65b9728089d5e92b25b92384a67419d970399a64/models/recurrent/layers/bn_basic_lstm.py#L58-L103
import tensorflow as tf from tensorflow.contrib.rnn import RNNCell, LSTMStateTuple from tensorflow.python.platform import tf_logging as logging from .batch_normalization import batch_norm from .initializer import orthogonal_initializer class BatchNormBasicLSTMCell(RNNCell): def __init__(self, num_units, is_training, forget_bias=1.0, input_size=None, state_is_tuple=True, reuse=None): if not state_is_tuple: logging.warn("%s: Using a concatenated state is slower and will soon be " "deprecated. Use state_is_tuple=True.", self) if input_size is not None: logging.warn("%s: The input_size parameter is deprecated.", self) self._num_units = num_units self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple self._reuse = reuse self._is_training = is_training @property def state_size(self): return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) @property def output_size(self): return self._num_units
MIT License
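A hedged TensorFlow 1.x-style sketch of wiring the cell above into a dynamic RNN; batch size, sequence length, feature width, and unit count are assumptions for illustration.

# Assumed shapes: [batch, time, features]; hyperparameters are illustrative only.
inputs = tf.placeholder(tf.float32, shape=[None, 100, 40])
cell = BatchNormBasicLSTMCell(num_units=128, is_training=True)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)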
richardfan1126/nitro-enclave-python-demo
attestation_verifier/server/NsmUtil.py
NSMUtil.decrypt
python
def decrypt(self, ciphertext):
    cipher = PKCS1_OAEP.new(self._rsa_key)
    plaintext = cipher.decrypt(ciphertext)
    return plaintext.decode()
Decrypt ciphertext using private key
https://github.com/richardfan1126/nitro-enclave-python-demo/blob/6c1aedcb07bb47fa3f24a180fc33ad15ae487366/attestation_verifier/server/NsmUtil.py#L46-L53
import base64

import Crypto
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP

import libnsm


class NSMUtil():

    def __init__(self):
        self._nsm_fd = libnsm.nsm_lib_init()
        self.nsm_rand_func = lambda num_bytes: libnsm.nsm_get_random(
            self._nsm_fd, num_bytes
        )
        self._monkey_patch_crypto(self.nsm_rand_func)
        self._rsa_key = RSA.generate(2048)
        self._public_key = self._rsa_key.publickey().export_key('DER')

    def get_attestation_doc(self):
        libnsm_att_doc_cose_signed = libnsm.nsm_get_attestation_doc(
            self._nsm_fd,
            self._public_key,
            len(self._public_key)
        )
        return libnsm_att_doc_cose_signed
Apache License 2.0
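A hedged round-trip sketch for the decrypt method above, using PyCryptodome to encrypt against the enclave's DER-encoded public key. It can only run inside a Nitro Enclave because NSMUtil.__init__ talks to the NSM device; the plaintext is an illustrative assumption.

# Client side would normally extract the public key from the attestation document;
# here we read it straight off the instance for illustration.
nsm_util = NSMUtil()
public_key = RSA.import_key(nsm_util._public_key)
ciphertext = PKCS1_OAEP.new(public_key).encrypt(b"secret")
assert nsm_util.decrypt(ciphertext) == "secret"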
transientskp/tkp
tkp/accessors/fitsimage.py
FitsImage.parse_frequency
python
def parse_frequency(self):
    freq_eff = None
    freq_bw = None
    try:
        header = self.header
        if 'RESTFRQ' in header:
            freq_eff = header['RESTFRQ']
            if 'RESTBW' in header:
                freq_bw = header['RESTBW']
            else:
                logger.warning("bandwidth header missing in image {},"
                               " setting to 1 MHz".format(self.url))
                freq_bw = 1e6
        elif ('CTYPE3' in header) and (header['CTYPE3'] in ('FREQ', 'VOPT')):
            freq_eff = header['CRVAL3']
            freq_bw = header['CDELT3']
        elif ('CTYPE4' in header) and (header['CTYPE4'] in ('FREQ', 'VOPT')):
            freq_eff = header['CRVAL4']
            freq_bw = header['CDELT4']
        else:
            freq_eff = header['RESTFREQ']
            freq_bw = 1e6
    except KeyError:
        msg = "Frequency not specified in headers for {}".format(self.url)
        logger.error(msg)
        raise TypeError(msg)

    return freq_eff, freq_bw
Set some 'shortcut' variables for access to the frequency parameters in the FITS file header. :return: (freq_eff, freq_bw) tuple read from the header
https://github.com/transientskp/tkp/blob/c4d6c3c59d51c083509316ba0e25dd8e732ee23b/tkp/accessors/fitsimage.py#L114-L148
import numpy import pytz import datetime import dateutil.parser import logging import re import astropy.io.fits as pyfits from tkp.accessors.dataaccessor import DataAccessor from tkp.utility.coordinates import WCS logger = logging.getLogger(__name__) class FitsImage(DataAccessor): def __init__(self, url, plane=None, beam=None, hdu_index=0): super(FitsImage, self).__init__() self.url = url self.header = self._get_header(hdu_index) self.wcs = self.parse_coordinates() self.data = self.read_data(hdu_index, plane) self.taustart_ts, self.tau_time = self.parse_times() self.freq_eff, self.freq_bw = self.parse_frequency() self.pixelsize = self.parse_pixelsize() if beam: (bmaj, bmin, bpa) = beam else: (bmaj, bmin, bpa) = self.parse_beam() self.beam = self.degrees2pixels( bmaj, bmin, bpa, self.pixelsize[0], self.pixelsize[1] ) self.centre_ra, self.centre_decl = self.calculate_phase_centre() if 'TELESCOP' in self.header: self.telescope = self.header['TELESCOP'] def _get_header(self, hdu_index): with pyfits.open(self.url) as hdulist: hdu = hdulist[hdu_index] return hdu.header.copy() def read_data(self, hdu_index, plane): with pyfits.open(self.url) as hdulist: hdu = hdulist[hdu_index] data = numpy.float64(hdu.data.squeeze()) if plane is not None and len(data.shape) > 2: data = data[plane].squeeze() n_dim = len(data.shape) if n_dim != 2: logger.warn("Loaded datacube with %s dimensions, assuming Stokes I and taking plane 0" % n_dim) data = data[0, :, :] data = data.transpose() return data def parse_coordinates(self): header = self.header wcs = WCS() try: wcs.crval = header['crval1'], header['crval2'] wcs.crpix = header['crpix1'] - 1, header['crpix2'] - 1 wcs.cdelt = header['cdelt1'], header['cdelt2'] except KeyError: msg = "Coordinate system not specified in FITS" logger.error(msg) raise TypeError(msg) try: wcs.ctype = header['ctype1'], header['ctype2'] except KeyError: wcs.ctype = 'unknown', 'unknown' try: wcs.crota = float(header['crota1']), float(header['crota2']) except KeyError: wcs.crota = 0., 0. try: wcs.cunit = header['cunit1'], header['cunit2'] except KeyError: msg = "WCS units unknown; using degrees" logger.warning(msg) wcs.cunit = 'deg', 'deg' return wcs def calculate_phase_centre(self): x, y = self.data.shape centre_ra, centre_decl = self.wcs.p2s((x / 2, y / 2)) return float(centre_ra), float(centre_decl)
BSD 2-Clause Simplified License
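A minimal usage sketch for the accessor above; the FITS file path is an illustrative assumption.

image = FitsImage('observation.fits')
print(image.freq_eff, image.freq_bw)  # set by parse_frequency() during __init__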
unofficial-memsource/memsource-cli-client
memsource_cli/models/page_dto_job_part_reference_v2.py
PageDtoJobPartReferenceV2.page_number
python
def page_number(self):
    return self._page_number
Gets the page_number of this PageDtoJobPartReferenceV2. # noqa: E501 :return: The page_number of this PageDtoJobPartReferenceV2. # noqa: E501 :rtype: int
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/page_dto_job_part_reference_v2.py#L141-L148
import pprint import re import six from memsource_cli.models.job_part_reference_v2 import JobPartReferenceV2 class PageDtoJobPartReferenceV2(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'total_elements': 'int', 'total_pages': 'int', 'page_size': 'int', 'page_number': 'int', 'number_of_elements': 'int', 'content': 'list[JobPartReferenceV2]' } attribute_map = { 'total_elements': 'totalElements', 'total_pages': 'totalPages', 'page_size': 'pageSize', 'page_number': 'pageNumber', 'number_of_elements': 'numberOfElements', 'content': 'content' } def __init__(self, total_elements=None, total_pages=None, page_size=None, page_number=None, number_of_elements=None, content=None): self._total_elements = None self._total_pages = None self._page_size = None self._page_number = None self._number_of_elements = None self._content = None self.discriminator = None if total_elements is not None: self.total_elements = total_elements if total_pages is not None: self.total_pages = total_pages if page_size is not None: self.page_size = page_size if page_number is not None: self.page_number = page_number if number_of_elements is not None: self.number_of_elements = number_of_elements if content is not None: self.content = content @property def total_elements(self): return self._total_elements @total_elements.setter def total_elements(self, total_elements): self._total_elements = total_elements @property def total_pages(self): return self._total_pages @total_pages.setter def total_pages(self, total_pages): self._total_pages = total_pages @property def page_size(self): return self._page_size @page_size.setter def page_size(self, page_size): self._page_size = page_size @property
Apache License 2.0
paperdashboard/shadowsocks
gnupg/_util.py
_make_random_string
python
def _make_random_string(length):
    chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
    return ''.join(random.choice(chars) for x in range(length))
Returns a random string of lowercase letters, uppercase letters, and digits. :param int length: The length in bytes of the string to generate.
https://github.com/paperdashboard/shadowsocks/blob/52585b5132ddd25d2c0e35234474e8365889dca0/gnupg/_util.py#L592-L598
from __future__ import absolute_import from datetime import datetime from socket import gethostname from time import localtime from time import mktime import codecs import encodings import os import threading import random import re import string import sys _STREAMLIKE_TYPES = [] try: import io from io import StringIO from io import BytesIO except ImportError: from cStringIO import StringIO else: _STREAMLIKE_TYPES.append(io.IOBase) if 2 == sys.version_info[0]: from StringIO import StringIO as _StringIO_StringIO _STREAMLIKE_TYPES.append(_StringIO_StringIO) import cStringIO as _cStringIO _STREAMLIKE_TYPES.append(_cStringIO.InputType) _STREAMLIKE_TYPES.append(_cStringIO.OutputType) _STREAMLIKE_TYPES.append(file) from . import _logger try: unicode _py3k = False try: isinstance(__name__, basestring) except NameError: msg = "Sorry, python-gnupg requires a Python version with proper" msg += " unicode support. Please upgrade to Python>=2.6." raise SystemExit(msg) except NameError: _py3k = True _running_windows = False if "win" in sys.platform: _running_windows = True _here = os.path.join(os.getcwd(), 'gnupg') _test = os.path.join(os.path.join(_here, 'test'), 'tmp') _user = os.environ.get('HOME') if not _user: _user = '/tmp/python-gnupg' try: os.makedirs(_user) except (OSError, IOError): _user = os.getcwd() _user = os.path.sep.join([_user, 'gnupghome']) _ugpg = os.path.join(_user, '.gnupg') _conf = os.path.join(os.path.join(_user, '.config'), 'python-gnupg') log = _logger.create_logger(0) _VERSION_STRING_REGEX = re.compile('(\d)*(\.)*(\d)*(\.)*(\d)*') def find_encodings(enc=None, system=False): if not enc: enc = 'utf-8' if system: if getattr(sys.stdin, 'encoding', None) is None: enc = sys.stdin.encoding log.debug("Obtained encoding from stdin: %s" % enc) else: enc = 'ascii' enc = enc.lower() codec_alias = encodings.normalize_encoding(enc) codecs.register(encodings.search_function) coder = codecs.lookup(codec_alias) return coder if _py3k: def b(x): coder = find_encodings() if isinstance(x, bytes): return coder.encode(x.decode(coder.name))[0] else: return coder.encode(x)[0] def s(x): if isinstance(x, str): return x elif isinstance(x, (bytes, bytearray)): return x.decode(find_encodings().name) else: raise NotImplemented else: def b(x): return x def s(x): if isinstance(x, basestring): return x elif isinstance(x, (bytes, bytearray)): return x.decode(find_encodings().name) else: raise NotImplemented def binary(data): coder = find_encodings() if _py3k and isinstance(data, bytes): encoded = coder.encode(data.decode(coder.name))[0] elif _py3k and isinstance(data, str): encoded = coder.encode(data)[0] elif not _py3k and type(data) is not str: encoded = coder.encode(data)[0] else: encoded = data return encoded def author_info(name, contact=None, public_key=None): return Storage(name=name, contact=contact, public_key=public_key) def _copy_data(instream, outstream): sent = 0 while True: if ((_py3k and isinstance(instream, str)) or (not _py3k and isinstance(instream, basestring))): data = instream[:1024] instream = instream[1024:] else: data = instream.read(1024) if len(data) == 0: break sent += len(data) encoded = binary(data) log.debug("Sending %d bytes of data..." 
% sent) log.debug("Encoded data (type %s):\n%s" % (type(encoded), encoded)) if not _py3k: try: outstream.write(encoded) except IOError as ioe: if 'Broken pipe' in str(ioe): log.error('Error sending data: Broken pipe') else: log.exception(ioe) break else: log.debug("Wrote data type <type 'str'> to outstream.") else: try: outstream.write(bytes(encoded)) except TypeError as te: if not "convert 'bytes' object to str implicitly" in str(te): log.error(str(te)) try: outstream.write(encoded.decode()) except TypeError as yate: if not "does not support the buffer interface" in str(yate): log.error(str(yate)) except IOError as ioe: if 'Broken pipe' in str(ioe): log.error('Error sending data: Broken pipe') else: log.exception(ioe) break else: log.debug("Wrote data type <class 'str'> outstream.") except IOError as ioe: if 'Broken pipe' in str(ioe): log.error('Error sending data: Broken pipe') else: log.exception(ioe) break else: log.debug("Wrote data type <class 'bytes'> to outstream.") try: outstream.close() except IOError as ioe: log.error("Unable to close outstream %s:\r\t%s" % (outstream, ioe)) else: log.debug("Closed outstream: %d bytes sent." % sent) def _create_if_necessary(directory): if not os.path.isabs(directory): log.debug("Got non-absolute path: %s" % directory) directory = os.path.abspath(directory) if not os.path.isdir(directory): log.info("Creating directory: %s" % directory) try: os.makedirs(directory, 0x1C0) except OSError as ose: log.error(ose, exc_info=1) return False else: log.debug("Created directory.") return True def create_uid_email(username=None, hostname=None): if hostname: hostname = hostname.replace(' ', '_') if not username: try: username = os.environ['LOGNAME'] except KeyError: username = os.environ['USERNAME'] if not hostname: hostname = gethostname() uid = "%s@%s" % (username.replace(' ', '_'), hostname) else: username = username.replace(' ', '_') if (not hostname) and (username.find('@') == 0): uid = "%s@%s" % (username, gethostname()) elif hostname: uid = "%s@%s" % (username, hostname) else: uid = username return uid def _deprefix(line, prefix, callback=None): try: assert line.upper().startswith(u''.join(prefix).upper()) except AssertionError: log.debug("Line doesn't start with prefix '%s':\n%s" % (prefix, line)) return line else: newline = line[len(prefix):] if callback is not None: try: callback(newline) except Exception as exc: log.exception(exc) return newline def _find_binary(binary=None): found = None if binary is not None: if os.path.isabs(binary) and os.path.isfile(binary): return binary if not os.path.isabs(binary): try: found = _which(binary) log.debug("Found potential binary paths: %s" % '\n'.join([path for path in found])) found = found[0] except IndexError as ie: log.info("Could not determine absolute path of binary: '%s'" % binary) elif os.access(binary, os.X_OK): found = binary if found is None: try: found = _which('gpg', abspath_only=True, disallow_symlinks=True)[0] except IndexError as ie: log.error("Could not find binary for 'gpg'.") try: found = _which('gpg2')[0] except IndexError as ie: log.error("Could not find binary for 'gpg2'.") if found is None: raise RuntimeError("GnuPG is not installed!") return found def _has_readwrite(path): return os.access(path, os.R_OK ^ os.W_OK) def _is_file(filename): try: statinfo = os.lstat(filename) log.debug("lstat(%r) with type=%s gave us %r" % (repr(filename), type(filename), repr(statinfo))) if not (statinfo.st_size > 0): raise ValueError("'%s' appears to be an empty file!" 
% filename) except OSError as oserr: log.error(oserr) if filename == '-': log.debug("Got '-' for filename, assuming sys.stdin...") return True except (ValueError, TypeError, IOError) as err: log.error(err) else: return True return False def _is_stream(input): return isinstance(input, tuple(_STREAMLIKE_TYPES)) def _is_string(thing): if (_py3k and isinstance(thing, str)): return True if (not _py3k and isinstance(thing, basestring)): return True return False def _is_bytes(thing): if isinstance(thing, (bytes, bytearray)): return True return False def _is_list_or_tuple(instance): return isinstance(instance, (list, tuple,)) def _is_gpg1(version): (major, minor, micro) = _match_version_string(version) if major == 1: return True return False def _is_gpg2(version): (major, minor, micro) = _match_version_string(version) if major == 2: return True return False def _make_binary_stream(thing, encoding=None, armor=True): if _py3k: if isinstance(thing, str): thing = thing.encode(encoding) else: if type(thing) is not str: thing = thing.encode(encoding) try: rv = BytesIO(thing) except NameError: rv = StringIO(thing) return rv def _make_passphrase(length=None, save=False, file=None): if not length: length = 40 passphrase = _make_random_string(length) if save: ruid, euid, suid = os.getresuid() gid = os.getgid() now = mktime(localtime()) if not file: filename = str('passphrase-%s-%s' % uid, now) file = os.path.join(_repo, filename) with open(file, 'a') as fh: fh.write(passphrase) fh.flush() fh.close() os.chmod(file, stat.S_IRUSR | stat.S_IWUSR) os.chown(file, ruid, gid) log.warn("Generated passphrase saved to %s" % file) return passphrase
Apache License 2.0
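A trivial usage sketch for the helper above: a 16-character token drawn from [a-zA-Z0-9].

token = _make_random_string(16)
assert len(token) == 16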
haltakov/simple-photo-gallery
simplegallery/media.py
rotate_image_by_orientation
python
def rotate_image_by_orientation(image):
    try:
        exif = image._getexif()

        if exif and EXIF_TAG_MAP["Orientation"] in exif:
            orientation = exif[EXIF_TAG_MAP["Orientation"]]

            if orientation == 3:
                rotation_angle = 180
            elif orientation == 6:
                rotation_angle = 270
            elif orientation == 8:
                rotation_angle = 90
            else:
                rotation_angle = 0

            if rotation_angle != 0:
                return image.rotate(rotation_angle, expand=True)
    except:
        pass

    return image
Rotates an image according to its Orientation EXIF tag :param image: Image :return: Rotated image
https://github.com/haltakov/simple-photo-gallery/blob/d47299072644b2e9d319d1e0bc52b020fa06c42f/simplegallery/media.py#L14-L40
import os
import cv2
import requests
from io import BytesIO
from PIL import Image, ExifTags
from datetime import datetime

import simplegallery.common as spg_common

EXIF_TAG_MAP = {ExifTags.TAGS[tag]: tag for tag in ExifTags.TAGS}
MIT License
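A hedged usage sketch with Pillow for the rotation helper above; the file paths are illustrative assumptions.

from PIL import Image

with Image.open('photo.jpg') as img:
    upright = rotate_image_by_orientation(img)
    upright.save('photo_upright.jpg')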
sonyai/pandemicsimulator
python/pandemic_simulator/environment/interfaces/person.py
Person.assigned_locations
python
def assigned_locations(self) -> Sequence[LocationID]:
Property that returns a sequence of location ids that the person is assigned to. :return: A collection of LocationIDs
https://github.com/sonyai/pandemicsimulator/blob/505c8786bb554e7de97e283ed386f2ea884f166e/python/pandemic_simulator/environment/interfaces/person.py#L95-L100
from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Optional, Sequence, List, Tuple from .contact_tracer import ContactTracer from .ids import PersonID, LocationID from .infection_model import IndividualInfectionState, Risk from .pandemic_testing_result import PandemicTestResult from .pandemic_types import NoOP from .regulation import PandemicRegulation from .sim_time import SimTime __all__ = ['Person', 'PersonState'] @dataclass class PersonState: current_location: LocationID risk: Risk infection_state: Optional[IndividualInfectionState] = None infection_spread_multiplier: float = 1. quarantine: bool = field(init=False, default=False) quarantine_if_contact_positive: bool = field(init=False, default=False) quarantine_if_household_quarantined: bool = field(init=False, default=False) sick_at_home: bool = field(init=False, default=False) avoid_gathering_size: int = field(init=False, default=-1) test_result: PandemicTestResult = field(init=False, default=PandemicTestResult.UNTESTED) avoid_location_types: List[type] = field(default_factory=list, init=False) not_infection_probability: float = field(default=1., init=False) not_infection_probability_history: List[Tuple[LocationID, float]] = field(default_factory=list, init=False) class Person(ABC): @abstractmethod def step(self, sim_time: SimTime, contact_tracer: Optional[ContactTracer] = None) -> Optional[NoOP]: pass @abstractmethod def receive_regulation(self, regulation: PandemicRegulation) -> None: pass @abstractmethod def enter_location(self, location_id: LocationID) -> bool: pass @property @abstractmethod def id(self) -> PersonID: pass @property @abstractmethod def home(self) -> LocationID: pass @property @abstractmethod
Apache License 2.0
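Because assigned_locations is abstract, a concrete Person subclass must provide it. The sketch below is an assumption-laden illustration; the class name and attribute names are not taken from the PandemicSimulator sources.

# Hypothetical concrete implementation of the abstract property.
class Worker(Person):
    ...
    @property
    def assigned_locations(self) -> Sequence[LocationID]:
        return (self._home, self._work_location)  # assumed attributes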
unofficial-memsource/memsource-cli-client
memsource_cli/models/xml_settings_dto.py
XmlSettingsDto.exclude_attributes_plain
python
def exclude_attributes_plain(self, exclude_attributes_plain):
    self._exclude_attributes_plain = exclude_attributes_plain
Sets the exclude_attributes_plain of this XmlSettingsDto. Example: \"lang,href\" # noqa: E501 :param exclude_attributes_plain: The exclude_attributes_plain of this XmlSettingsDto. # noqa: E501 :type: str
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/xml_settings_dto.py#L285-L294
import pprint import re import six class XmlSettingsDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'rules_format': 'str', 'include_elements_plain': 'str', 'exclude_elements_plain': 'str', 'include_attributes_plain': 'str', 'exclude_attributes_plain': 'str', 'inline_elements_non_translatable_plain': 'str', 'inline_elements_plain': 'str', 'inline_elements_auto_plain': 'bool', 'html_subfilter_elements_plain': 'str', 'entities': 'bool', 'lock_elements_plain': 'str', 'lock_attributes_plain': 'str', 'include_x_path': 'str', 'inline_elements_xpath': 'str', 'inline_elements_non_translatable_x_path': 'str', 'inline_elements_auto_x_path': 'bool', 'html_subfilter_elements_xpath': 'str', 'lock_x_path': 'str', 'segmentation': 'bool', 'tag_regexp': 'str', 'context_note_xpath': 'str', 'max_len_x_path': 'str', 'preserve_whitespace_x_path': 'str', 'preserve_char_entities': 'str', 'context_key_x_path': 'str', 'xsl_url': 'str' } attribute_map = { 'rules_format': 'rulesFormat', 'include_elements_plain': 'includeElementsPlain', 'exclude_elements_plain': 'excludeElementsPlain', 'include_attributes_plain': 'includeAttributesPlain', 'exclude_attributes_plain': 'excludeAttributesPlain', 'inline_elements_non_translatable_plain': 'inlineElementsNonTranslatablePlain', 'inline_elements_plain': 'inlineElementsPlain', 'inline_elements_auto_plain': 'inlineElementsAutoPlain', 'html_subfilter_elements_plain': 'htmlSubfilterElementsPlain', 'entities': 'entities', 'lock_elements_plain': 'lockElementsPlain', 'lock_attributes_plain': 'lockAttributesPlain', 'include_x_path': 'includeXPath', 'inline_elements_xpath': 'inlineElementsXpath', 'inline_elements_non_translatable_x_path': 'inlineElementsNonTranslatableXPath', 'inline_elements_auto_x_path': 'inlineElementsAutoXPath', 'html_subfilter_elements_xpath': 'htmlSubfilterElementsXpath', 'lock_x_path': 'lockXPath', 'segmentation': 'segmentation', 'tag_regexp': 'tagRegexp', 'context_note_xpath': 'contextNoteXpath', 'max_len_x_path': 'maxLenXPath', 'preserve_whitespace_x_path': 'preserveWhitespaceXPath', 'preserve_char_entities': 'preserveCharEntities', 'context_key_x_path': 'contextKeyXPath', 'xsl_url': 'xslUrl' } def __init__(self, rules_format=None, include_elements_plain=None, exclude_elements_plain=None, include_attributes_plain=None, exclude_attributes_plain=None, inline_elements_non_translatable_plain=None, inline_elements_plain=None, inline_elements_auto_plain=None, html_subfilter_elements_plain=None, entities=None, lock_elements_plain=None, lock_attributes_plain=None, include_x_path=None, inline_elements_xpath=None, inline_elements_non_translatable_x_path=None, inline_elements_auto_x_path=None, html_subfilter_elements_xpath=None, lock_x_path=None, segmentation=None, tag_regexp=None, context_note_xpath=None, max_len_x_path=None, preserve_whitespace_x_path=None, preserve_char_entities=None, context_key_x_path=None, xsl_url=None): self._rules_format = None self._include_elements_plain = None self._exclude_elements_plain = None self._include_attributes_plain = None self._exclude_attributes_plain = None self._inline_elements_non_translatable_plain = None self._inline_elements_plain = None self._inline_elements_auto_plain = None self._html_subfilter_elements_plain = None self._entities = None self._lock_elements_plain = None self._lock_attributes_plain = None self._include_x_path = None 
self._inline_elements_xpath = None self._inline_elements_non_translatable_x_path = None self._inline_elements_auto_x_path = None self._html_subfilter_elements_xpath = None self._lock_x_path = None self._segmentation = None self._tag_regexp = None self._context_note_xpath = None self._max_len_x_path = None self._preserve_whitespace_x_path = None self._preserve_char_entities = None self._context_key_x_path = None self._xsl_url = None self.discriminator = None if rules_format is not None: self.rules_format = rules_format if include_elements_plain is not None: self.include_elements_plain = include_elements_plain if exclude_elements_plain is not None: self.exclude_elements_plain = exclude_elements_plain if include_attributes_plain is not None: self.include_attributes_plain = include_attributes_plain if exclude_attributes_plain is not None: self.exclude_attributes_plain = exclude_attributes_plain if inline_elements_non_translatable_plain is not None: self.inline_elements_non_translatable_plain = inline_elements_non_translatable_plain if inline_elements_plain is not None: self.inline_elements_plain = inline_elements_plain if inline_elements_auto_plain is not None: self.inline_elements_auto_plain = inline_elements_auto_plain if html_subfilter_elements_plain is not None: self.html_subfilter_elements_plain = html_subfilter_elements_plain if entities is not None: self.entities = entities if lock_elements_plain is not None: self.lock_elements_plain = lock_elements_plain if lock_attributes_plain is not None: self.lock_attributes_plain = lock_attributes_plain if include_x_path is not None: self.include_x_path = include_x_path if inline_elements_xpath is not None: self.inline_elements_xpath = inline_elements_xpath if inline_elements_non_translatable_x_path is not None: self.inline_elements_non_translatable_x_path = inline_elements_non_translatable_x_path if inline_elements_auto_x_path is not None: self.inline_elements_auto_x_path = inline_elements_auto_x_path if html_subfilter_elements_xpath is not None: self.html_subfilter_elements_xpath = html_subfilter_elements_xpath if lock_x_path is not None: self.lock_x_path = lock_x_path if segmentation is not None: self.segmentation = segmentation if tag_regexp is not None: self.tag_regexp = tag_regexp if context_note_xpath is not None: self.context_note_xpath = context_note_xpath if max_len_x_path is not None: self.max_len_x_path = max_len_x_path if preserve_whitespace_x_path is not None: self.preserve_whitespace_x_path = preserve_whitespace_x_path if preserve_char_entities is not None: self.preserve_char_entities = preserve_char_entities if context_key_x_path is not None: self.context_key_x_path = context_key_x_path if xsl_url is not None: self.xsl_url = xsl_url @property def rules_format(self): return self._rules_format @rules_format.setter def rules_format(self, rules_format): allowed_values = ["PLAIN", "XPATH"] if rules_format not in allowed_values: raise ValueError( "Invalid value for `rules_format` ({0}), must be one of {1}" .format(rules_format, allowed_values) ) self._rules_format = rules_format @property def include_elements_plain(self): return self._include_elements_plain @include_elements_plain.setter def include_elements_plain(self, include_elements_plain): self._include_elements_plain = include_elements_plain @property def exclude_elements_plain(self): return self._exclude_elements_plain @exclude_elements_plain.setter def exclude_elements_plain(self, exclude_elements_plain): self._exclude_elements_plain = exclude_elements_plain @property def 
include_attributes_plain(self): return self._include_attributes_plain @include_attributes_plain.setter def include_attributes_plain(self, include_attributes_plain): self._include_attributes_plain = include_attributes_plain @property def exclude_attributes_plain(self): return self._exclude_attributes_plain @exclude_attributes_plain.setter
Apache License 2.0
atsushisakai/pyoptsamples
NonlinearOptimization/NewtonMethod/NewtonMethod.py
Jacob
python
def Jacob(state):
    x = state[0]
    y = state[1]
    dx = 4 * x ** 3 + 4 * x * y - 44 * x + 2 * x + 2 * y ** 2 - 14
    dy = 2 * x ** 2 + 4 * x * y + 4 * y ** 3 - 26 * y - 22
    J = [dx, dy]
    return J
u""" jacobi matrix of Himmelblau's function
https://github.com/atsushisakai/pyoptsamples/blob/c498e8978c64496be2dfe3c6a3b328b33da4b2c1/NonlinearOptimization/NewtonMethod/NewtonMethod.py#L27-L36
import matplotlib.pyplot as plt
import numpy as np
import random

delta = 0.1
minXY = -5.0
maxXY = 5.0
nContour = 50
alpha = 0.01


def Hessian(state):
    x = state[0]
    y = state[1]
    dxx = 12 * x ** 2 + 4 * y - 42
    dxy = 4 * x + 4 * y
    dyy = 4 * x + 12 * y ** 2 - 26
    H = np.array([[dxx, dxy], [dxy, dyy]])
    return H
MIT License
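A quick sanity check on the gradient above: (3.0, 2.0) is a known minimum of Himmelblau's function, so Jacob should return a zero gradient there.

print(Jacob([3.0, 2.0]))  # -> [0.0, 0.0]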
terra-project/jigu
jigu/core/sdk/dec.py
Dec.__init__
python
def __init__(self, arg):
    if isinstance(arg, float):
        arg = str("%f" % arg)
    if isinstance(arg, str):
        parts = PATTERN.match(arg)
        if parts is None:
            raise ValueError(f"Unable to parse Dec object from string: {arg}")
        self.i = int(parts.group(2)) * SDK_DEC_UNIT
        if parts.group(3):
            fraction = (
                parts.group(3).lstrip(".")[0:SDK_DEC_PREC].ljust(SDK_DEC_PREC, "0")
            )
            self.i += int(fraction)
        if parts.group(1) is not None:
            self.i *= -1
    elif isinstance(arg, int):
        self.i = arg * SDK_DEC_UNIT
    elif isinstance(arg, Dec):
        self.i = arg.i
    elif isinstance(arg, Decimal):
        whole = int(arg)
        fraction = arg % 1
        self.i = (whole * SDK_DEC_UNIT) + (fraction * SDK_DEC_UNIT)
    else:
        raise TypeError(f"Unable to create Dec object from given argument {arg}")
    self.i = int(self.i)
BigInt-based decimal representation supporting basic arithmetic with compatible Python numeric types (int, float, Decimal). Does not handle NaN, Infinity, +0, -0, etc. Serializes as a string with 18 decimal places of precision.
https://github.com/terra-project/jigu/blob/98c580b3f72754bf7924488337305806ad14f08b/jigu/core/sdk/dec.py#L21-L52
from __future__ import annotations

import re
from decimal import Decimal

from jigu.util.serdes import JsonDeserializable, JsonSerializable
from jigu.util.validation import Schemas as S

SDK_DEC_PREC = 18
SDK_DEC_UNIT = 10 ** SDK_DEC_PREC
SDK_DEC_REGEX_PATTERN = r"^(\-)?(\d+)(\.\d+)?\Z"
PATTERN = re.compile(SDK_DEC_REGEX_PATTERN)

__all__ = ["SDK_DEC_PREC", "Dec"]


class Dec(JsonSerializable, JsonDeserializable):

    __schema__ = S.STRING_WITH_PATTERN(SDK_DEC_REGEX_PATTERN)
MIT License
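A construction sketch for the Dec constructor above: every accepted input form ends up as an 18-decimal fixed-point integer stored in the .i attribute.

print(Dec("1.5").i)         # 1500000000000000000
print(Dec(2).i)             # 2000000000000000000
print(Dec("-0.000001").i)   # -1000000000000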
nestauk/nesta
nesta/core/orms/orm_utils.py
is_null
python
def is_null(x):
    try:
        return bool(pd.isnull(x))
    except ValueError:
        return False
Wrapper around pandas isnull with an exception for iterables, which cause a ValueError on conversion to boolean.
https://github.com/nestauk/nesta/blob/f0abf0b19a4b0c6c9799b3afe0bd67310122b705/nesta/core/orms/orm_utils.py#L531-L537
from configparser import ConfigParser from contextlib import contextmanager from sqlalchemy import create_engine, insert from sqlalchemy import exists as sql_exists from sqlalchemy.exc import OperationalError from sqlalchemy.engine.url import URL from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import class_mapper from sqlalchemy.sql.expression import and_, or_ from nesta.core.luigihacks.misctools import find_filepath_from_pathstub from nesta.core.luigihacks.misctools import get_config, load_yaml_from_pathstub from nesta.packages.misc_utils.batches import split_batches from elasticsearch import Elasticsearch from elasticsearch.helpers import scan from datetime import datetime from py2neo.database import Graph import pandas as pd import importlib import re import pymysql import os import json import logging import time from collections import defaultdict from collections.abc import Mapping def _get_key_value(obj, key): value = getattr(obj, key) if isinstance(value, datetime): value = value.isoformat() return (key, value) def orm_column_names(_class): mapper = class_mapper(_class) columns = {column.key for column in mapper.columns} return columns def object_to_dict(obj, shallow=False, properties=True, found=None): if found is None: found = set() _class = obj.__class__ mapper = class_mapper(_class) columns = [column.key for column in mapper.columns] out = dict(map(lambda c: _get_key_value(obj, c), columns)) if properties: property_names = [name for name in dir(_class) if type(getattr(_class, name)) is property] for name in property_names: out[name] = getattr(obj, name) relationships = {} if shallow else mapper.relationships for name, relation in relationships.items(): if relation in found: continue found.add(relation) related_obj = getattr(obj, name) if related_obj is None: continue if relation.uselist: out[name] = [object_to_dict(child, found=found) for child in related_obj] else: out[name] = object_to_dict(related_obj, found=found) return out def assert_correct_config(test, config, key): err_msg = ("In test mode='{test}', config key '{key}' " "must end with '{suffix}'") if test and not key.endswith("_dev"): raise ValueError(err_msg.format(test=test, key=key, suffix='_dev')) elif not test and not key.endswith("_prod"): raise ValueError(err_msg.format(test=test, key=key, suffix='_prod')) index = config['index'] if test and not index.endswith("_dev"): raise ValueError(f"In test mode the index '{key}' " "must end with '_dev'") def default_to_regular(d): if isinstance(d, defaultdict): d = {k: default_to_regular(v) for k, v in d.items()} return d def parse_es_config(increment_version): raw_config = load_yaml_from_pathstub('config', 'elasticsearch.yaml') config = defaultdict(lambda: defaultdict(dict)) for endpoint, endpoint_config in raw_config['endpoints'].items(): indexes = endpoint_config.pop('indexes') base_config = raw_config['defaults'].copy() base_config.update(endpoint_config) scheme = base_config.pop('scheme') _id = base_config.pop('id') rgn = base_config['region'] base_config['host'] = f'{scheme}://search-{endpoint}-{_id}.{rgn}.es.amazonaws.com' for dataset, version in indexes.items(): prod_idx = f'{dataset}_v' + str(version + increment_version) dev_idx = f'{dataset}_dev' + ('0' if increment_version else '') config[endpoint][dataset][True] = {'index': prod_idx, **base_config} config[endpoint][dataset][False] = {'index': dev_idx, **base_config} return default_to_regular(config) def setup_es(endpoint, dataset, production, drop_and_recreate=False, increment_version=False): 
es_master_config = parse_es_config(increment_version) es_config = es_master_config[endpoint][dataset][production] es = Elasticsearch(es_config['host'], port=es_config['port'], use_ssl=True, send_get_body_as='POST') index = es_config['index'] exists = es.indices.exists(index=index) if drop_and_recreate and (not production) and exists: es.indices.delete(index=index) exists = False if not exists: mapping = get_es_mapping(dataset, endpoint) es.indices.create(index=index, body=mapping) return es, es_config def get_es_ids(es, es_config, size=1000, query={}): query["_source"] = False scanner = scan(es, query=query, index=es_config['index'], doc_type=es_config['type'], size=size) return {s['_id'] for s in scanner} def load_json_from_pathstub(pathstub, filename, sort_on_load=True): _path = find_filepath_from_pathstub(pathstub) _path = os.path.join(_path, filename) with open(_path) as f: js = json.load(f) if sort_on_load: _js = json.dumps(js, sort_keys=True) js = json.loads(_js) return js def update_nested(original_dict, update_dict): for k, v in update_dict.items(): nested_dict = v if isinstance(v, Mapping): nested_dict = original_dict.get(k, {}) if nested_dict is not None: nested_dict = update_nested(nested_dict, v) original_dict[k] = nested_dict return original_dict def _get_es_mapping(dataset, endpoint): mapping = {} for _path, _prefix in [('defaults', 'defaults'), ('datasets', f'{dataset}_mapping'), (f'endpoints/{endpoint}', f'{dataset}_mapping')]: try: _mapping = load_json_from_pathstub(f"mappings/{_path}", f"{_prefix}.json") except json.JSONDecodeError as exc: raise ValueError(f'Could not decode "mappings/{_path}/{_prefix}.json"') from exc except FileNotFoundError: continue update_nested(mapping, _mapping) return mapping def _apply_alias(mapping, dataset, endpoint): ep_path = f"mappings/endpoints/{endpoint}" try: alias_lookup = load_json_from_pathstub(ep_path, "aliases.json") except FileNotFoundError: return try: config = load_yaml_from_pathstub(ep_path, "config.yaml") hard_alias = config['hard-alias'] except (FileNotFoundError, KeyError): hard_alias = False propts = mapping["mappings"]["_doc"]["properties"] _fields = set() for alias, lookup in alias_lookup.items(): if dataset not in lookup: continue field = lookup[dataset] propts[alias] = (propts[field] if hard_alias else {"type": "alias", "path": field}) _fields.add(field) if hard_alias: for f in _fields: propts.pop(f) def _prune_nested(mapping): for k in list(mapping.keys()): v = mapping[k] if isinstance(v, Mapping): _prune_nested(v) elif v is None: mapping.pop(k) def get_es_mapping(dataset, endpoint): mapping = _get_es_mapping(dataset, endpoint) _apply_alias(mapping, dataset, endpoint) _prune_nested(mapping) return mapping def cast_as_sql_python_type(field, data): _data = field.type.python_type(data) if field.type.python_type is str: n = field.type.length if field.type.length < len(_data) else None _data = _data[:n] return _data def get_session(db_env, section, database, Base): engine = get_mysql_engine(db_env, section, database) Session = try_until_allowed(sessionmaker, engine) session = try_until_allowed(Session) try_until_allowed(Base.metadata.create_all, session.get_bind()) return session def filter_out_duplicates(db_env, section, database, Base, _class, data, low_memory=False): session = get_session(db_env, section, database, Base) return _filter_out_duplicates(session, Base, _class, data, low_memory) return results def get_all_pks(session, _class): pkey_cols = _class.__table__.primary_key.columns fields = [getattr(_class, pkey.name) 
for pkey in pkey_cols] all_pks = set(session.query(*fields).all()) return all_pks def has_auto_pkey(_class): pkey_cols = _class.__table__.primary_key.columns is_auto_pkey = any(p.autoincrement and p.type.python_type is int for p in pkey_cols) return is_auto_pkey def generate_pk(row, _class): pkey_cols = _class.__table__.primary_key.columns pk = tuple([cast_as_sql_python_type(pkey, row[pkey.name]) for pkey in pkey_cols]) return pk def _filter_out_duplicates(session, Base, _class, data, low_memory=False): objs = [] existing_objs = [] failed_objs = [] pkey_cols = _class.__table__.primary_key.columns is_auto_pkey = has_auto_pkey(_class) all_pks = (get_all_pks(session, _class) if low_memory and not is_auto_pkey else set()) for irow, row in enumerate(data): if not is_auto_pkey and not all(pkey.name in row for pkey in pkey_cols): logging.warning(f"{row} does not contain any of {pkey_cols}" f"{[pkey.name in row for pkey in pkey_cols]}") failed_objs.append(row) continue if not is_auto_pkey: pk = generate_pk(row, _class) if pk in all_pks and not is_auto_pkey: existing_objs.append(row) continue all_pks.add(pk) if not is_auto_pkey and not low_memory and session.query(exists(_class, **row)).scalar(): existing_objs.append(row) continue objs.append(row) session.close() return objs, existing_objs, failed_objs def retrieve_row_by_pk(session, pk, _class): q = session.query(_class) pkey_cols = _class.__table__.primary_key.columns for pk_col, pk_value in zip(pkey_cols, pk): q = q.filter(pk_col == pk_value) obj = q.one() _row = object_to_dict(obj) return _row def create_delete_stmt(_class, pks): pkey_cols = _class.__table__.primary_key.columns is_composite = len(pkey_cols) > 1 if is_composite: all_rows_stmt = [] for col_values in pks: this_row_stmt = [] for col, value in zip(pkey_cols, col_values): key = getattr(_class, col.name) this_row_stmt.append(key == value) all_rows_stmt.append(and_(*this_row_stmt)) all_rows_stmt = or_(*all_rows_stmt) else: col, = pkey_cols key = getattr(_class, col.name) all_rows_stmt = key.in_(tuple(pk for pk, in pks)) delete_stmt = _class.__table__.delete().where(all_rows_stmt) return delete_stmt
MIT License
hunch/hunch-gift-app
django/contrib/admindocs/utils.py
parse_rst
python
def parse_rst(text, default_reference_context, thing_being_parsed=None): overrides = { 'doctitle_xform' : True, 'inital_header_level' : 3, "default_reference_context" : default_reference_context, "link_base" : reverse('django-admindocs-docroot').rstrip('/') } if thing_being_parsed: thing_being_parsed = smart_str("<%s>" % thing_being_parsed) parts = docutils.core.publish_parts(text, source_path=thing_being_parsed, destination_path=None, writer_name='html', settings_overrides=overrides) return mark_safe(parts['fragment'])
Convert the string from reST to an XHTML fragment.
https://github.com/hunch/hunch-gift-app/blob/8c7cad24cc0d9900deb4175e6b768c64a3d7adcf/django/contrib/admindocs/utils.py#L57-L72
import re from email.Parser import HeaderParser from email.Errors import HeaderParseError from django.utils.safestring import mark_safe from django.core.urlresolvers import reverse from django.utils.encoding import smart_str try: import docutils.core import docutils.nodes import docutils.parsers.rst.roles except ImportError: docutils_is_available = False else: docutils_is_available = True def trim_docstring(docstring): if not docstring or not docstring.strip(): return '' lines = docstring.expandtabs().splitlines() indent = min([len(line) - len(line.lstrip()) for line in lines if line.lstrip()]) trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]] return "\n".join(trimmed).strip() def parse_docstring(docstring): docstring = trim_docstring(docstring) parts = re.split(r'\n{2,}', docstring) title = parts[0] if len(parts) == 1: body = '' metadata = {} else: parser = HeaderParser() try: metadata = parser.parsestr(parts[-1]) except HeaderParseError: metadata = {} body = "\n\n".join(parts[1:]) else: metadata = dict(metadata.items()) if metadata: body = "\n\n".join(parts[1:-1]) else: body = "\n\n".join(parts[1:]) return title, body, metadata
MIT License
ethereum/trinity
tests/conftest.py
ipc_server
python
async def ipc_server( monkeypatch, event_bus, jsonrpc_ipc_pipe_path, event_loop, chain_with_block_validation): trinity_config = TrinityConfig(app_identifier="eth1", network_id=1) rpc = RPCServer( initialize_eth1_modules(chain_with_block_validation, event_bus, trinity_config), chain_with_block_validation, event_bus, ) ipc_server = IPCServer(rpc, jsonrpc_ipc_pipe_path) async with background_asyncio_service(ipc_server): yield ipc_server
This fixture runs a single RPC server over IPC over the course of all tests. It yields the IPC server only for monkeypatching purposes
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/tests/conftest.py#L278-L297
import asyncio import contextlib import logging import os from pathlib import Path import tempfile import uuid import pytest from async_service import background_asyncio_service from lahja import AsyncioEndpoint from eth_utils import ( decode_hex, to_canonical_address, to_wei, ) from eth_keys import keys from eth.consensus.applier import ConsensusApplier from eth.consensus.noproof import NoProofConsensus from eth import constants as eth_constants from eth.chains.base import ( Chain, MiningChain ) from eth.db.atomic import AtomicDB from eth.vm.forks.spurious_dragon import SpuriousDragonVM from lahja import ( ConnectionConfig, ) from trinity.config import ( Eth1AppConfig, TrinityConfig, ) from trinity.constants import ( NETWORKING_EVENTBUS_ENDPOINT, ) from trinity.chains.coro import ( AsyncChainMixin, ) from trinity.initialization import ( ensure_eth1_dirs, initialize_data_dir, ) from trinity.rpc.main import ( RPCServer, ) from trinity.rpc.modules import ( initialize_eth1_modules, ) from trinity.rpc.ipc import ( IPCServer, ) from trinity._utils.xdg import ( get_xdg_trinity_root, ) from trinity._utils.filesystem import ( is_under_path, ) def pytest_addoption(parser): parser.addoption("--enode", type=str, required=False) parser.addoption("--integration", action="store_true", default=False) parser.addoption("--silence_async_service", action="store_true", default=False) parser.addoption("--fork", type=str, required=False) class TestAsyncChain(Chain, AsyncChainMixin): pass @pytest.fixture(scope='session', autouse=True) def silence_loggers(request): if request.config.getoption("--silence_async_service"): logging.getLogger("async_service").setLevel(logging.INFO) @pytest.fixture(autouse=True) def xdg_trinity_root(monkeypatch, tmpdir): with tempfile.TemporaryDirectory() as tmp_dir: xdg_root_dir = Path(tmp_dir) / 'trinity' monkeypatch.setenv('XDG_TRINITY_ROOT', str(xdg_root_dir)) assert not is_under_path(os.path.expandvars('$HOME'), get_xdg_trinity_root()) yield xdg_root_dir @pytest.fixture(scope='session') def event_loop(): loop = asyncio.new_event_loop() try: yield loop finally: loop.close() @contextlib.asynccontextmanager async def make_networking_event_bus(): ipc_path = Path(f"networking-{uuid.uuid4()}.ipc") networking_connection_config = ConnectionConfig( name=NETWORKING_EVENTBUS_ENDPOINT, path=ipc_path ) async with AsyncioEndpoint.serve(networking_connection_config) as endpoint: yield endpoint @pytest.fixture async def event_bus(): async with make_networking_event_bus() as endpoint: yield endpoint @pytest.fixture async def other_event_bus(): async with make_networking_event_bus() as endpoint: yield endpoint @pytest.fixture(scope='session') def jsonrpc_ipc_pipe_path(): with tempfile.TemporaryDirectory() as temp_dir: yield Path(temp_dir) / '{0}.ipc'.format(uuid.uuid4()) @pytest.fixture def trinity_config(): _trinity_config = TrinityConfig(network_id=1) initialize_data_dir(_trinity_config) return _trinity_config @pytest.fixture def eth1_app_config(trinity_config): eth1_app_config = Eth1AppConfig(trinity_config, None) ensure_eth1_dirs(eth1_app_config) return eth1_app_config @pytest.fixture def base_db(): return AtomicDB() @pytest.fixture def funded_address_private_key(): return keys.PrivateKey( decode_hex('0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8') ) @pytest.fixture def funded_address(funded_address_private_key): return funded_address_private_key.public_key.to_canonical_address() @pytest.fixture def funded_address_initial_balance(): return to_wei(1000, 'ether') def 
_chain_with_block_validation(base_db, genesis_state, chain_cls=Chain): genesis_params = { "bloom": 0, "coinbase": to_canonical_address("8888f1f195afa192cfee860698584c030f4c9db1"), "difficulty": 131072, "extra_data": b"B", "gas_limit": 3141592, "gas_used": 0, "mix_hash": decode_hex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), "nonce": decode_hex("0102030405060708"), "block_number": 0, "parent_hash": decode_hex("0000000000000000000000000000000000000000000000000000000000000000"), "receipt_root": decode_hex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), "timestamp": 1422494849, "transaction_root": decode_hex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), "uncles_hash": decode_hex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") } klass = chain_cls.configure( __name__='TestChain', vm_configuration=( (eth_constants.GENESIS_BLOCK_NUMBER, SpuriousDragonVM), ), chain_id=1337, ) chain = klass.from_genesis(base_db, genesis_params, genesis_state) return chain @pytest.fixture def chain_with_block_validation(base_db, genesis_state): return _chain_with_block_validation(base_db, genesis_state, TestAsyncChain) def import_block_without_validation(chain, block): return super(type(chain), chain).import_block(block, perform_validation=False) @pytest.fixture def base_genesis_state(funded_address, funded_address_initial_balance): return { funded_address: { 'balance': funded_address_initial_balance, 'nonce': 0, 'code': b'', 'storage': {}, } } @pytest.fixture def genesis_state(base_genesis_state): return base_genesis_state @pytest.fixture def chain_without_block_validation( base_db, genesis_state): klass = MiningChain.configure( __name__='TestChainWithoutBlockValidation', vm_configuration=ConsensusApplier(NoProofConsensus).amend_vm_configuration( ( (eth_constants.GENESIS_BLOCK_NUMBER, SpuriousDragonVM), ) ), chain_id=1337, ) genesis_params = { 'block_number': eth_constants.GENESIS_BLOCK_NUMBER, 'difficulty': eth_constants.GENESIS_DIFFICULTY, 'gas_limit': 3141592, 'parent_hash': eth_constants.GENESIS_PARENT_HASH, 'coinbase': eth_constants.GENESIS_COINBASE, 'nonce': eth_constants.GENESIS_NONCE, 'mix_hash': eth_constants.GENESIS_MIX_HASH, 'extra_data': eth_constants.GENESIS_EXTRA_DATA, 'timestamp': 1501851927, } chain = klass.from_genesis(base_db, genesis_params, genesis_state) return chain @pytest.mark.asyncio @pytest.fixture
MIT License
rbw/redap
redap/services/base.py
ResponseHandler.props_to_str
python
def props_to_str(self, entry): formatted = {} for field_name, value in entry.get_attributes_dict().items(): if field_name in self.hidden_fields: continue if len(value) == 1: formatted[field_name] = value[0] elif len(value) < 1: formatted[field_name] = None else: formatted[field_name] = value return formatted
Converts value array to string if count <= 1, skips hidden fields
https://github.com/rbw/redap/blob/34d6338c4d8fe330ce3a40d2a694456964f1496d/redap/services/base.py#L24-L40
from flask import current_app as app from ldap3 import MODIFY_REPLACE from redap.core import ldap from redap.exceptions import RedapError ACTIVE_DIRECTORY = 'ad' FREEIPA = 'freeipa' ADD = 'ADD' REPLACE = MODIFY_REPLACE class ResponseHandler(object): def __init__(self, response, hidden_fields, raise_on_empty=False): self.entries = response.all() self.hidden_fields = hidden_fields self.count = len(self.entries) if self.count == 0 and raise_on_empty: raise RedapError(message='No such object', status_code=404)
MIT License
slimakoi/amino.py
amino/client.py
Client.leave_chat
python
async def leave_chat(self, chatId: str): async with self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status
Leave an Chat. **Parameters** - **chatId** : ID of the Chat. **Returns** - **Success** : 200 (int) - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
https://github.com/slimakoi/amino.py/blob/56ef11394b40e9abe873cb537777247d2cc89551/amino/client.py#L647-L661
import json import base64 import aiohttp import asyncio import threading from uuid import uuid4 from time import timezone, sleep from typing import BinaryIO, Union from time import time as timestamp from locale import getdefaultlocale as locale from .lib.util import exceptions, headers, device, objects, helpers from .socket import Callbacks, SocketHandler device = device.DeviceGenerator() class Client(Callbacks, SocketHandler): def __init__(self, deviceId: str = None, socketDebugging = False): self.api = "https://service.narvii.com/api/v1" self.authenticated = False self.configured = False self.user_agent = device.user_agent if deviceId is not None: self.device_id = deviceId else: self.device_id = device.device_id SocketHandler.__init__(self, self, debug=socketDebugging) Callbacks.__init__(self, self) self.json = None self.sid = None self.userId = None self.account: objects.UserProfile = objects.UserProfile(None) self.profile: objects.UserProfile = objects.UserProfile(None) self.session = aiohttp.ClientSession() def __del__(self): try: loop = asyncio.get_event_loop() loop.create_task(self._close_session()) except RuntimeError: loop = asyncio.new_event_loop() loop.run_until_complete(self._close_session()) async def _close_session(self): if not self.session.closed: await self.session.close() def parse_headers(self, data = None): if not data: return headers.Headers(data=data, deviceId=self.device_id).headers else: return headers.Headers(deviceId=self.device_id).headers async def join_voice_chat(self, comId: str, chatId: str, joinType: int = 1): data = { "o": { "ndcId": int(comId), "threadId": chatId, "joinRole": joinType, "id": "2154531" }, "t": 112 } data = json.dumps(data) await self.send(data) async def join_video_chat(self, comId: str, chatId: str, joinType: int = 1): data = { "o": { "ndcId": int(comId), "threadId": chatId, "joinRole": joinType, "channelType": 5, "id": "2154531" }, "t": 108 } data = json.dumps(data) await self.send(data) async def join_video_chat_as_viewer(self, comId: str, chatId: str): data = { "o": { "ndcId": int(comId), "threadId": chatId, "joinRole": 2, "id": "72446" }, "t": 112 } data = json.dumps(data) await self.send(data) async def run_vc(self, comId: str, chatId: str, joinType: str): while self.active: data = { "o": { "ndcId": comId, "threadId": chatId, "joinRole": joinType, "id": "2154531" }, "t": 112 } data = json.dumps(data) await self.send(data) sleep(1) async def start_vc(self, comId: str, chatId: str, joinType: int = 1): data = { "o": { "ndcId": comId, "threadId": chatId, "joinRole": joinType, "id": "2154531" }, "t": 112 } data = json.dumps(data) await self.send(data) data = { "o": { "ndcId": comId, "threadId": chatId, "channelType": 1, "id": "2154531" }, "t": 108 } data = json.dumps(data) await self.send(data) self.active = True threading.Thread(target=self.run_vc, args=[comId, chatId, joinType]) async def end_vc(self, comId: str, chatId: str, joinType: int = 2): self.active = False data = { "o": { "ndcId": comId, "threadId": chatId, "joinRole": joinType, "id": "2154531" }, "t": 112 } data = json.dumps(data) await self.send(data) async def login_sid(self, SID: str): uId = helpers.sid_to_uid(SID) self.authenticated = True self.sid = SID self.userId = uId self.account: objects.UserProfile = await self.get_user_info(uId) self.profile: objects.UserProfile = await self.get_user_info(uId) headers.sid = self.sid await self.startup() async def login(self, email: str, password: str): data = json.dumps({ "email": email, "v": 2, "secret": f"0 {password}", 
"deviceID": self.device_id, "clientType": 100, "action": "normal", "timestamp": int(timestamp() * 1000) }) async with self.session.post(f"{self.api}/g/s/auth/login", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: self.authenticated = True self.json = json.loads(await response.text()) self.sid = self.json["sid"] self.userId = self.json["account"]["uid"] self.account: objects.UserProfile = objects.UserProfile(self.json["account"]).UserProfile self.profile: objects.UserProfile = objects.UserProfile(self.json["userProfile"]).UserProfile headers.sid = self.sid await self.startup() return response.status async def register(self, nickname: str, email: str, password: str, verificationCode: str, deviceId: str = device.device_id): data = json.dumps({ "secret": f"0 {password}", "deviceID": deviceId, "email": email, "clientType": 100, "nickname": nickname, "latitude": 0, "longitude": 0, "address": None, "clientCallbackURL": "narviiapp://relogin", "validationContext": { "data": { "code": verificationCode }, "type": 1, "identity": email }, "type": 1, "identity": email, "timestamp": int(timestamp() * 1000) }) async with self.session.post(f"{self.api}/g/s/auth/register", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status async def restore(self, email: str, password: str): data = json.dumps({ "secret": f"0 {password}", "deviceID": device.device_id, "email": email, "timestamp": int(timestamp() * 1000) }) async with self.session.post(f"{self.api}/g/s/account/delete-request/cancel", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status async def logout(self): data = json.dumps({ "deviceID": self.device_id, "clientType": 100, "timestamp": int(timestamp() * 1000) }) async with self.session.post(f"{self.api}/g/s/auth/logout", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: self.authenticated = False self.json = None self.sid = None self.userId = None self.account: None self.profile: None headers.sid = None await self.close() await self.session.close() return response.status async def configure(self, age: int, gender: str): if gender.lower() == "male": gender = 1 elif gender.lower() == "female": gender = 2 elif gender.lower() == "non-binary": gender = 255 else: raise exceptions.SpecifyType() if age <= 12: raise exceptions.AgeTooLow() data = json.dumps({ "age": age, "gender": gender, "timestamp": int(timestamp() * 1000) }) async with self.session.post(f"{self.api}/g/s/persona/profile/basic", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status async def verify(self, email: str, code: str): data = json.dumps({ "validationContext": { "type": 1, "identity": email, "data": {"code": code}}, "deviceID": device.device_id, "timestamp": int(timestamp() * 1000) }) async with self.session.post(f"{self.api}/g/s/auth/check-security-validation", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) 
else: return response.status async def request_verify_code(self, email: str, resetPassword: bool = False): data = { "identity": email, "type": 1, "deviceID": device.device_id } if resetPassword is True: data["level"] = 2 data["purpose"] = "reset-password" data = json.dumps(data) async with self.session.post(f"{self.api}/g/s/auth/request-security-validation", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status async def activate_account(self, email: str, code: str): data = json.dumps({ "type": 1, "identity": email, "data": {"code": code}, "deviceID": device.device_id }) async with self.session.post(f"{self.api}/g/s/auth/activate-email", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status async def delete_account(self, password: str): data = json.dumps({ "deviceID": device.device_id, "secret": f"0 {password}" }) async with self.session.post(f"{self.api}/g/s/account/delete-request", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status async def change_password(self, email: str, password: str, code: str): data = json.dumps({ "updateSecret": f"0 {password}", "emailValidationContext": { "data": { "code": code }, "type": 1, "identity": email, "level": 2, "deviceID": device.device_id }, "phoneNumberValidationContext": None, "deviceID": device.device_id }) async with self.session.post(f"{self.api}/g/s/auth/reset-password", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status async def check_device(self, deviceId: str): data = json.dumps({ "deviceID": deviceId, "bundleID": "com.narvii.amino.master", "clientType": 100, "timezone": -timezone // 1000, "systemPushEnabled": True, "locale": locale()[0], "timestamp": int(timestamp() * 1000) }) async with self.session.post(f"{self.api}/g/s/device", headers=self.parse_headers(data=data), data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status async def get_account_info(self): async with self.session.get(f"{self.api}/g/s/account", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return objects.UserProfile(json.loads(await response.text())["account"]).UserProfile async def upload_media(self, file: BinaryIO, fileType: str): if fileType == "audio": t = "audio/aac" elif fileType == "image": t = "image/jpg" else: raise exceptions.SpecifyType(fileType) data = file.read() async with self.session.post(f"{self.api}/g/s/media/upload", headers=headers.Headers(type=t, data=data, deviceId=self.device_id).headers, data=data) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return json.loads(await response.text())["mediaValue"] def handle_socket_message(self, data): return self.resolve(data) async def get_eventlog(self, language: str = "en"): async with self.session.get(f"{self.api}/g/s/eventlog/profile?language={language}", headers=self.parse_headers()) as response: if response.status != 200: 
return exceptions.CheckException(json.loads(await response.text())) else: return json.loads(await response.text()) async def sub_clients(self, start: int = 0, size: int = 25): if not self.authenticated: raise exceptions.NotLoggedIn() async with self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return objects.CommunityList(json.loads(await response.text())["communityList"]).CommunityList async def sub_clients_profile(self, start: int = 0, size: int = 25): if not self.authenticated: raise exceptions.NotLoggedIn() async with self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return json.loads(await response.text())["communityList"] async def get_user_info(self, userId: str): async with self.session.get(f"{self.api}/g/s/user-profile/{userId}", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return objects.UserProfile(json.loads(await response.text())["userProfile"]).UserProfile async def get_chat_threads(self, start: int = 0, size: int = 25): async with self.session.get(f"{self.api}/g/s/chat/thread?type=joined-me&start={start}&size={size}", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return objects.ThreadList(json.loads(await response.text())["threadList"]).ThreadList async def get_chat_thread(self, chatId: str): async with self.session.get(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return objects.Thread(json.loads(await response.text())["thread"]).Thread async def get_chat_users(self, chatId: str, start: int = 0, size: int = 25): async with self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return objects.UserProfileList(json.loads(await response.text())["memberList"]).UserProfileList async def join_chat(self, chatId: str): async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers()) as response: if response.status != 200: return exceptions.CheckException(json.loads(await response.text())) else: return response.status
MIT License
nastools/homeassistant
homeassistant/components/climate/eq3btsmart.py
EQ3BTSmartThermostat.set_temperature
python
def set_temperature(self, **kwargs): temperature = kwargs.get(ATTR_TEMPERATURE) if temperature is None: return self._thermostat.target_temperature = temperature
Set new target temperature.
https://github.com/nastools/homeassistant/blob/7ca1180bd42713f2d77bbc3f0b27b231ba8784aa/homeassistant/components/climate/eq3btsmart.py#L76-L81
import logging import voluptuous as vol from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA from homeassistant.const import ( CONF_MAC, TEMP_CELSIUS, CONF_DEVICES, ATTR_TEMPERATURE) from homeassistant.util.temperature import convert import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['bluepy_devices==0.2.0'] _LOGGER = logging.getLogger(__name__) ATTR_MODE = 'mode' ATTR_MODE_READABLE = 'mode_readable' DEVICE_SCHEMA = vol.Schema({ vol.Required(CONF_MAC): cv.string, }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_DEVICES): vol.Schema({cv.string: DEVICE_SCHEMA}), }) def setup_platform(hass, config, add_devices, discovery_info=None): devices = [] for name, device_cfg in config[CONF_DEVICES].items(): mac = device_cfg[CONF_MAC] devices.append(EQ3BTSmartThermostat(mac, name)) add_devices(devices) class EQ3BTSmartThermostat(ClimateDevice): def __init__(self, _mac, _name): from bluepy_devices.devices import eq3btsmart self._name = _name self._thermostat = eq3btsmart.EQ3BTSmartThermostat(_mac) @property def name(self): return self._name @property def temperature_unit(self): return TEMP_CELSIUS @property def current_temperature(self): return self.target_temperature @property def target_temperature(self): return self._thermostat.target_temperature
MIT License
bungnoid/gltools
tools/createAlongCurve.py
create
python
def create( curve, objType='locator', objCount=0, parent=False, useDistance=False, minPercent=0.0, maxPercent=1.0, spacing=1.0, prefix='', suffix='' ): if not glTools.utils.curve.isCurve(curve): raise Exception('Object "'+curve+'" is not a valid nurbs curve!') if not prefix: prefix = glTools.utils.stringUtils.stripSuffix(curve) if not suffix: suffix = objType if not objCount: objCount = mc.getAttr(curve+'.spans') + 1 paramList = glTools.utils.curve.sampleParam( curve = curve, samples = objCount, useDistance = useDistance, minPercent = minPercent, maxPercent = maxPercent, spacing = spacing ) objList = [] for i in range(objCount): ind = glTools.utils.stringUtils.alphaIndex(i) obj = createAtParam( curve, param = paramList[i], objType = objType, name = prefix+ind+'_'+suffix ) if parent and i: obj = mc.parent(obj,objList[-1])[0] objList.append(str(obj)) return objList
Create objects along a curve @param curve: Input nurbs curve curve @type curve: str @param objType: Type of objects to create and place along curve @type objType: str @param objCount: Number of objects to create along curve. If objCount=0, number of edit points will be used. @type objCount: int @param parent: Parent each new object to the previously created object. eg. Joint chain @type parent: bool @param useDistance: Use distance along curve instead of parametric length for sample distribution @type useDistance: bool @param minPercent: Percent along the curve to start placing objects @type minPercent: float @param maxPercent: Percent along the curve to stop placing objects @type maxPercent: float @param spacing: Incremental scale for each sample distance @type spacing: float @param prefix: Name prefix for builder created nodes. If left at default ("") prefix will be derived from curve name. @type prefix: str
https://github.com/bungnoid/gltools/blob/8ff0899de43784a18bd4543285655e68e28fb5e5/tools/createAlongCurve.py#L8-L77
import maya.cmds as mc import glTools.utils.base import glTools.utils.curve import glTools.utils.stringUtils import glTools.utils.transform
MIT License
hozn/stravalib
stravalib/client.py
Client.authorization_url
python
def authorization_url(self, client_id, redirect_uri, approval_prompt='auto', scope=None, state=None): return self.protocol.authorization_url(client_id=client_id, redirect_uri=redirect_uri, approval_prompt=approval_prompt, scope=scope, state=state)
Get the URL needed to authorize your application to access a Strava user's information. See https://developers.strava.com/docs/authentication/ :param client_id: The numeric developer client id. :type client_id: int :param redirect_uri: The URL that Strava will redirect to after successful (or failed) authorization. :type redirect_uri: str :param approval_prompt: Whether to prompt for approval even if approval already granted to app. Choices are 'auto' or 'force'. (Default is 'auto') :type approval_prompt: str :param scope: The access scope required. Omit to imply "read" and "activity:read" Valid values are 'read', 'read_all', 'profile:read_all', 'profile:write', 'profile:read_all', 'activity:read_all', 'activity:write'. :type scope: list[str] :param state: An arbitrary variable that will be returned to your application in the redirect URI. :type state: str :return: The URL to use for authorization link. :rtype: str
https://github.com/hozn/stravalib/blob/d3bc7b8094ce318e8dfd94bf1aeba3f421258412/stravalib/client.py#L82-L113
from __future__ import division, absolute_import, print_function, unicode_literals import logging import warnings import functools import time import collections import calendar from io import BytesIO from datetime import datetime, timedelta import arrow import pytz import six from units.quantity import Quantity from stravalib import model, exc from stravalib.protocol import ApiV3 from stravalib.util import limiter from stravalib import unithelper class Client(object): def __init__(self, access_token=None, rate_limit_requests=True, rate_limiter=None, requests_session=None): self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__)) if rate_limit_requests: if not rate_limiter: rate_limiter = limiter.DefaultRateLimiter() elif rate_limiter: raise ValueError("Cannot specify rate_limiter object when rate_limit_requests is False") self.protocol = ApiV3(access_token=access_token, requests_session=requests_session, rate_limiter=rate_limiter) @property def access_token(self): return self.protocol.access_token @access_token.setter def access_token(self, v): self.protocol.access_token = v
Apache License 2.0
awslabs/mxboard
python/mxboard/writer.py
FileWriter.reopen
python
def reopen(self): self._event_writer.reopen()
Reopens the EventFileWriter. Can be called after `close()` to add more events in the same directory. The events will go into a new events file. Does nothing if the EventFileWriter was not closed.
https://github.com/awslabs/mxboard/blob/432d4df2489ecf6dbb251d7f96f1ccadb368997a/python/mxboard/writer.py#L178-L184
from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import json import os import logging from .proto import event_pb2 from .proto import summary_pb2 from .event_file_writer import EventFileWriter from .summary import scalar_summary, histogram_summary, image_summary, audio_summary from .summary import text_summary, pr_curve_summary, _net2pb from .utils import _save_embedding_tsv, _make_sprite_image, _make_metadata_tsv from .utils import _add_embedding_config, _make_numpy_array, _get_embedding_dir from .utils import _is_2D_matrix class SummaryToEventTransformer(object): def __init__(self, event_writer): self._event_writer = event_writer self._seen_summary_tags = set() def add_summary(self, summary, global_step=None): if isinstance(summary, bytes): summ = summary_pb2.Summary() summ.ParseFromString(summary) summary = summ for value in summary.value: if not value.metadata: continue if value.tag in self._seen_summary_tags: value.ClearField("metadata") continue self._seen_summary_tags.add(value.tag) event = event_pb2.Event(summary=summary) self._add_event(event, global_step) def add_graph(self, graph): event = event_pb2.Event(graph_def=graph.SerializeToString()) self._add_event(event, None) def _add_event(self, event, step): event.wall_time = time.time() if step is not None: event.step = int(step) self._event_writer.add_event(event) class FileWriter(SummaryToEventTransformer): def __init__(self, logdir, max_queue=10, flush_secs=120, filename_suffix=None, verbose=True): event_writer = EventFileWriter(logdir, max_queue, flush_secs, filename_suffix, verbose) super(FileWriter, self).__init__(event_writer) def __enter__(self): return self def __exit__(self, unused_type, unused_value, unused_traceback): self.close() def get_logdir(self): return self._event_writer.get_logdir() def add_event(self, event): self._event_writer.add_event(event) def flush(self): self._event_writer.flush() def close(self): self._event_writer.close()
Apache License 2.0
numirias/qtile-plasma
plasma/node.py
Node.flip_with
python
def flip_with(self, node, reverse=False): container = Node() self.parent.replace_child(self, container) self.reset_size() for child in [node, self] if reverse else [self, node]: container.add_child(child)
Join with node in a new, orthogonal container.
https://github.com/numirias/qtile-plasma/blob/4b57f313ed6948212582de05205a3ae4372ed812/plasma/node.py#L522-L528
from collections import namedtuple import time from math import isclose import sys if sys.version_info >= (3, 6): from enum import Enum, Flag, auto else: from .enum import Enum, Flag, auto Point = namedtuple('Point', 'x y') Dimensions = namedtuple('Dimensions', 'x y width height') class Orient(Flag): HORIZONTAL = 0 VERTICAL = 1 HORIZONTAL, VERTICAL = Orient class Direction(Enum): UP = auto() DOWN = auto() LEFT = auto() RIGHT = auto() @property def orient(self): return HORIZONTAL if self in [self.LEFT, self.RIGHT] else VERTICAL @property def offset(self): return 1 if self in [self.RIGHT, self.DOWN] else -1 UP, DOWN, LEFT, RIGHT = Direction class AddMode(Flag): HORIZONTAL = 0 VERTICAL = 1 SPLIT = auto() @property def orient(self): return VERTICAL if self & self.VERTICAL else HORIZONTAL border_check = { UP: lambda a, b: isclose(a.y, b.y_end), DOWN: lambda a, b: isclose(a.y_end, b.y), LEFT: lambda a, b: isclose(a.x, b.x_end), RIGHT: lambda a, b: isclose(a.x_end, b.x), } class NotRestorableError(Exception): pass class Node: min_size_default = 100 root_orient = HORIZONTAL def __init__(self, payload=None, x=None, y=None, width=None, height=None): self.payload = payload self._x = x self._y = y self._width = width self._height = height self._size = None self.children = [] self.last_accessed = 0 self.parent = None self.restorables = {} def __repr__(self): info = self.payload or '' if self: info += ' +%d' % len(self) return '<Node %s %x>' % (info, id(self)) def __contains__(self, node): if node is self: return True for child in self: if node in child: return True return False def __iter__(self): yield from self.children def __getitem__(self, key): return self.children[key] def __setitem__(self, key, value): self.children[key] = value def __len__(self): return len(self.children) @property def root(self): try: return self.parent.root except AttributeError: return self @property def is_root(self): return self.parent is None @property def is_leaf(self): return not self @property def index(self): return self.parent.children.index(self) @property def tree(self): return [c.tree if c else c for c in self] @property def siblings(self): return [c for c in self.parent if c is not self] @property def first_leaf(self): if self.is_leaf: return self return self[0].first_leaf @property def last_leaf(self): if self.is_leaf: return self return self[-1].last_leaf @property def recent_leaf(self): if self.is_leaf: return self return max(self, key=lambda n: n.last_accessed).recent_leaf @property def prev_leaf(self): if self.is_root: return self.last_leaf idx = self.index - 1 if idx < 0: return self.parent.prev_leaf return self.parent[idx].last_leaf @property def next_leaf(self): if self.is_root: return self.first_leaf idx = self.index + 1 if idx >= len(self.parent): return self.parent.next_leaf return self.parent[idx].first_leaf @property def all_leafs(self): if self.is_leaf: yield self for child in self: yield from child.all_leafs @property def orient(self): if self.is_root: return self.root_orient return ~self.parent.orient @property def horizontal(self): return self.orient is HORIZONTAL @property def vertical(self): return self.orient is VERTICAL @property def x(self): if self.is_root: return self._x if self.horizontal: return self.parent.x return self.parent.x + self.size_offset @x.setter def x(self, val): if not self.is_root: return self._x = val @property def y(self): if self.is_root: return self._y if self.vertical: return self.parent.y return self.parent.y + self.size_offset @y.setter def y(self, val): if not 
self.is_root: return self._y = val @property def pos(self): return Point(self.x, self.y) @property def width(self): if self.is_root: return self._width if self.horizontal: return self.parent.width return self.size @width.setter def width(self, val): if self.is_root: self._width = val elif self.horizontal: self.parent.size = val else: self.size = val @property def height(self): if self.is_root: return self._height if self.vertical: return self.parent.height return self.size @height.setter def height(self, val): if self.is_root: self._height = val elif self.vertical: self.parent.size = val else: self.size = val @property def x_end(self): return self.x + self.width @property def y_end(self): return self.y + self.height @property def x_center(self): return self.x + self.width / 2 @property def y_center(self): return self.y + self.height / 2 @property def center(self): return Point(self.x_center, self.y_center) @property def top_left(self): return Point(self.x, self.y) @property def top_right(self): return Point(self.x + self.width, self.y) @property def bottom_left(self): return Point(self.x, self.y + self.height) @property def bottom_right(self): return Point(self.x + self.width, self.y + self.height) @property def pixel_perfect(self): x, y, width, height = self.x, self.y, self.width, self.height threshold = 0.99999 if (x - int(x)) + (width - int(width)) > threshold: width += 1 if (y - int(y)) + (height - int(height)) > threshold: height += 1 return Dimensions(*map(int, (x, y, width, height))) @property def capacity(self): return self.width if self.horizontal else self.height @property def size(self): if self.is_root: return None if self.fixed: return self._size if self.flexible: taken = sum(n.size for n in self.siblings if not n.flexible) flexibles = [n for n in self.parent if n.flexible] return (self.parent.capacity - taken) / len(flexibles) return max(sum(gc.min_size for gc in c) for c in self) @size.setter def size(self, val): if self.is_root or not self.siblings: return if val is None: self.reset_size() return occupied = sum(s.min_size_bound for s in self.siblings) val = max(min(val, self.parent.capacity - occupied), self.min_size_bound) self.force_size(val) def force_size(self, val): Node.fit_into(self.siblings, self.parent.capacity - val) if val == 0: return if self: Node.fit_into([self], val) self._size = val @property def size_offset(self): return sum(c.size for c in self.parent[:self.index]) @staticmethod def fit_into(nodes, space): if not nodes: return occupied = sum(n.min_size for n in nodes) if space >= occupied and any(n.flexible for n in nodes): return nodes_left = nodes[:] space_left = space if space < occupied: for node in nodes: if node.min_size_bound != node.min_size: continue space_left -= node.min_size nodes_left.remove(node) if not nodes_left: return factor = space_left / sum(n.size for n in nodes_left) for node in nodes_left: new_size = node.size * factor if node.fixed: node._size = new_size for child in node: Node.fit_into(child, new_size) @property def fixed(self): return self._size is not None @property def min_size(self): if self.fixed: return self._size if self.is_leaf: return self.min_size_default size = max(sum(gc.min_size for gc in c) for c in self) return max(size, self.min_size_default) @property def min_size_bound(self): if self.is_leaf: return self.min_size_default return max(sum(gc.min_size_bound for gc in c) or self.min_size_default for c in self) def reset_size(self): self._size = None @property def flexible(self): if self.fixed: return False return 
all((any(gc.flexible for gc in c) or c.is_leaf) for c in self) def access(self): self.last_accessed = time.time() try: self.parent.access() except AttributeError: pass def neighbor(self, direction): if self.is_root: return None if direction.orient is self.parent.orient: target_idx = self.index + direction.offset if 0 <= target_idx < len(self.parent): return self.parent[target_idx].recent_leaf if self.parent.is_root: return None return self.parent.parent.neighbor(direction) return self.parent.neighbor(direction) @property def up(self): return self.neighbor(UP) @property def down(self): return self.neighbor(DOWN) @property def left(self): return self.neighbor(LEFT) @property def right(self): return self.neighbor(RIGHT) def common_border(self, node, direction): if not border_check[direction](self, node): return False if direction in [UP, DOWN]: detached = node.x >= self.x_end or node.x_end <= self.x else: detached = node.y >= self.y_end or node.y_end <= self.y return not detached def close_neighbor(self, direction): nodes = [n for n in self.root.all_leafs if self.common_border(n, direction)] if not nodes: return None most_recent = max(nodes, key=lambda n: n.last_accessed) if most_recent.last_accessed > 0: return most_recent if direction in [UP, DOWN]: match = lambda n: n.x <= self.x_center <= n.x_end else: match = lambda n: n.y <= self.y_center <= n.y_end return next(n for n in nodes if match(n)) @property def close_up(self): return self.close_neighbor(UP) @property def close_down(self): return self.close_neighbor(DOWN) @property def close_left(self): return self.close_neighbor(LEFT) @property def close_right(self): return self.close_neighbor(RIGHT) def add_child(self, node, idx=None): if idx is None: idx = len(self) self.children.insert(idx, node) node.parent = self if len(self) == 1: return total = self.capacity Node.fit_into(node.siblings, total - (total / len(self))) def add_child_after(self, new, old): self.add_child(new, idx=old.index+1) def remove_child(self, node): node._save_restore_state() node.force_size(0) self.children.remove(node) if len(self) == 1: child = self[0] if self.is_root: child.reset_size() else: self.parent.replace_child(self, child) Node.fit_into(child, self.capacity) def remove(self): self.parent.remove_child(self) def replace_child(self, old, new): self[old.index] = new new.parent = self new._size = old._size
MIT License
yfauser/planespotter
app-server/app/lib/python2.7/site-packages/sqlalchemy/engine/base.py
Connection._root
python
def _root(self): if self.__branch_from: return self.__branch_from else: return self
return the 'root' connection. Returns 'self' if this connection is not a branch, else returns the root connection from which we ultimately branched.
https://github.com/yfauser/planespotter/blob/d400216502b6b5592a4889eb9fa277b2ddb75f9b/app-server/app/lib/python2.7/site-packages/sqlalchemy/engine/base.py#L140-L151
from __future__ import with_statement import sys from .. import exc, util, log, interfaces from ..sql import util as sql_util from ..sql import schema from .interfaces import Connectable, ExceptionContext from .util import _distill_params import contextlib class Connection(Connectable): schema_for_object = schema._schema_getter(None) def __init__(self, engine, connection=None, close_with_result=False, _branch_from=None, _execution_options=None, _dispatch=None, _has_events=None): self.engine = engine self.dialect = engine.dialect self.__branch_from = _branch_from self.__branch = _branch_from is not None if _branch_from: self.__connection = connection self._execution_options = _execution_options self._echo = _branch_from._echo self.should_close_with_result = False self.dispatch = _dispatch self._has_events = _branch_from._has_events self.schema_for_object = _branch_from.schema_for_object else: self.__connection = connection if connection is not None else engine.raw_connection() self.__transaction = None self.__savepoint_seq = 0 self.should_close_with_result = close_with_result self.__invalid = False self.__can_reconnect = True self._echo = self.engine._should_log_info() if _has_events is None: self.dispatch = self.dispatch._join(engine.dispatch) self._has_events = _has_events or ( _has_events is None and engine._has_events) assert not _execution_options self._execution_options = engine._execution_options if self._has_events or self.engine._has_events: self.dispatch.engine_connect(self, self.__branch) def _branch(self): if self.__branch_from: return self.__branch_from._branch() else: return self.engine._connection_cls( self.engine, self.__connection, _branch_from=self, _execution_options=self._execution_options, _has_events=self._has_events, _dispatch=self.dispatch) @property
MIT License
pysteps/pysteps
pysteps/utils/transformation.py
boxcox_transform
python
def boxcox_transform( R, metadata=None, Lambda=None, threshold=None, zerovalue=None, inverse=False ): R = R.copy() if metadata is None: if inverse: metadata = {"transform": "BoxCox"} else: metadata = {"transform": None} else: metadata = metadata.copy() if not inverse: if metadata["transform"] == "BoxCox": return R, metadata if Lambda is None: Lambda = metadata.get("BoxCox_lambda", 0.0) if threshold is None: threshold = metadata.get("threshold", 0.1) zeros = R < threshold if Lambda == 0.0: R[~zeros] = np.log(R[~zeros]) threshold = np.log(threshold) else: R[~zeros] = (R[~zeros] ** Lambda - 1) / Lambda threshold = (threshold ** Lambda - 1) / Lambda if zerovalue is None: zerovalue = threshold - 1 R[zeros] = zerovalue metadata["transform"] = "BoxCox" metadata["BoxCox_lambda"] = Lambda metadata["zerovalue"] = zerovalue metadata["threshold"] = threshold elif inverse: if metadata["transform"] not in ["BoxCox", "log"]: return R, metadata if Lambda is None: Lambda = metadata.pop("BoxCox_lambda", 0.0) if threshold is None: threshold = metadata.get("threshold", -10.0) if zerovalue is None: zerovalue = 0.0 if Lambda == 0.0: R = np.exp(R) threshold = np.exp(threshold) else: R = np.exp(np.log(Lambda * R + 1) / Lambda) threshold = np.exp(np.log(Lambda * threshold + 1) / Lambda) R[R < threshold] = zerovalue metadata["transform"] = None metadata["zerovalue"] = zerovalue metadata["threshold"] = threshold return R, metadata
The one-parameter Box-Cox transformation. The Box-Cox transform is a well-known power transformation introduced by Box and Cox (1964). In its one-parameter version, the Box-Cox transform takes the form T(x) = ln(x) for Lambda = 0, or T(x) = (x**Lambda - 1)/Lambda otherwise. Default parameters will produce a log transform (i.e. Lambda=0). Parameters ---------- R: array-like Array of any shape to be transformed. metadata: dict, optional Metadata dictionary containing the transform, zerovalue and threshold attributes as described in the documentation of :py:mod:`pysteps.io.importers`. Lambda: float, optional Parameter Lambda of the Box-Cox transformation. It is 0 by default, which produces the log transformation. Choose Lambda < 1 for positively skewed data, Lambda > 1 for negatively skewed data. threshold: float, optional The value that is used for thresholding with the same units as R. If None, the threshold contained in metadata is used. If no threshold is found in the metadata, a value of 0.1 is used as default. zerovalue: float, optional The value to be assigned to no rain pixels as defined by the threshold. It is equal to the threshold - 1 by default. inverse: bool, optional If set to True, it performs the inverse transform. False by default. Returns ------- R: array-like Array of any shape containing the (back-)transformed units. metadata: dict The metadata with updated attributes. References ---------- Box, G. E. and Cox, D. R. (1964), An Analysis of Transformations. Journal of the Royal Statistical Society: Series B (Methodological), 26: 211-243. doi:10.1111/j.2517-6161.1964.tb00553.x
https://github.com/pysteps/pysteps/blob/03f2c88453ef82adcd5b879b07320b441758a170/pysteps/utils/transformation.py#L27-L148
import numpy as np import scipy.stats as scipy_stats import warnings from scipy.interpolate import interp1d warnings.filterwarnings( "ignore", category=RuntimeWarning )
BSD 3-Clause New or Revised License
yfauser/planespotter
app-server/app/lib/python2.7/site-packages/sqlalchemy/ext/hybrid.py
hybrid_property.setter
python
def setter(self, fset): return self._copy(fset=fset)
Provide a modifying decorator that defines a setter method.
https://github.com/yfauser/planespotter/blob/d400216502b6b5592a4889eb9fa277b2ddb75f9b/app-server/app/lib/python2.7/site-packages/sqlalchemy/ext/hybrid.py#L934-L937
from .. import util from ..orm import attributes, interfaces HYBRID_METHOD = util.symbol('HYBRID_METHOD') HYBRID_PROPERTY = util.symbol('HYBRID_PROPERTY') class hybrid_method(interfaces.InspectionAttrInfo): is_attribute = True extension_type = HYBRID_METHOD def __init__(self, func, expr=None): self.func = func self.expression(expr or func) def __get__(self, instance, owner): if instance is None: return self.expr.__get__(owner, owner.__class__) else: return self.func.__get__(instance, owner) def expression(self, expr): self.expr = expr if not self.expr.__doc__: self.expr.__doc__ = self.func.__doc__ return self class hybrid_property(interfaces.InspectionAttrInfo): is_attribute = True extension_type = HYBRID_PROPERTY def __init__( self, fget, fset=None, fdel=None, expr=None, custom_comparator=None, update_expr=None): self.fget = fget self.fset = fset self.fdel = fdel self.expr = expr self.custom_comparator = custom_comparator self.update_expr = update_expr util.update_wrapper(self, fget) def __get__(self, instance, owner): if instance is None: return self._expr_comparator(owner) else: return self.fget(instance) def __set__(self, instance, value): if self.fset is None: raise AttributeError("can't set attribute") self.fset(instance, value) def __delete__(self, instance): if self.fdel is None: raise AttributeError("can't delete attribute") self.fdel(instance) def _copy(self, **kw): defaults = { key: value for key, value in self.__dict__.items() if not key.startswith("_")} defaults.update(**kw) return type(self)(**defaults) @property def overrides(self): return self def getter(self, fget): return self._copy(fget=fget)
MIT License
haowen-xu/tfsnippet
tfsnippet/utils/archive_file.py
normalize_archive_entry_name
python
def normalize_archive_entry_name(name): return name.replace('\\', '/')
Get the normalized name of an archive file entry. Args: name (str): Name of the archive file entry. Returns: str: The normalized name.
https://github.com/haowen-xu/tfsnippet/blob/63adaf04d2ffff8dec299623627d55d4bacac598/tfsnippet/utils/archive_file.py#L21-L30
import sys import tarfile import zipfile try: import rarfile except ImportError: rarfile = None __all__ = ['Extractor', 'TarExtractor', 'ZipExtractor', 'RarExtractor'] TAR_FILE_EXTENSIONS = ('.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tbz', '.tbz2', '.tb2') if sys.version_info[:2] >= (3, 3): TAR_FILE_EXTENSIONS = TAR_FILE_EXTENSIONS + ('.tar.xz', '.txz')
MIT License
rlworkgroup/garage
src/garage/tf/samplers/worker.py
TFWorkerWrapper.env
python
def env(self): return self._inner_worker.env
Environment: Worker's environment.
https://github.com/rlworkgroup/garage/blob/3a578852c392cecde5b7c9786aa182d74f6df1d4/src/garage/tf/samplers/worker.py#L79-L81
import tensorflow as tf from garage.sampler.worker import Worker class TFWorkerClassWrapper: def __init__(self, wrapped_class): self._wrapped_class = wrapped_class def __call__(self, *args, **kwargs): wrapper = TFWorkerWrapper() wrapper._inner_worker = self._wrapped_class(*args, **kwargs) return wrapper class TFWorkerWrapper(Worker): def __init__(self): self._inner_worker = None self._sess = None self._sess_entered = None self.worker_init() def worker_init(self): self._sess = tf.compat.v1.get_default_session() if not self._sess: self._sess = tf.compat.v1.Session() self._sess_entered = True self._sess.__enter__() def shutdown(self): self._inner_worker.shutdown() if tf.compat.v1.get_default_session() and self._sess_entered: self._sess_entered = False self._sess.__exit__(None, None, None) @property def agent(self): return self._inner_worker.agent @agent.setter def agent(self, agent): self._inner_worker.agent = agent @property
MIT License
quantopian/zipline
zipline/finance/position.py
Position.handle_split
python
def handle_split(self, asset, ratio): if self.asset != asset: raise Exception("updating split with the wrong asset!") raw_share_count = self.amount / float(ratio) full_share_count = np.floor(raw_share_count) fractional_share_count = raw_share_count - full_share_count new_cost_basis = round(self.cost_basis * ratio, 2) self.cost_basis = new_cost_basis self.amount = full_share_count return_cash = round(float(fractional_share_count * new_cost_basis), 2) log.info("after split: " + str(self)) log.info("returning cash: " + str(return_cash)) return return_cash
Update the position by the split ratio, and return the resulting fractional share that will be converted into cash. Returns the unused cash.
https://github.com/quantopian/zipline/blob/014f1fc339dc8b7671d29be2d85ce57d3daec343/zipline/finance/position.py#L91-L129
from __future__ import division from math import copysign import numpy as np import logbook from zipline.assets import Future import zipline.protocol as zp log = logbook.Logger('Performance') class Position(object): __slots__ = 'inner_position', 'protocol_position' def __init__(self, asset, amount=0, cost_basis=0.0, last_sale_price=0.0, last_sale_date=None): inner = zp.InnerPosition( asset=asset, amount=amount, cost_basis=cost_basis, last_sale_price=last_sale_price, last_sale_date=last_sale_date, ) object.__setattr__(self, 'inner_position', inner) object.__setattr__(self, 'protocol_position', zp.Position(inner)) def __getattr__(self, attr): return getattr(self.inner_position, attr) def __setattr__(self, attr, value): setattr(self.inner_position, attr, value) def earn_dividend(self, dividend): return { 'amount': self.amount * dividend.amount } def earn_stock_dividend(self, stock_dividend): return { 'payment_asset': stock_dividend.payment_asset, 'share_count': np.floor( self.amount * float(stock_dividend.ratio) ) }
Apache License 2.0
deepmind/fancyflags
fancyflags/_auto.py
auto
python
def auto(callable_fn: Callable[..., Any]) -> Mapping[str, _definitions.Item]: if not callable(callable_fn): raise TypeError(f"Not a callable: {callable_fn}.") ff_dict = {} if isinstance(callable_fn, type): signature = _get_typed_signature(callable_fn.__init__) unused_self, *parameters = signature.parameters.values() else: signature = _get_typed_signature(callable_fn) parameters = signature.parameters.values() for param in parameters: if param.annotation is inspect.Signature.empty: raise TypeError(_MISSING_TYPE_ANNOTATION.format(name=param.name)) try: ff_type = _TYPE_MAP[param.annotation] except KeyError: raise TypeError(_UNSUPPORTED_ARGUMENT_TYPE.format( name=param.name, annotation=param.annotation)) if param.default is inspect.Signature.empty: raise ValueError(_MISSING_DEFAULT_VALUE.format(name=param.name)) help_string = param.name ff_dict[param.name] = ff_type(param.default, help_string) return ff_dict
Automatically builds fancyflag definitions from a callable's signature.

Example usage:

```python
# Function
ff.DEFINE_dict('my_function_settings', **ff.auto(my_module.my_function))

# Class constructor
ff.DEFINE_dict('my_class_settings', **ff.auto(my_module.MyClass))
```

Args:
  callable_fn: Generates flag definitions from this callable's signature.
    All arguments must have type annotations and default values. The
    following argument types are supported:

      * `bool`, `float`, `int`, or `str` scalars
      * Homogeneous sequences of these types
      * Optional scalars or sequences of these types

Returns:
  Mapping from parameter names to fancyflags `Item`s, to be splatted into
  `ff.DEFINE_dict`.

Raises:
  ValueError: If any of the arguments to `callable_fn` lacks a default value.
  TypeError: If any of the arguments to `callable_fn` lacks a type annotation.
  TypeError: If any of the arguments to `callable_fn` has an unsupported type.
  TypeError: If `callable_fn` is not callable.
https://github.com/deepmind/fancyflags/blob/2e13d9818fb41dbb4476c4ebbcfe5f5a35643ef0/fancyflags/_auto.py#L84-L145
import inspect import typing from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple from fancyflags import _definitions _TYPE_MAP = { List[bool]: _definitions.Sequence, List[float]: _definitions.Sequence, List[int]: _definitions.Sequence, List[str]: _definitions.Sequence, Sequence[bool]: _definitions.Sequence, Sequence[float]: _definitions.Sequence, Sequence[int]: _definitions.Sequence, Sequence[str]: _definitions.Sequence, Tuple[bool]: _definitions.Sequence, Tuple[float]: _definitions.Sequence, Tuple[int]: _definitions.Sequence, Tuple[str]: _definitions.Sequence, bool: _definitions.Boolean, float: _definitions.Float, int: _definitions.Integer, str: _definitions.String, } _TYPE_MAP.update({Optional[tp]: parser for tp, parser in _TYPE_MAP.items()}) _MISSING_TYPE_ANNOTATION = "Missing type annotation for argument {name!r}" _UNSUPPORTED_ARGUMENT_TYPE = ( "No matching flag type for argument {{name!r}} with type annotation: " "{{annotation}}\n" "Supported types:\n{}".format("\n".join(str(t) for t in _TYPE_MAP))) _MISSING_DEFAULT_VALUE = "Missing default value for argument {name!r}" def _get_typed_signature(fn: Callable[..., Any]) -> inspect.Signature: type_hints = typing.get_type_hints(fn) or {} orig_signature = inspect.signature(fn) new_params = [] for key, orig_param in orig_signature.parameters.items(): new_params.append( inspect.Parameter( name=key, default=orig_param.default, annotation=type_hints.get(key, orig_param.annotation), kind=orig_param.kind, )) return orig_signature.replace(parameters=new_params)
Apache License 2.0
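A hedged usage sketch for the auto record above. It assumes the fancyflags package is installed and importable as ff; the callable make_replay is a made-up example.

import fancyflags as ff

def make_replay(capacity: int = 1000, discount: float = 0.99, name: str = "replay"):
    ...

# Every argument has a type annotation and a default value, so ff.auto can
# derive one flag Item per argument and splat them into a dict flag.
ff.DEFINE_dict("replay", **ff.auto(make_replay))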
br-idl/paddlevit
image_classification/MobileViT/utils.py
AverageMeter.update
python
def update(self, val, n=1):
    self.sum += val * n
    self.cnt += n
    self.avg = self.sum / self.cnt
update avg by val and n, where val is the avg of n values
https://github.com/br-idl/paddlevit/blob/1f02492bdb1ec1b4452c098ad50016c9ab6f2e31/image_classification/MobileViT/utils.py#L40-L44
import math

from paddle.optimizer.lr import LRScheduler


class AverageMeter():
    def __init__(self):
        self.avg = 0
        self.sum = 0
        self.cnt = 0
        self.reset()

    def reset(self):
        self.avg = 0
        self.sum = 0
        self.cnt = 0
Apache License 2.0
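A standalone sketch of the running average that AverageMeter.update maintains. It mirrors the class above in plain Python; the batch losses and batch sizes are made-up example values.

meter_sum, meter_cnt = 0.0, 0
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    meter_sum += batch_loss * batch_size   # val * n
    meter_cnt += batch_size                # n
print(meter_sum / meter_cnt)               # 0.74, the weighted average over 80 samples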
zxdavb/ramses_rf
ramses_rf/protocol/address.py
Address.__init__
python
def __init__(self, id) -> None:
    self.id = id
    self.type = id[:2]
    self._hex_id = None

    if not self.is_valid(id):
        raise ValueError(f"Invalid device_id: {id}")
Create an address from a valid device id.
https://github.com/zxdavb/ramses_rf/blob/dd7233abde1c3b1e645cfabcf3ccc96d0d2c381b/ramses_rf/protocol/address.py#L34-L42
import re
from functools import lru_cache
from typing import List, Tuple

from .const import (
    DEVICE_LOOKUP,
    DEVICE_TYPES,
    HGI_DEVICE_ID,
    NON_DEVICE_ID,
    NUL_DEVICE_ID,
    __dev_mode__,
)
from .exceptions import InvalidAddrSetError

DEV_MODE = __dev_mode__ and False

__device_id_regex__ = re.compile(r"^(-{2}:-{6}|\d{2}:\d{6})$")


class Address:
    DEVICE_ID_REGEX = __device_id_regex__
MIT License
natenka/q_and_a
code/02_explore_network_map/solution_2_ip_and_hostname.py
cli
python
def cli(start):
    common_params = {
        "auth_password": "cisco",
        "auth_secondary": "cisco",
        "auth_strict_key": False,
        "auth_username": "cisco",
        "platform": "cisco_iosxe",
        "timeout_socket": 5,
        "timeout_transport": 10,
    }
    topology = explore_topology(start, params=common_params)
    pprint(topology)
Run CDP network explorer. Enter START IP address.

Example:

\b
python solution_2_ip_and_hostname.py 192.168.100.1
https://github.com/natenka/q_and_a/blob/b84ee77db631a0469795d3d9ab636f0550fed2d0/code/02_explore_network_map/solution_2_ip_and_hostname.py#L73-L92
import re from pprint import pprint import click from scrapli import Scrapli from scrapli.exceptions import ScrapliException def connect_ssh(params, command, verbose=True): if verbose: print("Connect...", params["host"]) try: with Scrapli(**params) as ssh: prompt = ssh.get_prompt() reply = ssh.send_command(command) output = reply.result hostname = re.search(r"(\S+)[>#]", prompt).group(1) return hostname, output except ScrapliException as error: print(error) def parse_cdp(output): regex = ( r"Device ID: (?P<host>\w+)\." r".*?" r"IP address: (?P<ip>\S+)\n" r".*?" r"Interface: (?P<local_port>\S+), +" r"Port ID \(outgoing port\): (?P<remote_port>\S+)" ) neighbors = {} match_iter = re.finditer(regex, output, re.DOTALL) for match in match_iter: groupdict = match.groupdict() hostname = groupdict.pop("host") neighbors[hostname] = groupdict return neighbors def explore_topology(start_device_ip, params): visited_hostnames = set() visited_ipadresses = set() topology = {} todo = [] todo.append(start_device_ip) while len(todo) > 0: current_ip = todo.pop(0) params["host"] = current_ip result = connect_ssh(params, "sh cdp neig det") if not result: continue current_host, sh_cdp_neighbors_output = result neighbors = parse_cdp(sh_cdp_neighbors_output) topology[current_host] = neighbors visited_ipadresses.add(current_ip) visited_hostnames.add(current_host) for neighbor, n_data in neighbors.items(): neighbor_ip = n_data["ip"] if neighbor not in visited_hostnames | visited_ipadresses | set(todo): todo.append(neighbor_ip) return topology @click.command() @click.argument("start")
MIT License
yahoo/panoptes
yahoo_panoptes/framework/utilities/helpers.py
inspect_calling_module_for_name
python
def inspect_calling_module_for_name(name):
    if is_python_2():
        return False
    for frame in inspect.stack():
        if hasattr(frame, 'filename'):
            if name in frame.filename:
                return True
    return False
Python 3 only! Inspects the stack to check if `name` is in the filename of a frame.

Returns:
    bool: True if `name` is in the filename of a frame.
https://github.com/yahoo/panoptes/blob/e9767752b15cc970eba8fa5ec55e41e45e74e4f0/yahoo_panoptes/framework/utilities/helpers.py#L229-L244
from __future__ import division from future import standard_library from builtins import hex from builtins import range from past.utils import old_div import ctypes import inspect import logging import os import platform import threading import uuid import gevent import ipaddress from _socket import gaierror, herror try: from cStringIO import StringIO except ImportError: from io import StringIO from yahoo_panoptes.framework.exceptions import PanoptesBaseException from yahoo_panoptes.framework import validators from configobj import ConfigObj, ConfigObjError, flatten_errors from validate import Validator from gevent import socket from gevent.util import wrap_errors standard_library.install_aliases() import re import sys LOG = logging.getLogger(__name__) def is_python_2(): return sys.version_info[0] == 2 def normalize_plugin_name(plugin_name): assert validators.PanoptesValidators.valid_nonempty_string(plugin_name), u'plugin_name must be a non-empty str' temp_plugin_name = plugin_name.replace(u'_', u'__') normalized_plugin_name = re.sub(r'[^A-Za-z0-9_]', u'_', temp_plugin_name) return normalized_plugin_name def get_module_mtime(module_path): mtime = 0 assert validators.PanoptesValidators.valid_nonempty_string(module_path), u'module_path must be a non-empty str' if os.path.isdir(module_path): for f in os.listdir(module_path): f_time = int(os.path.getmtime(module_path + u'/' + f)) mtime = f_time if f_time > mtime else mtime elif os.path.isfile(module_path + u'.py'): mtime = int(os.path.getmtime(module_path + u'.py')) return mtime def resolve_hostnames(hostnames, timeout): assert validators.PanoptesValidators.valid_nonempty_iterable_of_strings(hostnames), u'hostnames should be a list' assert validators.PanoptesValidators.valid_nonzero_integer(timeout), u'timeout should be an int greater than zero' jobs = [gevent.spawn(wrap_errors(gaierror, socket.gethostbyname), host) for host in hostnames] gevent.joinall(jobs, timeout=timeout) addresses = [job.value if not isinstance(job.get(), gaierror) else None for job in jobs] results = [(hostnames[i], result) for i, result in enumerate(addresses)] return results def unknown_hostname(ip): return u'unknown-' + re.sub(r'[.:]', u'-', ip) def get_hostnames(ips, timeout): assert validators.PanoptesValidators.valid_nonempty_iterable_of_strings(ips), u'ips should be a list' assert validators.PanoptesValidators.valid_nonzero_integer(timeout), u'timeout should be an int greater than zero' jobs = [gevent.spawn(wrap_errors((gaierror, herror), socket.gethostbyaddr), ip) for ip in ips] gevent.joinall(jobs, timeout=timeout) hostnames = [None if isinstance(job.get(), (gaierror, herror)) else job.value for job in jobs] results = { ips[i]: unknown_hostname(ips[i]) if ((not result) or (not result[0]) or result[0].startswith(u'UNKNOWN')) else result[0] for i, result in enumerate(hostnames)} return results def get_ip_version(ip): try: socket.inet_aton(ip) return 4 except socket.error: pass try: socket.inet_pton(socket.AF_INET6, ip) return 6 except socket.error: pass raise ValueError(ip) def get_hostname(): return str(platform.node()) def get_os_tid(): if sys.platform.startswith(u'linux'): return ctypes.CDLL(u'libc.so.6').syscall(186) else: if is_python_2(): return threading._get_ident() else: return threading.get_ident() def get_calling_module_name(depth=3): assert isinstance(depth, int), u'depth should be an int' frame = inspect.stack()[depth] LOG.debug(u'Got calling frame %r', frame) module = inspect.getmodule(frame[0]) if module: return module.__name__
Apache License 2.0
nuagenetworks/vspk-python
vspk/v5_0/nul4service.py
NUL4Service.description
python
def description(self, value):
    self._description = value
Set description value.

Notes:
    Description of the service
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v5_0/nul4service.py#L248-L256
from .fetchers import NUL4ServiceGroupsFetcher from bambou import NURESTObject class NUL4Service(NURESTObject): __rest_name__ = "l4service" __resource_name__ = "l4services" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): super(NUL4Service, self).__init__() self._icmp_code = None self._icmp_type = None self._name = None self._last_updated_by = None self._default_service = None self._description = None self._entity_scope = None self._ports = None self._protocol = None self._external_id = None self.expose_attribute(local_name="icmp_code", remote_name="ICMPCode", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="icmp_type", remote_name="ICMPType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=True) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="default_service", remote_name="defaultService", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="ports", remote_name="ports", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="protocol", remote_name="protocol", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.l4_service_groups = NUL4ServiceGroupsFetcher.fetcher_with_object(parent_object=self, relationship="member") self._compute_args(**kwargs) @property def icmp_code(self): return self._icmp_code @icmp_code.setter def icmp_code(self, value): self._icmp_code = value @property def icmp_type(self): return self._icmp_type @icmp_type.setter def icmp_type(self, value): self._icmp_type = value @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def last_updated_by(self): return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): self._last_updated_by = value @property def default_service(self): return self._default_service @default_service.setter def default_service(self, value): self._default_service = value @property def description(self): return self._description @description.setter
BSD 3-Clause New or Revised License
theroyakash/akdsframework
AKDSFramework/structure/linkedlist.py
SinglyLinkedList.add
python
def add(self, value, at_end=True, position=0):
    new = Node(value)

    if at_end:
        position = self.size
    else:
        position = position

    if not 0 <= position <= self.size:
        raise IndexError('Directed position out of bounds')
    elif position == 0:
        new.next = self.head
        self.head = new
        self.size += 1
    else:
        temp = self.head
        for _ in range(position - 1):
            temp = temp.next
        new.next = temp.next
        temp.next = new
        self.size += 1
Adds any element to the linked list.

Args:
    - value (Any): The value you want to add.
    - at_end (bool): Whether to add at the end of the list. Defaults to True (add at the end).
    - position (int): If you choose ``at_end=False``, the position at which to insert the new value.
https://github.com/theroyakash/akdsframework/blob/3c98792007389658c9ea9966c2263d6a07601f3a/AKDSFramework/structure/linkedlist.py#L31-L58
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None


class SinglyLinkedList:
    def __init__(self):
        self.head = Node(None)
        self.size = 0
MIT License
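A hedged usage sketch for SinglyLinkedList.add. It assumes the AKDSFramework package is installed so the class can be imported from the path shown above; the values are made-up examples.

from AKDSFramework.structure.linkedlist import SinglyLinkedList

lst = SinglyLinkedList()
lst.add(10)                              # appended at the end (the default)
lst.add(20)
lst.add(15, at_end=False, position=1)    # inserted between 10 and 20
print(lst.size)                          # 3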
databand-ai/dbnd
modules/dbnd/src/dbnd/_vendor/dulwich/walk.py
Walker._reorder
python
def _reorder(self, results):
    if self.order == ORDER_TOPO:
        results = _topo_reorder(results, self.get_parents)
    if self.reverse:
        results = reversed(list(results))
    return results
Possibly reorder a results iterator.

:param results: An iterator of WalkEntry objects, in the order returned
    from the queue_cls.
:return: An iterator or list of WalkEntry objects, in the order required by
    the Walker.
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd/src/dbnd/_vendor/dulwich/walk.py#L373-L385
from collections import defaultdict import collections import heapq from itertools import chain from dbnd._vendor.dulwich.diff_tree import ( RENAME_CHANGE_TYPES, tree_changes, tree_changes_for_merge, RenameDetector, ) from dbnd._vendor.dulwich.errors import MissingCommitError from dbnd._vendor.dulwich.objects import Tag ORDER_DATE = "date" ORDER_TOPO = "topo" ALL_ORDERS = (ORDER_DATE, ORDER_TOPO) _MAX_EXTRA_COMMITS = 5 class WalkEntry(object): def __init__(self, walker, commit): self.commit = commit self._store = walker.store self._get_parents = walker.get_parents self._changes = {} self._rename_detector = walker.rename_detector def changes(self, path_prefix=None): cached = self._changes.get(path_prefix) if cached is None: commit = self.commit if not self._get_parents(commit): changes_func = tree_changes parent = None elif len(self._get_parents(commit)) == 1: changes_func = tree_changes parent = self._store[self._get_parents(commit)[0]].tree if path_prefix: mode, subtree_sha = parent.lookup_path( self._store.__getitem__, path_prefix, ) parent = self._store[subtree_sha] else: changes_func = tree_changes_for_merge parent = [self._store[p].tree for p in self._get_parents(commit)] if path_prefix: parent_trees = [self._store[p] for p in parent] parent = [] for p in parent_trees: try: mode, st = p.lookup_path( self._store.__getitem__, path_prefix, ) except KeyError: pass else: parent.append(st) commit_tree_sha = commit.tree if path_prefix: commit_tree = self._store[commit_tree_sha] mode, commit_tree_sha = commit_tree.lookup_path( self._store.__getitem__, path_prefix, ) cached = list( changes_func( self._store, parent, commit_tree_sha, rename_detector=self._rename_detector, ) ) self._changes[path_prefix] = cached return self._changes[path_prefix] def __repr__(self): return "<WalkEntry commit=%s, changes=%r>" % (self.commit.id, self.changes()) class _CommitTimeQueue(object): def __init__(self, walker): self._walker = walker self._store = walker.store self._get_parents = walker.get_parents self._excluded = walker.excluded self._pq = [] self._pq_set = set() self._seen = set() self._done = set() self._min_time = walker.since self._last = None self._extra_commits_left = _MAX_EXTRA_COMMITS self._is_finished = False for commit_id in chain(walker.include, walker.excluded): self._push(commit_id) def _push(self, object_id): try: obj = self._store[object_id] except KeyError: raise MissingCommitError(object_id) if isinstance(obj, Tag): self._push(obj.object[1]) return commit = obj if commit.id not in self._pq_set and commit.id not in self._done: heapq.heappush(self._pq, (-commit.commit_time, commit)) self._pq_set.add(commit.id) self._seen.add(commit.id) def _exclude_parents(self, commit): excluded = self._excluded seen = self._seen todo = [commit] while todo: commit = todo.pop() for parent in self._get_parents(commit): if parent not in excluded and parent in seen: todo.append(self._store[parent]) excluded.add(parent) def next(self): if self._is_finished: return None while self._pq: _, commit = heapq.heappop(self._pq) sha = commit.id self._pq_set.remove(sha) if sha in self._done: continue self._done.add(sha) for parent_id in self._get_parents(commit): self._push(parent_id) reset_extra_commits = True is_excluded = sha in self._excluded if is_excluded: self._exclude_parents(commit) if self._pq and all(c.id in self._excluded for _, c in self._pq): _, n = self._pq[0] if self._last and n.commit_time >= self._last.commit_time: reset_extra_commits = True else: reset_extra_commits = False if self._min_time is not 
None and commit.commit_time < self._min_time: reset_extra_commits = False if reset_extra_commits: self._extra_commits_left = _MAX_EXTRA_COMMITS else: self._extra_commits_left -= 1 if not self._extra_commits_left: break if not is_excluded: self._last = commit return WalkEntry(self._walker, commit) self._is_finished = True return None __next__ = next class Walker(object): def __init__( self, store, include, exclude=None, order=ORDER_DATE, reverse=False, max_entries=None, paths=None, rename_detector=None, follow=False, since=None, until=None, get_parents=lambda commit: commit.parents, queue_cls=_CommitTimeQueue, ): if order not in ALL_ORDERS: raise ValueError("Unknown walk order %s" % order) self.store = store if isinstance(include, bytes): include = [include] self.include = include self.excluded = set(exclude or []) self.order = order self.reverse = reverse self.max_entries = max_entries self.paths = paths and set(paths) or None if follow and not rename_detector: rename_detector = RenameDetector(store) self.rename_detector = rename_detector self.get_parents = get_parents self.follow = follow self.since = since self.until = until self._num_entries = 0 self._queue = queue_cls(self) self._out_queue = collections.deque() def _path_matches(self, changed_path): if changed_path is None: return False for followed_path in self.paths: if changed_path == followed_path: return True if ( changed_path.startswith(followed_path) and changed_path[len(followed_path)] == b"/"[0] ): return True return False def _change_matches(self, change): if not change: return False old_path = change.old.path new_path = change.new.path if self._path_matches(new_path): if self.follow and change.type in RENAME_CHANGE_TYPES: self.paths.add(old_path) self.paths.remove(new_path) return True elif self._path_matches(old_path): return True return False def _should_return(self, entry): commit = entry.commit if self.since is not None and commit.commit_time < self.since: return False if self.until is not None and commit.commit_time > self.until: return False if commit.id in self.excluded: return False if self.paths is None: return True if len(self.get_parents(commit)) > 1: for path_changes in entry.changes(): for change in path_changes: if self._change_matches(change): return True else: for change in entry.changes(): if self._change_matches(change): return True return None def _next(self): max_entries = self.max_entries while max_entries is None or self._num_entries < max_entries: entry = next(self._queue) if entry is not None: self._out_queue.append(entry) if entry is None or len(self._out_queue) > _MAX_EXTRA_COMMITS: if not self._out_queue: return None entry = self._out_queue.popleft() if self._should_return(entry): self._num_entries += 1 return entry return None
Apache License 2.0
negrinho/deep_architect
deep_architect/contrib/misc/calibration_utils.py
argsort
python
def argsort(xs, fns, increasing=True):
    def key_fn(x):
        return tuple([f(x) for f in fns])

    idxs, _ = zip_toggle(
        sorted(
            enumerate(xs),
            key=lambda x: key_fn(x[1]),
            reverse=not increasing))
    return idxs
Each function in fns computes a key; the keys are combined into a tuple that is used for sorting. Earlier keys take precedence over later ones.
https://github.com/negrinho/deep_architect/blob/3427c5d45b0cbdc9c2fe1f4e5213f6961ef41749/deep_architect/contrib/misc/calibration_utils.py#L18-L30
import deep_architect.visualization as vi
import deep_architect.utils as ut
import numpy as np
import itertools


def map_dict(d, fn):
    return {k: fn(k, v) for (k, v) in d.items()}


def zip_toggle(xs):
    assert isinstance(xs, list)
    return list(zip(*xs))
MIT License
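A hedged usage sketch for argsort. It assumes the deep_architect repository is importable from the path shown above; the data is a made-up example.

from deep_architect.contrib.misc.calibration_utils import argsort

xs = [(3, "b"), (1, "c"), (3, "a")]
# Sort primarily by the number, break ties with the string, and return the indices.
idxs = argsort(xs, [lambda x: x[0], lambda x: x[1]])
print(idxs)  # (1, 2, 0)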
python-useful-helpers/threaded
threaded/_threadpooled.py
ThreadPoolExecutor.max_workers
python
def max_workers(self) -> int:
    return self._max_workers
MaxWorkers.

:rtype: int
https://github.com/python-useful-helpers/threaded/blob/527d8b1b70a58e8c133b2f0c19878ec31ee28a9b/threaded/_threadpooled.py#L259-L264
__all__ = ("ThreadPooled", "threadpooled") import asyncio import concurrent.futures import functools import typing from . import _base_threaded class ThreadPooled(_base_threaded.APIPooled): __slots__ = ("__loop_getter", "__loop_getter_need_context") __executor: typing.Optional["ThreadPoolExecutor"] = None @classmethod def configure(cls: typing.Type["ThreadPooled"], max_workers: typing.Optional[int] = None) -> None: if isinstance(cls.__executor, ThreadPoolExecutor): if cls.__executor.max_workers == max_workers: return cls.__executor.shutdown() cls.__executor = ThreadPoolExecutor(max_workers=max_workers) @classmethod def shutdown(cls: typing.Type["ThreadPooled"]) -> None: if cls.__executor is not None: cls.__executor.shutdown() @property def executor(self) -> "ThreadPoolExecutor": if not isinstance(self.__executor, ThreadPoolExecutor) or self.__executor.is_shutdown: self.configure() return self.__executor def __init__( self, func: typing.Optional[typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]] = None, *, loop_getter: typing.Optional[ typing.Union[typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] ] = None, loop_getter_need_context: bool = False, ) -> None: super().__init__(func=func) self.__loop_getter: typing.Optional[ typing.Union[typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] ] = loop_getter self.__loop_getter_need_context: bool = loop_getter_need_context @property def loop_getter( self, ) -> typing.Optional[typing.Union[typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop]]: return self.__loop_getter @property def loop_getter_need_context(self) -> bool: return self.__loop_getter_need_context def _get_loop(self, *args: typing.Any, **kwargs: typing.Any) -> typing.Optional[asyncio.AbstractEventLoop]: if callable(self.loop_getter): if self.loop_getter_need_context: return self.loop_getter(*args, **kwargs) return self.loop_getter() return self.loop_getter def _get_function_wrapper( self, func: typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]] ) -> typing.Callable[..., "typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]"]: prepared = self._await_if_required(func) @functools.wraps(prepared) def wrapper( *args: typing.Any, **kwargs: typing.Any ) -> typing.Union["concurrent.futures.Future[typing.Any]", "typing.Awaitable[typing.Any]"]: loop: typing.Optional[asyncio.AbstractEventLoop] = self._get_loop(*args, **kwargs) if loop is None: return self.executor.submit(prepared, *args, **kwargs) return loop.run_in_executor(self.executor, functools.partial(prepared, *args, **kwargs)) return wrapper def __call__( self, *args: typing.Union[typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]], typing.Any], **kwargs: typing.Any, ) -> typing.Union[ "concurrent.futures.Future[typing.Any]", "typing.Awaitable[typing.Any]", typing.Callable[..., "typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]"], ]: return super().__call__(*args, **kwargs) def __repr__(self) -> str: return ( f"<{self.__class__.__name__}(" f"{self._func!r}, " f"loop_getter={self.loop_getter!r}, " f"loop_getter_need_context={self.loop_getter_need_context!r}, " f") at 0x{id(self):X}>" ) @typing.overload def threadpooled( func: typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]], *, loop_getter: None = None, loop_getter_need_context: bool = False, ) -> typing.Callable[..., 
"concurrent.futures.Future[typing.Any]"]: @typing.overload def threadpooled( func: typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]], *, loop_getter: typing.Union[typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop], loop_getter_need_context: bool = False, ) -> typing.Callable[..., "asyncio.Task[typing.Any]"]: @typing.overload def threadpooled( func: None = None, *, loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] = None, loop_getter_need_context: bool = False, ) -> ThreadPooled: def threadpooled( func: typing.Optional[typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]] = None, *, loop_getter: typing.Union[None, typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop] = None, loop_getter_need_context: bool = False, ) -> typing.Union[ ThreadPooled, typing.Callable[..., "typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]"], ]: if func is None: return ThreadPooled(func=func, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context) return ThreadPooled( func=None, loop_getter=loop_getter, loop_getter_need_context=loop_getter_need_context )(func) class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor): __slots__ = () @property
Apache License 2.0
redisbloom/redisbloom-py
redisbloom/client.py
Client.topkListWithCount
python
def topkListWithCount(self, key):
    return self.execute_command(self.TOPK_LIST, key, 'WITHCOUNT')
Return the full list of items, with their probabilistic counts, in the Top-K list of ``key``.
https://github.com/redisbloom/redisbloom-py/blob/6756217312c278d0a7b12510663836a0473e5c80/redisbloom/client.py#L602-L607
import six from redis.client import Redis, Pipeline from redis._compat import nativestr def bool_ok(response): return nativestr(response) == 'OK' class BFInfo(object): capacity = None size = None filterNum = None insertedNum = None expansionRate = None def __init__(self, args): response = dict(zip(map(nativestr, args[::2]), args[1::2])) self.capacity = response['Capacity'] self.size = response['Size'] self.filterNum = response['Number of filters'] self.insertedNum = response['Number of items inserted'] self.expansionRate = response['Expansion rate'] class CFInfo(object): size = None bucketNum = None filterNum = None insertedNum = None deletedNum = None bucketSize = None expansionRate = None maxIteration = None def __init__(self, args): response = dict(zip(map(nativestr, args[::2]), args[1::2])) self.size = response['Size'] self.bucketNum = response['Number of buckets'] self.filterNum = response['Number of filters'] self.insertedNum = response['Number of items inserted'] self.deletedNum = response['Number of items deleted'] self.bucketSize = response['Bucket size'] self.expansionRate = response['Expansion rate'] self.maxIteration = response['Max iterations'] class CMSInfo(object): width = None depth = None count = None def __init__(self, args): response = dict(zip(map(nativestr, args[::2]), args[1::2])) self.width = response['width'] self.depth = response['depth'] self.count = response['count'] class TopKInfo(object): k = None width = None depth = None decay = None def __init__(self, args): response = dict(zip(map(nativestr, args[::2]), args[1::2])) self.k = response['k'] self.width = response['width'] self.depth = response['depth'] self.decay = response['decay'] class TDigestInfo(object): compression = None capacity = None mergedNodes = None unmergedNodes = None mergedWeight = None unmergedWeight = None totalCompressions = None def __init__(self, args): response = dict(zip(map(nativestr, args[::2]), args[1::2])) self.compression = response['Compression'] self.capacity = response['Capacity'] self.mergedNodes = response['Merged nodes'] self.unmergedNodes = response['Unmerged nodes'] self.mergedWeight = response['Merged weight'] self.unmergedWeight = response['Unmerged weight'] self.totalCompressions = response['Total compressions'] def spaceHolder(response): return response def parseToList(response): res = [] for item in response: if isinstance(item, int): res.append(item) elif item is not None: res.append(nativestr(item)) else: res.append(None) return res class Client(Redis): BF_RESERVE = 'BF.RESERVE' BF_ADD = 'BF.ADD' BF_MADD = 'BF.MADD' BF_INSERT = 'BF.INSERT' BF_EXISTS = 'BF.EXISTS' BF_MEXISTS = 'BF.MEXISTS' BF_SCANDUMP = 'BF.SCANDUMP' BF_LOADCHUNK = 'BF.LOADCHUNK' BF_INFO = 'BF.INFO' CF_RESERVE = 'CF.RESERVE' CF_ADD = 'CF.ADD' CF_ADDNX = 'CF.ADDNX' CF_INSERT = 'CF.INSERT' CF_INSERTNX = 'CF.INSERTNX' CF_EXISTS = 'CF.EXISTS' CF_DEL = 'CF.DEL' CF_COUNT = 'CF.COUNT' CF_SCANDUMP = 'CF.SCANDUMP' CF_LOADCHUNK = 'CF.LOADCHUNK' CF_INFO = 'CF.INFO' CMS_INITBYDIM = 'CMS.INITBYDIM' CMS_INITBYPROB = 'CMS.INITBYPROB' CMS_INCRBY = 'CMS.INCRBY' CMS_QUERY = 'CMS.QUERY' CMS_MERGE = 'CMS.MERGE' CMS_INFO = 'CMS.INFO' TOPK_RESERVE = 'TOPK.RESERVE' TOPK_ADD = 'TOPK.ADD' TOPK_QUERY = 'TOPK.QUERY' TOPK_COUNT = 'TOPK.COUNT' TOPK_LIST = 'TOPK.LIST' TOPK_INFO = 'TOPK.INFO' TDIGEST_CREATE = 'TDIGEST.CREATE' TDIGEST_RESET = 'TDIGEST.RESET' TDIGEST_ADD = 'TDIGEST.ADD' TDIGEST_MERGE = 'TDIGEST.MERGE' TDIGEST_CDF = 'TDIGEST.CDF' TDIGEST_QUANTILE = 'TDIGEST.QUANTILE' TDIGEST_MIN = 'TDIGEST.MIN' TDIGEST_MAX = 
'TDIGEST.MAX' TDIGEST_INFO = 'TDIGEST.INFO' def __init__(self, *args, **kwargs): Redis.__init__(self, *args, **kwargs) MODULE_CALLBACKS = { self.BF_RESERVE: bool_ok, self.BF_INFO: BFInfo, self.CF_RESERVE: bool_ok, self.CF_INFO: CFInfo, self.CMS_INITBYDIM: bool_ok, self.CMS_INITBYPROB: bool_ok, self.CMS_MERGE: bool_ok, self.CMS_INFO: CMSInfo, self.TOPK_RESERVE: bool_ok, self.TOPK_ADD: parseToList, self.TOPK_LIST: parseToList, self.TOPK_INFO: TopKInfo, self.TDIGEST_CREATE: bool_ok, self.TDIGEST_INFO: TDigestInfo, } for k, v in six.iteritems(MODULE_CALLBACKS): self.set_response_callback(k, v) @staticmethod def appendItems(params, items): params.extend(['ITEMS']) params += items @staticmethod def appendError(params, error): if error is not None: params.extend(['ERROR', error]) @staticmethod def appendCapacity(params, capacity): if capacity is not None: params.extend(['CAPACITY', capacity]) @staticmethod def appendExpansion(params, expansion): if expansion is not None: params.extend(['EXPANSION', expansion]) @staticmethod def appendNoScale(params, noScale): if noScale is not None: params.extend(['NONSCALING']) @staticmethod def appendWeights(params, weights): if len(weights) > 0: params.append('WEIGHTS') params += weights @staticmethod def appendNoCreate(params, noCreate): if noCreate is not None: params.extend(['NOCREATE']) @staticmethod def appendItemsAndIncrements(params, items, increments): for i in range(len(items)): params.append(items[i]) params.append(increments[i]) @staticmethod def appendValuesAndWeights(params, items, weights): for i in range(len(items)): params.append(items[i]) params.append(weights[i]) @staticmethod def appendMaxIterations(params, max_iterations): if max_iterations is not None: params.extend(['MAXITERATIONS', max_iterations]) @staticmethod def appendBucketSize(params, bucket_size): if bucket_size is not None: params.extend(['BUCKETSIZE', bucket_size]) def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None): params = [key, errorRate, capacity] self.appendExpansion(params, expansion) self.appendNoScale(params, noScale) return self.execute_command(self.BF_RESERVE, *params) def bfAdd(self, key, item): params = [key, item] return self.execute_command(self.BF_ADD, *params) def bfMAdd(self, key, *items): params = [key] params += items return self.execute_command(self.BF_MADD, *params) def bfInsert(self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None): params = [key] self.appendCapacity(params, capacity) self.appendError(params, error) self.appendExpansion(params, expansion) self.appendNoCreate(params, noCreate) self.appendNoScale(params, noScale) self.appendItems(params, items) return self.execute_command(self.BF_INSERT, *params) def bfExists(self, key, item): params = [key, item] return self.execute_command(self.BF_EXISTS, *params) def bfMExists(self, key, *items): params = [key] params += items return self.execute_command(self.BF_MEXISTS, *params) def bfScandump(self, key, iter): params = [key, iter] return self.execute_command(self.BF_SCANDUMP, *params) def bfLoadChunk(self, key, iter, data): params = [key, iter, data] return self.execute_command(self.BF_LOADCHUNK, *params) def bfInfo(self, key): return self.execute_command(self.BF_INFO, key) def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None): params = [key, capacity] self.appendExpansion(params, expansion) self.appendBucketSize(params, bucket_size) self.appendMaxIterations(params, max_iterations) return 
self.execute_command(self.CF_RESERVE, *params) def cfAdd(self, key, item): params = [key, item] return self.execute_command(self.CF_ADD, *params) def cfAddNX(self, key, item): params = [key, item] return self.execute_command(self.CF_ADDNX, *params) def cfInsert(self, key, items, capacity=None, nocreate=None): params = [key] self.appendCapacity(params, capacity) self.appendNoCreate(params, nocreate) self.appendItems(params, items) return self.execute_command(self.CF_INSERT, *params) def cfInsertNX(self, key, items, capacity=None, nocreate=None): params = [key] self.appendCapacity(params, capacity) self.appendNoCreate(params, nocreate) self.appendItems(params, items) return self.execute_command(self.CF_INSERTNX, *params) def cfExists(self, key, item): params = [key, item] return self.execute_command(self.CF_EXISTS, *params) def cfDel(self, key, item): params = [key, item] return self.execute_command(self.CF_DEL, *params) def cfCount(self, key, item): params = [key, item] return self.execute_command(self.CF_COUNT, *params) def cfScandump(self, key, iter): params = [key, iter] return self.execute_command(self.CF_SCANDUMP, *params) def cfLoadChunk(self, key, iter, data): params = [key, iter, data] return self.execute_command(self.CF_LOADCHUNK, *params) def cfInfo(self, key): return self.execute_command(self.CF_INFO, key) def cmsInitByDim(self, key, width, depth): params = [key, width, depth] return self.execute_command(self.CMS_INITBYDIM, *params) def cmsInitByProb(self, key, error, probability): params = [key, error, probability] return self.execute_command(self.CMS_INITBYPROB, *params) def cmsIncrBy(self, key, items, increments): params = [key] self.appendItemsAndIncrements(params, items, increments) return self.execute_command(self.CMS_INCRBY, *params) def cmsQuery(self, key, *items): params = [key] params += items return self.execute_command(self.CMS_QUERY, *params) def cmsMerge(self, destKey, numKeys, srcKeys, weights=[]): params = [destKey, numKeys] params += srcKeys self.appendWeights(params, weights) return self.execute_command(self.CMS_MERGE, *params) def cmsInfo(self, key): return self.execute_command(self.CMS_INFO, key) def topkReserve(self, key, k, width, depth, decay): params = [key, k, width, depth, decay] return self.execute_command(self.TOPK_RESERVE, *params) def topkAdd(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_ADD, *params) def topkQuery(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_QUERY, *params) def topkCount(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_COUNT, *params) def topkList(self, key): return self.execute_command(self.TOPK_LIST, key)
BSD 3-Clause New or Revised License
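A hedged usage sketch for topkListWithCount. It assumes a Redis server with the RedisBloom module is reachable on localhost with default settings; the key name and items are made up, and the returned counts are probabilistic.

from redisbloom.client import Client

rb = Client()
rb.topkReserve('demo-topk', 3, 50, 4, 0.9)             # k=3, width=50, depth=4, decay=0.9
rb.topkAdd('demo-topk', 'a', 'b', 'a', 'c', 'a', 'b')
print(rb.topkListWithCount('demo-topk'))                # e.g. ['a', 3, 'b', 2, 'c', 1]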
gwww/elkm1
elkm1_lib/message.py
MessageDecode._zc_decode
python
def _zc_decode(self, msg):
    status = _status_decode(int(msg[7:8], 16))
    return {"zone_number": int(msg[4:7]) - 1, "zone_status": status}
ZC: Zone Change.
https://github.com/gwww/elkm1/blob/e84865b6b3a6d4ba1d062eefcada44c123180b9a/elkm1_lib/message.py#L287-L290
import datetime as dt import re import time from collections import namedtuple from .const import Max MessageEncode = namedtuple("MessageEncode", ["message", "response_command"]) class MessageDecode: def __init__(self): self._handlers = {} def add_handler(self, message_type, handler): if message_type not in self._handlers: self._handlers[message_type] = [] if handler not in self._handlers[message_type]: self._handlers[message_type].append(handler) def remove_handler(self, message_type, handler): if message_type not in self._handlers: return if handler in self._handlers[message_type]: self._handlers[message_type].remove(handler) def call_handlers(self, cmd, decoded_msg): for handler in self._handlers.get(cmd, []): handler(**decoded_msg) def decode(self, msg): valid, error_msg = _is_valid_length_and_checksum(msg) if valid: cmd = msg[2:4] decoder = getattr(self, f"_{cmd.lower()}_decode", None) if not decoder: cmd = "unknown" decoder = self._unknown_decode try: self.call_handlers(cmd, decoder(msg)) except (IndexError, ValueError) as exc: raise ValueError("Cannot decode message") from exc return if not msg or msg.startswith("Username: ") or msg.startswith("Password: "): return if "Login successful" in msg: self.call_handlers("login", {"succeeded": True}) elif msg.startswith("Username/Password not found") or msg == "Disabled": self.call_handlers("login", {"succeeded": False}) else: raise ValueError(error_msg) def _am_decode(self, msg): return {"alarm_memory": msg[4 : 4 + Max.AREAS.value]} def _as_decode(self, msg): return { "armed_statuses": msg[4:12], "arm_up_states": msg[12:20], "alarm_states": msg[20:28], } def _az_decode(self, msg): return {"alarm_status": msg[4 : 4 + Max.ZONES.value]} def _cr_one_custom_value_decode(self, index, part): value = int(part[0:5]) value_format = int(part[5]) if value_format == 2: value = ((value >> 8) & 0xFF, value & 0xFF) return {"index": index, "value": value, "value_format": value_format} def _cr_decode(self, msg): if int(msg[4:6]) > 0: index = int(msg[4:6]) - 1 return {"values": [self._cr_one_custom_value_decode(index, msg[6:12])]} part = 6 ret = [] for i in range(Max.SETTINGS.value): ret.append(self._cr_one_custom_value_decode(i, msg[part : part + 6])) part += 6 return {"values": ret} def _cc_decode(self, msg): return {"output": int(msg[4:7]) - 1, "output_status": msg[7] == "1"} def _cs_decode(self, msg): output_status = [x == "1" for x in msg[4 : 4 + Max.OUTPUTS.value]] return {"output_status": output_status} def _cv_decode(self, msg): return {"counter": int(msg[4:6]) - 1, "value": int(msg[6:11])} def _ee_decode(self, msg): return { "area": int(msg[4:5]) - 1, "is_exit": msg[5:6] == "0", "timer1": int(msg[6:9]), "timer2": int(msg[9:12]), "armed_status": msg[12:13], } def _ic_decode(self, msg): code = msg[4:16] if re.match(r"(0\d){6}", code): code = re.sub(r"0(\d)", r"\1", code) return { "code": code, "user": int(msg[16:19]) - 1, "keypad": int(msg[19:21]) - 1, } def _ie_decode(self, _msg): return {} def _ka_decode(self, msg): return {"keypad_areas": [ord(x) - 0x31 for x in msg[4 : 4 + Max.KEYPADS.value]]} def _kc_decode(self, msg): return {"keypad": int(msg[4:6]) - 1, "key": int(msg[6:8])} def _ld_decode(self, msg): area = int(msg[11]) - 1 hour = int(msg[12:14]) minute = int(msg[14:16]) month = int(msg[16:18]) day = int(msg[18:20]) year = int(msg[24:26]) + 2000 log_local_datetime = dt.datetime(year, month, day, hour, minute) log_local_time = time.mktime(log_local_datetime.timetuple()) log_gm_timestruct = time.gmtime(log_local_time) log = {} log["event"] = 
int(msg[4:8]) log["number"] = int(msg[8:11]) log["index"] = int(msg[20:23]) log["timestamp"] = dt.datetime( *log_gm_timestruct[:6], tzinfo=dt.timezone.utc ).isoformat() return {"area": area, "log": log} def _lw_decode(self, msg): keypad_temps = [] zone_temps = [] for i in range(16): keypad_temps.append(int(msg[4 + 3 * i : 7 + 3 * i]) - 40) zone_temps.append(int(msg[52 + 3 * i : 55 + 3 * i]) - 60) return {"keypad_temps": keypad_temps, "zone_temps": zone_temps} def _pc_decode(self, msg): housecode = msg[4:7] return { "housecode": housecode, "index": housecode_to_index(housecode), "light_level": int(msg[7:9]), } def _ps_decode(self, msg): return { "bank": ord(msg[4]) - 0x30, "statuses": [ord(x) - 0x30 for x in msg[5:69]], } def _rp_decode(self, msg): return {"remote_programming_status": int(msg[4:6])} def _rr_decode(self, msg): return {"real_time_clock": msg[4:20]} def _sd_decode(self, msg): desc_ch1 = msg[9] show_on_keypad = ord(desc_ch1) >= 0x80 if show_on_keypad: desc_ch1 = chr(ord(desc_ch1) & 0x7F) return { "desc_type": int(msg[4:6]), "unit": int(msg[6:9]) - 1, "desc": (desc_ch1 + msg[10:25]).rstrip(), "show_on_keypad": show_on_keypad, } def _ss_decode(self, msg): return {"system_trouble_status": msg[4:-2]} def _st_decode(self, msg): group = int(msg[4:5]) temperature = int(msg[7:10]) if group == 0: temperature -= 60 elif group == 1: temperature -= 40 return {"group": group, "device": int(msg[5:7]) - 1, "temperature": temperature} def _tc_decode(self, msg): return {"task": int(msg[4:7]) - 1} def _tr_decode(self, msg): return { "thermostat_index": int(msg[4:6]) - 1, "mode": int(msg[6]), "hold": msg[7] == "1", "fan": int(msg[8]), "current_temp": int(msg[9:11]), "heat_setpoint": int(msg[11:13]), "cool_setpoint": int(msg[13:15]), "humidity": int(msg[15:17]), } def _ua_decode(self, msg): return {} def _vn_decode(self, msg): elkm1_version = f"{int(msg[4:6], 16)}.{int(msg[6:8], 16)}.{int(msg[8:10], 16)}" xep_version = ( f"{int(msg[10:12], 16)}.{int(msg[12:14], 16)}.{int(msg[14:16], 16)}" ) return {"elkm1_version": elkm1_version, "xep_version": xep_version} def _xk_decode(self, msg): return {"real_time_clock": msg[4:20]} def _zb_decode(self, msg): return {"zone_number": int(msg[4:7]) - 1, "zone_bypassed": msg[7] == "1"}
MIT License
googlecloudplatform/deploymentmanager-samples
examples/v2/igm-updater/python/service.py
GenerateConfig
python
def GenerateConfig(context):
    name = context.env['name']
    igm_name = name + '-igm'
    curr_it_name = name + '-it-' + context.properties['currVersion']['name']
    machine_type = context.properties['machineType']
    zone = context.properties['zone']

    config = {'resources': []}

    current_it = {
        'name': curr_it_name,
        'type': 'instance-template.py',
        'properties': {
            'machineType': machine_type,
            'zone': zone,
            'itName': curr_it_name,
            'image': context.properties['currVersion']['image']
        }
    }
    config['resources'].append(current_it)

    igm = {
        'name': igm_name,
        'type': 'compute.beta.instanceGroupManager',
        'properties': {
            'baseInstanceName': igm_name + '-instance',
            'instanceTemplate': '$(ref.' + curr_it_name + '.selfLink)',
            'zone': zone,
            'targetSize': 1,
            'targetPools': [
                '$(ref.' + context.properties['targetPool'] + '.selfLink)',
            ],
            'updatePolicy': {
                'minimalAction': 'REPLACE',
                'type': 'PROACTIVE',
            }
        }
    }
    config['resources'].append(igm)

    autoscaler = {
        'name': name + '-as',
        'type': 'compute.v1.autoscaler',
        'properties': {
            'autoscalingPolicy': {
                'minNumReplicas': context.properties['minSize'],
                'maxNumReplicas': context.properties['maxSize']
            },
            'target': '$(ref.' + igm_name + '.selfLink)',
            'zone': zone
        }
    }
    config['resources'].append(autoscaler)

    return config
Generate YAML resource configuration.
https://github.com/googlecloudplatform/deploymentmanager-samples/blob/ecce5ed123626d3b861f38cc7290cfb3d0373688/examples/v2/igm-updater/python/service.py#L21-L80
Apache License 2.0
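A hedged sketch of exercising GenerateConfig directly with a stand-in context object. In a real deployment, Deployment Manager supplies context; the property values below are made up, and the import assumes the template module above is on the path as service.

from service import GenerateConfig

class FakeContext:
    # Minimal stand-in for the Deployment Manager context object.
    env = {'name': 'frontend'}
    properties = {
        'machineType': 'n1-standard-1',
        'zone': 'us-central1-f',
        'targetPool': 'frontend-pool',
        'minSize': 1,
        'maxSize': 3,
        'currVersion': {'name': 'v1', 'image': 'family/debian-9'},
    }

print(GenerateConfig(FakeContext()))  # dict with three resources: instance template, IGM, autoscaler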
microsoft/electionguard-web-api
app/api/v1/mediator/key_admin.py
fetch_joint_key
python
def fetch_joint_key(
    request: Request,
    key_name: str,
) -> ElectionJointKeyResponse:
    ceremony = get_key_ceremony(key_name, request.app.state.settings)

    if not ceremony.elgamal_public_key:
        raise HTTPException(
            status_code=status.HTTP_412_PRECONDITION_FAILED,
            detail=f"No joint key for {key_name}",
        )

    if not ceremony.commitment_hash:
        raise HTTPException(
            status_code=status.HTTP_412_PRECONDITION_FAILED,
            detail=f"No commitment hash for {key_name}",
        )

    return ElectionJointKeyResponse(
        elgamal_public_key=write_json_object(ceremony.elgamal_public_key),
        commitment_hash=write_json_object(ceremony.commitment_hash),
    )
Get The Joint Election Key
https://github.com/microsoft/electionguard-web-api/blob/23a5f57ce8c0f6e63c1c762d58fd98a8b99c2f74/app/api/v1/mediator/key_admin.py#L232-L254
from typing import List import sys from fastapi import APIRouter, Body, HTTPException, Request, status from electionguard.hash import hash_elems from electionguard.key_ceremony import ( PublicKeySet, ElectionPublicKey, ElectionPartialKeyVerification, ElectionPartialKeyChallenge, verify_election_partial_key_challenge, ) from electionguard.elgamal import elgamal_combine_public_keys from electionguard.serializable import write_json_object, read_json_object from electionguard.group import ElementModP from ....core.client import get_client_id from ....core.key_guardian import get_key_guardian from ....core.key_ceremony import ( from_query, get_key_ceremony, update_key_ceremony, update_key_ceremony_state, validate_can_publish, ) from ....core.repository import get_repository, DataCollection from ..models import ( BaseQueryRequest, BaseResponse, KeyCeremony, KeyCeremonyState, KeyCeremonyGuardian, KeyCeremonyGuardianStatus, KeyCeremonyGuardianState, KeyCeremonyCreateRequest, KeyCeremonyStateResponse, KeyCeremonyQueryResponse, KeyCeremonyVerifyChallengesResponse, PublishElectionJointKeyRequest, ElectionJointKeyResponse, ) from ..tags import KEY_CEREMONY_ADMIN router = APIRouter() @router.get( "/ceremony", response_model=KeyCeremonyQueryResponse, tags=[KEY_CEREMONY_ADMIN] ) def fetch_ceremony( request: Request, key_name: str, ) -> KeyCeremonyQueryResponse: key_ceremony = get_key_ceremony(key_name, request.app.state.settings) return KeyCeremonyQueryResponse(key_ceremonies=[key_ceremony]) @router.put("/ceremony", response_model=BaseResponse, tags=[KEY_CEREMONY_ADMIN]) def create_ceremony( request: Request, data: KeyCeremonyCreateRequest = Body(...), ) -> BaseResponse: ceremony = KeyCeremony( key_name=data.key_name, state=KeyCeremonyState.CREATED, number_of_guardians=data.number_of_guardians, quorum=data.quorum, guardian_ids=data.guardian_ids, guardian_status={ guardian_id: KeyCeremonyGuardianState() for guardian_id in data.guardian_ids }, ) try: with get_repository( get_client_id(), DataCollection.KEY_CEREMONY, request.app.state.settings ) as repository: query_result = repository.get({"key_name": data.key_name}) if not query_result: repository.set(ceremony.dict()) return BaseResponse() raise HTTPException( status_code=status.HTTP_409_CONFLICT, detail=f"Already exists {data.key_name}", ) except Exception as error: print(sys.exc_info()) raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Create Key Ceremony Failed", ) from error @router.get( "/ceremony/state", response_model=KeyCeremonyStateResponse, tags=[KEY_CEREMONY_ADMIN], ) def fetch_ceremony_state( request: Request, key_name: str, ) -> KeyCeremonyStateResponse: ceremony = get_key_ceremony(key_name, request.app.state.settings) return KeyCeremonyStateResponse( key_name=key_name, state=ceremony.state, guardian_status=ceremony.guardian_status, ) @router.post( "/ceremony/find", response_model=KeyCeremonyQueryResponse, tags=[KEY_CEREMONY_ADMIN] ) def find_ceremonies( request: Request, skip: int = 0, limit: int = 100, data: BaseQueryRequest = Body(...), ) -> KeyCeremonyQueryResponse: try: filter = write_json_object(data.filter) if data.filter else {} with get_repository( get_client_id(), DataCollection.KEY_CEREMONY, request.app.state.settings ) as repository: cursor = repository.find(filter, skip, limit) key_ceremonies: List[KeyCeremony] = [] for item in cursor: key_ceremonies.append(from_query(item)) return KeyCeremonyQueryResponse(key_ceremonies=key_ceremonies) except Exception as error: print(sys.exc_info()) raise 
HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="find guardians failed", ) from error @router.post("/ceremony/open", response_model=BaseResponse, tags=[KEY_CEREMONY_ADMIN]) def open_ceremony(request: Request, key_name: str) -> BaseResponse: return update_key_ceremony_state( key_name, KeyCeremonyState.OPEN, request.app.state.settings ) @router.post("/ceremony/close", response_model=BaseResponse, tags=[KEY_CEREMONY_ADMIN]) def close_ceremony(request: Request, key_name: str) -> BaseResponse: return update_key_ceremony_state( key_name, KeyCeremonyState.CLOSED, request.app.state.settings ) @router.post( "/ceremony/challenge", response_model=BaseResponse, tags=[KEY_CEREMONY_ADMIN] ) def challenge_ceremony(request: Request, key_name: str) -> BaseResponse: return update_key_ceremony_state( key_name, KeyCeremonyState.CHALLENGED, request.app.state.settings ) @router.get( "/ceremony/challenge/verify", response_model=BaseResponse, tags=[KEY_CEREMONY_ADMIN] ) def verify_ceremony_challenges(request: Request, key_name: str) -> BaseResponse: ceremony = get_key_ceremony(key_name, request.app.state.settings) challenge_guardians: List[KeyCeremonyGuardian] = [] for guardian_id, state in ceremony.guardian_status.items(): if state.backups_verified == KeyCeremonyGuardianStatus.ERROR: challenge_guardians.append(get_key_guardian(key_name, guardian_id)) if not any(challenge_guardians): return BaseResponse(message="no challenges exist") verifications: List[ElectionPartialKeyVerification] = [] for guardian in challenge_guardians: if not guardian.challenges: continue for challenge in guardian.challenges: verifications.append( verify_election_partial_key_challenge( "API", read_json_object(challenge, ElectionPartialKeyChallenge), ) ) return KeyCeremonyVerifyChallengesResponse(verifications=verifications) @router.post("/ceremony/cancel", response_model=BaseResponse, tags=[KEY_CEREMONY_ADMIN]) def cancel_ceremony(request: Request, key_name: str) -> BaseResponse: return update_key_ceremony_state( key_name, KeyCeremonyState.CANCELLED, request.app.state.settings ) @router.get( "/ceremony/joint_key", response_model=ElectionJointKeyResponse, tags=[KEY_CEREMONY_ADMIN], )
MIT License
airsplay/vokenization
xmatching/metric.py
batchwise_accuracy
python
def batchwise_accuracy(lang_output, visn_output, lang_mask):
    batch_size, lang_len, dim = lang_output.shape
    assert batch_size % 2 == 0 and batch_size == visn_output.shape[0]
    visn_output = visn_output.unsqueeze(1)
    negative_scores = (lang_output.reshape(batch_size, 1, lang_len, dim) *
                       visn_output.reshape(1, batch_size, 1, dim)).sum(-1)
    max_neg_score, max_neg_idx = negative_scores.max(1)
    pos_idx = torch.arange(0, batch_size, dtype=torch.int64).to(lang_output.device)
    correct = (pos_idx.unsqueeze(1) == max_neg_idx)
    bool_lang_mask = lang_mask.type(correct.dtype)
    correct = correct * bool_lang_mask
    correct_num = correct.sum()
    accuracy = correct_num * 1. / bool_lang_mask.sum()
    return accuracy
Calculate the accuracy of contextual word retrieval, averaged over the batch.

:param lang_output: [batch_size, max_len, hid_dim]
:param visn_output: [batch_size, hid_dim]
:param lang_mask: Int Tensor [batch_size, max_len], 1 for tokens, 0 for paddings.
:return:
https://github.com/airsplay/vokenization/blob/5601b799184ed54414872565f233e22c76f5f6f0/xmatching/metric.py#L4-L34
import torch
MIT License
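A hedged usage sketch for batchwise_accuracy with random tensors. It assumes the vokenization repository is on the Python path so xmatching.metric is importable; the shapes are made-up, and the batch size must be even per the assertion above.

import torch
from xmatching.metric import batchwise_accuracy

lang_output = torch.randn(4, 7, 64)                # [batch_size, max_len, hid_dim]
visn_output = torch.randn(4, 64)                   # [batch_size, hid_dim]
lang_mask = torch.ones(4, 7, dtype=torch.long)     # all positions are real tokens
print(batchwise_accuracy(lang_output, visn_output, lang_mask))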
rgreinho/python-cookiecutter
{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/core/version.py
detect_from_metadata
python
def detect_from_metadata(package):
    try:
        try:
            version_info = version.VersionInfo(package)
            package_version = version_info.release_string()
        except (ModuleNotFoundError, pkg_resources.DistributionNotFound):
            distribution_info = pkg_resources.get_distribution(package)
            package_version = distribution_info.version
    except Exception:
        package_version = 0
    return package_version
Detect a package version number from the metadata.

If the version number cannot be detected, the function returns 0.

:param str package: package name
:returns str: the package version number
https://github.com/rgreinho/python-cookiecutter/blob/b85dda782ed2b47b9e4a090790c70698750912a2/{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/core/version.py#L6-L25
from pbr import version
import pkg_resources
MIT License
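A hedged usage sketch for detect_from_metadata. The real module path is generated by cookiecutter, so the example assumes the function is already imported into the current namespace; the package names are examples.

print(detect_from_metadata("pip"))           # e.g. '21.3.1' when the package is installed
print(detect_from_metadata("no-such-pkg"))   # 0 when the version cannot be detected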
amzn/mxfusion
mxfusion/inference/inference.py
Inference.save
python
def save(self, zip_filename=DEFAULT_ZIP):
    mxnet_parameters, mxnet_constants, variable_constants = \
        self.params.get_serializable()
    configuration = self.get_serializable()
    graphs = [g.as_json() for g in self._graphs]
    version_dict = {"serialization_version": SERIALIZATION_VERSION}

    files_to_save = []
    objects = [graphs, mxnet_parameters, mxnet_constants,
               variable_constants, configuration, version_dict]
    ordered_filenames = [FILENAMES['graphs'], FILENAMES['mxnet_params'],
                         FILENAMES['mxnet_constants'],
                         FILENAMES['variable_constants'],
                         FILENAMES['configuration'], FILENAMES['version_file']]
    encodings = [ENCODINGS['json'], ENCODINGS['numpy'], ENCODINGS['numpy'],
                 ENCODINGS['json'], ENCODINGS['json'], ENCODINGS['json']]

    for filename, obj, encoding in zip(ordered_filenames, objects, encodings):
        if encoding == ENCODINGS['json']:
            buffer = io.StringIO()
            json.dump(obj, buffer, ensure_ascii=False, cls=ModelComponentEncoder)
        elif encoding == ENCODINGS['numpy']:
            buffer = io.BytesIO()
            np_obj = make_numpy(obj)
            np.savez(buffer, **np_obj)
        files_to_save.append((filename, buffer))

    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, "a", zipfile.ZIP_DEFLATED, False) as zip_file:
        for base_name, data in files_to_save:
            zip_file.writestr(base_name, data.getvalue())

    with open(zip_filename, 'wb') as f:
        f.write(zip_buffer.getvalue())
Saves down everything needed to reload an inference algorithm.

This method writes everything into a single zip archive, with 6 internal files.

1. version.json - This has the version of serialization used to create the zip file.
2. graphs.json - This is a networkx representation of all FactorGraphs used during Inference.
   See mxfusion.models.FactorGraph.save for more information.
3. mxnet_parameters.npz - This is a numpy zip file saved using numpy.savez(), containing one file
   for each mxnet parameter in the InferenceParameters object. Each parameter is saved in a binary
   file named by the parameter's UUID.
4. mxnet_constants.npz - The same as mxnet_parameters, except only for constant mxnet parameters.
5. variable_constants.json - Parameters file of primitive data type constants, such as ints or
   floats. I.E. { UUID : int/float}
6. configuration.json - This has other configuration related to inference such as the observation
   pattern.

:param zip_filename: Path to and name of the zip archive to save the inference method as.
:type zip_filename: str
https://github.com/amzn/mxfusion/blob/af6223e9636b055d029d136dd7ae023b210b4560/mxfusion/inference/inference.py#L255-L310
import warnings import io import json import numpy as np import mxnet as mx import zipfile from .inference_parameters import InferenceParameters from ..common.config import get_default_device, get_default_dtype from ..common.exceptions import InferenceError, SerializationError from ..util.inference import discover_shape_constants, init_outcomes from ..models import FactorGraph, Model, Posterior from ..util.serialization import ModelComponentEncoder, make_numpy, load_json_from_zip, load_parameters, FILENAMES, DEFAULT_ZIP, ENCODINGS, SERIALIZATION_VERSION class Inference(object): def __init__(self, inference_algorithm, constants=None, hybridize=False, dtype=None, context=None): self.dtype = dtype if dtype is not None else get_default_dtype() self.mxnet_context = context if context is not None else get_default_device() self._hybridize = hybridize self._graphs = inference_algorithm.graphs self._inference_algorithm = inference_algorithm self.params = InferenceParameters(constants=constants, dtype=self.dtype, context=self.mxnet_context) self._initialized = False def print_params(self): def get_class_name(graph): if isinstance(graph, Model): return "Model" elif isinstance(graph, Posterior): return "Posterior" else: return "FactorGraph" string = "" for param_uuid, param in self.params.param_dict.items(): temp = [(graph,graph[param_uuid]) for i,graph in enumerate(self._graphs) if param_uuid in graph] graph, var_param = temp[0] string += "{} in {}({}) : {} \n\n".format(var_param, get_class_name(graph), graph._uuid[:5], param.data()) return string @property def observed_variables(self): return self._inference_algorithm.observed_variables @property def observed_variable_UUIDs(self): return self._inference_algorithm.observed_variable_UUIDs @property def observed_variable_names(self): return self._inference_algorithm.observed_variable_names @property def graphs(self): return self._graphs @property def inference_algorithm(self): return self._inference_algorithm def create_executor(self): infr = self._inference_algorithm.create_executor(data_def=self.observed_variable_UUIDs, params=self.params, var_ties=self.params.var_ties) if self._hybridize: infr.hybridize() infr.initialize(ctx=self.mxnet_context) return infr def _initialize_params(self): self.params.initialize_params(self._graphs, self.observed_variable_UUIDs) def initialize(self, **kw): if not self._initialized: data = [kw[v] for v in self.observed_variable_names] if len(data) > 0: if not all(isinstance(d, type(d)) for d in data): raise InferenceError("All items in the keywords must be of the same type. 
" "Either all shapes or all data objects.") if isinstance(data[0], (tuple, list)): data_shapes = {i: d for i, d in zip(self.observed_variable_UUIDs, data)} elif isinstance(data[0], mx.nd.ndarray.NDArray): data_shapes = {i: d.shape for i, d in zip(self.observed_variable_UUIDs, data)} else: raise InferenceError("Keywords not of type mx.nd.NDArray or tuple/list " "for shapes passed into initialization.") shape_constants = discover_shape_constants(data_shapes, self._graphs) self.params.update_constants(shape_constants) self._initialize_params() self._initialized = True else: warnings.warn("Trying to initialize the inference twice, skipping.") def run(self, **kwargs): data = [kwargs[v] for v in self.observed_variable_names] self.initialize(**kwargs) executor = self.create_executor() return executor(mx.nd.zeros(1, ctx=self.mxnet_context), *data) def set_initializer(self): pass def load(self, zip_filename=DEFAULT_ZIP): zip_version = load_json_from_zip(zip_filename, FILENAMES['version_file']) if zip_version['serialization_version'] != SERIALIZATION_VERSION: raise SerializationError("Serialization version of saved inference \ and running code are note the same.") with zipfile.ZipFile(zip_filename, 'r') as zip_file: mxnet_parameters = load_parameters(FILENAMES['mxnet_params'], zip_file, context=self.mxnet_context) mxnet_constants = load_parameters(FILENAMES['mxnet_constants'], zip_file, context=self.mxnet_context) variable_constants = load_json_from_zip(zip_filename, FILENAMES['variable_constants']) from ..util.serialization import ModelComponentDecoder graphs_list = load_json_from_zip(zip_filename, FILENAMES['graphs'], decoder=ModelComponentDecoder) graphs = FactorGraph.load_graphs(graphs_list) primary_model = graphs[0] secondary_graphs = graphs[1:] self._uuid_map = FactorGraph.reconcile_graphs( current_graphs=self.graphs, primary_previous_graph=primary_model, secondary_previous_graphs=secondary_graphs) new_parameters = InferenceParameters.load_parameters( uuid_map=self._uuid_map, mxnet_parameters=mxnet_parameters, variable_constants=variable_constants, mxnet_constants=mxnet_constants, current_params=self.params._params) self.params = new_parameters configuration = load_json_from_zip(zip_filename, FILENAMES['configuration']) self.load_configuration(configuration, self._uuid_map) def load_configuration(self, configuration, uuid_map): pass def get_serializable(self): return {'observed': self.observed_variable_UUIDs}
Apache License 2.0
abatilo/typed-json-dataclass
typed_json_dataclass/typed_json_dataclass.py
TypedJsonMixin._validate_list_types
python
def _validate_list_types(self, actual_value, expected_type): if isinstance(actual_value, list) and hasattr(expected_type, '__args__'): nested_type = expected_type.__args__[0] if isinstance(nested_type, typing.ForwardRef): type_for_forward_ref = str(nested_type)[12:-2] return all( type_for_forward_ref == v.__class__.__name__ for v in actual_value ) return all( self._validate_list_types(v, nested_type) for v in actual_value ) else: return isinstance(actual_value, expected_type)
Recursively check nested lists like List[List[str]] and verify that all elements in the list are of a uniform type.
https://github.com/abatilo/typed-json-dataclass/blob/7d22aeeb557032e8c09b1ef67c26293992b17fe7/typed_json_dataclass/typed_json_dataclass.py#L181-L203
import json import typing from dataclasses import InitVar, MISSING, asdict, fields, is_dataclass from enum import Enum from warnings import warn from typed_json_dataclass.utils import to_camel, to_snake, recursive_rename class MappingMode(Enum): SnakeCase = 1 CamelCase = 2 NoMap = 3 class TypedJsonMixin: def __post_init__(self): for field_def in fields(self): field_name = field_def.name field_value = getattr(self, field_name) actual_type = type(field_value) if hasattr(field_def.type, '__origin__'): expected_type = field_def.type.__origin__ else: expected_type = field_def.type if field_value is not None: class_name = self.__class__.__name__ if (class_name == expected_type and isinstance(expected_type, str)): if actual_type != self.__class__: raise TypeError((f'{class_name}.{field_name} was ' 'defined as a <class ' f"'{expected_type}'>, " f'but we found a {actual_type} ' 'instead')) else: if expected_type == typing.Union: possible_types = field_def.type.__args__ matches = (isinstance(field_value, possible_type) for possible_type in possible_types) if not any(matches): raise TypeError((f'{class_name}.{field_name} was ' 'defined to be any of: ' f'{possible_types} but was found ' f'to be {actual_type} instead')) elif (isinstance(field_value, expected_type) and isinstance(field_value, list)): if not hasattr(field_def.type, '__args__'): raise TypeError((f'{class_name}.{field_name} was ' f'defined as a {actual_type}, ' 'but you must use ' 'typing.List[type] ' 'instead')) expected_element_type = field_def.type.__args__[0] if isinstance(expected_element_type, typing.TypeVar): raise TypeError((f'{class_name}.{field_name} was ' f'defined as a {actual_type}, ' 'but is missing information ' 'about the' ' type of the elements inside ' 'it')) if not self._ensure_no_native_collections( expected_element_type ): raise TypeError(((f'{class_name}.{field_name} was ' 'detected to use a native ' 'Python ' 'collection in its type ' 'definition. ' 'We should only use ' 'typing.List[] ' 'for these'))) for i, element in enumerate(field_value): if isinstance(element, dict): if not element: raise TypeError(((f'{class_name}.' f'{field_name} ' 'was found to have an ' 'empty dictionary. An ' 'empty ' 'dictionary will not ' 'properly instantiate a ' 'nested object'))) getattr( self, field_name )[i] = expected_element_type(**element) if not self._validate_list_types( field_value, field_def.type ): raise TypeError((f'{class_name}.{field_name} is ' f'{field_value} which does not ' 'match ' f'{field_def.type}. ' 'Unfortunately, ' 'we are unable to infer the ' 'explicit ' f'type of {class_name}.' f'{field_name}')) elif not isinstance(field_value, expected_type): if isinstance(field_value, dict): if not self._ensure_no_native_collections( expected_type ): raise TypeError((f'{class_name}.{field_name} ' 'was ' 'detected to use a native ' 'Python ' 'dict in its type ' 'definition. 
' 'We should only use custom ' 'objects for these')) try: setattr( self, field_name, expected_type(**field_value) ) except TypeError: raise TypeError(f'{class_name}.{field_name} ' 'is ' 'expected to be ' f'{expected_type}, but value ' f'{field_value} is a dict ' 'with unexpected keys') else: raise TypeError(f'{class_name}.{field_name} is ' 'expected to be ' f'{expected_type}, but value ' f'{field_value} with ' f'type {actual_type} was found ' 'instead') def _ensure_no_native_collections(self, expected_type): if hasattr(expected_type, '__origin__'): return self._ensure_no_native_collections( expected_type.__args__[0] ) else: return expected_type not in {dict, list, set, tuple}
MIT License
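A minimal usage sketch of the mixin above (assumes the typed_json_dataclass package is installed; the dataclass and field names are illustrative): __post_init__ runs the recursive list validation, so a mismatched element type raises TypeError.

from dataclasses import dataclass
from typing import List

from typed_json_dataclass import TypedJsonMixin


@dataclass
class Person(TypedJsonMixin):
    name: str
    aliases: List[str]


person = Person(name='Ada', aliases=['countess'])  # passes validation
print(person)

try:
    Person(name='Ada', aliases=[1, 2])  # ints instead of str -> TypeError
except TypeError as error:
    print(error)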
open-catalyst-project/ocp
ocpmodels/preprocessing/atoms_to_graphs.py
AtomsToGraphs.convert
python
def convert( self, atoms, ): atomic_numbers = torch.Tensor(atoms.get_atomic_numbers()) positions = torch.Tensor(atoms.get_positions()) cell = torch.Tensor(atoms.get_cell()).view(1, 3, 3) natoms = positions.shape[0] data = Data( cell=cell, pos=positions, atomic_numbers=atomic_numbers, natoms=natoms, ) if self.r_edges: split_idx_dist = self._get_neighbors_pymatgen(atoms) edge_index, edge_distances, cell_offsets = self._reshape_features( *split_idx_dist ) data.edge_index = edge_index data.cell_offsets = cell_offsets if self.r_energy: energy = atoms.get_potential_energy(apply_constraint=False) data.y = energy if self.r_forces: forces = torch.Tensor(atoms.get_forces(apply_constraint=False)) data.force = forces if self.r_distances and self.r_edges: data.distances = edge_distances if self.r_fixed: fixed_idx = torch.zeros(natoms) if hasattr(atoms, "constraints"): from ase.constraints import FixAtoms for constraint in atoms.constraints: if isinstance(constraint, FixAtoms): fixed_idx[constraint.index] = 1 data.fixed = fixed_idx return data
Convert a single atomic structure to a graph. Args: atoms (ase.atoms.Atoms): An ASE atoms object. Returns: data (torch_geometric.data.Data): A torch geometric data object with edge_index, positions, atomic_numbers, and optionally, energy, forces, and distances. Optional properties can be included by setting r_property=True when constructing the class.
https://github.com/open-catalyst-project/ocp/blob/1044e311182c1120c6e6d137ce6db3f445148973/ocpmodels/preprocessing/atoms_to_graphs.py#L121-L178
import ase.db.sqlite import ase.io.trajectory import numpy as np import torch from torch_geometric.data import Data from ocpmodels.common.utils import collate try: from pymatgen.io.ase import AseAtomsAdaptor except Exception: pass try: shell = get_ipython().__class__.__name__ if shell == "ZMQInteractiveShell": from tqdm.notebook import tqdm else: from tqdm import tqdm except NameError: from tqdm import tqdm class AtomsToGraphs: def __init__( self, max_neigh=200, radius=6, r_energy=False, r_forces=False, r_distances=False, r_edges=True, r_fixed=True, ): self.max_neigh = max_neigh self.radius = radius self.r_energy = r_energy self.r_forces = r_forces self.r_distances = r_distances self.r_fixed = r_fixed self.r_edges = r_edges def _get_neighbors_pymatgen(self, atoms): struct = AseAtomsAdaptor.get_structure(atoms) _c_index, _n_index, _offsets, n_distance = struct.get_neighbor_list( r=self.radius, numerical_tol=0, exclude_self=True ) _nonmax_idx = [] for i in range(len(atoms)): idx_i = (_c_index == i).nonzero()[0] idx_sorted = np.argsort(n_distance[idx_i])[: self.max_neigh] _nonmax_idx.append(idx_i[idx_sorted]) _nonmax_idx = np.concatenate(_nonmax_idx) _c_index = _c_index[_nonmax_idx] _n_index = _n_index[_nonmax_idx] n_distance = n_distance[_nonmax_idx] _offsets = _offsets[_nonmax_idx] return _c_index, _n_index, n_distance, _offsets def _reshape_features(self, c_index, n_index, n_distance, offsets): edge_index = torch.LongTensor(np.vstack((n_index, c_index))) edge_distances = torch.FloatTensor(n_distance) cell_offsets = torch.LongTensor(offsets) nonzero = torch.where(edge_distances >= 1e-8)[0] edge_index = edge_index[:, nonzero] edge_distances = edge_distances[nonzero] cell_offsets = cell_offsets[nonzero] return edge_index, edge_distances, cell_offsets
MIT License
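A short sketch of how the converter above can be driven (assumes ase, pymatgen and torch_geometric are installed; the copper cell is only an illustrative structure):

from ase.build import bulk

from ocpmodels.preprocessing.atoms_to_graphs import AtomsToGraphs

atoms = bulk('Cu', 'fcc', a=3.6, cubic=True)
a2g = AtomsToGraphs(max_neigh=50, radius=6, r_energy=False, r_forces=False)
data = a2g.convert(atoms)  # torch_geometric Data with pos, atomic_numbers, edge_index
print(data.natoms, data.edge_index.shape)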
getavalon/core
avalon/vendor/requests/sessions.py
SessionRedirectMixin.resolve_redirects
python
def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, **adapter_kwargs): hist = [] url = self.get_redirect_target(resp) while url: prepared_request = req.copy() hist.append(resp) resp.history = hist[1:] try: resp.content except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) resp.close() if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url) parsed = urlparse(url) url = parsed.geturl() if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) if resp.is_permanent_redirect and req.url != prepared_request.url: self.redirect_cache[req.url] = prepared_request.url self.rebuild_method(prepared_request, resp) if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) merge_cookies(prepared_request._cookies, self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) rewindable = ( prepared_request._body_position is not None and ('Content-Length' in headers or 'Transfer-Encoding' in headers) ) if rewindable: rewind_body(prepared_request) req = prepared_request resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) url = self.get_redirect_target(resp) yield resp
Receives a Response. Returns a generator of Responses.
https://github.com/getavalon/core/blob/31e8cb4760e00e3db64443f6f932b7fd8e96d41d/avalon/vendor/requests/sessions.py#L116-L221
import os import platform import time from collections import Mapping from datetime import timedelta from .auth import _basic_auth_str from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse from .cookies import ( cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from .hooks import default_hooks, dispatch_hook from ._internal_utils import to_native_string from .utils import to_key_val_list, default_headers from .exceptions import ( TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) from .packages.urllib3._collections import RecentlyUsedContainer from .structures import CaseInsensitiveDict from .adapters import HTTPAdapter from .utils import ( requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, get_auth_from_url, rewind_body ) from .status_codes import codes from .models import REDIRECT_STATI REDIRECT_CACHE_SIZE = 1000 if platform.system() == 'Windows': try: preferred_clock = time.perf_counter except AttributeError: preferred_clock = time.clock else: preferred_clock = time.time def merge_setting(request_setting, session_setting, dict_class=OrderedDict): if session_setting is None: return request_setting if request_setting is None: return session_setting if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) none_keys = [k for (k, v) in merged_setting.items() if v is None] for key in none_keys: del merged_setting[key] return merged_setting def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): if session_hooks is None or session_hooks.get('response') == []: return request_hooks if request_hooks is None or request_hooks.get('response') == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) class SessionRedirectMixin(object): def get_redirect_target(self, resp): if resp.is_redirect: location = resp.headers['location'] if is_py3: location = location.encode('latin1') return to_native_string(location, 'utf8') return None
MIT License
python-openxml/python-docx
docx/oxml/table.py
CT_Row.tr_idx
python
def tr_idx(self): return self.getparent().tr_lst.index(self)
The index of this ``<w:tr>`` element within its parent ``<w:tbl>`` element.
https://github.com/python-openxml/python-docx/blob/36cac78de080d412e9e50d56c2784e33655cad59/docx/oxml/table.py#L54-L59
from __future__ import ( absolute_import, division, print_function, unicode_literals ) from . import parse_xml from ..enum.table import WD_CELL_VERTICAL_ALIGNMENT, WD_ROW_HEIGHT_RULE from ..exceptions import InvalidSpanError from .ns import nsdecls, qn from ..shared import Emu, Twips from .simpletypes import ( ST_Merge, ST_TblLayoutType, ST_TblWidth, ST_TwipsMeasure, XsdInt ) from .xmlchemy import ( BaseOxmlElement, OneAndOnlyOne, OneOrMore, OptionalAttribute, RequiredAttribute, ZeroOrOne, ZeroOrMore ) class CT_Height(BaseOxmlElement): val = OptionalAttribute('w:val', ST_TwipsMeasure) hRule = OptionalAttribute('w:hRule', WD_ROW_HEIGHT_RULE) class CT_Row(BaseOxmlElement): tblPrEx = ZeroOrOne('w:tblPrEx') trPr = ZeroOrOne('w:trPr') tc = ZeroOrMore('w:tc') def tc_at_grid_col(self, idx): grid_col = 0 for tc in self.tc_lst: if grid_col == idx: return tc grid_col += tc.grid_span if grid_col > idx: raise ValueError('no cell on grid column %d' % idx) raise ValueError('index out of bounds') @property
MIT License
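A small sketch of where tr_idx surfaces in practice (assumes python-docx is installed; the table shape is arbitrary): the underlying <w:tr> oxml element can report its own position inside the parent <w:tbl>.

from docx import Document

document = Document()
table = document.add_table(rows=3, cols=2)
tr = table.rows[2]._tr  # underlying CT_Row element
print(tr.tr_idx)        # -> 2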
dongjunlee/quantified-self
kino/background/schedule/__init__.py
cancel_job
python
def cancel_job(job): default_scheduler.cancel_job(job)
Delete a scheduled job on the default scheduler :param job: The job to be unscheduled
https://github.com/dongjunlee/quantified-self/blob/226aa009008e30d76e9253fe47fbe862766bdc83/kino/background/schedule/__init__.py#L545-L550
import collections import datetime import functools import logging import time import threading logger = logging.getLogger("schedule") class CancelJob(object): pass class Scheduler(object): def __init__(self): self.jobs = [] def run_pending(self): runnable_jobs = (job for job in self.jobs if job.should_run) for job in sorted(runnable_jobs): self._run_job(job) def run_continuously(self, interval=1): cease_continuous_run = threading.Event() class ScheduleThread(threading.Thread): @classmethod def run(cls): while not cease_continuous_run.is_set(): self.run_pending() time.sleep(interval) continuous_thread = ScheduleThread() continuous_thread.daemon = True continuous_thread.start() return cease_continuous_run def run_all(self, delay_seconds=0): logger.info( "Running *all* %i jobs with %is delay inbetween", len(self.jobs), delay_seconds, ) for job in self.jobs[:]: self._run_job(job) time.sleep(delay_seconds) def clear(self, tag=None): if tag is None: del self.jobs[:] else: self.jobs[:] = (job for job in self.jobs if tag not in job.tags) def cancel_job(self, job): try: self.jobs.remove(job) except ValueError: pass def every(self, interval=1): job = Job(interval) self.jobs.append(job) return job def _run_job(self, job): ret = job.run() if isinstance(ret, CancelJob) or ret is CancelJob: self.cancel_job(job) @property def next_run(self): if not self.jobs: return None return min(self.jobs).next_run @property def idle_seconds(self): return (self.next_run - datetime.datetime.now()).total_seconds() class Job(object): def __init__(self, interval): self.interval = interval self.job_func = None self.unit = None self.at_time = None self.last_run = None self.next_run = None self.period = None self.start_day = None self.tags = set() def __lt__(self, other): return self.next_run < other.next_run def __repr__(self): def format_time(t): return t.strftime("%Y-%m-%d %H:%M:%S") if t else "[never]" timestats = "(last run: %s, next run: %s)" % ( format_time(self.last_run), format_time(self.next_run), ) if hasattr(self.job_func, "__name__"): job_func_name = self.job_func.__name__ else: job_func_name = repr(self.job_func) args = [repr(x) for x in self.job_func.args] kwargs = ["%s=%s" % (k, repr(v)) for k, v in self.job_func.keywords.items()] call_repr = job_func_name + "(" + ", ".join(args + kwargs) + ")" if self.at_time is not None: return "Every %s %s at %s do %s %s" % ( self.interval, self.unit[:-1] if self.interval == 1 else self.unit, self.at_time, call_repr, timestats, ) else: return "Every %s %s do %s %s" % ( self.interval, self.unit[:-1] if self.interval == 1 else self.unit, call_repr, timestats, ) @property def second(self): assert self.interval == 1, "Use seconds instead of second" return self.seconds @property def seconds(self): self.unit = "seconds" return self @property def minute(self): assert self.interval == 1, "Use minutes instead of minute" return self.minutes @property def minutes(self): self.unit = "minutes" return self @property def hour(self): assert self.interval == 1, "Use hours instead of hour" return self.hours @property def hours(self): self.unit = "hours" return self @property def day(self): assert self.interval == 1, "Use days instead of day" return self.days @property def days(self): self.unit = "days" return self @property def week(self): assert self.interval == 1, "Use weeks instead of week" return self.weeks @property def weeks(self): self.unit = "weeks" return self @property def monday(self): assert self.interval == 1, "Use mondays instead of monday" self.start_day = "monday" return 
self.weeks @property def tuesday(self): assert self.interval == 1, "Use tuesdays instead of tuesday" self.start_day = "tuesday" return self.weeks @property def wednesday(self): assert self.interval == 1, "Use wedesdays instead of wednesday" self.start_day = "wednesday" return self.weeks @property def thursday(self): assert self.interval == 1, "Use thursday instead of thursday" self.start_day = "thursday" return self.weeks @property def friday(self): assert self.interval == 1, "Use fridays instead of friday" self.start_day = "friday" return self.weeks @property def saturday(self): assert self.interval == 1, "Use saturdays instead of saturday" self.start_day = "saturday" return self.weeks @property def sunday(self): assert self.interval == 1, "Use sundays instead of sunday" self.start_day = "sunday" return self.weeks def tag(self, *tags): if any([not isinstance(tag, collections.Hashable) for tag in tags]): raise TypeError("Every tag should be hashable") if not all(isinstance(tag, collections.Hashable) for tag in tags): raise TypeError("Tags must be hashable") self.tags.update(tags) return self def at(self, time_str): assert self.unit in ("days", "hours") or self.start_day hour, minute = time_str.split(":") minute = int(minute) if self.unit == "days" or self.start_day: hour = int(hour) assert 0 <= hour <= 23 elif self.unit == "hours": hour = 0 assert 0 <= minute <= 59 self.at_time = datetime.time(hour, minute) return self def do(self, job_func, *args, **kwargs): self.job_func = functools.partial(job_func, *args, **kwargs) try: functools.update_wrapper(self.job_func, job_func) except AttributeError: pass self._schedule_next_run() return self @property def should_run(self): return datetime.datetime.now() >= self.next_run def run(self): logger.info("Running job %s", self) ret = self.job_func() self.last_run = datetime.datetime.now() self._schedule_next_run() return ret def _schedule_next_run(self): assert self.unit in ("seconds", "minutes", "hours", "days", "weeks") self.period = datetime.timedelta(**{self.unit: self.interval}) self.next_run = datetime.datetime.now() + self.period if self.start_day is not None: assert self.unit == "weeks" weekdays = ( "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday", ) assert self.start_day in weekdays weekday = weekdays.index(self.start_day) days_ahead = weekday - self.next_run.weekday() if days_ahead <= 0: days_ahead += 7 self.next_run += datetime.timedelta(days_ahead) - self.period if self.at_time is not None: assert self.unit in ("days", "hours") or self.start_day is not None kwargs = { "minute": self.at_time.minute, "second": self.at_time.second, "microsecond": 0, } if self.unit == "days" or self.start_day is not None: kwargs["hour"] = self.at_time.hour self.next_run = self.next_run.replace(**kwargs) if not self.last_run: now = datetime.datetime.now() if ( self.unit == "days" and self.at_time > now.time() and self.interval == 1 ): self.next_run = self.next_run - datetime.timedelta(days=1) elif self.unit == "hours" and self.at_time.minute > now.minute: self.next_run = self.next_run - datetime.timedelta(hours=1) if self.start_day is not None and self.at_time is not None: if (self.next_run - datetime.datetime.now()).days >= 7: self.next_run -= self.period default_scheduler = Scheduler() jobs = default_scheduler.jobs def every(interval=1): return default_scheduler.every(interval) def run_continuously(interval=1): return default_scheduler.run_continuously(interval) def run_pending(): default_scheduler.run_pending() def 
run_all(delay_seconds=0): default_scheduler.run_all(delay_seconds=delay_seconds) def clear(tag=None): default_scheduler.clear(tag)
MIT License
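A minimal sketch of driving the default scheduler defined above (the import path mirrors the vendored location kino/background/schedule, and the job body is a placeholder):

import time

from kino.background import schedule


def heartbeat():
    print('still alive')


schedule.every(10).seconds.do(heartbeat)
schedule.every().day.at('10:30').do(heartbeat)

for _ in range(3):  # or loop forever inside a worker thread
    schedule.run_pending()
    time.sleep(1)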
voidful/nlprep
nlprep/file_utils.py
_session_with_backoff
python
def _session_with_backoff() -> requests.Session: session = requests.Session() retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504]) session.mount("http://", HTTPAdapter(max_retries=retries)) session.mount("https://", HTTPAdapter(max_retries=retries)) return session
We ran into an issue where http requests to s3 were timing out, possibly because we were making too many requests too quickly. This helper function returns a requests session that has retry-with-backoff built in. See <https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library>.
https://github.com/voidful/nlprep/blob/df6f76b72aa93a16f0dc483f5515eb1b5248d4da/nlprep/file_utils.py#L271-L284
import glob import os import logging import tempfile import json from os import PathLike from urllib.parse import urlparse from pathlib import Path from typing import Optional, Tuple, Union, IO, Callable, Set, List, Iterator, Iterable from hashlib import sha256 from functools import wraps from zipfile import ZipFile, is_zipfile import tarfile import shutil import boto3 import botocore from botocore.exceptions import ClientError, EndpointConnectionError from filelock import FileLock import requests from requests.adapters import HTTPAdapter from requests.exceptions import ConnectionError from requests.packages.urllib3.util.retry import Retry from tqdm import tqdm logger = logging.getLogger(__name__) CACHE_ROOT = Path(os.getenv("NLPREP_CACHE_ROOT", Path.home() / ".nlprep")) CACHE_DIRECTORY = str(CACHE_ROOT / "cache") DEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / "datasets") DATASET_CACHE = CACHE_DIRECTORY if os.path.exists(DEPRECATED_CACHE_DIRECTORY): logger = logging.getLogger(__name__) logger.warning( f"Deprecated cache directory found ({DEPRECATED_CACHE_DIRECTORY}). " f"Please remove this directory from your system to free up space." ) def url_to_filename(url: str, etag: str = None) -> str: url_bytes = url.encode("utf-8") url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = sha256(etag_bytes) filename += "." + etag_hash.hexdigest() return filename def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]: if cache_dir is None: cache_dir = CACHE_DIRECTORY cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise FileNotFoundError("file {} not found".format(cache_path)) meta_path = cache_path + ".json" if not os.path.exists(meta_path): raise FileNotFoundError("file {} not found".format(meta_path)) with open(meta_path) as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] return url, etag def cached_path( url_or_filename: Union[str, PathLike], cache_dir: Union[str, Path] = None, extract_archive: bool = False, force_extract: bool = False, ) -> str: if cache_dir is None: cache_dir = CACHE_DIRECTORY if isinstance(url_or_filename, PathLike): url_or_filename = str(url_or_filename) exclamation_index = url_or_filename.find("!") if extract_archive and exclamation_index >= 0: archive_path = url_or_filename[:exclamation_index] archive_path = cached_path(archive_path, cache_dir, True, force_extract) if not os.path.isdir(archive_path): raise ValueError( f"{url_or_filename} uses the ! syntax, but does not specify an archive file." 
) return os.path.join(archive_path, url_or_filename[exclamation_index + 1:]) url_or_filename = os.path.expanduser(url_or_filename) parsed = urlparse(url_or_filename) file_path: str extraction_path: Optional[str] = None if parsed.scheme in ("http", "https", "s3"): file_path = get_from_cache(url_or_filename, cache_dir) if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)): extraction_path = file_path + "-extracted" elif os.path.exists(url_or_filename): file_path = url_or_filename if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)): extraction_dir, extraction_name = os.path.split(file_path) extraction_name = extraction_name.replace(".", "-") + "-extracted" extraction_path = os.path.join(extraction_dir, extraction_name) elif parsed.scheme == "": raise FileNotFoundError("file {} not found".format(url_or_filename)) else: raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) if extraction_path is not None: if os.path.isdir(extraction_path) and os.listdir(extraction_path) and not force_extract: return extraction_path with FileLock(file_path + ".lock"): shutil.rmtree(extraction_path, ignore_errors=True) os.makedirs(extraction_path) if is_zipfile(file_path): with ZipFile(file_path, "r") as zip_file: zip_file.extractall(extraction_path) zip_file.close() else: tar_file = tarfile.open(file_path) tar_file.extractall(extraction_path) tar_file.close() return extraction_path return file_path def is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool: if url_or_filename is None: return False url_or_filename = os.path.expanduser(str(url_or_filename)) parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https", "s3") or os.path.exists(url_or_filename) def _split_s3_path(url: str) -> Tuple[str, str]: parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def _s3_request(func: Callable): @wraps(func) def wrapper(url: str, *args, **kwargs): try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise FileNotFoundError("file {} not found".format(url)) else: raise return wrapper def _get_s3_resource(): session = boto3.session.Session() if session.get_credentials() is None: s3_resource = session.resource( "s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED) ) else: s3_resource = session.resource("s3") return s3_resource @_s3_request def _s3_etag(url: str) -> Optional[str]: s3_resource = _get_s3_resource() bucket_name, s3_path = _split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @_s3_request def _s3_get(url: str, temp_file: IO) -> None: s3_resource = _get_s3_resource() bucket_name, s3_path = _split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
Apache License 2.0
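A hedged sketch of the caching helpers in this module (assumes nlprep is installed; the URLs are placeholders): the first call downloads into ~/.nlprep/cache, later calls reuse the cached copy, and archives can be extracted in place.

from nlprep.file_utils import cached_path

local_file = cached_path('https://example.com/corpora/sample.txt')
print(local_file)

local_dir = cached_path('https://example.com/corpora/sample.tar.gz', extract_archive=True)
print(local_dir)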
chubin/pyphoon
pyphoon/__init__.py
putmoon
python
def putmoon(datetimeobj, numlines, atfiller, notext, lang, hemisphere, hemisphere_warning): output = [""] def putchar(char): output[0] += char def fputs(string): output[0] += string if not lang: try: lang = locale.getdefaultlocale()[0] except IndexError: lang = 'en' if lang not in LITS and '_' in lang: lang = lang.split('_', 1)[0] lits = LITS.get(lang, LITS.get('en')) qlits = [x + " +" for x in lits[:4]] nqlits = [x + " -" for x in lits[:4]] atflrlen = len(atfiller) juliandate = unix_to_julian(datetimeobj) pctphase, _, _, _, _, _, _ = phase(juliandate) if hemisphere == 'south': pctphase = 1 - pctphase angphase = pctphase * 2.0 * PI mcap = -cos(angphase) yrad = numlines / 2.0 xrad = yrad / ASPECTRATIO midlin = int(numlines / 2) phases, which = phasehunt2(juliandate) atflridx = 0 lin = 0 while lin < numlines: ycoord = lin + 0.5 - yrad xright = xrad * sqrt(1.0 - (ycoord * ycoord) / (yrad * yrad)) xleft = -xright if PI > angphase >= 0.0: xleft = mcap * xleft else: xright = mcap * xright colleft = int(xrad + 0.5) + int(xleft + 0.5) colright = int(xrad + 0.5) + int(xright + 0.5) col = 0 while col < colleft: putchar(' ') col += 1 while col <= colright: if hemisphere == 'north': if numlines == 6: char = background6[lin][col] elif numlines == 18: char = background18[lin][col] elif numlines == 19: char = background19[lin][col] elif numlines == 21: char = background21[lin][col] elif numlines == 22: char = background22[lin][col] elif numlines == 23: char = background23[lin][col] elif numlines == 24: char = background24[lin][col] elif numlines == 29: char = background29[lin][col] elif numlines == 32: char = background32[lin][col] else: char = '@' else: if numlines == 6: char = background6[-1-lin][-col] elif numlines == 18: char = background18[-1-lin][-col] elif numlines == 19: char = background19[-1-lin][-col] elif numlines == 21: char = background21[-1-lin][-col] elif numlines == 22: char = background22[-1-lin][-col] elif numlines == 23: char = background23[-1-lin][-col] elif numlines == 24: char = background24[-1-lin][-col] elif numlines == 29: char = background29[-1-lin][-col] elif numlines == 32: char = background32[-1-lin][-col] else: char = '@' char = rotate(char) if char != '@': putchar(char) else: putchar(atfiller[atflridx]) atflridx = (atflridx + 1) % atflrlen col += 1 if (numlines <= 27 and not notext): fputs("\t ") if lin == midlin - 2: fputs(qlits[int(which[0] * 4.0 + 0.001)]) elif lin == midlin - 1: fputs(putseconds(int((juliandate - phases[0]) * SECSPERDAY))) elif lin == midlin: fputs(nqlits[int(which[1] * 4.0 + 0.001)]) elif lin == midlin + 1: fputs(putseconds(int((phases[1] - juliandate) * SECSPERDAY))) elif lin == midlin + 2 and hemisphere_warning != 'None': if len(lits) >= 6: north_south = lits[4:6] else: north_south = LITS.get('en')[4:6] msg = north_south[hemisphere == 'south'] fputs(f'[{msg}]') putchar('\n') lin += 1 return output[0]
Print the moon
https://github.com/chubin/pyphoon/blob/131fa1d6c9cdf0a5580c6aa80132c09a7b6513eb/pyphoon/__init__.py#L66-L211
import sys import os import argparse import datetime import time import locale from math import cos, sqrt import dateutil.parser from pyphoon.lib.astro import unix_to_julian, phase, phasehunt2 from pyphoon.lib.moons import background6, background18, background19, background21, background22 from pyphoon.lib.moons import background23, background24, background29, background32 from pyphoon.lib.rotate import rotate from pyphoon.lib.translations import LITS def fatal(message): print(message, file=sys.stderr) sys.exit(1) SECSPERMINUTE = 60 SECSPERHOUR = (60 * SECSPERMINUTE) SECSPERDAY = (24 * SECSPERHOUR) PI = 3.1415926535897932384626433 DEFAULTNUMLINES = 23 DEFAULTNOTEXT = False DEFAULTHEMISPHERE = 'north' QUARTERLITLEN = 16 QUARTERLITLENPLUSONE = 17 ASPECTRATIO = 0.5 def putseconds(secs): days = int(secs / SECSPERDAY) secs = int(secs - days * SECSPERDAY) hours = int(secs / SECSPERHOUR) secs = int(secs - hours * SECSPERHOUR) minutes = int(secs / SECSPERMINUTE) secs = int(secs - minutes * SECSPERMINUTE) return f"{days:d} {hours:2d}:{minutes:02d}:{secs:02d}"
MIT License
noxdafox/clipspy
clips/facts.py
Template.facts
python
def facts(self) -> iter: fact = lib.GetNextFactInTemplate(self._ptr(), ffi.NULL) while fact != ffi.NULL: yield new_fact(self._ptr(), fact) fact = lib.GetNextFactInTemplate(self._ptr(), fact)
Iterate over the asserted Facts belonging to this Template.
https://github.com/noxdafox/clipspy/blob/a317964dc86755619d84b9adf4008d62663889ce/clips/facts.py#L279-L285
import os from itertools import chain import clips from clips.modules import Module from clips.common import PutSlotError, PUT_SLOT_ERROR from clips.common import environment_builder, environment_modifier from clips.common import CLIPSError, SaveMode, TemplateSlotDefaultType from clips._clips import lib, ffi class Fact: __slots__ = '_env', '_fact' def __init__(self, env: ffi.CData, fact: ffi.CData): self._env = env self._fact = fact lib.RetainFact(self._fact) def __del__(self): try: lib.ReleaseFact(self._env, self._fact) except (AttributeError, TypeError): pass def __hash__(self): return hash(self._fact) def __eq__(self, fact): return self._fact == fact._fact def __str__(self): return ' '.join(fact_pp_string(self._env, self._fact).split()) def __repr__(self): string = ' '.join(fact_pp_string(self._env, self._fact).split()) return "%s: %s" % (self.__class__.__name__, string) @property def index(self) -> int: return lib.FactIndex(self._fact) @property def exists(self) -> bool: return lib.FactExistp(self._fact) @property def template(self) -> 'Template': template = lib.FactDeftemplate(self._fact) name = ffi.string(lib.DeftemplateName(template)).decode() return Template(self._env, name) def retract(self): ret = lib.Retract(self._fact) if ret != lib.RE_NO_ERROR: raise CLIPSError(self._env, code=ret) class ImpliedFact(Fact): def __iter__(self): return chain(slot_value(self._env, self._fact)) def __len__(self): return len(slot_value(self._env, self._fact)) def __getitem__(self, index): return slot_value(self._env, self._fact)[index] class TemplateFact(Fact): __slots__ = '_env', '_fact' def __init__(self, env: ffi.CData, fact: ffi.CData): super().__init__(env, fact) def __iter__(self): return chain(slot_values(self._env, self._fact)) def __len__(self): slots = slot_values(self._env, self._fact) return len(tuple(slots)) def __getitem__(self, key): try: return slot_value(self._env, self._fact, slot=str(key)) except CLIPSError as error: if error.code == lib.GSE_SLOT_NOT_FOUND_ERROR: raise KeyError("'%s'" % key) else: raise error def modify_slots(self, **slots): modifier = environment_modifier(self._env, 'fact') ret = lib.FMSetFact(modifier, self._fact) if ret != lib.FME_NO_ERROR: raise CLIPSError(self._env, code=ret) for slot, slot_val in slots.items(): value = clips.values.clips_value(self._env, value=slot_val) ret = lib.FMPutSlot(modifier, str(slot).encode(), value) if ret != PutSlotError.PSE_NO_ERROR: raise PUT_SLOT_ERROR[ret](slot) if lib.FMModify(modifier) is ffi.NULL: raise CLIPSError(self._env, code=lib.FBError(self._env)) class Template: __slots__ = '_env', '_name' def __init__(self, env: ffi.CData, name: str): self._env = env self._name = name.encode() def __hash__(self): return hash(self._ptr()) def __eq__(self, tpl): return self._ptr() == tpl._ptr() def __str__(self): string = lib.DeftemplatePPForm(self._ptr()) string = ffi.string(string).decode() if string != ffi.NULL else '' return ' '.join(string.split()) def __repr__(self): string = lib.DeftemplatePPForm(self._ptr()) string = ffi.string(string).decode() if string != ffi.NULL else '' return "%s: %s" % (self.__class__.__name__, ' '.join(string.split())) def _ptr(self) -> ffi.CData: tpl = lib.FindDeftemplate(self._env, self._name) if tpl == ffi.NULL: raise CLIPSError(self._env, 'Template <%s> not defined' % self.name) return tpl @property def implied(self) -> bool: return lib.ImpliedDeftemplate(self._ptr()) @property def name(self) -> str: return self._name.decode() @property def module(self) -> Module: name = 
ffi.string(lib.DeftemplateModule(self._ptr())).decode() return Module(self._env, name) @property def deletable(self) -> bool: return lib.DeftemplateIsDeletable(self._ptr()) @property def slots(self) -> tuple: if self.implied: return () value = clips.values.clips_value(self._env) lib.DeftemplateSlotNames(self._ptr(), value) return tuple(TemplateSlot(self._env, self.name, n) for n in clips.values.python_value(self._env, value)) @property def watch(self) -> bool: return lib.GetDeftemplateWatch(self._ptr()) @watch.setter def watch(self, flag: bool): lib.EnvSetDeftemplateWatch(self._ptr(), flag)
BSD 3-Clause New or Revised License
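A short sketch of iterating the facts asserted against a template (assumes the clipspy package is installed; the deftemplate and slot values are illustrative):

import clips

env = clips.Environment()
env.build('(deftemplate person (slot name) (slot age))')
env.assert_string('(person (name "Ada") (age 36))')
env.assert_string('(person (name "Alan") (age 41))')

template = env.find_template('person')
for fact in template.facts():
    print(fact['name'], fact['age'])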
twke18/spml
spml/data/datasets/base_dataset.py
ListDataset._read_image
python
def _read_image(self, image_path): img = np.array(Image.open(image_path).convert(mode='RGB')) img = img.astype(np.float32) / 255 return img
Read BGR uint8 image.
https://github.com/twke18/spml/blob/f09e4c30ecf2030d42ac70b2c35e7fdeee9bf468/spml/data/datasets/base_dataset.py#L102-L107
import os import torch import torch.utils.data import numpy as np import PIL.Image as Image import cv2 import spml.data.transforms as transforms class ListDataset(torch.utils.data.Dataset): def __init__(self, data_dir, data_list, img_mean=(0, 0, 0), img_std=(1, 1, 1), size=None, random_crop=False, random_scale=False, random_mirror=False, training=False): self.image_paths, self.semantic_label_paths, self.instance_label_paths = ( self._read_image_and_label_paths(data_dir, data_list)) self.training = training self.img_mean = img_mean self.img_std = img_std self.size = size self.random_crop = random_crop self.random_scale = random_scale self.random_mirror = random_mirror def eval(self): self.training = False def train(self): self.training = True def _read_image_and_label_paths(self, data_dir, data_list): images, semantic_labels, instance_labels = [], [], [] with open(data_list, 'r') as list_file: for line in list_file: line = line.strip('\n') try: img, semantic_lab, instance_lab = line.split(' ') except: img = line semantic_lab = instance_lab = None images.append(os.path.join(data_dir, img)) if semantic_lab is not None: semantic_labels.append(os.path.join(data_dir, semantic_lab)) if instance_lab is not None: instance_labels.append(os.path.join(data_dir, instance_lab)) return images, semantic_labels, instance_labels
MIT License
chrisyounger/git_for_splunk
bin/git_for_splunk/aob_py2/cloudconnectlib/splunktalib/state_store.py
CachedFileStateStore.__init__
python
def __init__(self, app_name, checkpoint_dir, max_cache_seconds=5): super(CachedFileStateStore, self).__init__(app_name, checkpoint_dir) self._states_cache = {} self._states_cache_lmd = {} self.max_cache_seconds = max_cache_seconds
:meta_configs: a dict-like object that contains checkpoint_dir, session_key, server_uri, etc.
https://github.com/chrisyounger/git_for_splunk/blob/c450f32069b5d1087d4e4ebb0803bf7a0f25c60d/bin/git_for_splunk/aob_py2/cloudconnectlib/splunktalib/state_store.py#L192-L201
from builtins import object import json import os import os.path as op import time import traceback from abc import abstractmethod from ..splunktacollectorlib.common import log as stulog from ..splunktalib import kv_client as kvc from ..splunktalib.common import util def get_state_store(meta_configs, appname, collection_name="talib_states", use_kv_store=False, use_cache_file=True, max_cache_seconds=5): if util.is_true(use_kv_store): return StateStore(appname, meta_configs['server_uri'], meta_configs['session_key'], collection_name) checkpoint_dir = meta_configs['checkpoint_dir'] if util.is_true(use_cache_file): return CachedFileStateStore(appname, checkpoint_dir, max_cache_seconds) return FileStateStore(appname, checkpoint_dir) class BaseStateStore(object): def __init__(self, app_name): self._app_name = app_name @abstractmethod def update_state(self, key, states): pass @abstractmethod def get_state(self, key): pass @abstractmethod def delete_state(self, key): pass def close(self, key=None): pass class StateStore(BaseStateStore): def __init__(self, app_name, server_uri, session_key, collection_name="talib_states"): super(StateStore, self).__init__(app_name) self._states_cache = {} self._kv_client = None self._collection = collection_name self._kv_client = kvc.KVClient( splunkd_host=server_uri, session_key=session_key ) kvc.create_collection(self._kv_client, self._collection, self._app_name) self._load_states_cache() def update_state(self, key, states): data = {'value': json.dumps(states)} if key not in self._states_cache: data['_key'] = key self._kv_client.insert_collection_data( collection=self._collection, data=data, app=self._app_name ) else: self._kv_client.update_collection_data( collection=self._collection, key_id=key, data=data, app=self._app_name ) self._states_cache[key] = states def get_state(self, key=None): if key: return self._states_cache.get(key, None) return self._states_cache def delete_state(self, key=None): if key: self._delete_state(key) else: for key in list(self._states_cache.keys()): self._delete_state(key) def _delete_state(self, key): if key not in self._states_cache: return self._kv_client.delete_collection_data( self._collection, key, self._app_name) del self._states_cache[key] def _load_states_cache(self): states = self._kv_client.get_collection_data( self._collection, None, self._app_name) if not states: return for state in states: value = state['value'] if 'value' in state else state key = state['_key'] try: value = json.loads(value) except Exception: stulog.logger.warning( 'Unable to load state from cache, key=%s, error=%s', key, traceback.format_exc()) pass self._states_cache[key] = value def _create_checkpoint_dir_if_needed(checkpoint_dir): if os.path.isdir(checkpoint_dir): return stulog.logger.info( "Checkpoint dir '%s' doesn't exist, try to create it", checkpoint_dir) try: os.mkdir(checkpoint_dir) except OSError: stulog.logger.exception( "Failure creating checkpoint dir '%s'", checkpoint_dir ) raise Exception( "Unable to create checkpoint dir '{}'".format(checkpoint_dir) ) class FileStateStore(BaseStateStore): def __init__(self, app_name, checkpoint_dir): super(FileStateStore, self).__init__(app_name) self._checkpoint_dir = checkpoint_dir def _get_checkpoint_file(self, filename): return op.join(self._checkpoint_dir, filename) @staticmethod def _remove_if_exist(filename): if op.exists(filename): os.remove(filename) def update_state(self, key, states): _create_checkpoint_dir_if_needed(self._checkpoint_dir) filename = self._get_checkpoint_file(key) with 
open(filename + ".new", "w") as json_file: json.dump(states, json_file) self._remove_if_exist(filename) os.rename(filename + ".new", filename) def get_state(self, key): filename = self._get_checkpoint_file(key) if op.exists(filename): with open(filename) as json_file: state = json.load(json_file) return state else: return None def delete_state(self, key): self._remove_if_exist(self._get_checkpoint_file(key)) class CachedFileStateStore(FileStateStore):
Apache License 2.0
heremaps/xyz-spaces-python
xyzspaces/iml/apis/data_config_api.py
DataConfigApi.delete_catalog
python
def delete_catalog(self, catalog_hrn: str, billing_tag: Optional[str] = None) -> dict: path = f"/catalogs/{catalog_hrn}" url = "{}{}".format(self.base_url, path) params = {"billingTag": billing_tag} resp = self.delete(url, params) if resp.status_code == 202: return resp.json() else: self.raise_response_exception(resp)
Delete a catalog. :param catalog_hrn: a HERE Resource Name. :param billing_tag: a string which is used for grouping billing records. :return: a dict with catalog deletion status.
https://github.com/heremaps/xyz-spaces-python/blob/fe6bcb0ee5131798be892be2a25e023d2c47b2b4/xyzspaces/iml/apis/data_config_api.py#L124-L139
from typing import Any, Dict, Optional from xyzspaces.iml.apis.api import Api from xyzspaces.iml.auth import Auth class DataConfigApi(Api): def __init__( self, auth: Auth, proxies: Optional[dict] = None, ): self.auth = auth super().__init__( access_token=auth.token, proxies=proxies, ) self.base_url = "https://config.data.api.platform.here.com/config/v1" def create_catalog( self, data: Dict[str, Any], billing_tag: Optional[str] = None ) -> Dict: path = "/catalogs" url = "{}{}".format(self.base_url, path) params = {"billingTag": billing_tag} if billing_tag else {} resp = self.post(url, data, params) if resp.status_code == 202: return resp.json() else: self.raise_response_exception(resp) def get_catalog_status( self, catalog_status_href: str, billing_tag: Optional[str] = None ) -> tuple: params = {"billingTag": billing_tag} resp = self.get(url=catalog_status_href, params=params) if resp.status_code in [200, 202, 303]: return resp.json(), resp.status_code != 202 else: self.raise_response_exception(resp) def get_catalog_details( self, catalog_hrn: str, billing_tag: Optional[str] = None ) -> Dict: path = f"/catalogs/{catalog_hrn}" params = {"billingTag": billing_tag} url = "{}{}".format(self.base_url, path) resp = self.get(url, params=params) if resp.status_code == 200: return resp.json() else: self.raise_response_exception(resp) def update_catalog( self, catalog_hrn: str, data: Dict[str, Any], billing_tag: Optional[str] = None ) -> dict: path = f"/catalogs/{catalog_hrn}" url = "{}{}".format(self.base_url, path) params = {"billingTag": billing_tag} resp = self.put(url=url, data=data, params=params) if resp.status_code == 202: return resp.json() else: self.raise_response_exception(resp)
Apache License 2.0
sepandhaghighi/csv2latex
csv2latex/csv2LaTeX.py
check_update
python
def check_update(DEBUG=False): try: new_version=requests.get(UPDATE_URL).text if float(new_version)>float(version): print("New Version ("+new_version+") Of csv2latex is available (visit github page)") except Exception as e: if DEBUG==True: print(str(e))
This function checks the csv2latex site for a new version :param DEBUG: Flag for using Debug mode :type DEBUG: bool :return: True if a new version is available
https://github.com/sepandhaghighi/csv2latex/blob/63bd41e77f898ca967cf2ff8fa83779e9e39a2fd/csv2latex/csv2LaTeX.py#L6-L19
import os from .params import * import sys import requests
MIT License
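Trivial usage of the update helper above (assumes the csv2latex package is installed); with DEBUG=True any network error is printed instead of being silently swallowed.

from csv2latex.csv2LaTeX import check_update

check_update(DEBUG=True)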
sibirrer/lenstronomy
lenstronomy/ImSim/de_lens.py
marginalisation_const
python
def marginalisation_const(M_inv): sign, log_det = np.linalg.slogdet(M_inv) if sign == 0: return -10**15 return sign * log_det/2
get marginalisation constant 1/2 log(M_beta) for flat priors :param M_inv: 2D covariance matrix :return: float
https://github.com/sibirrer/lenstronomy/blob/e6d0e179a98ecb0c4db25cdf7cfb73e83c6aeded/lenstronomy/ImSim/de_lens.py#L44-L54
__author__ = 'sibirrer' import numpy as np import sys from lenstronomy.Util.package_util import exporter export, __all__ = exporter() @export def get_param_WLS(A, C_D_inv, d, inv_bool=True): M = A.T.dot(np.multiply(C_D_inv, A.T).T) if inv_bool: if np.linalg.cond(M) < 5/sys.float_info.epsilon: M_inv = _stable_inv(M) else: M_inv = np.zeros_like(M) R = A.T.dot(np.multiply(C_D_inv, d)) B = M_inv.dot(R) else: if np.linalg.cond(M) < 5/sys.float_info.epsilon: R = A.T.dot(np.multiply(C_D_inv, d)) B = _solve_stable(M, R) else: B = np.zeros(len(A.T)) M_inv = None image = A.dot(B) return B, M_inv, image @export
MIT License
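A self-contained numerical sketch of the weighted least-squares helper defined next to marginalisation_const (the values are illustrative): fit two linear amplitudes to noiseless data and evaluate the marginalisation constant.

import numpy as np

from lenstronomy.ImSim import de_lens

A = np.column_stack([np.ones(5), np.arange(5.0)])  # response matrix, shape (n_data, n_params)
beta_true = np.array([2.0, 0.5])
d = A.dot(beta_true)                                # data vector
C_D_inv = np.ones(5)                                # inverse noise variance per data point

B, M_inv, image = de_lens.get_param_WLS(A, C_D_inv, d)
print(B)                                            # ~[2.0, 0.5]
print(de_lens.marginalisation_const(M_inv))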