Dataset columns:
repository_name: string, 7 to 107 characters
function_path: string, 4 to 190 characters
function_identifier: string, 1 to 236 characters
language: string, 1 distinct value
function: string, 9 to 647k characters
docstring: string, 5 to 488k characters
function_url: string, 71 to 285 characters
context: string, 0 to 2.51M characters
license: string, 5 distinct values
gluufederation/community-edition-setup
pylib/jproperties.py
Properties.getmeta
python
def getmeta(self, key): return self._metadata.get(key, {})
Get the metadata for a key. :param key: The key to get metadata for. :return: Metadata for the key (always a dictionary, but empty if there is no metadata).
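A hedged usage sketch for Properties.getmeta from the record above: assigning a (value, metadata) tuple stores per-key metadata, and getmeta() returns that dictionary, or an empty one when nothing was stored. The import path and the keys below are assumptions for illustration.

# Assumes the vendored pylib/jproperties.py (or the upstream jproperties package) is importable.
from jproperties import Properties

props = Properties()
props["db.host"] = ("localhost", {"comment": "primary database"})  # value plus metadata
props["db.port"] = "5432"                                          # value only

assert props.getmeta("db.host") == {"comment": "primary database"}
assert props.getmeta("db.port") == {}   # always a dict, never None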
https://github.com/gluufederation/community-edition-setup/blob/85cc2d3f86ff8a10005b02ee4b3b5ad6e2e037b1/pylib/jproperties.py#L334-L341
from __future__ import print_function import codecs import functools import itertools import os import re import sys import time from collections import MutableMapping, namedtuple import six PropertyTuple = namedtuple("PropertyTuple", ["data", "meta"]) def _is_runtime_meta(key): return ( (isinstance(key, six.text_type) and key.startswith(u"__")) or (isinstance(key, six.binary_type) and key.startswith(b"__")) ) def _escape_non_ascii(unicode_obj): def replace(match): s = match.group(0) n = ord(s) if n < 0x10000: return u'\\u{0:04x}'.format(n) else: n -= 0x10000 s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) return u'\\u{0:04x}\\u{1:04x}'.format(s1, s2) if isinstance(unicode_obj, six.binary_type): unicode_obj = unicode_obj.decode('utf-8') return re.sub( six.text_type(r'[^ -~]'), replace, unicode_obj ) @functools.partial(codecs.register_error, "jproperties.jbackslashreplace") def _jbackslashreplace_error_handler(err): if not isinstance(err, UnicodeEncodeError): raise err return _escape_non_ascii(err.object[err.start:err.end]), err.end def _escape_str(raw_str, only_leading_spaces=False, escape_non_printing=False, line_breaks_only=False): if isinstance(raw_str, six.binary_type): raw_str = raw_str.decode("utf-8") elif not isinstance(raw_str, six.text_type): raw_str = six.text_type(raw_str) trans_dict = { ord(u"\r"): u"\\r", ord(u"\n"): u"\\n", ord(u"\f"): u"\\f" } if not line_breaks_only: trans_dict.update( { ord(u"#"): u"\\#", ord(u"!"): u"\\!", ord(u"="): u"\\=", ord(u":"): u"\\:", ord(u"\\"): u"\\\\", ord(u"\t"): u"\\t", } ) escaped_str = raw_str.translate(trans_dict) if not only_leading_spaces: escaped_str = escaped_str.replace(u" ", u"\\ ") else: escaped_str = re.sub(u"^ ", u"\\\\ ", escaped_str) if escape_non_printing: escaped_str = _escape_non_ascii(escaped_str) return escaped_str class PropertyError(Exception): pass class ParseError(PropertyError): def __init__(self, message, line_number, file_obj=None): self.message = message self.line_number = line_number self.file_obj = file_obj def __str__(self): filename = "<unknown>" if not hasattr(self.file_obj, "filename") else self.file_obj.filename return "Parse error in %s:%d: %s" % ( filename, self.line_number, self.message ) class Properties(MutableMapping, object): _EOL = "\r\n" _WHITESPACE = " \t\f" _ALLWHITESPACE = _EOL + _WHITESPACE def __init__(self, process_escapes_in_values=True, *args, **kwargs): super(Properties, self).__init__(*args, **kwargs) self._process_escapes_in_values = process_escapes_in_values self.reset() self.clear() def __len__(self): return len(self._properties) def __getitem__(self, item): if not isinstance(item, six.string_types): raise TypeError("Property keys must be of type str or unicode") if item not in self._properties: raise KeyError("Key not found") return PropertyTuple( self._properties[item], self._metadata.get(item, {}) ) def __setitem__(self, key, value): if not isinstance(key, six.string_types): raise TypeError("Property keys must be of type str or unicode") metadata = None if isinstance(value, tuple): value, metadata = value if not isinstance(value, six.string_types): raise TypeError("Property values must be of type str or unicode") if metadata is not None and not isinstance(metadata, dict): raise TypeError("Metadata needs to be a dictionary") self._properties[key] = value if metadata is not None: self._metadata[key] = metadata def __delitem__(self, key): if not isinstance(key, six.string_types): raise TypeError("Property keys must be of type str or unicode") if key not in self._properties: 
raise KeyError("Key not found") del self._properties[key] if key in self._metadata: del self._metadata[key] try: self._key_order.remove(key) except ValueError: pass def __iter__(self): return self._properties.__iter__() @property def properties(self): return self._properties @properties.setter def properties(self, value): self._properties = value @properties.deleter def properties(self): self._properties = {}
MIT License
openstack/cinder
cinder/volume/drivers/nimble.py
NimbleBaseVolumeDriver.create_snapshot
python
def create_snapshot(self, snapshot): self.APIExecutor.snap_vol(snapshot)
Create a snapshot.
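A minimal, self-contained sketch of how create_snapshot is exercised: the method simply hands the snapshot dict to the driver's APIExecutor, so a stub executor is enough to show the call shape. The stub classes and the snapshot names are made up; the real driver needs a full Cinder/OpenStack configuration.

class _StubAPIExecutor:
    # Stands in for the Nimble REST executor; only the call used by create_snapshot is stubbed.
    def snap_vol(self, snapshot):
        print("snapshotting %(volume_name)s as %(name)s" % snapshot)

class _DriverSketch:
    # Mirrors the one-line delegation in NimbleBaseVolumeDriver.create_snapshot.
    def __init__(self, api_executor):
        self.APIExecutor = api_executor

    def create_snapshot(self, snapshot):
        self.APIExecutor.snap_vol(snapshot)

_DriverSketch(_StubAPIExecutor()).create_snapshot(
    {"volume_name": "volume-1234", "name": "snapshot-1234"})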
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/drivers/nimble.py#L325-L327
import abc import functools import json import random import re import string import sys import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import requests import six from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder.objects import volume from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils DRIVER_VERSION = "4.1.0" AES_256_XTS_CIPHER = 'aes_256_xts' DEFAULT_CIPHER = 'none' EXTRA_SPEC_ENCRYPTION = 'nimble:encryption' EXTRA_SPEC_PERF_POLICY = 'nimble:perfpol-name' EXTRA_SPEC_DEDUPE = 'nimble:dedupe' EXTRA_SPEC_IOPS_LIMIT = 'nimble:iops-limit' EXTRA_SPEC_FOLDER = 'nimble:folder' DEFAULT_PERF_POLICY_SETTING = 'default' DEFAULT_ENCRYPTION_SETTING = 'no' DEFAULT_DEDUPE_SETTING = 'false' DEFAULT_IOPS_LIMIT_SETTING = None DEFAULT_FOLDER_SETTING = None DEFAULT_SNAP_QUOTA = sys.maxsize BACKUP_VOL_PREFIX = 'backup-vol-' AGENT_TYPE_OPENSTACK = 'openstack' AGENT_TYPE_OPENSTACK_GST = 'openstackv2' AGENT_TYPE_NONE = 'none' SM_SUBNET_DATA = 'data' SM_SUBNET_MGMT_PLUS_DATA = 'mgmt-data' SM_STATE_MSG = "is already in requested state" SM_OBJ_EXIST_MSG = "Object exists" SM_OBJ_ENOENT_MSG = "No such object" SM_OBJ_HAS_CLONE = "has a clone" IOPS_ERR_MSG = "Please set valid IOPS limit in the range" LUN_ID = '0' WARN_LEVEL = 80 DEFAULT_SLEEP = 5 MIN_IOPS = 256 MAX_IOPS = 4294967294 NimbleDefaultVersion = 1 LOG = logging.getLogger(__name__) nimble_opts = [ cfg.StrOpt('nimble_pool_name', default='default', help='Nimble Controller pool name'), cfg.StrOpt('nimble_subnet_label', default='*', help='Nimble Subnet Label'), cfg.BoolOpt('nimble_verify_certificate', default=False, help='Whether to verify Nimble SSL Certificate'), cfg.StrOpt('nimble_verify_cert_path', help='Path to Nimble Array SSL certificate'), ] CONF = cfg.CONF CONF.register_opts(nimble_opts, group=configuration.SHARED_CONF_GROUP) class NimbleDriverException(exception.VolumeDriverException): message = _("Nimble Cinder Driver exception") class NimbleAPIException(exception.VolumeBackendAPIException): message = _("Unexpected response from Nimble API") class NimbleVolumeBusyException(exception.VolumeIsBusy): message = _("Nimble Cinder Driver: Volume Busy") class NimbleBaseVolumeDriver(san.SanDriver): VERSION = DRIVER_VERSION CI_WIKI_NAME = "Nimble_Storage_CI" def __init__(self, *args, **kwargs): super(NimbleBaseVolumeDriver, self).__init__(*args, **kwargs) self.APIExecutor = None self.group_stats = {} self.api_protocol = None self._storage_protocol = None self._group_target_enabled = False self.configuration.append_config_values(nimble_opts) self.verify = False if self.configuration.nimble_verify_certificate is True: self.verify = self.configuration.nimble_verify_cert_path or True @staticmethod def get_driver_options(): return nimble_opts def _check_config(self): required_config = ['san_ip', 'san_login', 'san_password'] for attr in required_config: if not getattr(self.configuration, attr, None): raise exception.InvalidInput(reason=_('%s is not set.') % attr) def create_volume(self, volume): reserve = not self.configuration.san_thin_provision self.APIExecutor.create_vol( volume, self.configuration.nimble_pool_name, reserve, self._storage_protocol, self._group_target_enabled) volume_type = 
volume.get('volume_type') consis_group_snap_type = False if volume_type is not None: consis_group_snap_type = self.is_volume_group_snap_type( volume_type) cg_id = volume.get('group_id', None) if consis_group_snap_type and cg_id: volume_id = self.APIExecutor.get_volume_id_by_name(volume['name']) cg_volcoll_id = self.APIExecutor.get_volcoll_id_by_name(cg_id) self.APIExecutor.associate_volcoll(volume_id, cg_volcoll_id) return self._get_model_info(volume['name']) def is_volume_backup_clone(self, volume): vol_info = self.APIExecutor.get_vol_info(volume['name']) LOG.debug("is_clone: %(is_clone)s base_snap_id: %(snap)s, " "parent_vol_id: %(vol)s", {'is_clone': vol_info['clone'], 'snap': vol_info['base_snap_id'], 'vol': vol_info['parent_vol_id']}) if vol_info['base_snap_id'] and ( vol_info['parent_vol_id'] is not None): LOG.debug("Nimble base-snap exists for volume %(vol)s", {'vol': volume['name']}) volume_name_prefix = volume['name'].replace(volume['id'], "") LOG.debug("volume_name_prefix : %(prefix)s", {'prefix': volume_name_prefix}) snap_id = self.APIExecutor.get_snap_info_by_id( vol_info['base_snap_id'], vol_info['parent_vol_id']) snap_info = self.APIExecutor.get_snap_info_detail(snap_id['id']) LOG.debug("snap_info description %(snap_info)s", {'snap_info': snap_info['description']}) if snap_info['description'] and BACKUP_VOL_PREFIX in ( snap_info['description']): parent_vol_name = self.APIExecutor.get_volume_name( vol_info['parent_vol_id']) parent_vol_id = parent_vol_name. replace( volume_name_prefix, "") if BACKUP_VOL_PREFIX + parent_vol_id in snap_info[ 'description']: LOG.info('Nimble backup-snapshot exists name=%(' 'name)s', {'name': snap_info['name']}) snap_vol_name = self.APIExecutor.get_volume_name( snap_info['vol_id']) LOG.debug("snap_vol_name %(snap)s", {'snap': snap_vol_name}) return snap_info['name'], snap_vol_name return "", "" def delete_volume(self, volume): backup_snap_name, backup_vol_name = self.is_volume_backup_clone(volume) eventlet.sleep(DEFAULT_SLEEP) self.APIExecutor.online_vol(volume['name'], False) LOG.debug("Deleting volume %(vol)s", {'vol': volume['name']}) @utils.retry(NimbleAPIException, retries=3) def _retry_remove_vol(volume): self.APIExecutor.delete_vol(volume['name']) try: _retry_remove_vol(volume) except NimbleAPIException as ex: LOG.debug("delete volume exception: %s", ex) if SM_OBJ_HAS_CLONE in six.text_type(ex): LOG.warning('Volume %(vol)s : %(state)s', {'vol': volume['name'], 'state': SM_OBJ_HAS_CLONE}) self.APIExecutor.online_vol(volume['name'], True) raise exception.VolumeIsBusy(volume_name=volume['name']) raise if (backup_snap_name != "" and backup_vol_name != "") and ( backup_snap_name is not None): LOG.debug("Delete volume backup vol: %(vol)s snap: %(snap)s", {'vol': backup_vol_name, 'snap': backup_snap_name}) self.APIExecutor.online_snap(backup_vol_name, False, backup_snap_name) self.APIExecutor.delete_snap(backup_vol_name, backup_snap_name) def _generate_random_string(self, length): char_set = string.ascii_lowercase return ''.join(random.sample(char_set, length)) def _clone_volume_from_snapshot(self, volume, snapshot): reserve = not self.configuration.san_thin_provision pool_name = self.configuration.nimble_pool_name self.APIExecutor.clone_vol(volume, snapshot, reserve, self._group_target_enabled, self._storage_protocol, pool_name) if(volume['size'] > snapshot['volume_size']): vol_size = volume['size'] * units.Ki reserve_size = 100 if reserve else 0 data = {"data": {'size': vol_size, 'reserve': reserve_size, 'warn_level': int(WARN_LEVEL), 'limit': 
100, 'snap_limit': DEFAULT_SNAP_QUOTA}} LOG.debug("Edit Vol request %(data)s", {'data': data}) self.APIExecutor.edit_vol(volume['name'], data) return self._get_model_info(volume['name']) def create_cloned_volume(self, volume, src_vref): snapshot_name = ('openstack-clone-' + volume['name'] + '-' + self._generate_random_string(12)) snapshot = {'volume_name': src_vref['name'], 'name': snapshot_name, 'volume_size': src_vref['size'], 'display_name': volume.display_name, 'display_description': ''} self.APIExecutor.snap_vol(snapshot) self._clone_volume_from_snapshot(volume, snapshot) return self._get_model_info(volume['name']) def create_export(self, context, volume, connector): return self._get_model_info(volume['name']) def ensure_export(self, context, volume): return self._get_model_info(volume['name'])
Apache License 2.0
extensiveautomation/extensiveautomation-server
src/ea/libs/NetLayerLib/Messages.py
Messages.decode
python
def decode(self, msgraw):
    msg = msgraw.split(BODY_SEP, 1)
    if len(msg) == 1:
        head = msg[0].split(HEAD_SEP)
        body = b''
    elif len(msg) == 2:
        head = msg[0].split(HEAD_SEP)
        if sys.version_info > (3,):
            decoded = base64.b64decode(msg[1])
        else:
            decoded = base64.decodestring(msg[1])
        decompressed_data = zlib.decompress(decoded)
        if not self.__useJson:
            if sys.version_info > (3,):
                body = cPickle.loads(decompressed_data, encoding="bytes")
                body = bytes_to_unicode(body)
            else:
                body = cPickle.loads(decompressed_data)
        else:
            body = json.loads(decompressed_data, encoding="ISO-8859-1")
    else:
        raise Exception('invalid message')
    ret = {}
    ret['tid'] = int(head[1])
    ret['body'] = body
    if head[0].isdigit():
        ret['code'] = head[0]
        if ret['code'] not in [RSP_CODE_OK[0], RSP_CODE_FORBIDDEN[0],
                               RSP_CODE_FAILED[0], RSP_CODE_ERROR[0]]:
            raise Exception('unknown response code: %s' % ret['code'])
        ret['phrase'] = head[2]
        if ret['phrase'] not in [RSP_CODE_OK[1], RSP_CODE_FORBIDDEN[1],
                                 RSP_CODE_FAILED[1], RSP_CODE_ERROR[1]]:
            raise Exception('unknown response phrase: %s' % ret['phrase'])
        return ('response', ret)
    else:
        ret['cmd'] = head[0]
        if ret['cmd'] not in [RSQ_CMD, RSQ_NOTIFY]:
            raise Exception('unknown request: %s' % ret['cmd'])
        ret['userid'] = head[2]
        return ('request', ret)
Decode a message: request or response @param msgraw: @type msgraw: @return: request or response @rtype: tuple
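A hedged round-trip sketch for decode() from the record above: the raw frame is assembled by hand in the wire format the method expects (header fields joined by spaces, a newline, then a base64-encoded zlib-compressed pickle body). The import path follows this repository's layout and the body content is made up.

import base64
import pickle
import zlib

from ea.libs.NetLayerLib import Messages  # src/ea/libs/NetLayerLib/Messages.py

body = {"status": "done"}
payload = base64.b64encode(zlib.compress(pickle.dumps(body, protocol=2)))
raw = b" ".join([b"200", b"7", b"OK"]) + b"\n" + payload  # response code, tid, phrase

kind, msg = Messages.Messages(userId=b"tester").decode(raw)
print(kind, msg["tid"], msg["body"])   # -> response 7 {'status': 'done'}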
https://github.com/extensiveautomation/extensiveautomation-server/blob/e4e7f3b76d8b94bf715c4345d29dd2b05bca92e5/src/ea/libs/NetLayerLib/Messages.py#L204-L275
try: import cPickle except ImportError: import pickle as cPickle import zlib import base64 import sys import json if sys.version_info > (3,): unicode = str HEAD_SEP = b" " BODY_SEP = b"\n" RSQ_CMD = b"RSQ" RSQ_NOTIFY = b"NOTIFY" RSP_CODE_OK = (b"200", b"OK") RSP_CODE_FORBIDDEN = (b"403", b"FORBIDDEN") RSP_CODE_FAILED = (b"400", b"FAILED") RSP_CODE_ERROR = (b"500", b"ERROR") CMD_ERROR = -1 CMD_HELLO = 0 CMD_GET_PROBE = 1 CMD_START_PROBE = 2 CMD_STOP_PROBE = 3 CMD_NEW_FILE = 4 CMD_INTERACT = 5 CMD_START_AGENT = 6 CMD_STOP_AGENT = 7 def bytes2str(val): if isinstance(val, bytes): return str(val, "utf8") else: return val def bytes_to_unicode(ob): t = type(ob) if t in (list, tuple): try: l_ = [str(i, 'utf-8') if isinstance(i, bytes) else i for i in ob] except UnicodeDecodeError: l_ = [i for i in ob] l_ = [bytes_to_unicode(i) if type(i) in ( list, tuple, dict) else i for i in l_] ro = tuple(l_) if t is tuple else l_ elif t is dict: byte_keys = [i for i in ob if isinstance(i, bytes)] for bk in byte_keys: v = ob[bk] del(ob[bk]) try: ob[str(bk, 'utf-8')] = v except UnicodeDecodeError: ob[bk] = v for k in ob: if isinstance(ob[k], bytes): try: ob[k] = str(ob[k], 'utf-8') except UnicodeDecodeError: ob[k] = ob[k] elif type(ob[k]) in (list, tuple, dict): ob[k] = bytes_to_unicode(ob[k]) ro = ob else: ro = ob return ro class Messages(object): def __init__(self, userId=None, useJson=False, pickleVer=2): self.__userId = userId self.__useJson = useJson self.__pickleProtocol = pickleVer def setUserId(self, userId): self.__userId = userId def encode(self, op, tid, desc, body=None): if op.isdigit(): if op not in [RSP_CODE_OK[0], RSP_CODE_FORBIDDEN[0], RSP_CODE_FAILED[0], RSP_CODE_ERROR[0]]: raise Exception('unknown response code: %s' % op) if desc not in [RSP_CODE_OK[1], RSP_CODE_FORBIDDEN[1], RSP_CODE_FAILED[1], RSP_CODE_ERROR[1]]: raise Exception('unknown response phrase: %s' % desc) else: if op not in [RSQ_CMD, RSQ_NOTIFY]: raise Exception('unknown request: %s' % op) if sys.version_info > (3,): if not isinstance(desc, bytes): desc = bytes(desc, 'utf8') ret = [HEAD_SEP.join([op, bytes(str(tid), 'utf8'), desc])] else: ret = [HEAD_SEP.join([op, str(tid), desc])] if body: if not self.__useJson: pickled = cPickle.dumps(body, protocol=self.__pickleProtocol) bod = zlib.compress(pickled) ret.append(base64.encodestring(bod)) else: json_data = json.dumps(body, ensure_ascii=False) compressed = zlib.compress(json_data) ret.append(base64.encodestring(compressed)) else: rslt = ret rslt = BODY_SEP.join(ret) return rslt
MIT License
xolox/python-coloredlogs
coloredlogs/__init__.py
find_level_aliases
python
def find_level_aliases():
    mapping = collections.defaultdict(list)
    for name, value in find_defined_levels().items():
        mapping[value].append(name)
    aliases = {}
    for value, names in mapping.items():
        if len(names) > 1:
            names = sorted(names, key=lambda n: len(n))
            canonical_name = names.pop()
            for alias in names:
                aliases[alias] = canonical_name
    return aliases
Find log level names which are aliases of each other. :returns: A dictionary that maps aliases to their canonical name. .. note:: Canonical names are chosen to be the alias with the longest string length so that e.g. ``WARN`` is an alias for ``WARNING`` instead of the other way around. Here's what the result looks like by default (when no custom levels or level names have been defined): >>> from coloredlogs import find_level_aliases >>> find_level_aliases() {'WARN': 'WARNING', 'FATAL': 'CRITICAL'}
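A quick check of the alias rule described above, using only the stock logging levels: for every numeric level with more than one name on the logging module, the longest name is kept as canonical and the shorter ones become aliases.

import coloredlogs

aliases = coloredlogs.find_level_aliases()
print(aliases)   # with stock levels: {'WARN': 'WARNING', 'FATAL': 'CRITICAL'}
# Every alias is at most as long as the canonical name it points to.
assert all(len(alias) <= len(canonical) for alias, canonical in aliases.items())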
https://github.com/xolox/python-coloredlogs/blob/65bdfe976ac0bf81e8c0bd9a98242b9d666b2859/coloredlogs/__init__.py#L705-L732
import collections import logging import os import re import socket import sys from humanfriendly import coerce_boolean from humanfriendly.compat import coerce_string, is_string, on_windows from humanfriendly.terminal import ANSI_COLOR_CODES, ansi_wrap, enable_ansi_support, terminal_supports_colors from humanfriendly.text import format, split __version__ = '15.0.1' DEFAULT_LOG_LEVEL = logging.INFO DEFAULT_LOG_FORMAT = '%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s' DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S' CHROOT_FILES = ['/etc/debian_chroot'] DEFAULT_FIELD_STYLES = dict( asctime=dict(color='green'), hostname=dict(color='magenta'), levelname=dict(color='black', bold=True), name=dict(color='blue'), programname=dict(color='cyan'), username=dict(color='yellow'), ) DEFAULT_LEVEL_STYLES = dict( spam=dict(color='green', faint=True), debug=dict(color='green'), verbose=dict(color='blue'), info=dict(), notice=dict(color='magenta'), warning=dict(color='yellow'), success=dict(color='green', bold=True), error=dict(color='red'), critical=dict(color='red', bold=True), ) DEFAULT_FORMAT_STYLE = '%' FORMAT_STYLE_PATTERNS = { '%': r'%\((\w+)\)[#0 +-]*\d*(?:\.\d+)?[hlL]?[diouxXeEfFgGcrs%]', '{': r'{(\w+)[^}]*}', '$': r'\$(\w+)|\${(\w+)}', } def auto_install(): if coerce_boolean(os.environ.get('COLOREDLOGS_AUTO_INSTALL', 'false')): install() def install(level=None, **kw): logger = kw.get('logger') or logging.getLogger() reconfigure = kw.get('reconfigure', True) stream = kw.get('stream') or sys.stderr style = check_style(kw.get('style') or DEFAULT_FORMAT_STYLE) if level is None: level = os.environ.get('COLOREDLOGS_LOG_LEVEL', DEFAULT_LOG_LEVEL) level = level_to_number(level) match_streams = ([sys.stdout, sys.stderr] if stream in [sys.stdout, sys.stderr, None] else [stream]) match_handler = lambda handler: match_stream_handler(handler, match_streams) handler, logger = replace_handler(logger, match_handler, reconfigure) if not (handler and not reconfigure): syslog_enabled = kw.get('syslog') if syslog_enabled not in (None, False): from coloredlogs.syslog import enable_system_logging if syslog_enabled is True: enable_system_logging() else: enable_system_logging(level=syslog_enabled) use_colors = kw.get('isatty', None) if use_colors or (use_colors is None): if use_colors is None and 'NO_COLOR' in os.environ: use_colors = False if (use_colors or use_colors is None) and on_windows(): use_colors = enable_ansi_support() if use_colors is None: use_colors = terminal_supports_colors(stream) filters = handler.filters if handler else None if stream is sys.stderr: handler = StandardErrorHandler() else: handler = logging.StreamHandler(stream) handler.setLevel(level) if filters: handler.filters = filters formatter_options = dict(fmt=kw.get('fmt'), datefmt=kw.get('datefmt')) if style != DEFAULT_FORMAT_STYLE: formatter_options['style'] = style if not formatter_options['fmt']: formatter_options['fmt'] = os.environ.get('COLOREDLOGS_LOG_FORMAT') or DEFAULT_LOG_FORMAT if not formatter_options['datefmt']: formatter_options['datefmt'] = os.environ.get('COLOREDLOGS_DATE_FORMAT') or DEFAULT_DATE_FORMAT if kw.get('milliseconds'): parser = FormatStringParser(style=style) if not (parser.contains_field(formatter_options['fmt'], 'msecs') or '%f' in formatter_options['datefmt']): pattern = parser.get_pattern('asctime') replacements = {'%': '%(msecs)03d', '{': '{msecs:03}', '$': '${msecs}'} formatter_options['fmt'] = pattern.sub( r'\g<0>,' + replacements[style], formatter_options['fmt'], ) HostNameFilter.install( 
fmt=formatter_options['fmt'], handler=handler, style=style, use_chroot=kw.get('use_chroot', True), ) ProgramNameFilter.install( fmt=formatter_options['fmt'], handler=handler, programname=kw.get('programname'), style=style, ) UserNameFilter.install( fmt=formatter_options['fmt'], handler=handler, username=kw.get('username'), style=style, ) if use_colors: for name, environment_name in (('field_styles', 'COLOREDLOGS_FIELD_STYLES'), ('level_styles', 'COLOREDLOGS_LEVEL_STYLES')): value = kw.get(name) if value is None: environment_value = os.environ.get(environment_name) if environment_value is not None: value = parse_encoded_styles(environment_value) if value is not None: formatter_options[name] = value formatter_type = ColoredFormatter if use_colors else BasicFormatter handler.setFormatter(formatter_type(**formatter_options)) adjust_level(logger, level) logger.addHandler(handler) def check_style(value): if sys.version_info[:2] >= (3, 2): if value not in FORMAT_STYLE_PATTERNS: msg = "Unsupported logging format style! (%r)" raise ValueError(format(msg, value)) elif value != DEFAULT_FORMAT_STYLE: msg = "Format string styles other than %r require Python 3.2+!" raise ValueError(msg, DEFAULT_FORMAT_STYLE) return value def increase_verbosity(): defined_levels = sorted(set(find_defined_levels().values())) current_index = defined_levels.index(get_level()) selected_index = max(0, current_index - 1) set_level(defined_levels[selected_index]) def decrease_verbosity(): defined_levels = sorted(set(find_defined_levels().values())) current_index = defined_levels.index(get_level()) selected_index = min(current_index + 1, len(defined_levels) - 1) set_level(defined_levels[selected_index]) def is_verbose(): return get_level() < DEFAULT_LOG_LEVEL def get_level(): handler, logger = find_handler(logging.getLogger(), match_stream_handler) return handler.level if handler else DEFAULT_LOG_LEVEL def set_level(level): handler, logger = find_handler(logging.getLogger(), match_stream_handler) if handler and logger: handler.setLevel(level_to_number(level)) adjust_level(logger, level) else: install(level=level) def adjust_level(logger, level): level = level_to_number(level) if logger.getEffectiveLevel() > level: logger.setLevel(level) def find_defined_levels(): defined_levels = {} for name in dir(logging): if name.isupper(): value = getattr(logging, name) if isinstance(value, int): defined_levels[name] = value return defined_levels def level_to_number(value): if is_string(value): try: defined_levels = find_defined_levels() value = defined_levels[value.upper()] except KeyError: value = DEFAULT_LOG_LEVEL return value
MIT License
darkharry/yts-cli
yts/yts.py
YTS.execute_transmission
python
def execute_transmission(torrent_name): subprocess.Popen(["transmission-gtk", torrent_name])
Execute Transmission-gtk with the downloaded torrent Parameters ---------- torrent_name: str, required Name of the torrent to be downloaded
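A usage sketch for the static helper above; it only works where the transmission-gtk binary is on PATH, the torrent file name is hypothetical, and the import assumes this repository's yts package layout is importable.

from yts.yts import YTS

YTS.execute_transmission("Big.Buck.Bunny.2008.1080p.torrent")
# Equivalent to: subprocess.Popen(["transmission-gtk", "Big.Buck.Bunny.2008.1080p.torrent"])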
https://github.com/darkharry/yts-cli/blob/21d79bb129d1ee1517d486b9bd8b32192c99a75d/yts/yts.py#L174-L182
import requests import argparse import bs4 import sys import os import subprocess class YTS: def __init__(self, yts_url): self.url = yts_url def get_popular_downloads(self) -> dict: res = make_request(self.url) pop_movies_dict = {} pop_movies = ( bs4.BeautifulSoup(res.text, "lxml") .select("#popular-downloads")[0] .select("div[class='browse-movie-wrap col-xs-10 col-sm-5']") ) for movie in pop_movies: movie_data = movie.select("a")[0] pop_movies_dict.update(self.extract_movie_data(movie_data)) return pop_movies_dict def search_movies(self, query): url = f"{self.url}browse-movies/{query}" res = make_request(url) query_dict = {} movies_found = ( bs4.BeautifulSoup(res.text, "lxml") .select("section > div[class='row']")[0] .select("div[class='browse-movie-wrap col-xs-10 col-sm-4" " col-md-5 col-lg-4']") ) for movie in movies_found: movie_data = movie.select("a")[0] query_dict.update(self.extract_movie_data(movie_data)) return query_dict def get_movie_formats(self, movie_title): movie_page_url = self.url+"movies/"+movie_title movie_page = make_request(movie_page_url) formats = self.extract_formats(movie_page) return formats def get_torrent(self, torrent_url): res = make_request(torrent_url) return res @staticmethod def extract_formats(movie_page) -> dict: movie_formats = ( bs4.BeautifulSoup(movie_page.text, "lxml") .select("p[class='hidden-xs hidden-sm']")[0] .find_all("a") ) torrent_urls = {} for movie_format in movie_formats: torrent_urls[movie_format.getText()] = movie_format["href"] return torrent_urls @staticmethod def extract_movie_data(movie_data) -> dict: movie_title = movie_data["href"].split("/")[-1] rating = ( movie_data .select("figcaption > h4[class='rating']")[0] .getText() ) return {movie_title: rating} @staticmethod
MIT License
azure/autorest.python
test/vanilla/low-level/Expected/AcceptanceTests/XmlLowLevel/xmlservicelowlevel/rest/xml/_request_builders_py3.py
build_put_service_properties_request
python
def build_put_service_properties_request(*, content: Any, **kwargs: Any) -> HttpRequest:
    content_type = kwargs.pop("content_type", None)
    comp = "properties"
    restype = "service"
    url = kwargs.pop("template_url", "/xml/")
    query_parameters = kwargs.pop("params", {})
    query_parameters["comp"] = _SERIALIZER.query("comp", comp, "str")
    query_parameters["restype"] = _SERIALIZER.query("restype", restype, "str")
    header_parameters = kwargs.pop("headers", {})
    if content_type is not None:
        header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    return HttpRequest(
        method="PUT", url=url, params=query_parameters, headers=header_parameters, content=content, **kwargs
    )
Puts storage service properties. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow. :keyword content: Pass in binary content you want in the body of the request (typically bytes, a byte iterator, or stream input). :paramtype content: any :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow. :rtype: ~azure.core.rest.HttpRequest
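A hedged sketch of building (not sending) the request. The XML payload is made up, and it assumes the builder is re-exported from the generated rest.xml subpackage; actually sending it would need the package's test server and an azure.core pipeline client.

from xmlservicelowlevel.rest.xml import build_put_service_properties_request

xml_body = b"<StorageServiceProperties><Logging><Version>1.0</Version></Logging></StorageServiceProperties>"
request = build_put_service_properties_request(
    content=xml_body,
    content_type="application/xml",
)
print(request.method, request.url)   # e.g. PUT /xml/?comp=properties&restype=service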
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/test/vanilla/low-level/Expected/AcceptanceTests/XmlLowLevel/xmlservicelowlevel/rest/xml/_request_builders_py3.py#L815-L849
from typing import Any, List, Optional from azure.core.rest import HttpRequest from msrest import Serializer _SERIALIZER = Serializer() def build_get_complex_type_ref_no_meta_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/complex-type-ref-no-meta") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_complex_type_ref_no_meta_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) url = kwargs.pop("template_url", "/xml/complex-type-ref-no-meta") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_get_complex_type_ref_with_meta_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/complex-type-ref-with-meta") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_complex_type_ref_with_meta_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) url = kwargs.pop("template_url", "/xml/complex-type-ref-with-meta") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_get_simple_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/simple") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_simple_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) accept = "application/xml" url = kwargs.pop("template_url", "/xml/simple") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_get_wrapped_lists_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/wrapped-lists") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_wrapped_lists_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) accept = "application/xml" url = kwargs.pop("template_url", "/xml/wrapped-lists") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="PUT", url=url, 
headers=header_parameters, content=content, **kwargs) def build_get_headers_request(**kwargs: Any) -> HttpRequest: url = kwargs.pop("template_url", "/xml/headers") return HttpRequest(method="GET", url=url, **kwargs) def build_get_empty_list_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/empty-list") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_empty_list_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) url = kwargs.pop("template_url", "/xml/empty-list") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_get_empty_wrapped_lists_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/empty-wrapped-lists") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_empty_wrapped_lists_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) url = kwargs.pop("template_url", "/xml/empty-wrapped-lists") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_get_root_list_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/root-list") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_root_list_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) url = kwargs.pop("template_url", "/xml/root-list") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_get_root_list_single_item_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/root-list-single-item") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_root_list_single_item_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) url = kwargs.pop("template_url", "/xml/root-list-single-item") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_get_empty_root_list_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = 
kwargs.pop("template_url", "/xml/empty-root-list") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_empty_root_list_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) url = kwargs.pop("template_url", "/xml/empty-root-list") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_get_empty_child_element_request(**kwargs: Any) -> HttpRequest: accept = "application/xml" url = kwargs.pop("template_url", "/xml/empty-child-element") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) def build_put_empty_child_element_request(*, content: Any, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) url = kwargs.pop("template_url", "/xml/empty-child-element") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, content=content, **kwargs) def build_list_containers_request(**kwargs: Any) -> HttpRequest: comp = "list" accept = "application/xml" url = kwargs.pop("template_url", "/xml/") query_parameters = kwargs.pop("params", {}) query_parameters["comp"] = _SERIALIZER.query("comp", comp, "str") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs) def build_get_service_properties_request(**kwargs: Any) -> HttpRequest: comp = "properties" restype = "service" accept = "application/xml" url = kwargs.pop("template_url", "/xml/") query_parameters = kwargs.pop("params", {}) query_parameters["comp"] = _SERIALIZER.query("comp", comp, "str") query_parameters["restype"] = _SERIALIZER.query("restype", restype, "str") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
MIT License
harrystech/arthur-redshift-etl
python/etl/config/dw.py
DataWarehouseSchema.dsn
python
def dsn(self):
    if self._dsn_env_var:
        return etl.db.parse_connection_string(etl.config.env.get(self._dsn_env_var))
Return the connection string to find this schema upstream or in the data warehouse. This returns a connection string suitable for the schema, which is either the value of the environment variable named in the read_access field (for upstream sources) or the value of the environment variable named in the etl_access field of the data warehouse (for schemas that have CTAS or views, i.e. transformations). Evaluation of the DSN (and thus of the environment variable) is deferred so that the variable may remain unset if it is never actually used.
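A hedged sketch of the deferred lookup: a schema whose definition names a read_access environment variable resolves .dsn from that variable only when the property is read. The variable name and connection string below are made up, and etl.db.parse_connection_string decides the exact return shape.

import os

from etl.config.dw import DataWarehouseSchema

os.environ["UPSTREAM_ORDERS_DSN"] = "postgres://etl_ro:secret@orders-db:5432/orders"
schema = DataWarehouseSchema({
    "name": "orders",
    "owner": "etl",
    "read_access": "UPSTREAM_ORDERS_DSN",   # marks this schema as an upstream database source
})
print(schema.has_dsn)   # True
print(schema.dsn)       # parsed connection settings for the orders database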
https://github.com/harrystech/arthur-redshift-etl/blob/126b93e998b5b393597e333e4f6eb11f531809df/python/etl/config/dw.py#L140-L153
from typing import Dict import etl.config.env import etl.db import etl.names import etl.templates from etl.errors import ETLConfigError, InvalidEnvironmentError class DataWarehouseUser: def __init__(self, user_info): self.name = user_info["name"] self.group = user_info["group"] self.schema = user_info.get("schema") class S3DataFormat: def __init__(self, s3_data_format) -> None: self.format = s3_data_format.get("format") self.format_option = s3_data_format.get("format_option") self.compression = s3_data_format.get("compression") class DataWarehouseSchema: def __init__(self, schema_info, etl_access=None): self.name = schema_info["name"] self.description = schema_info.get("description") self.owner = schema_info["owner"] self.reader_groups = schema_info.get("readers", schema_info.get("groups", [])) self.writer_groups = schema_info.get("writers", []) self.is_database_source = "read_access" in schema_info self.is_static_source = "s3_bucket" in schema_info and "s3_path_template" in schema_info self.is_external = schema_info.get("external", False) self.is_upstream_source = self.is_database_source or self.is_static_source or self.is_external self.has_transformations = not self.is_upstream_source self.is_an_unload_target = "s3_bucket" in schema_info and "s3_unload_path_template" in schema_info if self.is_database_source: self._dsn_env_var = schema_info["read_access"] elif self.is_static_source or self.is_an_unload_target: self._dsn_env_var = None elif self.is_external: self._dsn_env_var = None self.database = schema_info.get("database") self.iam_role = schema_info.get("iam_role") else: self._dsn_env_var = etl_access self.has_dsn = self._dsn_env_var is not None self._s3_bucket_template = schema_info.get("s3_bucket") self._s3_path_template = schema_info.get("s3_path_template") self._s3_unload_path_template = schema_info.get("s3_unload_path_template") self.s3_data_format = S3DataFormat(schema_info.get("s3_data_format", {})) self.include_tables = schema_info.get("include_tables", [self.name + ".*"]) self.exclude_tables = schema_info.get("exclude_tables", []) @property def source_type(self) -> str: if self.is_database_source: return "DATABASE" if self.is_static_source: return "S3_STATIC" if self.is_external: return "EXTERNAL" return "TRANSFORMATIONS" @property def s3_bucket(self) -> str: return etl.templates.render_from_config( self._s3_bucket_template, context=f"s3_bucket of schema '{self.name}'" ) @property def s3_path_prefix(self) -> str: return etl.templates.render_from_config( self._s3_path_template, context=f"s3_path_template of schema '{self.name}'" ) @property def s3_unload_path_prefix(self) -> str: return etl.templates.render_from_config( self._s3_unload_path_template, context=f"s3_unload_path_template of schema '{self.name}'" ) @property
MIT License
mavlink/mavsdk-python
mavsdk/calibration.py
CalibrationResult.__init__
python
def __init__(self, result, result_str):
    self.result = result
    self.result_str = result_str
Initializes the CalibrationResult object
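A simple construction sketch: pair a Result enum member with its human-readable string, as the gRPC translation layer does when it builds results from RPC responses.

from mavsdk.calibration import CalibrationResult

result = CalibrationResult(CalibrationResult.Result.SUCCESS, "Success")
print(result.result_str)   # -> Success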
https://github.com/mavlink/mavsdk-python/blob/ded17f0c12316f20c93f5e1bac4fede2b8b4e446/mavsdk/calibration.py#L133-L139
from ._base import AsyncBase from . import calibration_pb2, calibration_pb2_grpc from enum import Enum class CalibrationResult: class Result(Enum): UNKNOWN = 0 SUCCESS = 1 NEXT = 2 FAILED = 3 NO_SYSTEM = 4 CONNECTION_ERROR = 5 BUSY = 6 COMMAND_DENIED = 7 TIMEOUT = 8 CANCELLED = 9 FAILED_ARMED = 10 def translate_to_rpc(self): if self == CalibrationResult.Result.UNKNOWN: return calibration_pb2.CalibrationResult.RESULT_UNKNOWN if self == CalibrationResult.Result.SUCCESS: return calibration_pb2.CalibrationResult.RESULT_SUCCESS if self == CalibrationResult.Result.NEXT: return calibration_pb2.CalibrationResult.RESULT_NEXT if self == CalibrationResult.Result.FAILED: return calibration_pb2.CalibrationResult.RESULT_FAILED if self == CalibrationResult.Result.NO_SYSTEM: return calibration_pb2.CalibrationResult.RESULT_NO_SYSTEM if self == CalibrationResult.Result.CONNECTION_ERROR: return calibration_pb2.CalibrationResult.RESULT_CONNECTION_ERROR if self == CalibrationResult.Result.BUSY: return calibration_pb2.CalibrationResult.RESULT_BUSY if self == CalibrationResult.Result.COMMAND_DENIED: return calibration_pb2.CalibrationResult.RESULT_COMMAND_DENIED if self == CalibrationResult.Result.TIMEOUT: return calibration_pb2.CalibrationResult.RESULT_TIMEOUT if self == CalibrationResult.Result.CANCELLED: return calibration_pb2.CalibrationResult.RESULT_CANCELLED if self == CalibrationResult.Result.FAILED_ARMED: return calibration_pb2.CalibrationResult.RESULT_FAILED_ARMED @staticmethod def translate_from_rpc(rpc_enum_value): if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_UNKNOWN: return CalibrationResult.Result.UNKNOWN if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_SUCCESS: return CalibrationResult.Result.SUCCESS if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_NEXT: return CalibrationResult.Result.NEXT if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_FAILED: return CalibrationResult.Result.FAILED if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_NO_SYSTEM: return CalibrationResult.Result.NO_SYSTEM if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_CONNECTION_ERROR: return CalibrationResult.Result.CONNECTION_ERROR if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_BUSY: return CalibrationResult.Result.BUSY if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_COMMAND_DENIED: return CalibrationResult.Result.COMMAND_DENIED if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_TIMEOUT: return CalibrationResult.Result.TIMEOUT if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_CANCELLED: return CalibrationResult.Result.CANCELLED if rpc_enum_value == calibration_pb2.CalibrationResult.RESULT_FAILED_ARMED: return CalibrationResult.Result.FAILED_ARMED def __str__(self): return self.name
BSD 3-Clause New or Revised License
meltano/sdk
singer_sdk/streams/rest.py
RESTStream.get_url
python
def get_url(self, context: Optional[dict]) -> str:
    url = "".join([self.url_base, self.path or ""])
    vals = copy.copy(dict(self.config))
    vals.update(context or {})
    for k, v in vals.items():
        search_text = "".join(["{", k, "}"])
        if search_text in url:
            url = url.replace(search_text, self._url_encode(v))
    return url
Get stream entity URL. Developers override this method to perform dynamic URL generation. Args: context: Stream partition or context dictionary. Returns: A URL, optionally targeted to a specific partition or context.
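A sketch of how the placeholder substitution plays out: every "{name}" in url_base + path is replaced from the merged config/context dict, with context winning. GroupsStream and its endpoint are hypothetical, and instantiating it still requires a real singer_sdk Tap; only the class definition is shown.

from singer_sdk.streams import RESTStream

class GroupsStream(RESTStream):
    name = "groups"
    url_base = "https://api.example.com"
    path = "/users/{user_id}/groups"   # {user_id} is filled from config or the partition context

# Inside a tap run, a partition context such as {"user_id": "42"} would give:
#   stream.get_url({"user_id": "42"}) == "https://api.example.com/users/42/groups"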
https://github.com/meltano/sdk/blob/5916ffee84eab15689558abe15757810dad5419e/singer_sdk/streams/rest.py#L85-L103
import abc import copy import logging from datetime import datetime from typing import Any, Dict, Iterable, List, Optional, Union, cast import backoff import requests from singer.schema import Schema from singer_sdk.authenticators import APIAuthenticatorBase, SimpleAuthenticator from singer_sdk.helpers.jsonpath import extract_jsonpath from singer_sdk.plugin_base import PluginBase as TapBaseClass from singer_sdk.streams.core import Stream DEFAULT_PAGE_SIZE = 1000 class RESTStream(Stream, metaclass=abc.ABCMeta): _page_size: int = DEFAULT_PAGE_SIZE _requests_session: Optional[requests.Session] rest_method = "GET" records_jsonpath: str = "$[*]" next_page_token_jsonpath: Optional[str] = None _LOG_REQUEST_METRICS: bool = True _LOG_REQUEST_METRIC_URLS: bool = False @property @abc.abstractmethod def url_base(self) -> str: pass def __init__( self, tap: TapBaseClass, name: Optional[str] = None, schema: Optional[Union[Dict[str, Any], Schema]] = None, path: Optional[str] = None, ) -> None: super().__init__(name=name, schema=schema, tap=tap) if path: self.path = path self._http_headers: dict = {} self._requests_session = requests.Session() self._compiled_jsonpath = None self._next_page_token_compiled_jsonpath = None @staticmethod def _url_encode(val: Union[str, datetime, bool, int, List[str]]) -> str: if isinstance(val, str): result = val.replace("/", "%2F") else: result = str(val) return result
Apache License 2.0
hewlettpackard/oneview-redfish-toolkit
oneview_redfish_toolkit/blueprints/vlan_network_interface.py
_get_connection_oneview_resource
python
def _get_connection_oneview_resource(oneview_resource, connection_id):
    conn_settings = oneview_resource["connectionSettings"]
    connection = None
    for conn in conn_settings["connections"]:
        if str(conn["id"]) == connection_id:
            connection = conn
            break
    if not connection:
        msg = "Ethernet interface not found"
        logging.exception('Unexpected error: {}'.format(msg))
        abort(status.HTTP_404_NOT_FOUND, msg)
    return connection
Get the OneView Connection for the given resource and ID. Return the OneView Connection for a given OneView resource and connection ID. Logs an exception on any error and aborts with Not Found if the connection does not exist. Returns: JSON: OneView Connection
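A shape sketch for the helper above: it scans resource["connectionSettings"]["connections"] for a matching string id, and a miss aborts the Flask request with 404. The trimmed profile dict is a made-up stand-in for what the OneView API returns.

from oneview_redfish_toolkit.blueprints.vlan_network_interface import (
    _get_connection_oneview_resource,
)

server_profile = {
    "connectionSettings": {
        "connections": [
            {"id": 1, "networkUri": "/rest/ethernet-networks/aaa"},
            {"id": 2, "networkUri": "/rest/ethernet-networks/bbb"},
        ]
    }
}
connection = _get_connection_oneview_resource(server_profile, "2")
print(connection["networkUri"])   # /rest/ethernet-networks/bbb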
https://github.com/hewlettpackard/oneview-redfish-toolkit/blob/258fb8e23973445842bb317230f34ed34fdd7ec2/oneview_redfish_toolkit/blueprints/vlan_network_interface.py#L91-L113
import logging from flask import abort from flask import Blueprint from flask import g from flask import request from flask_api import status from oneview_redfish_toolkit.api.computer_system import ComputerSystem from oneview_redfish_toolkit.api.resource_block import ResourceBlock from oneview_redfish_toolkit.api.vlan_network_interface import VLanNetworkInterface from oneview_redfish_toolkit.api.vlan_network_interface_collection import VLanNetworkInterfaceCollection from oneview_redfish_toolkit.blueprints.util.response_builder import ResponseBuilder vlan_network_interface = Blueprint("vlan_network_interface", __name__) @vlan_network_interface.route( ComputerSystem.BASE_URI + "/<sp_uuid>/EthernetInterfaces/<connection_id>/VLANs", methods=["GET"]) def get_vlan_network_interface_collection_sp(sp_uuid, connection_id): server_profile = g.oneview_client.server_profiles.get_by_id(sp_uuid).data connection = _get_connection_oneview_resource(server_profile, connection_id) vlan_collection = _get_vlan_network_interface_collection(connection["networkUri"]) return ResponseBuilder.success(vlan_collection) @vlan_network_interface.route( ResourceBlock.BASE_URI + "/<spt_uuid>/EthernetInterfaces/<connection_id>/VLANs", methods=["GET"]) def get_vlan_network_interface_collection_spt(spt_uuid, connection_id): server_profile_template = g.oneview_client.server_profile_templates.get_by_id(spt_uuid).data connection = _get_connection_oneview_resource(server_profile_template, connection_id) vlan_collection = _get_vlan_network_interface_collection(connection["networkUri"]) return ResponseBuilder.success(vlan_collection)
Apache License 2.0
rdagger/pi-st7565
st7565.py
Glcd.draw_polygon
python
def draw_polygon(self, sides, x0, y0, r, rotate=0, color=1):
    coords = np.empty(shape=[sides + 1, 2], dtype="float64")
    n = np.arange(sides, dtype="float64")
    theta = math.radians(rotate)
    for s in n:
        t = 2.0 * math.pi * s / sides + theta
        coords[int(s), 0] = r * math.cos(t) + x0
        coords[int(s), 1] = r * math.sin(t) + y0
    coords[sides] = coords[0]
    self.draw_lines(coords.astype("float32").astype("int32"), color=color)
Draws an n-sided regular polygon on the back buffer Args: sides (int): Number of polygon sides x0, y0 (int): Center point coordinates r (int): Radius rotate (Optional float): Rotation in degrees relative to origin. Default is 0. color (Optional int): 0 = pixel off, 1 = pixel on (default) Note: The center point is the center of the x0,y0 pixel. Since pixels are not divisible, the radius is integer rounded up to complete on a full pixel. Therefore diameter = 2 x r + 1.
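A hardware-dependent sketch (Raspberry Pi with an ST7565 on SPI0): draw a hexagon of radius 20 centered in the 128x64 buffer, rotated 30 degrees, then push the back buffer to the panel. The GPIO pin numbers are assumptions about the wiring.

from st7565 import Glcd

glcd = Glcd(a0=24, cs=8, rst=25)   # pin assignments depend on how the display is wired
glcd.init()
glcd.draw_polygon(6, x0=63, y0=31, r=20, rotate=30)
glcd.flip()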
https://github.com/rdagger/pi-st7565/blob/6888a62a0adc8cc694b20ef198ff4dc1f9ba1156/st7565.py#L648-L670
from __future__ import print_function from time import sleep import math import numpy as np class Glcd(object): CMD_DISPLAY_OFF = 0xAE CMD_DISPLAY_ON = 0xAF CMD_SET_DISP_START_LINE = 0x40 CMD_SET_PAGE = 0xB0 CMD_SET_COLUMN_UPPER = 0x10 CMD_SET_COLUMN_LOWER = 0x00 CMD_SET_ADC_NORMAL = 0xA0 CMD_SET_ADC_REVERSE = 0xA1 CMD_SET_DISP_NORMAL = 0xA6 CMD_SET_DISP_REVERSE = 0xA7 CMD_SET_ALLPTS_NORMAL = 0xA4 CMD_SET_ALLPTS_ON = 0xA5 CMD_SET_BIAS_9 = 0xA2 CMD_SET_BIAS_7 = 0xA3 CMD_INTERNAL_RESET = 0xE2 CMD_SET_COM_NORMAL = 0xC0 CMD_SET_COM_REVERSE = 0xC8 CMD_SET_POWER_CONTROL = 0x28 CMD_SET_RESISTOR_RATIO = 0x20 CMD_SET_VOLUME_FIRST = 0x81 CMD_SET_VOLUME_SECOND = 0x00 CMD_SET_STATIC_OFF = 0xAC CMD_SET_STATIC_ON = 0xAD CMD_SET_STATIC_REG = 0x00 LCD_WIDTH = 128 LCD_HEIGHT = 64 LCD_PAGE_COUNT = 8 LCD_CONTRAST = 0x19 BACKLIGHT_PWM_FREQUENCY = 100 __pagemap = (3, 2, 1, 0, 7, 6, 5, 4) def __init__(self, a0=24, cs=8, rst=25, rgb=None): import RPi.GPIO as GPIO GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) import spidev self.__spi = spidev.SpiDev() self.__spi.open(0, 0) self.__spi.max_speed_hz = 250000 self.back_buffer = np.zeros((Glcd.LCD_HEIGHT, Glcd.LCD_WIDTH), dtype='uint8') self.a0 = a0 self.cs = cs self.rst = rst GPIO.setup(a0, GPIO.OUT) GPIO.setup(cs, GPIO.OUT) GPIO.setup(rst, GPIO.OUT) if rgb is not None: GPIO.setup(rgb[0], GPIO.OUT) GPIO.setup(rgb[1], GPIO.OUT) GPIO.setup(rgb[2], GPIO.OUT) self.red = GPIO.PWM(rgb[0], self.BACKLIGHT_PWM_FREQUENCY) self.green = GPIO.PWM(rgb[1], self.BACKLIGHT_PWM_FREQUENCY) self.blue = GPIO.PWM(rgb[2], self.BACKLIGHT_PWM_FREQUENCY) self.red.start(0) self.green.start(0) self.blue.start(0) else: self.red, self.green, self.blue = None, None, None def send_command(self, cmd): import RPi.GPIO as GPIO GPIO.output(self.a0, GPIO.LOW) self.__spi.xfer(cmd) def send_data(self, data): import RPi.GPIO as GPIO GPIO.output(self.a0, GPIO.HIGH) self.__spi.xfer(data) def move_cursor(self, x, page): if x >= self.LCD_WIDTH | x < 0: return if page > self.LCD_PAGE_COUNT - 1 | page < 0: return self.send_command([self.CMD_SET_PAGE | self.__pagemap[page]]) self.send_command([self.CMD_SET_COLUMN_LOWER | (x & 0xf)]) self.send_command([self.CMD_SET_COLUMN_UPPER | ((x >> 4) & 0xf)]) def clear_display(self): for page in self.__pagemap: self.move_cursor(1, page) self.send_data([0] * self.LCD_WIDTH) def reset(self): import RPi.GPIO as GPIO GPIO.output(self.rst, GPIO.LOW) sleep(.5) GPIO.output(self.rst, GPIO.HIGH) def set_backlight_color(self, r, g, b): if self.red is None: print("Backlight RGB GPIO pins not initialized.") return if 0 <= r <= 100 and 0 <= g <= 100 and 0 <= b <= 100: self.red.ChangeDutyCycle(100 - r) self.green.ChangeDutyCycle(100 - g) self.blue.ChangeDutyCycle(100 - b) else: print("Invalid range. 
Colors must be between 0 and 100.") def set_contrast(self, level): self.send_command([self.CMD_SET_VOLUME_FIRST]) self.send_command([self.CMD_SET_VOLUME_SECOND | (level & 0x3f)]) def clear_back_buffer(self): self.back_buffer = np.zeros((self.LCD_HEIGHT, self.LCD_WIDTH), dtype='uint8') def init(self): import RPi.GPIO as GPIO GPIO.output(self.cs, GPIO.LOW) self.reset() self.send_command([self.CMD_SET_BIAS_7]) self.send_command([self.CMD_SET_ADC_NORMAL]) self.send_command([self.CMD_SET_COM_NORMAL]) self.send_command([self.CMD_SET_DISP_START_LINE]) self.send_command([self.CMD_SET_POWER_CONTROL | 0x4]) sleep(.05) self.send_command([self.CMD_SET_POWER_CONTROL | 0x6]) sleep(.05) self.send_command([self.CMD_SET_POWER_CONTROL | 0x7]) sleep(.01) self.send_command([self.CMD_SET_RESISTOR_RATIO | 0x7]) self.send_command([self.CMD_DISPLAY_ON]) self.send_command([self.CMD_SET_ALLPTS_NORMAL]) self.set_contrast(self.LCD_CONTRAST) self.clear_display() def reverse_display(self, reverse=True): if reverse: self.send_command([self.CMD_SET_DISP_REVERSE]) else: self.send_command([self.CMD_SET_DISP_NORMAL]) def sleep(self): self.send_command([self.CMD_SET_STATIC_OFF]) self.send_command([self.CMD_DISPLAY_OFF]) self.send_command([self.CMD_SET_ALLPTS_ON]) def wake(self): self.send_command([self.CMD_INTERNAL_RESET]) self.send_command([self.CMD_SET_ALLPTS_NORMAL]) self.send_command([self.CMD_DISPLAY_ON]) self.send_command([self.CMD_SET_STATIC_ON]) self.send_command([self.CMD_SET_STATIC_REG | 0x03]) def standby(self, exit=False): if exit: self.send_command([self.CMD_SET_ALLPTS_NORMAL]) self.send_command([self.CMD_DISPLAY_ON]) else: self.send_command([self.CMD_SET_STATIC_ON]) self.send_command([self.CMD_SET_STATIC_REG | 0x03]) self.send_command([self.CMD_DISPLAY_OFF]) self.send_command([self.CMD_SET_ALLPTS_ON]) def flip(self): for idx in range(0, self.LCD_PAGE_COUNT): self.move_cursor(1, idx) row_start = idx << 3 row_stop = (idx + 1) << 3 self.send_data(np.packbits(self.back_buffer[row_start:row_stop], axis=0).flatten().tolist()) def cleanup(self): self.clear_display() self.sleep() self.__spi.close() import RPi.GPIO as GPIO GPIO.cleanup() def is_off_grid(self, xmin, ymin, xmax, ymax): if xmin < 0: print('x-coordinate: {0} below minimum of 0.'.format(xmin)) return True if ymin < 0: print('y-coordinate: {0} below minimum of 0.'.format(ymin)) return True if xmax >= self.LCD_WIDTH: print('x-coordinate: {0} above maximum of {1}.'.format(xmax, self.LCD_WIDTH - 1)) return True if ymax >= self.LCD_HEIGHT: print('y-coordinate: {0} above maximum of {1}.'.format(ymax, self.LCD_HEIGHT - 1)) return True return False def is_point(self, x, y): if self.is_off_grid(x, y, x, y): return False return self.back_buffer[y, x] == 1 def draw_point(self, x, y, color=1, invert=False): if not 0 <= x < self.LCD_WIDTH: print('x-coordinate: {0} outside display range of 0 to {1} .').format(x, self.LCD_WIDTH - 1) return if not 0 <= y < self.LCD_HEIGHT: print('y-coordinate: {0} outside display range of 0 to {1} .').format(x, self.LCD_HEIGHT - 1) return if invert: self.back_buffer[y, x] ^= 1 else: self.back_buffer[y, x] = color def draw_line(self, x1, y1, x2, y2, color=1, invert=False): if self.is_off_grid(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)): return if y1 == y2: if x1 > x2: x1, x2 = x2, x1 if invert: self.back_buffer[y1, x1:x2 + 1] ^= 1 else: self.back_buffer[y1, x1:x2 + 1] = color return if x1 == x2: if y1 > y2: y1, y2 = y2, y1 if invert: self.back_buffer[y1:y2 + 1, x1] ^= 1 else: self.back_buffer[y1:y2 + 1, x1] = color return dx = x2 - 
x1 dy = y2 - y1 is_steep = abs(dy) > abs(dx) if is_steep: x1, y1 = y1, x1 x2, y2 = y2, x2 if x1 > x2: x1, x2 = x2, x1 y1, y2 = y2, y1 dx = x2 - x1 dy = y2 - y1 error = dx >> 1 ystep = 1 if y1 < y2 else -1 y = y1 for x in range(x1, x2 + 1): if invert: if is_steep: self.back_buffer[x, y] ^= 1 else: self.back_buffer[y, x] ^= 1 else: if is_steep: self.back_buffer[x, y] = color else: self.back_buffer[y, x] = color error -= abs(dy) if error < 0: y += ystep error += dx def draw_lines(self, coords, color=1, invert=False): if coords.shape[1] != 2: return x1, y1 = coords[0] for row in coords[1:]: x2, y2 = row self.draw_line(x1, y1, x2, y2, color, invert) x1, y1 = x2, y2 def draw_rectangle(self, x1, y1, w, h, color=1, invert=False): x2 = x1 + w - 1 y2 = y1 + h - 1 if self.is_off_grid(x1, y1, x2, y2): return if invert: self.back_buffer[y1, x1 + 1:x2] ^= 1 self.back_buffer[y2, x1:x2 + 1] ^= 1 self.back_buffer[y1:y2, x1] ^= 1 self.back_buffer[y1:y2, x2] ^= 1 else: self.back_buffer[y1, x1:x2] = color self.back_buffer[y2, x1:x2 + 1] = color self.back_buffer[y1:y2, x1] = color self.back_buffer[y1:y2, x2] = color def fill_rectangle(self, x1, y1, w, h, color=1, invert=False): if self.is_off_grid(x1, y1, x1 + w - 1, y1 + h - 1): return if invert: self.back_buffer[y1:y1 + h, x1:x1 + w] ^= 1 else: self.back_buffer[y1:y1 + h, x1:x1 + w] = color def draw_circle(self, x0, y0, r, color=1): if self.is_off_grid(x0 - r, y0 - r, x0 + r, y0 + r): return f = 1 - r dx = 1 dy = -r - r x = 0 y = r self.back_buffer[y0 + r, x0] = color self.back_buffer[y0 - r, x0] = color self.back_buffer[y0, x0 + r] = color self.back_buffer[y0, x0 - r] = color while x < y: if f >= 0: y -= 1 dy += 2 f += dy x += 1 dx += 2 f += dx self.back_buffer[y0 + y, x0 + x] = color self.back_buffer[y0 + y, x0 - x] = color self.back_buffer[y0 - y, x0 + x] = color self.back_buffer[y0 - y, x0 - x] = color self.back_buffer[y0 + x, x0 + y] = color self.back_buffer[y0 + x, x0 - y] = color self.back_buffer[y0 - x, x0 + y] = color self.back_buffer[y0 - x, x0 - y] = color def fill_circle(self, x0, y0, r, color=1): if self.is_off_grid(x0 - r, y0 - r, x0 + r, y0 + r): return f = 1 - r dx = 1 dy = -r - r x = 0 y = r self.back_buffer[y0 - r: y0 + r + 1, x0] = color while x < y: if f >= 0: y -= 1 dy += 2 f += dy x += 1 dx += 2 f += dx self.back_buffer[y0 - y: y0 + y + 1, x0 + x] = color self.back_buffer[y0 - y: y0 + y + 1, x0 - x] = color self.back_buffer[y0 - x: y0 + x + 1, x0 - y] = color self.back_buffer[y0 - x: y0 + x + 1, x0 + y] = color def draw_ellipse(self, x0, y0, a, b, color=1): if self.is_off_grid(x0 - a, y0 - b, x0 + a, y0 + b): return a2 = a * a b2 = b * b twoa2 = a2 + a2 twob2 = b2 + b2 x = 0 y = b px = 0 py = twoa2 * y self.back_buffer[y0 + y, x0 + x] = color self.back_buffer[y0 + y, x0 - x] = color self.back_buffer[y0 - y, x0 + x] = color self.back_buffer[y0 - y, x0 - x] = color p = round(b2 - (a2 * b) + (0.25 * a2)) while px < py: x += 1 px += twob2 if p < 0: p += b2 + px else: y -= 1 py -= twoa2 p += b2 + px - py self.back_buffer[y0 + y, x0 + x] = color self.back_buffer[y0 + y, x0 - x] = color self.back_buffer[y0 - y, x0 + x] = color self.back_buffer[y0 - y, x0 - x] = color p = round(b2 * (x + 0.5) * (x + 0.5) + a2 * (y - 1) * (y - 1) - a2 * b2) while y > 0: y -= 1 py -= twoa2 if p > 0: p += a2 - py else: x += 1 px += twob2 p += a2 - py + px self.back_buffer[y0 + y, x0 + x] = color self.back_buffer[y0 + y, x0 - x] = color self.back_buffer[y0 - y, x0 + x] = color self.back_buffer[y0 - y, x0 - x] = color def fill_ellipse(self, x0, y0, a, b, color=1): 
if self.is_off_grid(x0 - a, y0 - b, x0 + a, y0 + b): return a2 = a * a b2 = b * b twoa2 = a2 + a2 twob2 = b2 + b2 x = 0 y = b px = 0 py = twoa2 * y self.draw_line(x0, y0 - y, x0, y0 + y, color) p = round(b2 - (a2 * b) + (0.25 * a2)) while px < py: x += 1 px += twob2 if p < 0: p += b2 + px else: y -= 1 py -= twoa2 p += b2 + px - py self.draw_line(x0 + x, y0 - y, x0 + x, y0 + y, color) self.draw_line(x0 - x, y0 - y, x0 - x, y0 + y, color) p = round(b2 * (x + 0.5) * (x + 0.5) + a2 * (y - 1) * (y - 1) - a2 * b2) while y > 0: y -= 1 py -= twoa2 if p > 0: p += a2 - py else: x += 1 px += twob2 p += a2 - py + px self.draw_line(x0 + x, y0 - y, x0 + x, y0 + y, color) self.draw_line(x0 - x, y0 - y, x0 - x, y0 + y, color)
MIT License
nedbat/coveragepy-bbmirror
coverage/data.py
CoverageData.add_file_tracers
python
def add_file_tracers(self, file_tracers): if self._debug and self._debug.should('dataop'): self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) existing_files = self._arcs or self._lines or {} for filename, plugin_name in iitems(file_tracers): if filename not in existing_files: raise CoverageException( "Can't add file tracer data for unmeasured file '%s'" % (filename,) ) existing_plugin = self._file_tracers.get(filename) if existing_plugin is not None and plugin_name != existing_plugin: raise CoverageException( "Conflicting file tracer name for '%s': %r vs %r" % ( filename, existing_plugin, plugin_name, ) ) self._file_tracers[filename] = plugin_name self._validate()
Add per-file plugin information. `file_tracers` is { filename: plugin_name, ... }
https://github.com/nedbat/coveragepy-bbmirror/blob/0df8e82c77b677a1cda984aa56e3666948f72e9b/coverage/data.py#L378-L402
import glob import itertools import json import optparse import os import os.path import random import re import socket from coverage import env from coverage.backward import iitems, string_class from coverage.debug import _TEST_NAME_FILE from coverage.files import PathAliases from coverage.misc import CoverageException, file_be_gone, isolate_module os = isolate_module(os) class CoverageData(object): def __init__(self, debug=None): self._debug = debug self._lines = None self._arcs = None self._file_tracers = {} self._runs = [] def __repr__(self): return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format( klass=self.__class__.__name__, lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)), arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)), tracers="{{{0}}}".format(len(self._file_tracers)), runs="[{0}]".format(len(self._runs)), ) def has_arcs(self): return self._has_arcs() def lines(self, filename): if self._arcs is not None: arcs = self._arcs.get(filename) if arcs is not None: all_lines = itertools.chain.from_iterable(arcs) return list(set(l for l in all_lines if l > 0)) elif self._lines is not None: return self._lines.get(filename) return None def arcs(self, filename): if self._arcs is not None: if filename in self._arcs: return self._arcs[filename] return None def file_tracer(self, filename): if filename in (self._arcs or self._lines or {}): return self._file_tracers.get(filename, "") return None def run_infos(self): return self._runs def measured_files(self): return list(self._arcs or self._lines or {}) def line_counts(self, fullpath=False): summ = {} if fullpath: filename_fn = lambda f: f else: filename_fn = os.path.basename for filename in self.measured_files(): summ[filename_fn(filename)] = len(self.lines(filename)) return summ def __nonzero__(self): return bool(self._lines or self._arcs) __bool__ = __nonzero__ def read_fileobj(self, file_obj): data = self._read_raw_data(file_obj) self._lines = self._arcs = None if 'lines' in data: self._lines = data['lines'] if 'arcs' in data: self._arcs = dict( (fname, [tuple(pair) for pair in arcs]) for fname, arcs in iitems(data['arcs']) ) self._file_tracers = data.get('file_tracers', {}) self._runs = data.get('runs', []) self._validate() def read_file(self, filename): if self._debug and self._debug.should('dataio'): self._debug.write("Reading data from %r" % (filename,)) try: with self._open_for_reading(filename) as f: self.read_fileobj(f) except Exception as exc: raise CoverageException( "Couldn't read data from '%s': %s: %s" % ( filename, exc.__class__.__name__, exc, ) ) _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!" 
@classmethod def _open_for_reading(cls, filename): return open(filename, "r") @classmethod def _read_raw_data(cls, file_obj): go_away = file_obj.read(len(cls._GO_AWAY)) if go_away != cls._GO_AWAY: raise CoverageException("Doesn't seem to be a coverage.py data file") return json.load(file_obj) @classmethod def _read_raw_data_file(cls, filename): with cls._open_for_reading(filename) as f: return cls._read_raw_data(f) def add_lines(self, line_data): if self._debug and self._debug.should('dataop'): self._debug.write("Adding lines: %d files, %d lines total" % ( len(line_data), sum(len(lines) for lines in line_data.values()) )) if self._has_arcs(): raise CoverageException("Can't add lines to existing arc data") if self._lines is None: self._lines = {} for filename, linenos in iitems(line_data): if filename in self._lines: new_linenos = set(self._lines[filename]) new_linenos.update(linenos) linenos = new_linenos self._lines[filename] = list(linenos) self._validate() def add_arcs(self, arc_data): if self._debug and self._debug.should('dataop'): self._debug.write("Adding arcs: %d files, %d arcs total" % ( len(arc_data), sum(len(arcs) for arcs in arc_data.values()) )) if self._has_lines(): raise CoverageException("Can't add arcs to existing line data") if self._arcs is None: self._arcs = {} for filename, arcs in iitems(arc_data): if filename in self._arcs: new_arcs = set(self._arcs[filename]) new_arcs.update(arcs) arcs = new_arcs self._arcs[filename] = list(arcs) self._validate()
Apache License 2.0
itamarst/eliot
eliot/tests/test_testing.py
LoggedActionTests.fromMessagesIndex
python
def fromMessagesIndex(self, messages, index): uuid = messages[index]["task_uuid"] level = messages[index]["task_level"] return LoggedAction.fromMessages(uuid, level, messages)
Call L{LoggedAction.fromMessages} using action specified by index in a list of message dictionaries. @param messages: A C{list} of message dictionaries. @param index: Index to the logger's messages. @return: Result of L{LoggedAction.fromMessages}.
https://github.com/itamarst/eliot/blob/f08acdb5471349c68bbe72a2ffa89b7018fb5883/eliot/tests/test_testing.py#L81-L94
from __future__ import unicode_literals from unittest import SkipTest, TestResult, TestCase, skipUnless try: import numpy as np except ImportError: np = None from ..testing import ( issuperset, assertContainsFields, LoggedAction, LoggedMessage, validateLogging, UnflushedTracebacks, assertHasMessage, assertHasAction, validate_logging, capture_logging, swap_logger, check_for_errors, ) from .._output import MemoryLogger from .._action import start_action from .._message import Message from .._validation import ActionType, MessageType, ValidationError, Field from .._traceback import write_traceback from .. import add_destination, remove_destination, _output, log_message from .common import CustomObject, CustomJSONEncoder class IsSuperSetTests(TestCase): def test_equal(self): a = {"a": 1} b = a.copy() self.assertTrue(issuperset(a, b)) def test_additionalIsSuperSet(self): a = {"a": 1, "b": 2, "c": 3} b = {"a": 1, "c": 3} self.assertTrue(issuperset(a, b)) def test_missingIsNotSuperSet(self): a = {"a": 1, "c": 3} b = {"a": 1, "b": 2, "c": 3} self.assertFalse(issuperset(a, b)) class LoggedActionTests(TestCase): def test_values(self): d1 = {"x": 1} d2 = {"y": 2} root = LoggedAction(d1, d2, []) self.assertEqual((root.startMessage, root.endMessage), (d1, d2))
Apache License 2.0
iamdecode/sklearn-pmml-model
setup.py
declare_cython_extension
python
def declare_cython_extension(extName, use_math=False, use_openmp=False, include_dirs=None): extPath = extName.replace(".", os.path.sep)+".pyx" if use_math and os.name != 'nt': compile_args = list(my_extra_compile_args_math) link_args = list(my_extra_link_args_math) libraries = ["m"] else: compile_args = list(my_extra_compile_args_nonmath) link_args = list(my_extra_link_args_nonmath) libraries = None if use_openmp: compile_args.insert( 0, openmp_compile_args ) link_args.insert( 0, openmp_link_args ) return Extension( extName, [extPath], extra_compile_args=compile_args, extra_link_args=link_args, include_dirs=include_dirs, libraries=libraries )
Declare a Cython extension module for setuptools. Parameters: extName : str Absolute module name, e.g. use `mylibrary.mypackage.mymodule` for the Cython source file `mylibrary/mypackage/mymodule.pyx`. use_math : bool If True, set math flags and link with ``libm``. use_openmp : bool If True, compile and link with OpenMP. Return value: Extension object that can be passed to ``setuptools.setup``.
https://github.com/iamdecode/sklearn-pmml-model/blob/056fab1ed7076b513a3199af3395447252029800/setup.py#L95-L136
from __future__ import division, print_function, absolute_import from sklearn_pmml_model import __version__ as version build_type="optimized" with open("README.md", "r") as fh: long_description = fh.read() import sys if sys.version_info < (2,7): sys.exit('Sorry, Python < 2.7 is not supported') import os from setuptools import setup, find_packages from setuptools.extension import Extension try: from Cython.Build import cythonize except ImportError: sys.exit("Cython not found. Cython is needed to build the extension modules.") extra_compile_args_math_optimized = ['-march=native', '-O2', '-msse', '-msse2', '-mfma', '-mfpmath=sse'] extra_compile_args_math_debug = ['-march=native', '-O0', '-g'] extra_link_args_math_optimized = [] extra_link_args_math_debug = [] extra_compile_args_nonmath_optimized = ['-O2'] extra_compile_args_nonmath_debug = ['-O0', '-g'] extra_link_args_nonmath_optimized = [] extra_link_args_nonmath_debug = [] openmp_compile_args = ['-fopenmp'] openmp_link_args = ['-fopenmp'] import numpy as np my_include_dirs = [".", np.get_include()] if build_type == 'optimized': my_extra_compile_args_math = extra_compile_args_math_optimized my_extra_compile_args_nonmath = extra_compile_args_nonmath_optimized my_extra_link_args_math = extra_link_args_math_optimized my_extra_link_args_nonmath = extra_link_args_nonmath_optimized my_debug = False print( "build configuration selected: optimized" ) elif build_type == 'debug': my_extra_compile_args_math = extra_compile_args_math_debug my_extra_compile_args_nonmath = extra_compile_args_nonmath_debug my_extra_link_args_math = extra_link_args_math_debug my_extra_link_args_nonmath = extra_link_args_nonmath_debug my_debug = True print( "build configuration selected: debug" ) else: raise ValueError("Unknown build configuration '%s'; valid: 'optimized', 'debug'" % (build_type))
BSD 2-Clause Simplified License
pythonfreecourse/calendar
app/internal/import_holidays.py
save_holidays_to_db
python
def save_holidays_to_db(holidays: List[Event], session: Session): session.add_all(holidays) session.commit() session.flush(holidays) userevents = [] for holiday in holidays: userevent = UserEvent( user_id=holiday.owner_id, event_id=holiday.id ) userevents.append(userevent) session.add_all(userevents) session.commit()
this function saves holiday list into database. :param holidays: list of holidays events :param session: current connection
https://github.com/pythonfreecourse/calendar/blob/23a33703a0038d0eae8ce7299a93ad172c8f68e9/app/internal/import_holidays.py#L47-L64
import re from datetime import datetime, timedelta from app.database.models import User, Event, UserEvent from sqlalchemy.orm import Session from typing import List, Match REGEX_EXTRACT_HOLIDAYS = re.compile( r'SUMMARY:(?P<title>.*)(\n.*){1,8}DTSTAMP:(?P<date>\w{8})', re.MULTILINE) def get_holidays_from_file(file: List[Event], session: Session) -> List[Event]: parsed_holidays = REGEX_EXTRACT_HOLIDAYS.finditer(file) holidays = [] for holiday in parsed_holidays: holiday_event = create_holiday_event( holiday, session.query(User).filter_by(id=1).first().id) holidays.append(holiday_event) return holidays def create_holiday_event(holiday: Match[str], owner_id: int) -> Event: valid_ascii_chars_range = 128 title = holiday.groupdict()['title'].strip() title_to_save = ''.join(i if ord(i) < valid_ascii_chars_range else '' for i in title) date = holiday.groupdict()['date'].strip() format_string = '%Y%m%d' holiday = Event( title=title_to_save, start=datetime.strptime(date, format_string), end=datetime.strptime(date, format_string) + timedelta(days=1), content='holiday', owner_id=owner_id ) return holiday
Apache License 2.0
google-research/federated
utils/optimizers/shampoo.py
partition_metadata
python
def partition_metadata( tensor: TfValue, partition_info: PartitionConfig, ) -> PartitionMetadata: shape = tensor.get_shape() axis_to_shard = [s > partition_info.max_dim_size for s in shape] split_sizes_per_dim = [] for sharded, dim in zip(axis_to_shard, shape): dim = int(dim) if sharded: num_shards = dim // partition_info.partition_size last_shard_size = dim % partition_info.partition_size split_sizes = [partition_info.partition_size] * num_shards if last_shard_size > 0: split_sizes.append(last_shard_size) split_sizes_per_dim.append(split_sizes) else: split_sizes_per_dim.append([dim]) num_splits_per_dim = [len(v) for v in split_sizes_per_dim] return PartitionMetadata(split_sizes_per_dim, num_splits_per_dim)
Returns metadata required for partitioning and reforming tensors. Args: tensor: Tensor to partition. partition_info: Partitioning info. Returns: split_sizes_per_dim and num_splits_per_dim.
https://github.com/google-research/federated/blob/909953fa8945cfac01328e0a6d878e1dc0376c3c/utils/optimizers/shampoo.py#L69-L99
import functools from typing import Sequence, List, Optional, Tuple, Union import numpy as np import tensorflow as tf TfValue = Union[tf.Variable, tf.Tensor] class PartitionConfig: def __init__(self, max_dim_size: int, partition_size: int): if partition_size < 1 or partition_size > max_dim_size: raise ValueError('Parition size must be no less than 1 and no greater' 'than max_dim_size.') self.max_dim_size = max_dim_size self.partition_size = partition_size class PartitionMetadata: def __init__(self, split_sizes_per_dim: Sequence[Sequence[int]], num_splits_per_dim: Sequence[int]): self.split_sizes_per_dim = split_sizes_per_dim self.num_splits_per_dim = num_splits_per_dim
Apache License 2.0
giampaolo/pyftpdlib
pyftpdlib/handlers.py
DTPHandler.transfer_in_progress
python
def transfer_in_progress(self): return self.get_transmitted_bytes() != 0
Return True if a transfer is in progress, else False.
https://github.com/giampaolo/pyftpdlib/blob/5793ee5f61029d232f940a69a92bf67996be7f00/pyftpdlib/handlers.py#L751-L753
import asynchat import contextlib import errno import glob import logging import os import random import socket import sys import time import traceback import warnings from datetime import datetime try: import pwd import grp except ImportError: pwd = grp = None try: from OpenSSL import SSL except ImportError: SSL = None try: from collections import OrderedDict except ImportError: OrderedDict = dict from . import __ver__ from ._compat import b from ._compat import getcwdu from ._compat import PY3 from ._compat import super from ._compat import u from ._compat import unicode from ._compat import xrange from .authorizers import AuthenticationFailed from .authorizers import AuthorizerError from .authorizers import DummyAuthorizer from .filesystems import AbstractedFS from .filesystems import FilesystemError from .ioloop import _ERRNOS_DISCONNECTED from .ioloop import _ERRNOS_RETRY from .ioloop import Acceptor from .ioloop import AsyncChat from .ioloop import Connector from .ioloop import RetryError from .ioloop import timer from .log import debug from .log import logger CR_BYTE = ord('\r') def _import_sendfile(): if os.name == 'posix': try: return os.sendfile except AttributeError: try: import sendfile as sf if hasattr(sf, 'has_sf_hdtr'): raise ImportError return sf.sendfile except ImportError: pass return None sendfile = _import_sendfile() proto_cmds = { 'ABOR': dict( perm=None, auth=True, arg=False, help='Syntax: ABOR (abort transfer).'), 'ALLO': dict( perm=None, auth=True, arg=True, help='Syntax: ALLO <SP> bytes (noop; allocate storage).'), 'APPE': dict( perm='a', auth=True, arg=True, help='Syntax: APPE <SP> file-name (append data to file).'), 'CDUP': dict( perm='e', auth=True, arg=False, help='Syntax: CDUP (go to parent directory).'), 'CWD': dict( perm='e', auth=True, arg=None, help='Syntax: CWD [<SP> dir-name] (change working directory).'), 'DELE': dict( perm='d', auth=True, arg=True, help='Syntax: DELE <SP> file-name (delete file).'), 'EPRT': dict( perm=None, auth=True, arg=True, help='Syntax: EPRT <SP> |proto|ip|port| (extended active mode).'), 'EPSV': dict( perm=None, auth=True, arg=None, help='Syntax: EPSV [<SP> proto/"ALL"] (extended passive mode).'), 'FEAT': dict( perm=None, auth=False, arg=False, help='Syntax: FEAT (list all new features supported).'), 'HELP': dict( perm=None, auth=False, arg=None, help='Syntax: HELP [<SP> cmd] (show help).'), 'LIST': dict( perm='l', auth=True, arg=None, help='Syntax: LIST [<SP> path] (list files).'), 'MDTM': dict( perm='l', auth=True, arg=True, help='Syntax: MDTM [<SP> path] (file last modification time).'), 'MFMT': dict( perm='T', auth=True, arg=True, help='Syntax: MFMT <SP> timeval <SP> path (file update last ' 'modification time).'), 'MLSD': dict( perm='l', auth=True, arg=None, help='Syntax: MLSD [<SP> path] (list directory).'), 'MLST': dict( perm='l', auth=True, arg=None, help='Syntax: MLST [<SP> path] (show information about path).'), 'MODE': dict( perm=None, auth=True, arg=True, help='Syntax: MODE <SP> mode (noop; set data transfer mode).'), 'MKD': dict( perm='m', auth=True, arg=True, help='Syntax: MKD <SP> path (create directory).'), 'NLST': dict( perm='l', auth=True, arg=None, help='Syntax: NLST [<SP> path] (list path in a compact form).'), 'NOOP': dict( perm=None, auth=False, arg=False, help='Syntax: NOOP (just do nothing).'), 'OPTS': dict( perm=None, auth=True, arg=True, help='Syntax: OPTS <SP> cmd [<SP> option] (set option for command).'), 'PASS': dict( perm=None, auth=False, arg=None, help='Syntax: PASS [<SP> password] (set user 
password).'), 'PASV': dict( perm=None, auth=True, arg=False, help='Syntax: PASV (open passive data connection).'), 'PORT': dict( perm=None, auth=True, arg=True, help='Syntax: PORT <sp> h,h,h,h,p,p (open active data connection).'), 'PWD': dict( perm=None, auth=True, arg=False, help='Syntax: PWD (get current working directory).'), 'QUIT': dict( perm=None, auth=False, arg=False, help='Syntax: QUIT (quit current session).'), 'REIN': dict( perm=None, auth=True, arg=False, help='Syntax: REIN (flush account).'), 'REST': dict( perm=None, auth=True, arg=True, help='Syntax: REST <SP> offset (set file offset).'), 'RETR': dict( perm='r', auth=True, arg=True, help='Syntax: RETR <SP> file-name (retrieve a file).'), 'RMD': dict( perm='d', auth=True, arg=True, help='Syntax: RMD <SP> dir-name (remove directory).'), 'RNFR': dict( perm='f', auth=True, arg=True, help='Syntax: RNFR <SP> file-name (rename (source name)).'), 'RNTO': dict( perm='f', auth=True, arg=True, help='Syntax: RNTO <SP> file-name (rename (destination name)).'), 'SITE': dict( perm=None, auth=False, arg=True, help='Syntax: SITE <SP> site-command (execute SITE command).'), 'SITE HELP': dict( perm=None, auth=False, arg=None, help='Syntax: SITE HELP [<SP> cmd] (show SITE command help).'), 'SITE CHMOD': dict( perm='M', auth=True, arg=True, help='Syntax: SITE CHMOD <SP> mode path (change file mode).'), 'SIZE': dict( perm='l', auth=True, arg=True, help='Syntax: SIZE <SP> file-name (get file size).'), 'STAT': dict( perm='l', auth=False, arg=None, help='Syntax: STAT [<SP> path name] (server stats [list files]).'), 'STOR': dict( perm='w', auth=True, arg=True, help='Syntax: STOR <SP> file-name (store a file).'), 'STOU': dict( perm='w', auth=True, arg=None, help='Syntax: STOU [<SP> name] (store a file with a unique name).'), 'STRU': dict( perm=None, auth=True, arg=True, help='Syntax: STRU <SP> type (noop; set file structure).'), 'SYST': dict( perm=None, auth=False, arg=False, help='Syntax: SYST (get operating system type).'), 'TYPE': dict( perm=None, auth=True, arg=True, help='Syntax: TYPE <SP> [A | I] (set transfer type).'), 'USER': dict( perm=None, auth=False, arg=True, help='Syntax: USER <SP> user-name (set username).'), 'XCUP': dict( perm='e', auth=True, arg=False, help='Syntax: XCUP (obsolete; go to parent directory).'), 'XCWD': dict( perm='e', auth=True, arg=None, help='Syntax: XCWD [<SP> dir-name] (obsolete; change directory).'), 'XMKD': dict( perm='m', auth=True, arg=True, help='Syntax: XMKD <SP> dir-name (obsolete; create directory).'), 'XPWD': dict( perm=None, auth=True, arg=False, help='Syntax: XPWD (obsolete; get current dir).'), 'XRMD': dict( perm='d', auth=True, arg=True, help='Syntax: XRMD <SP> dir-name (obsolete; remove directory).'), } if not hasattr(os, 'chmod'): del proto_cmds['SITE CHMOD'] def _strerror(err): if isinstance(err, EnvironmentError): try: return os.strerror(err.errno) except AttributeError: if not hasattr(os, 'strerror'): return err.strerror raise else: return str(err) def _is_ssl_sock(sock): return SSL is not None and isinstance(sock, SSL.Connection) def _support_hybrid_ipv6(): try: if not socket.has_ipv6: return False with contextlib.closing(socket.socket(socket.AF_INET6)) as sock: return not sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY) except (socket.error, AttributeError): return False SUPPORTS_HYBRID_IPV6 = _support_hybrid_ipv6() class _FileReadWriteError(OSError): class _GiveUpOnSendfile(Exception): class PassiveDTP(Acceptor): timeout = 30 backlog = None def __init__(self, cmd_channel, extmode=False): 
self.cmd_channel = cmd_channel self.log = cmd_channel.log self.log_exception = cmd_channel.log_exception Acceptor.__init__(self, ioloop=cmd_channel.ioloop) local_ip = self.cmd_channel.socket.getsockname()[0] if local_ip in self.cmd_channel.masquerade_address_map: masqueraded_ip = self.cmd_channel.masquerade_address_map[local_ip] elif self.cmd_channel.masquerade_address: masqueraded_ip = self.cmd_channel.masquerade_address else: masqueraded_ip = None if self.cmd_channel.server.socket.family != socket.AF_INET: af = self.bind_af_unspecified((local_ip, 0)) self.socket.close() self.del_channel() else: af = self.cmd_channel.socket.family self.create_socket(af, socket.SOCK_STREAM) if self.cmd_channel.passive_ports is None: self.bind((local_ip, 0)) else: ports = list(self.cmd_channel.passive_ports) while ports: port = ports.pop(random.randint(0, len(ports) - 1)) self.set_reuse_addr() try: self.bind((local_ip, port)) except socket.error as err: if err.errno == errno.EADDRINUSE: if ports: continue else: self.bind((local_ip, 0)) self.cmd_channel.log( "Can't find a valid passive port in the " "configured range. A random kernel-assigned " "port will be used.", logfun=logger.warning ) else: raise else: break self.listen(self.backlog or self.cmd_channel.server.backlog) port = self.socket.getsockname()[1] if not extmode: ip = masqueraded_ip or local_ip if ip.startswith('::ffff:'): ip = ip[7:] resp = '227 Entering passive mode (%s,%d,%d).' % ( ip.replace('.', ','), port // 256, port % 256) self.cmd_channel.respond(resp) else: self.cmd_channel.respond('229 Entering extended passive mode ' '(|||%d|).' % port) if self.timeout: self.call_later(self.timeout, self.handle_timeout) def handle_accepted(self, sock, addr): if not self.cmd_channel.connected: return self.close() if self.cmd_channel.remote_ip != addr[0]: if not self.cmd_channel.permit_foreign_addresses: try: sock.close() except socket.error: pass msg = '425 Rejected data connection from foreign address ' '%s:%s.' % (addr[0], addr[1]) self.cmd_channel.respond_w_warning(msg) return else: msg = 'Established data connection with foreign address ' '%s:%s.' 
% (addr[0], addr[1]) self.cmd_channel.log(msg, logfun=logger.warning) self.close() if self.cmd_channel.connected: handler = self.cmd_channel.dtp_handler(sock, self.cmd_channel) if handler.connected: self.cmd_channel.data_channel = handler self.cmd_channel._on_dtp_connection() def handle_timeout(self): if self.cmd_channel.connected: self.cmd_channel.respond("421 Passive data channel timed out.", logfun=logger.info) self.close() def handle_error(self): try: raise except Exception: logger.error(traceback.format_exc()) try: self.close() except Exception: logger.critical(traceback.format_exc()) def close(self): debug("call: close()", inst=self) Acceptor.close(self) class ActiveDTP(Connector): timeout = 30 def __init__(self, ip, port, cmd_channel): Connector.__init__(self, ioloop=cmd_channel.ioloop) self.cmd_channel = cmd_channel self.log = cmd_channel.log self.log_exception = cmd_channel.log_exception self._idler = None if self.timeout: self._idler = self.ioloop.call_later(self.timeout, self.handle_timeout, _errback=self.handle_error) if ip.count('.') == 4: self._cmd = "PORT" self._normalized_addr = "%s:%s" % (ip, port) else: self._cmd = "EPRT" self._normalized_addr = "[%s]:%s" % (ip, port) source_ip = self.cmd_channel.socket.getsockname()[0] try: self.connect_af_unspecified((ip, port), (source_ip, 0)) except (socket.gaierror, socket.error): self.handle_close() def readable(self): return False def handle_write(self): pass def handle_connect(self): self.del_channel() if self._idler is not None and not self._idler.cancelled: self._idler.cancel() if not self.cmd_channel.connected: return self.close() err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) if err != 0: raise socket.error(err) msg = 'Active data connection established.' self.cmd_channel.respond('200 ' + msg) self.cmd_channel.log_cmd(self._cmd, self._normalized_addr, 200, msg) if not self.cmd_channel.connected: return self.close() handler = self.cmd_channel.dtp_handler(self.socket, self.cmd_channel) self.cmd_channel.data_channel = handler self.cmd_channel._on_dtp_connection() def handle_timeout(self): if self.cmd_channel.connected: msg = "Active data channel timed out." self.cmd_channel.respond("421 " + msg, logfun=logger.info) self.cmd_channel.log_cmd( self._cmd, self._normalized_addr, 421, msg) self.close() def handle_close(self): if not self._closed: self.close() if self.cmd_channel.connected: msg = "Can't connect to specified address." 
self.cmd_channel.respond("425 " + msg) self.cmd_channel.log_cmd( self._cmd, self._normalized_addr, 425, msg) def handle_error(self): try: raise except (socket.gaierror, socket.error): pass except Exception: self.log_exception(self) try: self.handle_close() except Exception: logger.critical(traceback.format_exc()) def close(self): debug("call: close()", inst=self) if not self._closed: Connector.close(self) if self._idler is not None and not self._idler.cancelled: self._idler.cancel() class DTPHandler(AsyncChat): timeout = 300 ac_in_buffer_size = 65536 ac_out_buffer_size = 65536 def __init__(self, sock, cmd_channel): self.cmd_channel = cmd_channel self.file_obj = None self.receive = False self.transfer_finished = False self.tot_bytes_sent = 0 self.tot_bytes_received = 0 self.cmd = None self.log = cmd_channel.log self.log_exception = cmd_channel.log_exception self._data_wrapper = None self._lastdata = 0 self._had_cr = False self._start_time = timer() self._resp = () self._offset = None self._filefd = None self._idler = None self._initialized = False try: AsyncChat.__init__(self, sock, ioloop=cmd_channel.ioloop) except socket.error as err: AsyncChat.__init__( self, socket.socket(), ioloop=cmd_channel.ioloop) self.close() if err.errno == errno.EINVAL: return self.handle_error() return if not self.connected: self.close() return if self.timeout: self._idler = self.ioloop.call_every(self.timeout, self.handle_timeout, _errback=self.handle_error) def __repr__(self): return '<%s(%s)>' % (self.__class__.__name__, self.cmd_channel.get_repr_info(as_str=True)) __str__ = __repr__ def use_sendfile(self): if not self.cmd_channel.use_sendfile: return False if self.file_obj is None or not hasattr(self.file_obj, "fileno"): return False try: self.file_obj.fileno() except OSError: return False if self.cmd_channel._current_type != 'i': return False return True def push(self, data): self._initialized = True self.modify_ioloop_events(self.ioloop.WRITE) self._wanted_io_events = self.ioloop.WRITE AsyncChat.push(self, data) def push_with_producer(self, producer): self._initialized = True self.modify_ioloop_events(self.ioloop.WRITE) self._wanted_io_events = self.ioloop.WRITE if self.use_sendfile(): self._offset = producer.file.tell() self._filefd = self.file_obj.fileno() try: self.initiate_sendfile() except _GiveUpOnSendfile: pass else: self.initiate_send = self.initiate_sendfile return debug("starting transfer using send()", self) AsyncChat.push_with_producer(self, producer) def close_when_done(self): asynchat.async_chat.close_when_done(self) def initiate_send(self): asynchat.async_chat.initiate_send(self) def initiate_sendfile(self): try: sent = sendfile(self._fileno, self._filefd, self._offset, self.ac_out_buffer_size) except OSError as err: if err.errno in _ERRNOS_RETRY or err.errno == errno.EBUSY: return elif err.errno in _ERRNOS_DISCONNECTED: self.handle_close() else: if self.tot_bytes_sent == 0: logger.warning( "sendfile() failed; falling back on using plain send") raise _GiveUpOnSendfile else: raise else: if sent == 0: self.discard_buffers() self.handle_close() else: self._offset += sent self.tot_bytes_sent += sent def _posix_ascii_data_wrapper(self, chunk): if self._had_cr: chunk = b'\r' + chunk if chunk.endswith(b'\r'): self._had_cr = True chunk = chunk[:-1] else: self._had_cr = False return chunk.replace(b'\r\n', b(os.linesep)) def enable_receiving(self, type, cmd): self._initialized = True self.modify_ioloop_events(self.ioloop.READ) self._wanted_io_events = self.ioloop.READ self.cmd = cmd if type == 'a': if 
os.linesep == '\r\n': self._data_wrapper = None else: self._data_wrapper = self._posix_ascii_data_wrapper elif type == 'i': self._data_wrapper = None else: raise TypeError("unsupported type") self.receive = True def get_transmitted_bytes(self): return self.tot_bytes_sent + self.tot_bytes_received def get_elapsed_time(self): return timer() - self._start_time
MIT License
cupy/cupy
cupyx/scipy/ndimage/morphology.py
binary_erosion
python
def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False): return _binary_erosion(input, structure, iterations, mask, output, border_value, origin, 0, brute_force)
Multidimensional binary erosion with a given structuring element. Binary erosion is a mathematical morphology operation used for image processing. Args: input(cupy.ndarray): The input binary array_like to be eroded. Non-zero (True) elements form the subset to be eroded. structure(cupy.ndarray, optional): The structuring element used for the erosion. Non-zero elements are considered True. If no structuring element is provided an element is generated with a square connectivity equal to one. (Default value = None). iterations(int, optional): The erosion is repeated ``iterations`` times (one, by default). If iterations is less than 1, the erosion is repeated until the result does not change anymore. Only an integer of iterations is accepted. mask(cupy.ndarray or None, optional): If a mask is given, only those elements with a True value at the corresponding mask element are modified at each iteration. (Default value = None) output(cupy.ndarray, optional): Array of the same shape as input, into which the output is placed. By default, a new array is created. border_value(int (cast to 0 or 1), optional): Value at the border in the output array. (Default value = 0) origin(int or tuple of ints, optional): Placement of the filter, by default 0. brute_force(boolean, optional): Memory condition: if False, only the pixels whose value was changed in the last iteration are tracked as candidates to be updated (eroded) in the current iteration; if True all pixels are considered as candidates for erosion, regardless of what happened in the previous iteration. Returns: cupy.ndarray: The result of binary erosion. .. warning:: This function may synchronize the device. .. seealso:: :func:`scipy.ndimage.binary_erosion`
https://github.com/cupy/cupy/blob/a466b03ef0afd7c1ce1615e3f48da64ae38c1320/cupyx/scipy/ndimage/morphology.py#L265-L308
import operator import warnings import numpy import cupy from cupyx.scipy.ndimage import _filters_core from cupyx.scipy.ndimage import _util from cupyx.scipy.ndimage import filters @cupy.memoize(for_each_device=True) def _get_binary_erosion_kernel( w_shape, int_type, offsets, center_is_true, border_value, invert, masked, all_weights_nonzero ): if invert: border_value = int(not border_value) true_val = 0 false_val = 1 else: true_val = 1 false_val = 0 if masked: pre = """ bool mv = (bool)mask[i]; bool _in = (bool)x[i]; if (!mv) {{ y = cast<Y>(_in); return; }} else if ({center_is_true} && _in == {false_val}) {{ y = cast<Y>(_in); return; }}""".format(center_is_true=int(center_is_true), false_val=false_val) else: pre = """ bool _in = (bool)x[i]; if ({center_is_true} && _in == {false_val}) {{ y = cast<Y>(_in); return; }}""".format(center_is_true=int(center_is_true), false_val=false_val) pre = pre + """ y = cast<Y>({true_val});""".format(true_val=true_val) found = """ if ({{cond}}) {{{{ if (!{border_value}) {{{{ y = cast<Y>({false_val}); return; }}}} }}}} else {{{{ bool nn = {{value}} ? {true_val} : {false_val}; if (!nn) {{{{ y = cast<Y>({false_val}); return; }}}} }}}}""".format(true_val=int(true_val), false_val=int(false_val), border_value=int(border_value),) name = 'binary_erosion' if false_val: name += '_invert' return _filters_core._generate_nd_kernel( name, pre, found, '', 'constant', w_shape, int_type, offsets, 0, ctype='Y', has_weights=True, has_structure=False, has_mask=masked, binary_morphology=True, all_weights_nonzero=all_weights_nonzero) def _center_is_true(structure, origin): coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, origin)]) return bool(structure[coor]) def iterate_structure(structure, iterations, origin=None): if iterations < 2: return structure.copy() ni = iterations - 1 shape = [ii + ni * (ii - 1) for ii in structure.shape] pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] slc = tuple( slice(pos[ii], pos[ii] + structure.shape[ii], None) for ii in range(len(shape)) ) out = cupy.zeros(shape, bool) out[slc] = structure != 0 out = binary_dilation(out, structure, iterations=ni) if origin is None: return out else: origin = _util._fix_sequence_arg(origin, structure.ndim, 'origin', int) origin = [iterations * o for o in origin] return out, origin def generate_binary_structure(rank, connectivity): if connectivity < 1: connectivity = 1 if rank < 1: return cupy.asarray(True, dtype=bool) output = numpy.fabs(numpy.indices([3] * rank) - 1) output = numpy.add.reduce(output, 0) output = output <= connectivity return cupy.asarray(output) def _binary_erosion(input, structure, iterations, mask, output, border_value, origin, invert, brute_force=True): try: iterations = operator.index(iterations) except TypeError: raise TypeError('iterations parameter should be an integer') if input.dtype.kind == 'c': raise TypeError('Complex type not supported') if structure is None: structure = generate_binary_structure(input.ndim, 1) all_weights_nonzero = input.ndim == 1 center_is_true = True default_structure = True else: structure = structure.astype(dtype=bool, copy=False) default_structure = False if structure.ndim != input.ndim: raise RuntimeError('structure and input must have same dimensionality') if not structure.flags.c_contiguous: structure = cupy.ascontiguousarray(structure) if structure.size < 1: raise RuntimeError('structure must not be empty') if mask is not None: if mask.shape != input.shape: raise RuntimeError('mask and input must have equal sizes') if not 
mask.flags.c_contiguous: mask = cupy.ascontiguousarray(mask) masked = True else: masked = False origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) if isinstance(output, cupy.ndarray): if output.dtype.kind == 'c': raise TypeError('Complex output type not supported') else: output = bool output = _util._get_output(output, input) temp_needed = cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS') if temp_needed: temp = output output = _util._get_output(output.dtype, input) if structure.ndim == 0: if float(structure): output[...] = cupy.asarray(input, dtype=bool) else: output[...] = ~cupy.asarray(input, dtype=bool) return output origin = tuple(origin) int_type = _util._get_inttype(input) offsets = _filters_core._origins_to_offsets(origin, structure.shape) if not default_structure: nnz = int(cupy.count_nonzero(structure)) all_weights_nonzero = nnz == structure.size if all_weights_nonzero: center_is_true = True else: center_is_true = _center_is_true(structure, origin) erode_kernel = _get_binary_erosion_kernel( structure.shape, int_type, offsets, center_is_true, border_value, invert, masked, all_weights_nonzero, ) if iterations == 1: if masked: output = erode_kernel(input, structure, mask, output) else: output = erode_kernel(input, structure, output) elif center_is_true and not brute_force: raise NotImplementedError( 'only brute_force iteration has been implemented' ) else: if cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS'): raise ValueError('output and input may not overlap in memory') tmp_in = cupy.empty_like(input, dtype=output.dtype) tmp_out = output if iterations >= 1 and not iterations & 1: tmp_in, tmp_out = tmp_out, tmp_in if masked: tmp_out = erode_kernel(input, structure, mask, tmp_out) else: tmp_out = erode_kernel(input, structure, tmp_out) changed = not (input == tmp_out).all() ii = 1 while ii < iterations or ((iterations < 1) and changed): tmp_in, tmp_out = tmp_out, tmp_in if masked: tmp_out = erode_kernel(tmp_in, structure, mask, tmp_out) else: tmp_out = erode_kernel(tmp_in, structure, tmp_out) changed = not (tmp_in == tmp_out).all() ii += 1 if not changed and (not ii & 1): break output = tmp_out if temp_needed: temp[...] = output output = temp return output
MIT License
initstring/linkedin2username
linkedin2username.py
login
python
def login(args): session = requests.session() if args.proxy: print(PC.warn_box + "Using a proxy, ignoring SSL errors." " Don't get pwned.") session.verify = False from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) session.proxies.update(args.proxy_dict) mobile_agent = ('Mozilla/5.0 (Linux; U; Android 4.4.2; en-us; SCH-I535 ' 'Build/KOT49H) AppleWebKit/534.30 (KHTML, like Gecko) ' 'Version/4.0 Mobile Safari/534.30') session.headers.update({'User-Agent': mobile_agent, 'X-RestLi-Protocol-Version': '2.0.0'}) anon_response = session.get('https://www.linkedin.com/login') login_csrf = re.findall(r'name="loginCsrfParam" value="(.*?)"', anon_response.text) if login_csrf: login_csrf = login_csrf[0] else: print("Having trouble loading login page... try the command again.") sys.exit() auth_payload = { 'session_key': args.username, 'session_password': args.password, 'isJsEnabled': 'false', 'loginCsrfParam': login_csrf } response = session.post('https://www.linkedin.com/checkpoint/lg/login-submit' '?loginSubmitSource=GUEST_HOME', data=auth_payload, allow_redirects=False) if response.status_code == 302 or response.status_code == 303: redirect = response.headers['Location'] if 'feed' in redirect: print(PC.ok_box + "Successfully logged in.\n") return session if 'challenge' in redirect: print(PC.warn_box + "LinkedIn doesn't like something about this" " login. Maybe you're being sneaky on a VPN or something." " You may get an email with a verification token. You can" " ignore the email. Log in from a web browser and try" " again.\n") return False if 'captcha' in redirect: print(PC.warn_box + "You've triggered a CAPTCHA. Oops. Try logging" " in with your web browser first and come back later.") return False if 'add-phone' in redirect: print(PC.warn_box + "LinkedIn is prompting to add your phone" " number to your profile. Please handle that in the web and" " then try again.") return False if 'manage-account' in redirect: print(PC.warn_box + "LinkedIn has some account notification for you" " to check. Please log in first via the web and clear that.") return False if 'add-email' in redirect: print(PC.warn_box + "LinkedIn wants you to add an email address to" " your account. Log in via the web first and do that.") return False print(PC.warn_box + "Some unknown redirection occurred. If this" " persists, please open an issue on github wih the DEBUG" " message below:\n") print("DEBUG INFO:") print("LOCATION: {}".format(redirect)) print("RESPONSE TEXT:\n{}".format(response.text)) return False if '<title>LinkedIn Login' in response.text: print(PC.warn_box + "You've been returned to a login page. Check your" " username and password and try again.\n") return False print(PC.warn_box + "Some unknown error logging in. If this persists," "please open an issue on github.\n") print("DEBUG INFO:") print("RESPONSE CODE: {}".format(response.status_code)) print("RESPONSE TEXT:\n{}".format(response.text)) return False
Creates a new authenticated session. Note that a mobile user agent is used. Parsing using the desktop results proved extremely difficult, as shared connections would be returned in a manner that was indistinguishable from the desired targets. The other header matters as well, otherwise advanced search functions (region and keyword) will not work. The function will check for common failure scenarios - the most common is logging in from a new location. Accounts using multi-factor auth are not yet supported and will produce an error.
https://github.com/initstring/linkedin2username/blob/0ff92f6f7459180d3a54f6553d2e527b5809ea32/linkedin2username.py#L198-L312
import os import sys import re import time import argparse import getpass from distutils.version import StrictVersion import urllib.parse import requests CURRENT_REL = '0.21' BANNER = r""" .__ .__________ | | |__\_____ \ __ __ | | | |/ ____/| | \ | |_| / \| | / |____/__\_______ \____/ linkedin2username Spray away. github.com/initstring """ GEO_REGIONS = { 'r0':'us:0', 'r1':'ca:0', 'r2':'gb:0', 'r3':'au:0|nz:0', 'r4':'cn:0|hk:0', 'r5':'jp:0|kr:0|my:0|np:0|ph:0|sg:0|lk:0|tw:0|th:0|vn:0', 'r6':'in:0', 'r7':'at:0|be:0|bg:0|hr:0|cz:0|dk:0|fi:0', 'r8':'fr:0|de:0', 'r9':'gr:0|hu:0|ie:0|it:0|lt:0|nl:0|no:0|pl:0|pt:0', 'r10':'ro:0|ru:0|rs:0|sk:0|es:0|se:0|ch:0|tr:0|ua:0', 'r11':('ar:0|bo:0|br:0|cl:0|co:0|cr:0|do:0|ec:0|gt:0|mx:0|pa:0|pe:0' '|pr:0|tt:0|uy:0|ve:0'), 'r12':'af:0|bh:0|il:0|jo:0|kw:0|pk:0|qa:0|sa:0|ae:0'} if sys.version_info < (3, 0): print("\nSorry mate, you'll need to use Python 3+ on this one...\n") sys.exit(1) class PC: green = '\033[92m' blue = '\033[94m' orange = '\033[93m' endc = '\033[0m' ok_box = blue + '[*] ' + endc note_box = green + '[+] ' + endc warn_box = orange + '[!] ' + endc def parse_arguments(): desc = ('OSINT tool to generate lists of probable usernames from a' ' given company\'s LinkedIn page. This tool may break when' ' LinkedIn changes their site. Please open issues on GitHub' ' to report any inconsistencies, and they will be quickly fixed.') parser = argparse.ArgumentParser(description=desc) parser.add_argument('-u', '--username', type=str, action='store', required=True, help='A valid LinkedIn username.') parser.add_argument('-c', '--company', type=str, action='store', required=True, help='Company name exactly as typed in the company ' 'linkedin profile page URL.') parser.add_argument('-p', '--password', type=str, action='store', help='Specify your password in clear-text on the ' 'command line. If not specified, will prompt and ' 'obfuscate as you type.') parser.add_argument('-n', '--domain', type=str, action='store', default='', help='Append a domain name to username output. ' '[example: "-n uber.com" would output jschmoe@uber.com]' ) parser.add_argument('-d', '--depth', type=int, action='store', default=False, help='Search depth (how many loops of 25). If unset, ' 'will try to grab them all.') parser.add_argument('-s', '--sleep', type=int, action='store', default=0, help='Seconds to sleep between search loops.' ' Defaults to 0.') parser.add_argument('-x', '--proxy', type=str, action='store', default=False, help='Proxy server to use. WARNING: WILL DISABLE SSL ' 'VERIFICATION. [example: "-p https://localhost:8080"]') parser.add_argument('-k', '--keywords', type=str, action='store', default=False, help='Filter results by a a list of command separated ' 'keywords. Will do a separate loop for each keyword, ' 'potentially bypassing the 1,000 record limit. ' '[example: "-k \'sales,human resources,information ' 'technology\']') parser.add_argument('-g', '--geoblast', default=False, action="store_true", help='Attempts to bypass the 1,000 record search limit' ' by running multiple searches split across geographic' ' regions.') args = parser.parse_args() args.proxy_dict = {"https" : args.proxy} if args.domain: args.domain = '@' + args.domain if args.keywords: args.keywords = args.keywords.split(',') if args.keywords and args.geoblast: print("Sorry, keywords and geoblast are currently not compatible. 
" "Use one or the other.") sys.exit() args.password = args.password or getpass.getpass() return args def check_li2u_version(): latest_rel_regex = r'/initstring/linkedin2username/tree/(.*?)"' session = requests.session() rel_url = 'https://github.com/initstring/linkedin2username/releases' rel_chars = re.compile(r'[^0-9\.]') response = session.get(rel_url) latest_rel = re.findall(latest_rel_regex, response.text) if latest_rel[0]: latest_rel = rel_chars.sub('', latest_rel[0]) else: return if CURRENT_REL == latest_rel: print("") print(PC.ok_box + "Using version {}, which is the latest on" " GitHub.\n".format(CURRENT_REL)) return if StrictVersion(CURRENT_REL) > StrictVersion(latest_rel): print("") print(PC.warn_box + "Using version {}, which is NEWER than {}, the" " latest official release. Good luck!\n" .format(CURRENT_REL, latest_rel)) return if StrictVersion(CURRENT_REL) < StrictVersion(latest_rel): print("") print(PC.warn_box + "You are using {}, but {} is available.\n" " LinkedIn changes often - this version may not work.\n" " https://github.com/initstring/linkedin2username.\n" .format(CURRENT_REL, latest_rel)) return
MIT License
lcx366/ggtools
ggtools/gg/landmask.py
landmask
python
def landmask(lmax,trunc_lat=None): home = getenv('HOME') direc = home + '/src/etopo-data/' etopo_file = 'etopo5.nc' url = 'https://raw.githubusercontent.com/dcherian/tools/master/ROMS/arango/bathymetry/etopo5.nc' if not path.exists(direc): makedirs(direc) print('Downloading the ETOPO5 Earth Surface Topography Data Set',end=' ... ') urlretrieve(url, direc + etopo_file) print('Finished') ds = xr.open_dataset(direc + etopo_file) topo_grids = np.flip(np.array(ds['topo']),0)[:-1,:] lats = np.flip(np.array(ds['topo_lat']))[:-1] topo_grids_class = SHGrid.from_array(topo_grids) topo_coeffs_class = topo_grids_class.expand(lmax_calc=lmax) topo_grids_class = topo_coeffs_class.expand() topo_grids = topo_grids_class.data lats = topo_grids_class.lats() land_mask = topo_grids > 0 if trunc_lat is not None: land_mask[lats < trunc_lat] = False return land_mask
Establish a land window function based on the global terrain data ETOPO5. The land area has a value of 1 and the sea area has a value of 0. To set the value of the Antarctica region to 0, just set trunc_lat to -60. Usage: land_win = land_mask(lmax) land_win = land_mask(lmax,-60) Inputs: lmax -> [int] Degree of the spherical harmonic expansion Parameters: trunc_lat -> [optional, float, default = None] Truncated latitude. If None, the global land area will be assigned a value of 1, otherwise, only the land area north of the truncated latitude will be assigned a value of 1, and the remaining regions will be assigned a value of 0. Outputs: land_win [bool, 2d array] land window function. The grid satisfies the sampling theorem of Driscoll and Healy (1994). For more information, please refer to https://shtools.oca.eu/shtools/public/pyshexpanddh.html
https://github.com/lcx366/ggtools/blob/7909da988d90de50c82532d97121a3fbcfc0263a/ggtools/gg/landmask.py#L7-L57
import numpy as np import xarray as xr from os import getenv,path,makedirs from urllib.request import urlretrieve from pyshtools.shclasses import SHCoeffs,SHGrid
MIT License
adafruit/adafruit_python_charlcd
examples/char_lcd_rgb_pwm.py
hsv_to_rgb
python
def hsv_to_rgb(hsv): h, s, v = hsv if s == 0: return (v, v, v) h /= 60.0 i = math.floor(h) f = h-i p = v*(1.0-s) q = v*(1.0-s*f) t = v*(1.0-s*(1.0-f)) if i == 0: return (v, t, p) elif i == 1: return (q, v, p) elif i == 2: return (p, v, t) elif i == 3: return (p, q, v) elif i == 4: return (t, p, v) else: return (v, p, q)
Converts a tuple of hue, saturation, value to a tuple of red, green blue. Hue should be an angle from 0.0 to 359.0. Saturation and value should be a value from 0.0 to 1.0, where saturation controls the intensity of the hue and value controls the brightness.
https://github.com/adafruit/adafruit_python_charlcd/blob/c126e6b673074c12a03f4bd36afb2fe40272341e/examples/char_lcd_rgb_pwm.py#L9-L36
import math import time import Adafruit_CharLCD as LCD
MIT License
hopeit-git/hopeit.engine
engine/src/hopeit/server/config.py
replace_env_vars
python
def replace_env_vars(config_json: str) -> str: result = config_json env_re = re.compile('\\${([^}{]+)}', re.IGNORECASE) for match in env_re.finditer(result): expr = match.group(0) var_name = match.group(1) value = os.getenv(var_name.upper()) if value: result = result.replace(expr, value) missing_env_vars = env_re.findall(result) assert len(missing_env_vars) == 0, f"Cannot get value from OS environment vars: {missing_env_vars}" return result
Replaces env variables matching form ${VAR_NAME} with its string value :param config_json: input configuratio json as string :return: str, with replaced values :raise: AssertionError if variables matching ${VAR_NAME} form are not replaced
https://github.com/hopeit-git/hopeit.engine/blob/79137e483a9577f336524f7e3b22a26a8d5a7ea3/engine/src/hopeit/server/config.py#L121-L141
from enum import Enum from typing import TypeVar, List, Optional import re import os from dataclasses import dataclass, field from hopeit.dataobjects import dataobject from hopeit.server.names import auto_path_prefixed from hopeit.server.version import ENGINE_VERSION __all__ = ['StreamsConfig', 'LoggingConfig', 'AuthType', 'AuthConfig', 'ServerConfig', 'parse_server_config_json', 'replace_env_vars', 'replace_config_args'] DEFAULT_STR = "<<DEFAULT>>" ConfigType = TypeVar("ConfigType") @dataobject @dataclass class StreamsConfig: stream_manager: str = "hopeit.streams.NoStreamManager" connection_str: str = '<<NoStreamManager>>' delay_auto_start_seconds: int = 3 @dataobject @dataclass class LoggingConfig: log_level: str = 'INFO' log_path: str = 'logs/' class AuthType(str, Enum): UNSECURED = 'Unsecured' BASIC = 'Basic' BEARER = 'Bearer' REFRESH = 'Refresh' @dataobject @dataclass class AuthConfig: secrets_location: str auth_passphrase: str enabled: bool = True create_keys: bool = False domain: Optional[str] = None encryption_algorithm: str = 'RS256' default_auth_methods: List[AuthType] = field(default_factory=list) def __post_init__(self): if len(self.default_auth_methods) == 0: self.default_auth_methods.append(AuthType.UNSECURED) @staticmethod def no_auth(): return AuthConfig('.secrets/', '', enabled=False) @dataobject @dataclass class APIConfig: docs_path: Optional[str] = None @dataobject @dataclass class ServerConfig: streams: StreamsConfig = field(default_factory=StreamsConfig) logging: LoggingConfig = field(default_factory=LoggingConfig) auth: AuthConfig = field(default_factory=AuthConfig.no_auth) api: APIConfig = field(default_factory=APIConfig) engine_version: str = field(default=ENGINE_VERSION) def __post_init__(self): self.engine_version = ENGINE_VERSION def parse_server_config_json(config_json: str) -> ServerConfig: effective_json = replace_env_vars(config_json) parsed_config = ServerConfig.from_json(effective_json) replace_config_args( parsed_config=parsed_config, config_classes=tuple([StreamsConfig]) ) return parsed_config
Apache License 2.0
chrisb2/water-system
file_logger.py
File.close_log
python
def close_log(): if File.__instance is not None: File.__instance.__close_file() File.__instance = None File.__logger = None
Static method to close the file logger.
https://github.com/chrisb2/water-system/blob/df70f0ee8c15051bb8ee5fe05fca484d81135a13/file_logger.py#L20-L25
import io import logging class File: __logger = None __instance = None @staticmethod def logger(): if File.__logger is None: File() return File.__logger @staticmethod
MIT License
mikkelschubert/paleomix
paleomix/common/makefile.py
_safe_coerce_to_lowercase
python
def _safe_coerce_to_lowercase(value): if isinstance(value, str): return value.lower() return value
Returns strings as lowercase, and any other types of value unchanged.
https://github.com/mikkelschubert/paleomix/blob/5c6414060088ba178ff1c400bdbd45d2f6b1aded/paleomix/common/makefile.py#L828-L832
import copy import logging from typing import Any, Dict, Optional, Tuple import paleomix.common.yaml as yaml from paleomix.common.fileutils import fspath from paleomix.common.utilities import group_by_pred class MakefileError(RuntimeError): def read_makefile(filename: str, specification: Dict[Any, Any]): try: with open(fspath(filename)) as handle: data = yaml.safe_load(handle) except (yaml.YAMLError, yaml.DuplicateKeyError) as error: raise MakefileError(error) return process_makefile(data, specification) def process_makefile( data: Any, specification: Any, path: Tuple[str, ...] = (), apply_defaults: bool = True, ) -> Any: if isinstance(specification, WithoutDefaults): specification = specification.specification data = process_makefile(data, specification, path, apply_defaults=False) elif isinstance(specification, PreProcessMakefile): data, specification = specification(path, data) data = process_makefile(data, specification, path, apply_defaults) elif _is_spec(specification): _instantiate_spec(specification)(path, data) elif isinstance(data, (dict, type(None))) and isinstance(specification, dict): if data is None: data = {} _process_default_values(data, specification, path, apply_defaults) for cur_key in data: ref_key = _get_matching_spec_or_value( cur_key, specification, path + (cur_key,) ) data[cur_key] = process_makefile( data[cur_key], specification[ref_key], path + (cur_key,), apply_defaults ) elif isinstance(data, (list, type(None))) and isinstance(specification, list): if not all(map(_is_spec, specification)): raise TypeError( "Lists contains non-specification objects (%r): %r" % (_path_to_str(path), specification) ) elif data is None: data = [] specification = IsListOf(*specification) _instantiate_spec(specification)(path, data) elif not isinstance(specification, (dict, list)): raise TypeError( "Unexpected type in makefile specification at %r: %r!" % (_path_to_str(path), specification) ) else: raise MakefileError( "Inconsistency between makefile specification and " "current makefile at %s:\n Expected %s, " "found %s %r!" 
% ( _path_to_str(path), type(specification).__name__, type(data).__name__, data, ) ) return data DEFAULT_NOT_SET = object() REQUIRED_VALUE = object() class WithoutDefaults: def __init__(self, specification: Any): self.specification = specification class PreProcessMakefile: def __call__(self, path, value): raise NotImplementedError class MakefileSpec: def __init__( self, description: str = "N/A", default: Any = DEFAULT_NOT_SET, ): self.description = description self.default = default if (default not in (DEFAULT_NOT_SET, REQUIRED_VALUE)) and not self.meets_spec( default ): raise ValueError( ( "Default value does not meet requirements:\n" " Expected value: %s\n" " Observed value: %r\n" ) % (description, default) ) def __call__(self, path, value): if not self.meets_spec(value): raise MakefileError( ( "Makefile requirement not met at %r:\n" " Expected value: %s\n" " Observed value: %r\n" " Observed type: %s" ) % (_path_to_str(path), self.description, value, type(value).__name__) ) def meets_spec(self, _value): raise NotImplementedError class IsAny(MakefileSpec): def __init__(self, description: str = "any value", default: Any = DEFAULT_NOT_SET): MakefileSpec.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return True class IsInt(MakefileSpec): def __init__(self, description: str = "an integer", default: Any = DEFAULT_NOT_SET): MakefileSpec.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return isinstance(value, int) and not isinstance(value, bool) class IsUnsignedInt(IsInt): def __init__( self, description: str = "an unsigned integer", default: Any = DEFAULT_NOT_SET ): IsInt.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return IsInt.meets_spec(self, value) and value >= 0 class IsFloat(MakefileSpec): def __init__(self, description: str = "a float", default: Any = DEFAULT_NOT_SET): MakefileSpec.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return isinstance(value, float) class IsBoolean(MakefileSpec): def __init__(self, description: str = "a boolean", default: Any = DEFAULT_NOT_SET): MakefileSpec.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return isinstance(value, bool) class IsStr(MakefileSpec): def __init__( self, description: Optional[str] = None, default: Any = DEFAULT_NOT_SET, min_len: int = 1, ): if description is None: if min_len == 0: description = "a string" elif min_len == 1: description = "a non-empty string" elif min_len >= 2: description = "a string at least %s characters long" % (min_len,) else: raise ValueError("min_len must be non-negative") self._min_len = min_len MakefileSpec.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return isinstance(value, str) and len(value) >= self._min_len class IsNone(MakefileSpec): def __init__( self, description: str = "null or not set", default: Any = DEFAULT_NOT_SET ): if default is not DEFAULT_NOT_SET: raise NotImplementedError("IsNone does not support default values") MakefileSpec.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return value is None class ValueMissing(MakefileSpec): def __init__(self, description: str = "no values"): MakefileSpec.__init__(self, description, DEFAULT_NOT_SET) def meets_spec(self, _value): return False class DeprecatedOption(MakefileSpec): def __init__(self, spec): self._spec = spec if not isinstance(spec, MakefileSpec): raise ValueError(spec) MakefileSpec.__init__(self, spec.description, 
spec.default) def __call__(self, path, value): self._spec(path, value) log = logging.getLogger(__name__) log.warning( "option has been deprecated and will be removed in the future: %s", _path_to_str(path), ) def meets_spec(self, value: Any) -> bool: return self._spec.meets_spec(value) class RemovedOption(MakefileSpec): def __init__(self, description: str = "removed settings"): MakefileSpec.__init__(self, description, DEFAULT_NOT_SET) def __call__(self, path, _value): log = logging.getLogger(__name__) log.warning( "option has been removed and no longer has any effect: %s", _path_to_str(path), ) def meets_spec(self, _value): return True class _BinaryOperator(MakefileSpec): def __init__(self, description, default, opfunc, rvalue, key=None, list_kword=None): self._operator = opfunc self._keyfunc = key self._rvalue = rvalue rvalue_repr = _list_values(rvalue, list_kword) if list_kword else rvalue description = description.format(rvalue=rvalue_repr) MakefileSpec.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: if self._keyfunc is not None: value = self._keyfunc(value) return self._operator(value, self._rvalue) class ValueIn(_BinaryOperator): def __init__( self, rvalues, key=None, description: str = "value in {rvalue}", default: Any = DEFAULT_NOT_SET, ): description = description.format(rvalue=_list_values(rvalues, "or")) _BinaryOperator.__init__( self, description=description, default=default, opfunc=self._in_operator, rvalue=rvalues, key=key, ) def _in_operator(self, lvalue, rvalues): return _is_hashable(lvalue) and lvalue in rvalues class ValuesIntersect(_BinaryOperator): def __init__( self, rvalues, key=None, description: str = None, default: Any = DEFAULT_NOT_SET ): if not description: description = "one or more of %s" % (_list_values(rvalues, "and"),) _BinaryOperator.__init__( self, description=description, default=default, opfunc=self._operator, rvalue=rvalues, key=key, ) def _operator(self, lvalue, rvalues): try: return not isinstance(lvalue, dict) and bool( frozenset(lvalue).intersection(rvalues) ) except TypeError: return False class ValuesSubsetOf(_BinaryOperator): def __init__( self, rvalues, key=None, description: str = None, default: Any = DEFAULT_NOT_SET ): description = description or "subset of %s" % (_list_values(rvalues, "and"),) _BinaryOperator.__init__( self, description=description, default=default, opfunc=self._operator, rvalue=rvalues, key=key, ) def _operator(self, lvalue, rvalues): try: return not isinstance(lvalue, dict) and bool( frozenset(lvalue).issubset(rvalues) ) except TypeError: return False class _MultipleSpecs(MakefileSpec): def __init__( self, specs, kwargs, name, prefix="", postfix="", join_by=" ", fmt="%s" ): self._specs = [_instantiate_spec(spec) for spec in specs] if not self._specs: raise ValueError("No specification given to %r" % (name.title(),)) elif not all((spc.default is DEFAULT_NOT_SET) for spc in self._specs): raise ValueError( "Default values cannot be set in specs given to logical operators" ) description = [(fmt % (spec.description,)) for spec in self._specs] description = "%s%s%s" % (prefix, join_by.join(description), postfix) default_value = kwargs.get("default", DEFAULT_NOT_SET) MakefileSpec.__init__(self, description, default_value) class And(_MultipleSpecs): def __init__(self, *specs, **kwargs): _MultipleSpecs.__init__(self, specs, kwargs, "And", join_by=" and ", fmt="(%s)") def meets_spec(self, value: Any) -> bool: return all(spec.meets_spec(value) for spec in self._specs) class Or(_MultipleSpecs): def 
__init__(self, *specs, **kwargs): _MultipleSpecs.__init__(self, specs, kwargs, "Or", join_by=" or ", fmt="(%s)") def meets_spec(self, value: Any) -> bool: return any(spec.meets_spec(value) for spec in self._specs) class Not(_MultipleSpecs): def __init__(self, spec, **kwargs): _MultipleSpecs.__init__(self, [spec], kwargs, "Not", prefix="not ", fmt="(%s)") def meets_spec(self, value: Any) -> bool: return not self._specs[0].meets_spec(value) class StringIn(_BinaryOperator): def __init__( self, rvalues, key=None, description: str = "one of {rvalue}, case-insentive", default: Any = DEFAULT_NOT_SET, ): description = description.format(rvalue=_list_values(rvalues, "or")) rvalues = frozenset(map(_safe_coerce_to_lowercase, rvalues)) _BinaryOperator.__init__( self, description, default, self._string_in_operator, rvalues ) @classmethod def _string_in_operator(cls, lvalue, rvalues): if not _is_hashable(lvalue): return False return _safe_coerce_to_lowercase(lvalue) in rvalues class StringStartsWith(IsStr): def __init__(self, prefix, default: Any = DEFAULT_NOT_SET): assert prefix and isinstance(prefix, str) self._prefix = prefix description = "a string with prefix %r" % (prefix,) IsStr.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return super(StringStartsWith, self).meets_spec(value) and value.startswith( self._prefix ) class StringEndsWith(IsStr): def __init__(self, postfix, default: Any = DEFAULT_NOT_SET): assert postfix and isinstance(postfix, str) self._postfix = postfix description = "a string with postfix %r" % (postfix,) IsStr.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: return super(StringEndsWith, self).meets_spec(value) and value.endswith( self._postfix ) class IsListOf(_MultipleSpecs): def __init__(self, *specs, **kwargs): _MultipleSpecs.__init__( self, specs, kwargs, "IsListOf", prefix="[", postfix=", ...]", join_by=" or ", fmt="(%s)", ) def meets_spec(self, value: Any) -> bool: if not isinstance(value, list): return False return all( any(spec.meets_spec(lstvalue) for spec in self._specs) for lstvalue in value ) class IsDictOf(MakefileSpec): def __init__(self, key_spec, value_spec, default: Any = DEFAULT_NOT_SET): self._key_spec = _instantiate_spec(key_spec) self._value_spec = _instantiate_spec(value_spec) if self._key_spec.default is not DEFAULT_NOT_SET: raise ValueError("Default values cannot be set in key-specs") elif self._value_spec.default is not DEFAULT_NOT_SET: raise ValueError("Default values cannot be set in value-specs") description = "{(%s) : (%s)}" % ( self._key_spec.description, self._value_spec.description, ) MakefileSpec.__init__(self, description, default) def meets_spec(self, value: Any) -> bool: if not isinstance(value, dict): return False for (key, value) in value.items(): if not ( self._key_spec.meets_spec(key) and self._value_spec.meets_spec(value) ): return False return True def _is_hashable(value): try: hash(value) return True except TypeError: return False def _is_spec(spec: Any) -> bool: if isinstance(spec, MakefileSpec): return True elif isinstance(spec, type) and issubclass(spec, MakefileSpec): return True return False def _instantiate_spec(spec): if isinstance(spec, MakefileSpec): return spec elif isinstance(spec, type) and issubclass(spec, MakefileSpec): return spec() else: raise TypeError("Specifications must derive from 'MakefileSpec'")
MIT License
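A minimal usage sketch for the makefile-validation helpers visible in the context above. The specification keys and defaults are invented for illustration, and the default-filling behaviour relies on _process_default_values(), which is referenced but not shown in the excerpt:

# Hypothetical specification: keys and default values are made up.
specification = {
    "Name": IsStr(default=REQUIRED_VALUE),               # mandatory non-empty string
    "Threads": IsUnsignedInt(default=1),                 # optional, defaults to 1
    "Mode": StringIn(("fast", "slow"), default="fast"),  # case-insensitive choice
}

data = {"Name": "sample1", "Threads": 4}
# process_makefile() checks each value against its spec, fills in defaults
# (via the _process_default_values helper not shown above), and raises
# MakefileError/TypeError on mismatches.
validated = process_makefile(data, specification)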
ratijas/dropsql
src/dropSQL/fs/table.py
Table._write
python
def _write(self, page: int, offset: int, record: bytes) -> None:
    chunk = record[:BLOCK_SIZE - offset]
    block = self.get_or_allocate_page(page)
    block.override(offset, chunk)
    self.storage.write_block(block)

    i = 1
    while (i + 1) * BLOCK_SIZE - offset <= len(record):
        chunk = record[i * BLOCK_SIZE - offset:(i + 1) * BLOCK_SIZE - offset]
        block = self.get_or_allocate_page(page + i)
        block.override(0, chunk)
        self.storage.write_block(block)
        i += 1

    chunk = record[i * BLOCK_SIZE - offset:]
    if chunk:
        block = self.get_or_allocate_page(page + i)
        block.override(0, chunk)
        self.storage.write_block(block)
Write the record (bytes) onto the page (virtual index) starting at the given offset. The record may cross a page boundary.
https://github.com/ratijas/dropsql/blob/936be7403fa409a7da1aa7e9f4afeeddc3f0a041/src/dropSQL/fs/table.py#L404-L433
import struct from typing import * from dropSQL.ast.column_def import ColumnDef from dropSQL.ast.ty import * from dropSQL.engine.types import * from dropSQL.generic import * from dropSQL.parser.tokens import Identifier from .block import * from .block_storage import BlockStorage RAW_TYPE = Union[bytes, int, float] LVL0 = DIRECT_POINTERS LVL1 = LVL0 + POINTERS_PER_BLOCK LVL2 = LVL1 + POINTERS_PER_BLOCK ** 2 LVL3 = LVL2 + POINTERS_PER_BLOCK ** 3 POINTERS_PER_LVL1 = POINTERS_PER_BLOCK POINTERS_PER_LVL2 = POINTERS_PER_BLOCK ** 2 POINTERS_PER_LVL3 = POINTERS_PER_BLOCK ** 3 class Descriptor: N_POINTERS = DIRECT_POINTERS + 3 __slots__ = ['table', 'block', 'name', 'pointers', 'records', 'columns'] def __init__(self, table: 'Table', block: Block, table_name: Identifier, pointers: List[int], records: int, columns: List[ColumnDef]) -> None: super().__init__() assert len(pointers) == Descriptor.N_POINTERS self.table = table self.block = block self.name = table_name self.pointers = pointers self.records = records self.columns = columns @classmethod def empty(cls, table: 'Table') -> 'Descriptor': return Descriptor(table, Block.empty(1 + table.index), Identifier(''), [0] * Descriptor.N_POINTERS, 0, []) @classmethod def decode(cls, table: 'Table') -> 'Descriptor': try: block = table.storage.read_block(1 + table.index) except AssertionError: return cls.empty(table) else: base = BLOCK_SIZE - (Descriptor.N_POINTERS * POINTER_SIZE) - POINTER_SIZE def get_pointer(b: Block, i: int) -> int: return int.from_bytes(b[base + i * POINTER_SIZE:base + (i + 1) * POINTER_SIZE], byteorder=BYTEORDER) table_name = Identifier((block.split(b'\0')[0]).decode("UTF-8"), True) pointers = [get_pointer(block, i) for i in range(Descriptor.N_POINTERS)] records = int.from_bytes(block[BLOCK_SIZE - POINTER_SIZE:], byteorder=BYTEORDER) columns: List[ColumnDef] = [] column_descriptors = block[block.find(b'\0') + 1: base] i = 0 while column_descriptors[i] != 0: null = column_descriptors.find(b'\0', i) name = column_descriptors[i:null].decode("UTF-8") ty = Ty.decode(column_descriptors[null + 1: null + 3]) columns.append(ColumnDef(Identifier(name), ty, False)) i = null + 3 return Descriptor(table, block, table_name, pointers, records, columns) def save(self) -> Result[None, str]: if self.table.descriptor() is not self: return Err('Descriptor is outdated') block = self.block i = 0 data = self.name.identifier.encode('UTF-8') block.override(i, data + b'\0') i += len(data) + 1 for col in self.columns: data = col.name.identifier.encode('UTF-8') block.override(i, data + b'\0') i += len(data) + 1 block.override(i, col.ty.encode()) i += 2 i = BLOCK_SIZE - POINTER_SIZE * (Descriptor.N_POINTERS + 1) for pointer in self.pointers: block.override(i, pointer.to_bytes(POINTER_SIZE, byteorder=BYTEORDER)) i += POINTER_SIZE block.override(i, self.records.to_bytes(POINTER_SIZE, byteorder=BYTEORDER)) self.table.storage.write_block(block) return Ok(None) class Table: def __init__(self, storage: BlockStorage, index: int) -> None: self.storage = storage self.index = index self._descriptor: Optional[Descriptor] = None def descriptor(self) -> Descriptor: if self._descriptor is None: self._descriptor = Descriptor.decode(self) return self._descriptor def get_table_name(self) -> Identifier: return self.descriptor().name def set_table_name(self, new_name: Identifier) -> None: descriptor = self.descriptor() descriptor.name = new_name descriptor.save().ok() def get_columns(self) -> List[ColumnDef]: return self.descriptor().columns def count_records(self) -> int: return 
self.descriptor().records def add_column(self, column: ColumnDef): assert self.count_records() == 0, 'Adding column to non-empty table' descriptor = self.descriptor() descriptor.columns.append(column) descriptor.save().ok() def get_or_allocate_page(self, index: int) -> Block: pointer = self._get_page_pointer(index) if pointer == 0: self._allocate_page(index) pointer = self._get_page_pointer(index) return self.storage.read_block(pointer) def _get_page_pointer(self, index: int) -> int: if 0 <= index < LVL0: pointer = self._get_lvl0(index) elif LVL0 <= index < LVL1: pointer = self._get_lvl1(index) elif LVL1 <= index < LVL2: pointer = self._get_lvl2(index) elif LVL2 <= index < LVL3: pointer = self._get_lvl3(index) else: pointer = 0 return pointer def _allocate_page(self, index: int) -> None: assert self._get_page_pointer(index) == 0 if 0 <= index < LVL0: self._allocate_lvl0(index) elif LVL0 <= index < LVL1: self._allocate_lvl1(index) elif LVL1 <= index < LVL2: self._allocate_lvl2(index) elif LVL2 <= index < LVL3: self._allocate_lvl3(index) def _allocate_lvl0(self, index: int) -> None: assert 0 <= index < LVL0 assert 0 == self._get_lvl0(index) lvl0 = self.storage.allocate_block().idx descriptor = self.descriptor() descriptor.pointers[index] = lvl0 descriptor.save().ok() def _allocate_lvl1(self, index: int) -> None: assert LVL0 <= index < LVL1 assert 0 == self._get_lvl1(index) index -= LVL0 descriptor = self.descriptor() lvl1 = descriptor.pointers[DIRECT_POINTERS] if lvl1 == 0: lvl1 = self.storage.allocate_block().idx descriptor.pointers[DIRECT_POINTERS] = lvl1 descriptor.save() self._allocate_last_mile(lvl1, index) def _allocate_lvl2(self, index: int) -> None: assert LVL1 <= index < LVL2 assert 0 == self._get_lvl2(index) index -= LVL1 descriptor = self.descriptor() lvl2 = descriptor.pointers[DIRECT_POINTERS + 1] if lvl2 == 0: lvl2 = self.storage.allocate_block().idx descriptor.pointers[DIRECT_POINTERS + 1] = lvl2 descriptor.save() block = self.storage.read_block(lvl2) lvl1 = block.get_pointer(index // POINTERS_PER_LVL1) if lvl1 == 0: lvl1 = self.storage.allocate_block().idx block.set_pointer(index // POINTERS_PER_LVL1, lvl1) self.storage.write_block(block) self._allocate_last_mile(lvl1, index) def _allocate_lvl3(self, index: int) -> None: assert LVL2 <= index < LVL3 assert 0 == self._get_lvl3(index) index -= LVL2 descriptor = self.descriptor() lvl3 = descriptor.pointers[DIRECT_POINTERS + 2] if lvl3 == 0: lvl3 = self.storage.allocate_block().idx descriptor.pointers[DIRECT_POINTERS + 2] = lvl3 descriptor.save() block = self.storage.read_block(lvl3) lvl2 = block.get_pointer(index // POINTERS_PER_LVL2) if lvl2 == 0: lvl2 = self.storage.allocate_block().idx block.set_pointer(index // POINTERS_PER_LVL2, lvl2) self.storage.write_block(block) block = self.storage.read_block(lvl2) lvl1 = block.get_pointer(index // POINTERS_PER_LVL1) if lvl1 == 0: lvl1 = self.storage.allocate_block().idx block.set_pointer(index // POINTERS_PER_LVL1, lvl1) self.storage.write_block(block) self._allocate_last_mile(lvl1, index) def _allocate_last_mile(self, pointer: int, index: int) -> None: lvl0 = self.storage.allocate_block().idx block = self.storage.read_block(pointer) block.set_pointer(index % POINTERS_PER_LVL1, lvl0) self.storage.write_block(block) def _get_lvl0(self, index: int) -> int: assert 0 <= index < LVL0 lvl0 = self.descriptor().pointers[index] return lvl0 def _get_lvl1(self, index: int) -> int: assert LVL0 <= index < LVL1 index -= LVL0 lvl1 = self.descriptor().pointers[DIRECT_POINTERS] if lvl1 == 0: return 0 
block = self.storage.read_block(lvl1) lvl0 = block.get_pointer(index) return lvl0 def _get_lvl2(self, index: int) -> int: assert LVL1 <= index < LVL2 index -= LVL1 lvl2 = self.descriptor().pointers[DIRECT_POINTERS + 1] if lvl2 == 0: return 0 block = self.storage.read_block(lvl2) lvl1 = block.get_pointer(index // POINTERS_PER_LVL1) if lvl1 == 0: return 0 block = self.storage.read_block(lvl1) lvl0 = block.get_pointer(index % POINTERS_PER_LVL1) return lvl0 def _get_lvl3(self, index: int) -> int: assert LVL2 <= index < LVL3 index -= LVL2 lvl3 = self.descriptor().pointers[DIRECT_POINTERS + 2] if lvl3 == 0: return 0 block = self.storage.read_block(lvl3) lvl2 = block.get_pointer(index // POINTERS_PER_LVL2) if lvl2 == 0: return 0 block = self.storage.read_block(lvl2) lvl1 = block.get_pointer(index // POINTERS_PER_LVL1) if lvl1 == 0: return 0 block = self.storage.read_block(lvl1) lvl0 = block.get_pointer(index % POINTERS_PER_LVL1) return lvl0 def _increment_record_counter(self) -> int: descriptor = self.descriptor() rc = descriptor.records descriptor.records += 1 descriptor.save().ok() return rc def page_and_offset(self, record: int) -> (int, int): record_offset = self.record_size * record page = record_offset // BLOCK_SIZE offset = record_offset % BLOCK_SIZE return page, offset def _read_record(self, index: int) -> bytes: page, offset = self.page_and_offset(index) return self._read(page, offset, self.record_size) def _read(self, page: int, offset: int, n: int) -> bytes: record = bytearray() block = self.get_or_allocate_page(page) chunk = block[offset:min(offset + n, BLOCK_SIZE)] record.extend(chunk) first = len(chunk) i = 1 while first + (i + 1) * BLOCK_SIZE <= n: block = self.get_or_allocate_page(page + i) record.extend(block) i += 1 if len(record) < n: block = self.get_or_allocate_page(page + i) chunk = block[:n - len(record)] record.extend(chunk) assert len(record) == n return bytes(record) def _write_record(self, record: bytes, index: int) -> None: page, offset = self.page_and_offset(index) self._write(page, offset, record)
MIT License
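A rough usage sketch of the record above. It assumes `table` is a Table backed by an open BlockStorage, and that `record_size` is defined on it (the attribute is used by page_and_offset() but not defined in the excerpt):

raw = b"\x01" * table.record_size              # one encoded record (hypothetical payload)
index = table._increment_record_counter()      # claim the next record slot
page, offset = table.page_and_offset(index)    # virtual page + offset within it
table._write(page, offset, raw)                # may span several blocks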
aroberge/friendly-traceback
friendly/token_utils.py
dedent
python
def dedent(tokens, nb):
    line = untokenize(tokens)
    line = line[nb:]
    return tokenize(line)
Given a list of tokens, produces an equivalent list corresponding to a line of code with the first nb characters removed.
https://github.com/aroberge/friendly-traceback/blob/ea2164baf71f9595b16e685b3fd207056074d04d/friendly/token_utils.py#L355-L361
import ast import keyword import tokenize as py_tokenize import sys from io import StringIO from . import debug_helper _token_format = "type={type} string={string} start={start} end={end} line={line}" class Token: def __init__(self, token): self.type = token[0] self.string = token[1] self.start = self.start_row, self.start_col = token[2] self.end = self.end_row, self.end_col = token[3] self.line = token[4] def copy(self): return Token((self.type, self.string, self.start, self.end, self.line)) def __eq__(self, other): if hasattr(other, "string"): return self.string == other.string if isinstance(other, str): return self.string == other raise TypeError( "A token can only be compared to another token or to a string." ) def __repr__(self): return _token_format.format( type="%s (%s)" % (self.type, py_tokenize.tok_name[self.type]), string=repr(self.string), start=str(self.start), end=str(self.end), line=repr(self.line), ) def __str__(self): return self.string def is_comment(self): return self.type == py_tokenize.COMMENT def is_identifier(self): return self.string.isidentifier() and not self.is_keyword() def is_name(self): return self.type == py_tokenize.NAME def is_keyword(self): return keyword.iskeyword(self.string) or self.string in ["__debug__", "..."] def is_number(self): return self.type == py_tokenize.NUMBER def is_operator(self): return self.type == py_tokenize.OP def is_float(self): return self.is_number() and isinstance(ast.literal_eval(self.string), float) def is_integer(self): return self.is_number() and isinstance(ast.literal_eval(self.string), int) def is_complex(self): return self.is_number() and isinstance(ast.literal_eval(self.string), complex) def is_space(self): return self.type in ( py_tokenize.INDENT, py_tokenize.DEDENT, py_tokenize.NEWLINE, py_tokenize.NL, py_tokenize.ENDMARKER, ) def is_string(self): return self.type == py_tokenize.STRING def immediately_before(self, other): if not isinstance(other, Token): return False return self.end_row == other.start_row and self.end_col == other.start_col def immediately_after(self, other): if not isinstance(other, Token): return False return other.immediately_before(self) def is_assignment(op): ops = [ "=", "+=", "-=", "*=", "@=", "/=", "//=", "%=", "**=", ">>=", "<<=", "&=", "^=", "|=", ] if sys.version_info >= (3, 8): ops.append(":=") return op in ops or (hasattr(op, "string") and op.string in ops) def is_bitwise(op): ops = ["^", "&", "|", "<<", ">>", "~"] return op in ops or (hasattr(op, "string") and op.string in ops) def is_comparison(op): ops = ["<", ">", "<=", ">=", "==", "!="] return op in ops or (hasattr(op, "string") and op.string in ops) def is_math_op(op): ops = ["+", "-", "*", "**", "@", "/", "//", "%"] return op in ops or (hasattr(op, "string") and op.string in ops) def is_operator(op): part_ops = ["!", ":"] return ( is_assignment(op) or is_bitwise(op) or is_comparison(op) or is_math_op(op) or op in part_ops or (hasattr(op, "string") and op.string in part_ops) ) def fix_empty_line(source, tokens): nb = 0 for char in reversed(source): if char in (" ", "\t"): nb += 1 else: break tokens[-1].string = source[-nb:] def tokenize(source): tokens = [] try: for tok in py_tokenize.generate_tokens(StringIO(source).readline): token = Token(tok) tokens.append(token) except IndentationError as e: try: _, linenumber, col, line = e.args[1] type_ = py_tokenize.NAME start = (linenumber, col) end = (linenumber, len(line)) string = line[col:].strip() token = Token((type_, string, start, end, line)) tokens.append(token) return tokens except 
Exception as e: debug_helper.log( "after IndentationError, error from token_utils.tokenize()" ) debug_helper.log(repr(e)) return tokens except (py_tokenize.TokenError, Exception): return tokens if source.endswith((" ", "\t")): fix_empty_line(source, tokens) return tokens def get_significant_tokens(source): try: tokens = tokenize(source) except Exception as e: debug_helper.log("Exception from token_utils.get_significant_tokens()") debug_helper.log_error(e) return [] return remove_meaningless_tokens(tokens) def remove_meaningless_tokens(tokens): new_tokens = [] for tok in tokens: if not tok.string.strip() or tok.is_comment(): continue new_tokens.append(tok) return new_tokens def get_lines(source): lines = [] current_row = -1 new_line = [] try: for tok in py_tokenize.generate_tokens(StringIO(source).readline): token = Token(tok) if token.start_row != current_row: current_row = token.start_row if new_line: lines.append(new_line) new_line = [] new_line.append(token) lines.append(new_line) except (py_tokenize.TokenError, Exception): debug_helper.log("Exception raise in token_utils.get_lines") return lines if source.endswith((" ", "\t")): fix_empty_line(source, lines[-1]) return lines def strip_comment(line): tokens = [] try: for tok in py_tokenize.generate_tokens(StringIO(line).readline): token = Token(tok) if token.is_comment(): continue tokens.append(token) except py_tokenize.TokenError: pass return untokenize(tokens) def find_substring_index(main, substring): main_tokens = [tok.string for tok in get_significant_tokens(main)] sub_tokens = [tok.string for tok in get_significant_tokens(substring)] for index, token in enumerate(main_tokens): if token == sub_tokens[0]: for i, tok in enumerate(main_tokens[index : index + len(sub_tokens)]): if tok != sub_tokens[i]: break else: return index return -1
MIT License
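A small illustration of dedent() from the record above. untokenize() is used elsewhere in the module but not shown in the excerpt, so this is a sketch rather than a verified snippet:

line = "    x = 1  # indented by four spaces"
tokens = tokenize(line)
dedented = dedent(tokens, 4)     # tokens for "x = 1  # indented by four spaces"
print(untokenize(dedented))      # untokenize() comes from the same module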
google-research/scenic
scenic/train_lib/pretrain_utils.py
restore_pretrained_checkpoint
python
def restore_pretrained_checkpoint(
    checkpoint_path: str,
    train_state: Optional[train_utils.TrainState] = None,
    assert_exist: bool = False,
    step: Optional[int] = None) -> train_utils.TrainState:
  if assert_exist:
    glob_path = os.path.join(checkpoint_path, 'checkpoint_*')
    if not gfile.glob(glob_path):
      raise ValueError('No checkpoint for the pretrained model is found in: '
                       f'{checkpoint_path}')
  restored_train_state = checkpoints.restore_checkpoint(checkpoint_path, None,
                                                        step)
  if restored_train_state is None:
    raise ValueError('No checkpoint for the pretrained model is found in: '
                     f'{checkpoint_path}')
  (restored_params,
   restored_model_state) = get_params_and_model_state_dict(restored_train_state)
  restored_params = flax.core.freeze(restored_params)
  restored_model_state = flax.core.freeze(restored_model_state)
  if train_state:
    new_train_state = train_state
    new_optimizer = train_state.optimizer.replace(
        target=inspect_params(
            expected_params=train_state.optimizer.target,
            restored_params=restored_params,
            fail_if_extra=False,
            fail_if_missing=False,
            fail_if_shapes_mismatch=False))
  else:
    new_train_state = train_utils.TrainState()
    new_optimizer = {'target': restored_params}
  new_train_state = new_train_state.replace(
      optimizer=new_optimizer,
      model_state=restored_model_state,
      global_step=int(restored_train_state['global_step']),
      rng=restored_train_state['rng'],
      accum_train_time=restored_train_state.get('accum_train_time', 0))
  return new_train_state
Restores the last checkpoint.

First restores the checkpoint, which is an instance of TrainState that holds the state of training. This function also takes care of converting pre-Linen checkpoints.

Args:
  checkpoint_path: Directory from which to restore the checkpoint.
  train_state: An instance of TrainState that holds the state of training.
  assert_exist: Assert that at least one checkpoint exists in the given path.
  step: Step number to load, or None to load the latest. If specified, checkpoint_path must be a directory.

Returns:
  The restored training state.
https://github.com/google-research/scenic/blob/185b77ccc82291f59ea4c744cf288bfda09ee1b9/scenic/train_lib/pretrain_utils.py#L169-L226
import collections import os import re from typing import Any, Dict, Mapping, List, Optional, Union, Tuple from absl import logging import flax from flax.training import checkpoints import jax import numpy as np from scenic.train_lib import train_utils from tensorflow.io import gfile PyTree = Union[Mapping[str, Mapping], Any] def get_params_and_model_state_dict( restored_train_state: PyTree) -> Tuple[PyTree, Optional[PyTree]]: restored_params = restored_train_state['optimizer']['target'] restored_model_state = restored_train_state.get('model_state') if 'params' in restored_params: restored_params = restored_params['params'] restored_params = dict(checkpoints.convert_pre_linen(restored_params)) if restored_model_state: restored_model_state = checkpoints.convert_pre_linen( flax.traverse_util.unflatten_dict({ tuple(k.split('/')[1:]): v for k, v in restored_model_state.items() })) return restored_params, restored_model_state def _replace_dict(model: PyTree, restored: PyTree, ckpt_prefix_path: Optional[List[str]] = None, model_prefix_path: Optional[List[str]] = None, name_mapping: Optional[Mapping[str, str]] = None, skip_regex: Optional[str] = None) -> PyTree: model = flax.core.unfreeze(model) restored = flax.core.unfreeze(restored) if ckpt_prefix_path: for p in ckpt_prefix_path: restored = restored[p] if model_prefix_path: for p in reversed(model_prefix_path): restored = {p: restored} restored_flat = flax.traverse_util.flatten_dict( dict(restored), keep_empty_nodes=True) model_flat = flax.traverse_util.flatten_dict( dict(model), keep_empty_nodes=True) for m_key, m_params in restored_flat.items(): for name, to_replace in name_mapping.items(): m_key = tuple(to_replace if k == name else k for k in m_key) m_key_str = '/'.join(m_key) if m_key not in model_flat: logging.warning( '%s in checkpoint doesn\'t exist in model. 
Skip.', m_key_str) continue if skip_regex and re.findall(skip_regex, m_key_str): logging.info('Skip loading parameter %s.', m_key_str) continue logging.info('Loading %s from checkpoint into model', m_key_str) model_flat[m_key] = m_params return flax.core.freeze(flax.traverse_util.unflatten_dict(model_flat)) def init_from_pretrain_state( train_state: train_utils.TrainState, pretrain_state: Mapping[str, Any], ckpt_prefix_path: Optional[List[str]] = None, model_prefix_path: Optional[List[str]] = None, name_mapping: Optional[Mapping[str, str]] = None, skip_regex: Optional[str] = None) -> train_utils.TrainState: name_mapping = name_mapping or {} (restored_params, restored_model_state) = get_params_and_model_state_dict(pretrain_state) model_params = train_state.optimizer.target model_params = _replace_dict(model_params, restored_params, ckpt_prefix_path, model_prefix_path, name_mapping, skip_regex) new_optimizer = train_state.optimizer.replace( target=model_params) train_state = train_state.replace( optimizer=new_optimizer) if (restored_model_state is not None and train_state.model_state is not None and train_state.model_state): if model_prefix_path: model_prefix_path = ['batch_stats'] + model_prefix_path if 'batch_stats' in restored_model_state: ckpt_prefix_path = ckpt_prefix_path or [] ckpt_prefix_path = ['batch_stats'] + ckpt_prefix_path elif 'batch_stats' not in restored_model_state: model_prefix_path = ['batch_stats'] if ckpt_prefix_path and ckpt_prefix_path[0] != 'batch_stats': ckpt_prefix_path = ['batch_stats'] + ckpt_prefix_path model_state = _replace_dict(train_state.model_state, restored_model_state, ckpt_prefix_path, model_prefix_path, name_mapping, skip_regex) train_state = train_state.replace( model_state=model_state) return train_state
Apache License 2.0
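A hedged usage sketch for restore_pretrained_checkpoint(); the checkpoint directory is hypothetical, and the attribute access follows the no-train_state branch shown in the function, where the optimizer is a plain dict:

restored = restore_pretrained_checkpoint(
    '/path/to/pretrained_dir',   # hypothetical directory containing checkpoint_* files
    train_state=None,            # build a fresh TrainState from the checkpoint
    assert_exist=True)
params = restored.optimizer['target']   # frozen restored parameters
step = restored.global_step             # step recorded in the checkpoint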
christophercrouzet/revl
revl.py
Context.__init__
python
def __init__(self, **kwargs):
    self.dg = OpenMaya.MDGModifier()
    self.dag = OpenMaya.MDagModifier()
    self.transforms = []
    self.__dict__.update(kwargs)
Constructor.

Parameters
----------
kwargs
    Keyword arguments to define additional attributes.
https://github.com/christophercrouzet/revl/blob/ed6be7c5ffc454b94a2c984fba3adfc204d2d741/revl.py#L66-L77
__all__ = ['NULL_OBJ', 'Context', 'Command', 'Primitive', 'PrimitiveType', 'validate', 'run', 'pickTransform', 'createDagNode', 'createDgNode', 'createPrimitive', 'createTransform', 'unparent'] __title__ = 'revl' __version__ = '0.2.0' __summary__ = "Helps to benchmark code for Autodesk Maya" __url__ = 'https://github.com/christophercrouzet/revl' __author__ = "Christopher Crouzet" __contact__ = 'christopher.crouzet@gmail.com' __license__ = "MIT" import collections import numbers import random import sys from maya import OpenMaya if sys.version_info[0] == 2: _BUILTIN_MODULE = '__builtin__' def _iteritems(d, **kwargs): return d.iteritems(**kwargs) _range = xrange else: _BUILTIN_MODULE = 'builtins' def _iteritems(d, **kwargs): return iter(d.items(**kwargs)) _range = range _SEQUENCE_TYPES = (list, tuple) NULL_OBJ = OpenMaya.MObject().kNullObj class Context(object):
MIT License
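A minimal sketch of the constructor above: any extra keyword argument simply becomes an attribute on the Context (the `rng` name here is arbitrary):

import random

ctx = Context(rng=random.Random(123))            # 'rng' is an arbitrary extra attribute
assert isinstance(ctx.dag, OpenMaya.MDagModifier)
assert ctx.transforms == []
assert ctx.rng.randint(0, 10) >= 0               # the extra attribute is usable directly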
uwbmrb/pynmrstar
pynmrstar/loop.py
Loop.print_tree
python
def print_tree(self) -> None:
    print(repr(self))
Prints a summary, tree style, of the loop.
https://github.com/uwbmrb/pynmrstar/blob/c6e3cdccb4aa44dfbc3b4e984837a6bcde3cf171/pynmrstar/loop.py#L818-L821
import json import warnings from copy import deepcopy from csv import reader as csv_reader, writer as csv_writer from io import StringIO from itertools import chain from typing import TextIO, BinaryIO, Union, List, Optional, Any, Dict, Callable, Tuple from pynmrstar import definitions, utils, entry as entry_mod from pynmrstar._internal import _json_serialize, _interpret_file from pynmrstar.exceptions import InvalidStateError from pynmrstar.parser import Parser from pynmrstar.schema import Schema class Loop(object): def __contains__(self, item: Any) -> bool: if isinstance(item, (list, tuple)): to_process: List[str] = list(item) elif isinstance(item, str): to_process = [item] else: return False lc_tags = self._lc_tags for tag in to_process: if utils.format_tag(tag).lower() not in lc_tags: return False return True def __eq__(self, other) -> bool: if not isinstance(other, Loop): return False return (self.category, self._tags, self.data) == (other.category, other._tags, other.data) def __getitem__(self, item: Union[int, str, List[str], Tuple[str]]) -> list: try: return self.data[item] except TypeError: if isinstance(item, tuple): item = list(item) return self.get_tag(tags=item) def __init__(self, **kwargs) -> None: self._tags: List[str] = [] self.data: List[List[Any]] = [] self.category: Optional[str] = None self.source: str = "unknown" star_buffer: StringIO = StringIO("") if 'source' in kwargs: self.source = kwargs['source'] if 'category' in kwargs: self.category = utils.format_category(kwargs['category']) return if len(kwargs) == 0: raise ValueError("You should not directly instantiate a Loop using this method. Instead use the " "class methods: Loop.from_scratch(), Loop.from_string(), Loop.from_template(), " "Loop.from_file(), and Loop.from_json().") if 'the_string' in kwargs: star_buffer = StringIO(kwargs['the_string']) self.source = "from_string()" elif 'file_name' in kwargs: star_buffer = _interpret_file(kwargs['file_name']) self.source = f"from_file('{kwargs['file_name']}')" elif 'tag_prefix' in kwargs: tags = Loop._get_tags_from_schema(kwargs['tag_prefix'], all_tags=kwargs['all_tags'], schema=kwargs['schema']) for tag in tags: self.add_tag(tag) return if 'csv' in kwargs and kwargs['csv']: csv_file = csv_reader(star_buffer) self.add_tag(next(csv_file)) for row in csv_file: self.add_data(row, convert_data_types=kwargs.get('convert_data_types', False)) self.source = f"from_csv('{kwargs['csv']}')" return tmp_entry = entry_mod.Entry.from_scratch(0) star_buffer = StringIO(f"data_0 save_internaluseyoushouldntseethis_frame _internal.use internal " f"{star_buffer.read()} save_") parser = Parser(entry_to_parse_into=tmp_entry) parser.parse(star_buffer.read(), source=self.source, convert_data_types=kwargs.get('convert_data_types', False)) if len(tmp_entry[0].loops) > 1: raise ValueError("You attempted to parse one loop but the source you provided had more than one loop. " "Please either parse all loops as a saveframe or only parse one loop. 
Loops detected: " + str(tmp_entry[0].loops)) self._tags = tmp_entry[0][0].tags self.data = tmp_entry[0][0].data self.category = tmp_entry[0][0].category def __iter__(self) -> list: for row in self.data: yield row def __len__(self) -> int: return len(self.data) def __lt__(self, other) -> bool: if not isinstance(other, Loop): return NotImplemented return self.category < other.category def __repr__(self) -> str: return f"<pynmrstar.Loop '{self.category}'>" def __setitem__(self, key: str, item: Any) -> None: tag = utils.format_tag(key) if tag not in self._tags: raise ValueError(f"Cannot assign to tag '{key}' as it does not exist in this loop.") tag_id = self._tags.index(tag) if len(self[key]) != len(item): raise ValueError("To assign to a tag you must provide a list (or iterable) of a length equal to the " f"number of values that currently exist for that tag. The tag '{key}' currently has" f" {len(self[key])} values and you supplied {len(item)} values.") for pos, row in enumerate(self.data): row[tag_id] = item[pos] def __str__(self, skip_empty_loops: bool = False, skip_empty_tags: bool = False) -> str: if len(self.data) == 0: if skip_empty_loops: return "" else: if len(self._tags) == 0: return "\n loop_\n\n stop_\n" if len(self._tags) == 0: raise InvalidStateError("Impossible to print data if there are no associated tags. Error in loop " f"'{self.category}' which contains data but hasn't had any tags added.") self._check_tags_match_data() if skip_empty_tags: has_data = [not all([_ in definitions.NULL_VALUES for _ in column]) for column in zip(*self.data)] return self.filter([tag for x, tag in enumerate(self._tags) if has_data[x]]).format() return_chunks = ["\n loop_\n"] format_string = " %-s\n" if self.category is None: raise InvalidStateError("The category was never set for this loop. Either add a tag with the category " "intact, specify it when generating the loop, or set it using Loop.set_category().") if self.category is None: for tag in self._tags: return_chunks.append(format_string % tag) else: for tag in self._tags: return_chunks.append(format_string % (self.category + "." + tag)) return_chunks.append("\n") if len(self.data) != 0: working_data = [] title_widths = [4]*len(self.data[0]) for row_pos, row in enumerate(self.data): clean_row = [] for col_pos, x in enumerate(row): try: clean_val = utils.quote_value(x) clean_row.append(clean_val) length = len(clean_val) + 3 if length > title_widths[col_pos] and "\n" not in clean_val: title_widths[col_pos] = length except ValueError: raise InvalidStateError('Cannot generate NMR-STAR for entry, as empty strings are not valid ' 'tag values in NMR-STAR. 
Please either replace the empty strings with' ' None objects, or set pynmrstar.definitions.STR_CONVERSION_DICT[' '\'\'] = None.\n' f'Loop: {self.category} Row: {row_pos} Column: {col_pos}') working_data.append(clean_row) format_string = " " + "%-*s" * len(self._tags) + " \n" for datum in working_data: for pos, item in enumerate(datum): if "\n" in item: datum[pos] = "\n;\n%s;\n" % item tag_width_list = [d for d in zip(title_widths, datum)] return_chunks.append(format_string % tuple(chain.from_iterable(tag_width_list))) return "".join(return_chunks) + "\n stop_\n" @property def _lc_tags(self) -> Dict[str, int]: return {_[1].lower(): _[0] for _ in enumerate(self._tags)} @property def empty(self) -> bool: for row in self.data: for col in row: if col not in definitions.NULL_VALUES: return False return True @property def tags(self) -> List[str]: return self._tags @classmethod def from_file(cls, the_file: Union[str, TextIO, BinaryIO], csv: bool = False, convert_data_types: bool = False): return cls(file_name=the_file, csv=csv, convert_data_types=convert_data_types) @classmethod def from_json(cls, json_dict: Union[dict, str]): if not isinstance(json_dict, dict): try: json_dict = json.loads(json_dict) except (TypeError, ValueError): raise ValueError("The JSON you provided was neither a Python dictionary nor a JSON string.") for check in ['tags', 'category', 'data']: if check not in json_dict: raise ValueError(f"The JSON you provide must be a dictionary and must contain the key '{check}' - even" f" if the key points to None.") ret = Loop.from_scratch() ret._tags = json_dict['tags'] ret.category = json_dict['category'] ret.data = json_dict['data'] ret.source = "from_json()" return ret @classmethod def from_scratch(cls, category: str = None, source: str = "from_scratch()"): return cls(category=category, source=source) @classmethod def from_string(cls, the_string: str, csv: bool = False, convert_data_types: bool = False): return cls(the_string=the_string, csv=csv, convert_data_types=convert_data_types) @classmethod def from_template(cls, tag_prefix: str, all_tags: bool = False, schema: Schema = None): schema = utils.get_schema(schema) return cls(tag_prefix=tag_prefix, all_tags=all_tags, schema=schema, source=f"from_template({schema.version})") @staticmethod def _get_tags_from_schema(category: str, schema: Schema = None, all_tags: bool = False) -> List[str]: schema = utils.get_schema(schema) if not category.startswith("_"): category = "_" + category if not category.endswith("."): category = category + "." tags = [] for item in schema.schema_order: if item.lower().startswith(category.lower()): if all_tags: tags.append(item) else: if schema.schema[item.lower()]["public"] != "I": tags.append(item) if len(tags) == 0: raise InvalidStateError(f"The tag prefix '{category}' has no corresponding tags in the dictionary.") return tags def _check_tags_match_data(self) -> bool: if len(self.data) > 0: for x, row in enumerate(self.data): if len(self._tags) != len(row): raise InvalidStateError(f"The number of tags must match the width of the data. Error in loop " f"'{self.category}'. In this case, there are {len(self._tags)} tags, and " f"row number {x} has {len(row)} tags.") return True def add_data(self, the_list: List[Any], rearrange: bool = False, convert_data_types: bool = False): if not rearrange: if len(the_list) != len(self._tags): raise ValueError("The list must have the same number of elements as the number of tags when adding a " "single row of values! 
Insert tag names first by calling Loop.add_tag().") self.data.append(the_list) return processed_data = [the_list[x:x + len(self._tags)] for x in range(0, len(the_list), len(self._tags))] if len(processed_data[-1]) != len(self._tags): raise ValueError(f"The number of data elements in the list you provided is not an even multiple of the " f"number of tags which are set in the loop. Please either add missing tags using " f"Loop.add_tag() or modify the list of tag values you are adding to be an even multiple " f"of the number of tags. Error in loop '{self.category}'.") if convert_data_types: schema = utils.get_schema() for row in processed_data: for tag_id, datum in enumerate(row): row[tag_id] = schema.convert_tag(self.category + "." + self._tags[tag_id], datum) self.data.extend(processed_data) def add_data_by_tag(self, tag_name: str, value) -> None: warnings.warn("Deprecated: It is recommended to use Loop.add_data() instead for most use cases.", DeprecationWarning) if "." in tag_name: supplied_category = utils.format_category(str(tag_name)) if supplied_category.lower() != self.category.lower(): raise ValueError(f"Category provided in your tag '{supplied_category}' does not match this loop's " f"category '{self.category}'.") pos = self.tag_index(tag_name) if pos is None: raise ValueError(f"The tag '{tag_name}' to which you are attempting to add data does not yet exist. Create " f"the tags using Loop.add_tag() before adding data.") if len(self.data) == 0: self.data.append([]) if len(self.data[-1]) == len(self._tags): self.data.append([]) if len(self.data[-1]) != pos: raise ValueError("You cannot add data out of tag order.") self.data[-1].append(value) def add_missing_tags(self, schema: 'Schema' = None, all_tags: bool = False) -> None: self.add_tag(Loop._get_tags_from_schema(self.category, schema=schema, all_tags=all_tags), ignore_duplicates=True, update_data=True) self.sort_tags() try: self.sort_rows("Ordinal") except ValueError: pass except TypeError: ordinal_idx = self.tag_index("Ordinal") for pos, row in enumerate(self.data): row[ordinal_idx] = pos + 1 def add_tag(self, name: Union[str, List[str]], ignore_duplicates: bool = False, update_data: bool = False) -> None: if isinstance(name, (list, tuple)): for item in name: self.add_tag(item, ignore_duplicates=ignore_duplicates, update_data=update_data) return name = name.strip() if "." in name: if name[0] != ".": category = name[0:name.index(".")] if category[:1] != "_": category = "_" + category if self.category is None: self.category = category elif self.category.lower() != category.lower(): raise ValueError("One loop cannot have tags with different categories (or tags that don't " f"match the loop category)! The loop category is '{self.category}' while " f"the category in the tag was '{category}'.") name = name[name.index(".") + 1:] else: name = name[1:] if self.tag_index(name) is not None: if ignore_duplicates: return else: raise ValueError(f"There is already a tag with the name '{name}' in the loop '{self.category}'.") if name in definitions.NULL_VALUES: raise ValueError(f"Cannot use a null-equivalent value as a tag name. Invalid tag name: '{name}'") if "." in name: raise ValueError(f"There cannot be more than one '.' in a tag name. Invalid tag name: '{name}'") for char in str(name): if char in utils.definitions.WHITESPACE: raise ValueError(f"Tag names can not contain whitespace characters. 
Invalid tag name: '{name}") self._tags.append(name) if update_data: for row in self.data: row.append(None) def clear_data(self) -> None: self.data = [] def compare(self, other) -> List[str]: diffs = [] if self is other: return [] if isinstance(other, str): if str(self) == other: return [] else: return ['String was not exactly equal to loop.'] elif not isinstance(other, Loop): return ['Other object is not of class Loop.'] if str(other) == str(self): return [] try: if str(self.category).lower() != str(other.category).lower(): diffs.append(f"\t\tCategory of loops does not match: '{self.category}' vs '{other.category}'.") if ([x.lower() for x in self._tags] != [x.lower() for x in other.tags]): diffs.append(f"\t\tLoop tag names do not match for loop with category '{self.category}'.") else: if self.data != other.data: self_data = sorted(deepcopy(self.data)) other_data = sorted(deepcopy(other.data)) if self_data != other_data: diffs.append(f"\t\tLoop data does not match for loop with category '{self.category}'.") except AttributeError as err: diffs.append(f"\t\tAn exception occurred while comparing: '{err}'.") return diffs def delete_tag(self, tag: Union[str, List[str]]) -> None: warnings.warn('Please use remove_tag() instead.', DeprecationWarning) return self.remove_tag(tag) def delete_data_by_tag_value(self, tag: str, value: Any, index_tag: str = None) -> List[List[Any]]: warnings.warn('Please use remove_data_by_tag_value() instead.', DeprecationWarning) return self.remove_data_by_tag_value(tag, value, index_tag) def filter(self, tag_list: Union[str, List[str], Tuple[str]], ignore_missing_tags: bool = False): result = Loop.from_scratch() valid_tags = [] if not isinstance(tag_list, (list, tuple)): tag_list = [tag_list] for tag in tag_list: tag_match_index = self.tag_index(tag) if tag_match_index is None: if not ignore_missing_tags: raise KeyError(f"Cannot filter tag '{tag}' as it isn't present in this loop.") continue valid_tags.append(tag) result.add_tag(self._tags[tag_match_index]) results = self.get_tag(valid_tags) if len(valid_tags) == 1: for item in results: result.add_data([item]) else: for row in results: assert isinstance(row, list) result.add_data(row) if result.category is None: result.category = self.category return result def format(self, skip_empty_loops: bool = True, skip_empty_tags: bool = False) -> str: return self.__str__(skip_empty_loops=skip_empty_loops, skip_empty_tags=skip_empty_tags) def get_data_as_csv(self, header: bool = True, show_category: bool = True) -> str: csv_buffer = StringIO() csv_writer_object = csv_writer(csv_buffer) if header: if show_category: csv_writer_object.writerow( [str(self.category) + "." + str(x) for x in self._tags]) else: csv_writer_object.writerow([str(x) for x in self._tags]) for row in self.data: data = [] for piece in row: data.append(piece) csv_writer_object.writerow(data) csv_buffer.seek(0) return csv_buffer.read().replace('\r\n', '\n') def get_json(self, serialize: bool = True) -> Union[dict, str]: loop_dict = { "category": self.category, "tags": self._tags, "data": self.data } if serialize: return json.dumps(loop_dict, default=_json_serialize) else: return loop_dict def get_tag_names(self) -> List[str]: if not self.category: raise InvalidStateError("You never set the category of this loop. 
You must set the category before calling " "this method, either by setting the loop category directly when creating the loop " "using the Loop.from_scratch() class method, by calling loop.set_category(), or by " "adding a fully qualified tag which includes the loop category (for example, " "adding '_Citation_author.Family_name' rather than just 'Family_name').") return [self.category + "." + x for x in self._tags] def get_tag(self, tags: Optional[Union[str, List[str]]] = None, whole_tag: bool = False, dict_result: bool = False) -> Union[List[Any], List[Dict[str, Any]]]: if tags is None: if not dict_result: return self.data else: tags = [self._tags] if not isinstance(tags, list): tags = [tags] lower_tags = deepcopy(tags) for pos, item in enumerate([str(x) for x in lower_tags]): if "." in item and utils.format_category(item).lower() != self.category.lower(): raise ValueError(f"Cannot fetch data with tag '{item}' because the category does not match the " f"category of this loop '{self.category}'.") lower_tags[pos] = utils.format_tag(item).lower() tags_lower = [x.lower() for x in self._tags] tag_mapping = dict(zip(reversed(tags_lower), reversed(range(len(tags_lower))))) tag_ids = [] for pos, query in enumerate(lower_tags): if str(query) in tag_mapping: tag_ids.append(tag_mapping[query]) elif isinstance(query, int): tag_ids.append(query) else: raise KeyError(f"Could not locate the tag with name or ID: '{tags[pos]}' in loop '{self.category}'.") if not dict_result: if whole_tag: result = [[[self.category + "." + self._tags[col_id], row[col_id]] for col_id in tag_ids] for row in self.data] else: result = [[row[col_id] for col_id in tag_ids] for row in self.data] if len(lower_tags) == 1: return [x[0] for x in result] else: return result else: if whole_tag: result = [dict((self.category + "." + self._tags[col_id], row[col_id]) for col_id in tag_ids) for row in self.data] else: result = [dict((self._tags[col_id], row[col_id]) for col_id in tag_ids) for row in self.data] return result
MIT License
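Loop.print_tree() simply prints the loop's repr; a tiny sketch (the category string is chosen arbitrarily):

loop = Loop.from_scratch(category="_Example")
loop.print_tree()   # prints something like: <pynmrstar.Loop '_Example'>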
demisto/demisto-py
demisto_client/demisto_api/models/incident_type.py
IncidentType.locked
python
def locked(self, locked):
    self._locked = locked
Sets the locked of this IncidentType.

:param locked: The locked of this IncidentType.  # noqa: E501
:type: bool
https://github.com/demisto/demisto-py/blob/95d29e07693d27c133f7fe6ef9da13e4b6dbf542/demisto_client/demisto_api/models/incident_type.py#L429-L437
import pprint import re import six from demisto_client.demisto_api.models.reputation_calc_alg import ReputationCalcAlg class IncidentType(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'autorun': 'bool', 'closure_script': 'str', 'color': 'str', 'commit_message': 'str', 'days': 'int', 'days_r': 'int', 'default': 'bool', 'disabled': 'bool', 'hours': 'int', 'hours_r': 'int', 'id': 'str', 'locked': 'bool', 'modified': 'datetime', 'name': 'str', 'playbook_id': 'str', 'pre_processing_script': 'str', 'prev_name': 'str', 'readonly': 'bool', 'reputation_calc': 'ReputationCalcAlg', 'should_commit': 'bool', 'sla': 'int', 'sla_reminder': 'int', 'sort_values': 'list[str]', 'system': 'bool', 'vc_should_ignore': 'bool', 'version': 'int', 'weeks': 'int', 'weeks_r': 'int' } attribute_map = { 'autorun': 'autorun', 'closure_script': 'closureScript', 'color': 'color', 'commit_message': 'commitMessage', 'days': 'days', 'days_r': 'daysR', 'default': 'default', 'disabled': 'disabled', 'hours': 'hours', 'hours_r': 'hoursR', 'id': 'id', 'locked': 'locked', 'modified': 'modified', 'name': 'name', 'playbook_id': 'playbookId', 'pre_processing_script': 'preProcessingScript', 'prev_name': 'prevName', 'readonly': 'readonly', 'reputation_calc': 'reputationCalc', 'should_commit': 'shouldCommit', 'sla': 'sla', 'sla_reminder': 'slaReminder', 'sort_values': 'sortValues', 'system': 'system', 'vc_should_ignore': 'vcShouldIgnore', 'version': 'version', 'weeks': 'weeks', 'weeks_r': 'weeksR' } def __init__(self, autorun=None, closure_script=None, color=None, commit_message=None, days=None, days_r=None, default=None, disabled=None, hours=None, hours_r=None, id=None, locked=None, modified=None, name=None, playbook_id=None, pre_processing_script=None, prev_name=None, readonly=None, reputation_calc=None, should_commit=None, sla=None, sla_reminder=None, sort_values=None, system=None, vc_should_ignore=None, version=None, weeks=None, weeks_r=None): self._autorun = None self._closure_script = None self._color = None self._commit_message = None self._days = None self._days_r = None self._default = None self._disabled = None self._hours = None self._hours_r = None self._id = None self._locked = None self._modified = None self._name = None self._playbook_id = None self._pre_processing_script = None self._prev_name = None self._readonly = None self._reputation_calc = None self._should_commit = None self._sla = None self._sla_reminder = None self._sort_values = None self._system = None self._vc_should_ignore = None self._version = None self._weeks = None self._weeks_r = None self.discriminator = None if autorun is not None: self.autorun = autorun if closure_script is not None: self.closure_script = closure_script if color is not None: self.color = color if commit_message is not None: self.commit_message = commit_message if days is not None: self.days = days if days_r is not None: self.days_r = days_r if default is not None: self.default = default if disabled is not None: self.disabled = disabled if hours is not None: self.hours = hours if hours_r is not None: self.hours_r = hours_r if id is not None: self.id = id if locked is not None: self.locked = locked if modified is not None: self.modified = modified if name is not None: self.name = name if playbook_id is not None: self.playbook_id = playbook_id if pre_processing_script is not None: self.pre_processing_script = 
pre_processing_script if prev_name is not None: self.prev_name = prev_name if readonly is not None: self.readonly = readonly if reputation_calc is not None: self.reputation_calc = reputation_calc if should_commit is not None: self.should_commit = should_commit if sla is not None: self.sla = sla if sla_reminder is not None: self.sla_reminder = sla_reminder if sort_values is not None: self.sort_values = sort_values if system is not None: self.system = system if vc_should_ignore is not None: self.vc_should_ignore = vc_should_ignore if version is not None: self.version = version if weeks is not None: self.weeks = weeks if weeks_r is not None: self.weeks_r = weeks_r @property def autorun(self): return self._autorun @autorun.setter def autorun(self, autorun): self._autorun = autorun @property def closure_script(self): return self._closure_script @closure_script.setter def closure_script(self, closure_script): self._closure_script = closure_script @property def color(self): return self._color @color.setter def color(self, color): self._color = color @property def commit_message(self): return self._commit_message @commit_message.setter def commit_message(self, commit_message): self._commit_message = commit_message @property def days(self): return self._days @days.setter def days(self, days): self._days = days @property def days_r(self): return self._days_r @days_r.setter def days_r(self, days_r): self._days_r = days_r @property def default(self): return self._default @default.setter def default(self, default): self._default = default @property def disabled(self): return self._disabled @disabled.setter def disabled(self, disabled): self._disabled = disabled @property def hours(self): return self._hours @hours.setter def hours(self, hours): self._hours = hours @property def hours_r(self): return self._hours_r @hours_r.setter def hours_r(self, hours_r): self._hours_r = hours_r @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def locked(self): return self._locked @locked.setter
Apache License 2.0
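A short illustration of the generated property: assigning to .locked stores the value on the private _locked attribute that the getter reads back (the field values are made up):

incident_type = IncidentType(name="Phishing", locked=False)
incident_type.locked = True           # invokes the setter shown above
assert incident_type.locked is True   # read back through the getter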
radio-astro-tools/radio-beam
radio_beam/commonbeam.py
fits_in_largest
python
def fits_in_largest(beams, large_beam=None):
    if large_beam is None:
        large_beam = beams.largest_beam()

    large_hdr_keywords = large_beam.to_header_keywords()

    majors = beams.major.to(u.deg).value
    minors = beams.minor.to(u.deg).value
    pas = beams.pa.to(u.deg).value

    atol_limit = 1e-12

    for major, minor, pa in zip(majors, minors, pas):
        equal = abs(large_hdr_keywords['BMAJ'] - major) < atol_limit
        equal = equal and (abs(large_hdr_keywords['BMIN'] - minor) < atol_limit)

        iscircular = (major - minor) / minor < 1e-6

        if not iscircular:
            equal = equal and (abs(((large_hdr_keywords['BPA'] % np.pi) -
                                    (pa % np.pi))) < atol_limit)

        if equal:
            continue

        out = deconvolve_optimized(large_hdr_keywords,
                                   {'BMAJ': major,
                                    'BMIN': minor,
                                    'BPA': pa},
                                   failure_returns_pointlike=True)

        if np.any([ax == 0. for ax in out[:2]]):
            return False

    return True
Test if all beams can be deconvolved by the largest beam
https://github.com/radio-astro-tools/radio-beam/blob/39b9a6a77332040dd1fa1e244290bb81280819a7/radio_beam/commonbeam.py#L331-L375
import numpy as np import astropy.units as u try: from scipy import optimize as opt from scipy.spatial import ConvexHull HAS_SCIPY = True except ImportError: HAS_SCIPY = False from .beam import Beam from .utils import BeamError, transform_ellipse, deconvolve_optimized __all__ = ['commonbeam', 'common_2beams', 'getMinVolEllipse', 'common_manybeams_mve'] def commonbeam(beams, method='pts', **method_kwargs): if beams.size == 1: return beams[0] elif fits_in_largest(beams): return beams.largest_beam() else: if beams.size == 2: try: return common_2beams(beams) except (ValueError, BeamError): pass if method == 'pts': return common_manybeams_mve(beams, **method_kwargs) elif method == 'opt': return common_manybeams_opt(beams, **method_kwargs) else: raise ValueError("method must be 'pts' or 'opt'.") def common_2beams(beams, check_deconvolution=True): if beams.size != 2: raise BeamError("This method is only valid for two beams.") if (~beams.isfinite).all(): raise BeamError("All beams in the object are invalid.") large_beam = beams.largest_beam() large_major = large_beam.major.to(u.arcsec) large_minor = large_beam.minor.to(u.arcsec) if beams.argmax() == 0: small_beam = beams[1] else: small_beam = beams[0] small_major = small_beam.major.to(u.arcsec) small_minor = small_beam.minor.to(u.arcsec) if small_beam == large_beam: return large_beam deconv_beam = large_beam.deconvolve(small_beam, failure_returns_pointlike=True) if deconv_beam.isfinite: return large_beam if small_beam.iscircular(): common_beam = Beam(large_beam.major, small_beam.major, large_beam.pa) return common_beam pa_diff = ((small_beam.pa.to(u.rad).value - large_beam.pa.to(u.rad).value + np.pi / 2. + np.pi) % np.pi - np.pi / 2.) * u.rad if np.isclose(np.abs(pa_diff).value, np.pi / 2.): larger_major = large_beam.major >= small_beam.major major = large_major if larger_major else small_major minor = small_major if larger_major else small_major pa = large_beam.pa if larger_major else small_beam.pa conv_beam = Beam(major=major, minor=minor, pa=pa) return conv_beam else: major_comb = np.sqrt(large_major * small_major) p = major_comb / large_major q = major_comb / large_minor trans_major_sc, trans_minor_sc, trans_pa_sc = transform_ellipse(small_major, small_minor, pa_diff, p, q) trans_minor_sc = major_comb trans_major_unsc, trans_minor_unsc, trans_pa_unsc = transform_ellipse(trans_major_sc, trans_minor_sc, trans_pa_sc, 1 / p, 1 / q) trans_major = trans_major_unsc.to(u.arcsec) trans_minor = trans_minor_unsc.to(u.arcsec) trans_pa = trans_pa_unsc + large_beam.pa epsilon = 100 * np.finfo(trans_major.dtype).eps * trans_major.unit trans_beam = Beam(major=trans_major + epsilon, minor=trans_minor + epsilon, pa=trans_pa) if check_deconvolution: deconv_large_beam = trans_beam.deconvolve(large_beam, failure_returns_pointlike=True) deconv_prob_beam = trans_beam.deconvolve(small_beam, failure_returns_pointlike=True) if not deconv_large_beam.isfinite or not deconv_prob_beam.isfinite: raise BeamError("Failed to find common beam that both beams can " "be deconvolved by.") common_beam = trans_beam return common_beam def boundingcircle(bmaj, bmin, bpa): thisone = np.argmax(bmaj) return bmaj[thisone], bmaj[thisone], bpa[thisone] def PtoA(bmaj, bmin, bpa): A = np.zeros((2, 2)) A[0, 0] = np.cos(bpa)**2 / bmaj**2 + np.sin(bpa)**2 / bmin**2 A[1, 0] = np.cos(bpa) * np.sin(bpa) * (1 / bmaj**2 - 1 / bmin**2) A[0, 1] = A[1, 0] A[1, 1] = np.sin(bpa)**2 / bmaj**2 + np.cos(bpa)**2 / bmin**2 return A def BinsideA(B, A): try: np.linalg.cholesky(B - A) return True except 
np.linalg.LinAlgError: return False def myobjective_regularized(p, bmajvec, bminvec, bpavec): if p[0] < p[1]: return 1e30 if (p[0] <= bmajvec).any(): return 1e30 A = PtoA(*p) test = np.zeros_like(bmajvec) for idx, (bmx, bmn, bp) in enumerate(zip(bmajvec, bminvec, bpavec)): test[idx] = BinsideA(PtoA(bmx, bmn, bp), A) obj = 1 / np.linalg.det(A) if np.all(test): return obj else: return obj * 1e30 def common_manybeams_opt(beams, p0=None, opt_method='Nelder-Mead', optdict={'maxiter': 5000, 'ftol': 1e-14, 'maxfev': 5000}, verbose=False, brute=False, brute_steps=40): raise NotImplementedError("This method is not fully tested. Remove this " "line for testing purposes.") if not HAS_SCIPY: raise ImportError("common_manybeams_opt requires scipy.optimize.") bmaj = beams.major.value bmin = beams.minor.value bpa = beams.pa.to(u.rad).value if p0 is None: p0 = boundingcircle(bmaj, bmin, bpa) p0 = (1.1 * p0[0], 1.1 * p0[1], p0[2]) if brute: maj_range = [beams.major.max(), 1.5 * beams.major.max()] maj_step = (maj_range[1] - maj_range[0]) / brute_steps min_range = [beams.minor.min(), 1.5 * beams.major.max()] min_step = (min_range[1] - min_range[0]) / brute_steps rranges = (slice(maj_range[0], maj_range[1], maj_step), slice(min_range[0], min_range[1], min_step), slice(0, 179.9, 180. / brute_steps)) result = opt.brute(myobjective_regularized, rranges, args=(bmaj, bmin, bpa), full_output=True, finish=opt.fmin) params = result[0] else: result = opt.minimize(myobjective_regularized, p0, method=opt_method, args=(bmaj, bmin, bpa), options=optdict, tol=1e-14) params = result.x if verbose: print(result.viewitems()) if not result.success: raise Warning("Optimization failed") com_beam = Beam(params[0] * beams.major.unit, params[1] * beams.major.unit, (params[2] % np.pi) * u.rad) if not fits_in_largest(beams, com_beam): raise BeamError("Could not find common beam to deconvolve all beams.") return com_beam
BSD 3-Clause New or Revised License
tensorflow/graphics
tensorflow_graphics/io/exr.py
read_exr
python
def read_exr(filename, channel_names=None):
    exr = OpenEXR.InputFile(filename)
    if channel_names is None:
        remaining_channel_names = list(exr.header()['channels'].keys())
        conventional_rgba_names = ['R', 'G', 'B', 'A']
        present_rgba_names = []
        for name in conventional_rgba_names:
            if name in remaining_channel_names:
                present_rgba_names.append(name)
                remaining_channel_names.remove(name)
        channel_names = present_rgba_names + sorted(remaining_channel_names)
    return channels_to_ndarray(exr, channel_names), channel_names
Opens an EXR file and copies the requested channels into an ndarray.

The Python OpenEXR wrapper uses a dictionary for the channel header, so the
ordering of the channels in the underlying file is lost. If channel_names is
not passed, this function orders the output channels with any present RGBA
channels first, followed by the remaining channels in alphabetical order.
By convention, RGBA channels are named 'R', 'G', 'B', 'A', so this function
looks for those strings.

Args:
    filename: The name of the EXR file.
    channel_names: A list of strings naming the channels to read. If None, all
        channels will be read.

Returns:
    A numpy array containing the image data, and a list of the corresponding
    channel names.
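A minimal usage sketch for the read_exr function above. The file name 'image.exr' and the printed channel list are hypothetical, the import path is assumed from the function_path field, and the OpenEXR/Imath packages must be installed.

from tensorflow_graphics.io import exr  # import path assumed from function_path above

# Read every channel: RGBA first, the rest alphabetically.
pixels, names = exr.read_exr('image.exr')          # 'image.exr' is a placeholder
print(names)         # e.g. ['R', 'G', 'B', 'A', 'depth.Z'], depending on the file
print(pixels.shape)  # (height, width, num_channels)

# Read only the RGB channels, in an explicit order.
rgb, rgb_names = exr.read_exr('image.exr', channel_names=['R', 'G', 'B'])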
https://github.com/tensorflow/graphics/blob/d0817aec7dee35635814e925a59d83955459d93c/tensorflow_graphics/io/exr.py#L80-L111
from __future__ import absolute_import from __future__ import division from __future__ import print_function import Imath import numpy as np import OpenEXR from six.moves import range from six.moves import zip _np_to_exr = { np.float16: Imath.PixelType.HALF, np.float32: Imath.PixelType.FLOAT, np.uint32: Imath.PixelType.UINT, } _exr_to_np = dict(list(zip(list(_np_to_exr.values()), list(_np_to_exr.keys())))) def channels_to_ndarray(exr, channel_names): channels_header = exr.header()['channels'] window = exr.header()['dataWindow'] width = window.max.x - window.min.x + 1 height = window.max.y - window.min.y + 1 def read_channel(channel): channel_type = channels_header[channel].type try: numpy_type = _exr_to_np[channel_type.v] except KeyError: raise RuntimeError('Unknown EXR channel type: %s' % str(channel_type)) flat_buffer = np.frombuffer(exr.channel(channel), numpy_type) return np.reshape(flat_buffer, [height, width]) channels = [read_channel(c) for c in channel_names] if any([channels[0].dtype != c.dtype for c in channels[1:]]): raise ValueError('Channels have mixed datatypes: %s' % ', '.join([str(c.dtype) for c in channels])) return np.stack(channels, axis=-1)
Apache License 2.0
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/climate/wink.py
WinkAC.current_operation
python
def current_operation(self):
    if not self.wink.is_on():
        current_op = STATE_OFF
    else:
        wink_mode = self.wink.current_mode()
        if wink_mode == "auto_eco":
            wink_mode = "eco"
        current_op = WINK_STATE_TO_HA.get(wink_mode)
        if current_op is None:
            current_op = STATE_UNKNOWN
    return current_op
Return the current operation, i.e. auto_eco, cool_only or fan_only.
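The mode translation itself is a dictionary lookup with an unknown-state fallback; a standalone sketch of that logic (the mapping below is abbreviated, not the full WINK_STATE_TO_HA table):

STATE_ECO, STATE_COOL, STATE_UNKNOWN = 'eco', 'cool', 'unknown'
WINK_STATE_TO_HA = {'eco': STATE_ECO, 'cool_only': STATE_COOL}  # abbreviated

def to_ha_state(wink_mode):
    if wink_mode == "auto_eco":   # normalised before lookup, as in current_operation
        wink_mode = "eco"
    return WINK_STATE_TO_HA.get(wink_mode, STATE_UNKNOWN)

print(to_ha_state('auto_eco'))   # eco
print(to_ha_state('dry'))        # unknown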
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/climate/wink.py#L429-L440
import asyncio import logging from homeassistant.components.climate import ( ATTR_CURRENT_HUMIDITY, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW, ATTR_TEMPERATURE, STATE_AUTO, STATE_COOL, STATE_ECO, STATE_ELECTRIC, STATE_FAN_ONLY, STATE_GAS, STATE_HEAT, STATE_HEAT_PUMP, STATE_HIGH_DEMAND, STATE_PERFORMANCE, SUPPORT_AUX_HEAT, SUPPORT_AWAY_MODE, SUPPORT_FAN_MODE, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE_HIGH, SUPPORT_TARGET_TEMPERATURE_LOW, ClimateDevice) from homeassistant.components.wink import DOMAIN, WinkDevice from homeassistant.const import ( PRECISION_TENTHS, STATE_OFF, STATE_ON, STATE_UNKNOWN, TEMP_CELSIUS) from homeassistant.helpers.temperature import display_temp as show_temp _LOGGER = logging.getLogger(__name__) ATTR_ECO_TARGET = 'eco_target' ATTR_EXTERNAL_TEMPERATURE = 'external_temperature' ATTR_OCCUPIED = 'occupied' ATTR_RHEEM_TYPE = 'rheem_type' ATTR_SCHEDULE_ENABLED = 'schedule_enabled' ATTR_SMART_TEMPERATURE = 'smart_temperature' ATTR_TOTAL_CONSUMPTION = 'total_consumption' ATTR_VACATION_MODE = 'vacation_mode' ATTR_HEAT_ON = 'heat_on' ATTR_COOL_ON = 'cool_on' DEPENDENCIES = ['wink'] SPEED_LOW = 'low' SPEED_MEDIUM = 'medium' SPEED_HIGH = 'high' HA_STATE_TO_WINK = { STATE_AUTO: 'auto', STATE_COOL: 'cool_only', STATE_ECO: 'eco', STATE_ELECTRIC: 'electric_only', STATE_FAN_ONLY: 'fan_only', STATE_GAS: 'gas', STATE_HEAT: 'heat_only', STATE_HEAT_PUMP: 'heat_pump', STATE_HIGH_DEMAND: 'high_demand', STATE_OFF: 'off', STATE_PERFORMANCE: 'performance', } WINK_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_WINK.items()} SUPPORT_FLAGS_THERMOSTAT = ( SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_HIGH | SUPPORT_TARGET_TEMPERATURE_LOW | SUPPORT_OPERATION_MODE | SUPPORT_AWAY_MODE | SUPPORT_FAN_MODE | SUPPORT_AUX_HEAT) SUPPORT_FLAGS_AC = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_FAN_MODE) SUPPORT_FLAGS_HEATER = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_AWAY_MODE) def setup_platform(hass, config, add_devices, discovery_info=None): import pywink for climate in pywink.get_thermostats(): _id = climate.object_id() + climate.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_devices([WinkThermostat(climate, hass)]) for climate in pywink.get_air_conditioners(): _id = climate.object_id() + climate.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_devices([WinkAC(climate, hass)]) for water_heater in pywink.get_water_heaters(): _id = water_heater.object_id() + water_heater.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_devices([WinkWaterHeater(water_heater, hass)]) class WinkThermostat(WinkDevice, ClimateDevice): @property def supported_features(self): return SUPPORT_FLAGS_THERMOSTAT @asyncio.coroutine def async_added_to_hass(self): self.hass.data[DOMAIN]['entities']['climate'].append(self) @property def temperature_unit(self): return TEMP_CELSIUS @property def device_state_attributes(self): data = {} target_temp_high = self.target_temperature_high target_temp_low = self.target_temperature_low if target_temp_high is not None: data[ATTR_TARGET_TEMP_HIGH] = show_temp( self.hass, self.target_temperature_high, self.temperature_unit, PRECISION_TENTHS) if target_temp_low is not None: data[ATTR_TARGET_TEMP_LOW] = show_temp( self.hass, self.target_temperature_low, self.temperature_unit, PRECISION_TENTHS) if self.external_temperature: data[ATTR_EXTERNAL_TEMPERATURE] = show_temp( self.hass, self.external_temperature, self.temperature_unit, PRECISION_TENTHS) if self.smart_temperature: 
data[ATTR_SMART_TEMPERATURE] = self.smart_temperature if self.occupied: data[ATTR_OCCUPIED] = self.occupied if self.eco_target: data[ATTR_ECO_TARGET] = self.eco_target if self.heat_on: data[ATTR_HEAT_ON] = self.heat_on if self.cool_on: data[ATTR_COOL_ON] = self.cool_on current_humidity = self.current_humidity if current_humidity is not None: data[ATTR_CURRENT_HUMIDITY] = current_humidity return data @property def current_temperature(self): return self.wink.current_temperature() @property def current_humidity(self): if self.wink.current_humidity() is not None: if self.wink.current_humidity() < 1: return self.wink.current_humidity() * 100 return self.wink.current_humidity() return None @property def external_temperature(self): return self.wink.current_external_temperature() @property def smart_temperature(self): return self.wink.current_smart_temperature() @property def eco_target(self): return self.wink.eco_target() @property def occupied(self): return self.wink.occupied() @property def heat_on(self): return self.wink.heat_on() @property def cool_on(self): return self.wink.cool_on() @property def current_operation(self): if not self.wink.is_on(): current_op = STATE_OFF else: current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode()) if current_op == 'aux': return STATE_HEAT if current_op is None: current_op = STATE_UNKNOWN return current_op @property def target_humidity(self): target_hum = None if self.wink.current_humidifier_mode() == 'on': if self.wink.current_humidifier_set_point() is not None: target_hum = self.wink.current_humidifier_set_point() * 100 elif self.wink.current_dehumidifier_mode() == 'on': if self.wink.current_dehumidifier_set_point() is not None: target_hum = self.wink.current_dehumidifier_set_point() * 100 else: target_hum = None return target_hum @property def target_temperature(self): if self.current_operation != STATE_AUTO and not self.is_away_mode_on: if self.current_operation == STATE_COOL: return self.wink.current_max_set_point() elif self.current_operation == STATE_HEAT: return self.wink.current_min_set_point() return None @property def target_temperature_low(self): if self.current_operation == STATE_AUTO: return self.wink.current_min_set_point() return None @property def target_temperature_high(self): if self.current_operation == STATE_AUTO: return self.wink.current_max_set_point() return None @property def is_away_mode_on(self): return self.wink.away() @property def is_aux_heat_on(self): if 'aux' not in self.wink.hvac_modes(): return None if self.wink.current_hvac_mode() == 'aux': return True return False def set_temperature(self, **kwargs): target_temp = kwargs.get(ATTR_TEMPERATURE) target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW) target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH) if target_temp is not None: if self.current_operation == STATE_COOL: target_temp_high = target_temp if self.current_operation == STATE_HEAT: target_temp_low = target_temp if target_temp_low is not None: target_temp_low = target_temp_low if target_temp_high is not None: target_temp_high = target_temp_high self.wink.set_temperature(target_temp_low, target_temp_high) def set_operation_mode(self, operation_mode): op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode) if self.is_aux_heat_on and op_mode_to_set == STATE_HEAT: return self.wink.set_operation_mode(op_mode_to_set) @property def operation_list(self): op_list = ['off'] modes = self.wink.hvac_modes() for mode in modes: if mode == 'aux': continue ha_mode = WINK_STATE_TO_HA.get(mode) if ha_mode is not None: 
op_list.append(ha_mode) else: error = "Invalid operation mode mapping. " + mode + " doesn't map. Please report this." _LOGGER.error(error) return op_list def turn_away_mode_on(self): self.wink.set_away_mode() def turn_away_mode_off(self): self.wink.set_away_mode(False) @property def current_fan_mode(self): if self.wink.current_fan_mode() == 'on': return STATE_ON elif self.wink.current_fan_mode() == 'auto': return STATE_AUTO return None @property def fan_list(self): if self.wink.has_fan(): return self.wink.fan_modes() return None def set_fan_mode(self, fan_mode): self.wink.set_fan_mode(fan_mode.lower()) def turn_aux_heat_on(self): self.wink.set_operation_mode('aux') def turn_aux_heat_off(self): self.set_operation_mode(STATE_HEAT) @property def min_temp(self): minimum = 7 min_min = self.wink.min_min_set_point() min_max = self.wink.min_max_set_point() if self.current_operation == STATE_HEAT: if min_min: return_value = min_min else: return_value = minimum elif self.current_operation == STATE_COOL: if min_max: return_value = min_max else: return_value = minimum elif self.current_operation == STATE_AUTO: if min_min and min_max: return_value = min(min_min, min_max) else: return_value = minimum else: return_value = minimum return return_value @property def max_temp(self): maximum = 35 max_min = self.wink.max_min_set_point() max_max = self.wink.max_max_set_point() if self.current_operation == STATE_HEAT: if max_min: return_value = max_min else: return_value = maximum elif self.current_operation == STATE_COOL: if max_max: return_value = max_max else: return_value = maximum elif self.current_operation == STATE_AUTO: if max_min and max_max: return_value = min(max_min, max_max) else: return_value = maximum else: return_value = maximum return return_value class WinkAC(WinkDevice, ClimateDevice): @property def supported_features(self): return SUPPORT_FLAGS_AC @property def temperature_unit(self): return TEMP_CELSIUS @property def device_state_attributes(self): data = {} target_temp_high = self.target_temperature_high target_temp_low = self.target_temperature_low if target_temp_high is not None: data[ATTR_TARGET_TEMP_HIGH] = show_temp( self.hass, self.target_temperature_high, self.temperature_unit, PRECISION_TENTHS) if target_temp_low is not None: data[ATTR_TARGET_TEMP_LOW] = show_temp( self.hass, self.target_temperature_low, self.temperature_unit, PRECISION_TENTHS) data[ATTR_TOTAL_CONSUMPTION] = self.wink.total_consumption() data[ATTR_SCHEDULE_ENABLED] = self.wink.schedule_enabled() return data @property def current_temperature(self): return self.wink.current_temperature() @property
MIT License
tum-pbs/phiflow
phi/math/_nd.py
upsample2x
python
def upsample2x(grid: Tensor,
               padding: Extrapolation = extrapolation.BOUNDARY,
               dims: tuple or None = None) -> Tensor:
    for i, dim in enumerate(grid.shape.spatial.only(dims)):
        left, center, right = shift(grid, (-1, 0, 1), dim.names, padding, None)
        interp_left = 0.25 * left + 0.75 * center
        interp_right = 0.75 * center + 0.25 * right
        stacked = math.stack([interp_left, interp_right], spatial('_interleave'))
        grid = math.pack_dims(stacked, (dim.name, '_interleave'), dim)
    return grid
Resamples a regular grid to double the number of spatial sample points per dimension.
The grid values at the new points are determined via linear interpolation.

Args:
    grid: half-size grid (Tensor)
    padding: grid extrapolation (default: extrapolation.BOUNDARY)
    dims: dims along which up-sampling is applied. If None, up-sample along all
        spatial dims. (default: None)

Returns:
    double-size grid
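A NumPy sketch of the interpolation rule used by upsample2x along a single dimension (an illustration of the algorithm, not the phi.math API); edge clamping here stands in for BOUNDARY extrapolation:

import numpy as np

def upsample2x_1d(values):
    # Shifted copies with edge clamping (roughly extrapolation.BOUNDARY).
    left = np.concatenate(([values[0]], values[:-1]))
    right = np.concatenate((values[1:], [values[-1]]))
    interp_left = 0.25 * left + 0.75 * values
    interp_right = 0.75 * values + 0.25 * right
    # Interleave: every input cell becomes two output cells.
    out = np.empty(2 * len(values))
    out[0::2] = interp_left
    out[1::2] = interp_right
    return out

print(upsample2x_1d(np.array([0.0, 1.0, 2.0])))
# [0.   0.25 0.75 1.25 1.75 2.  ]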
https://github.com/tum-pbs/phiflow/blob/4a85f8a5029aa4e30a791daa659f2c8e1536e37e/phi/math/_nd.py#L437-L462
from __future__ import division from typing import Tuple import numpy as np from . import _ops as math from . import extrapolation as extrapolation from ._config import GLOBAL_AXIS_ORDER from ._ops import stack from ._shape import Shape, channel, batch, spatial from ._tensors import Tensor, TensorLike, variable_values from ._tensors import wrap from .extrapolation import Extrapolation def vec_abs(vec: Tensor, vec_dim: str or tuple or list or Shape = None): return math.sqrt(math.sum_(vec ** 2, dim=vec.shape.channel if vec_dim is None else vec_dim)) def vec_squared(vec: Tensor, vec_dim: str or tuple or list or Shape = None): return math.sum_(vec ** 2, dim=vec.shape.channel if vec_dim is None else vec_dim) def vec_normalize(vec: Tensor, vec_dim: str or tuple or list or Shape = None): return vec / vec_abs(vec, vec_dim=vec_dim) def cross_product(vec1: Tensor, vec2: Tensor) -> Tensor: vec1 = math.tensor(vec1) vec2 = math.tensor(vec2) spatial_rank = vec1.vector.size if 'vector' in vec1.shape else vec2.vector.size if spatial_rank == 2: assert vec2.vector.exists if vec1.vector.exists: v1_x, v1_y = vec1.vector.unstack() v2_x, v2_y = vec2.vector.unstack() if GLOBAL_AXIS_ORDER.is_x_first: return v1_x * v2_y - v1_y * v2_x else: return - v1_x * v2_y + v1_y * v2_x else: v2_x, v2_y = vec2.vector.unstack() if GLOBAL_AXIS_ORDER.is_x_first: return vec1 * math.stack([-v2_y, v2_x], channel('vector')) else: return vec1 * math.stack([v2_y, -v2_x], channel('vector')) elif spatial_rank == 3: raise NotImplementedError(f'spatial_rank={spatial_rank} not yet implemented') else: raise AssertionError(f'dims = {spatial_rank}. Vector product not available in > 3 dimensions') def normalize_to(target: Tensor, source: float or Tensor, epsilon=1e-5): target_total = math.sum_(target) denominator = math.maximum(target_total, epsilon) if epsilon is not None else target_total source_total = math.sum_(source) return target * (source_total / denominator) def l1_loss(x) -> Tensor: if isinstance(x, Tensor): return math.sum_(abs(x), x.shape.non_batch) elif isinstance(x, TensorLike): return sum([l1_loss(getattr(x, a)) for a in variable_values(x)]) else: raise ValueError(x) def l2_loss(x) -> Tensor: if isinstance(x, Tensor): if x.dtype.kind == complex: x = abs(x) return math.sum_(x ** 2, x.shape.non_batch) * 0.5 elif isinstance(x, TensorLike): return sum([l2_loss(getattr(x, a)) for a in variable_values(x)]) else: raise ValueError(x) def frequency_loss(x, frequency_falloff: float = 100, threshold=1e-5, ignore_mean=False) -> Tensor: if isinstance(x, Tensor): if ignore_mean: x -= math.mean(x, x.shape.non_batch) k_squared = vec_squared(math.fftfreq(x.shape.spatial)) weights = math.exp(-0.5 * k_squared * frequency_falloff ** 2) diff_fft = abs_square(math.fft(x) * weights) diff_fft = math.sqrt(math.maximum(diff_fft, threshold)) return l2_loss(diff_fft) elif isinstance(x, TensorLike): return sum([frequency_loss(getattr(x, a), frequency_falloff, threshold, ignore_mean) for a in variable_values(x)]) else: raise ValueError(x) def abs_square(complex_values: Tensor) -> Tensor: return math.imag(complex_values) ** 2 + math.real(complex_values) ** 2 def shift(x: Tensor, offsets: tuple, dims: tuple or None = None, padding: Extrapolation or None = extrapolation.BOUNDARY, stack_dim: Shape or None = channel('shift')) -> list: if stack_dim is None: assert len(dims) == 1 x = wrap(x) dims = dims if dims is not None else x.shape.spatial.names pad_lower = max(0, -min(offsets)) pad_upper = max(0, max(offsets)) if padding: x = math.pad(x, {axis: (pad_lower, 
pad_upper) for axis in dims}, mode=padding) offset_tensors = [] for offset in offsets: components = [] for dimension in dims: if padding: slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(pad_lower, -pad_upper or None) for dim in dims} else: slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(None, None) for dim in dims} components.append(x[slices]) offset_tensors.append(stack(components, stack_dim) if stack_dim is not None else components[0]) return offset_tensors def extrapolate_valid_values(values: Tensor, valid: Tensor, distance_cells: int = 1) -> Tuple[Tensor, Tensor]: def binarize(x): return math.divide_no_nan(x, x) distance_cells = min(distance_cells, max(values.shape.sizes)) for _ in range(distance_cells): valid = binarize(valid) valid_values = valid * values overlap = valid for dim in values.shape.spatial.names: values_l, values_r = shift(valid_values, (-1, 1), dims=dim, padding=extrapolation.ZERO) valid_values = math.sum_(values_l + values_r + valid_values, dim='shift') mask_l, mask_r = shift(overlap, (-1, 1), dims=dim, padding=extrapolation.ZERO) overlap = math.sum_(mask_l + mask_r + overlap, dim='shift') extp = math.divide_no_nan(valid_values, overlap) values = math.where(valid, values, math.where(binarize(overlap), extp, values)) valid = overlap return values, binarize(valid) def spatial_gradient(grid: Tensor, dx: float or int = 1, difference: str = 'central', padding: Extrapolation or None = extrapolation.BOUNDARY, dims: tuple or None = None, stack_dim: Shape = channel('gradient')): grid = wrap(grid) if difference.lower() == 'central': left, right = shift(grid, (-1, 1), dims, padding, stack_dim=stack_dim) return (right - left) / (dx * 2) elif difference.lower() == 'forward': left, right = shift(grid, (0, 1), dims, padding, stack_dim=stack_dim) return (right - left) / dx elif difference.lower() == 'backward': left, right = shift(grid, (-1, 0), dims, padding, stack_dim=stack_dim) return (right - left) / dx else: raise ValueError('Invalid difference type: {}. 
Can be CENTRAL or FORWARD'.format(difference)) def laplace(x: Tensor, dx: Tensor or float = 1, padding: Extrapolation = extrapolation.BOUNDARY, dims: tuple or None = None): if isinstance(dx, (tuple, list)): dx = wrap(dx, batch('_laplace')) elif isinstance(dx, Tensor) and dx.vector.exists: dx = math.rename_dims(dx, 'vector', batch('_laplace')) if isinstance(x, Extrapolation): return x.spatial_gradient() left, center, right = shift(wrap(x), (-1, 0, 1), dims, padding, stack_dim=batch('_laplace')) result = (left + right - 2 * center) / dx result = math.sum_(result, '_laplace') return result def fourier_laplace(grid: Tensor, dx: Tensor or Shape or float or list or tuple, times: int = 1): frequencies = math.fft(math.to_complex(grid)) k_squared = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector') fft_laplace = -(2 * np.pi) ** 2 * k_squared result = math.real(math.ifft(frequencies * fft_laplace ** times)) return math.cast(result / wrap(dx) ** 2, grid.dtype) def fourier_poisson(grid: Tensor, dx: Tensor or Shape or float or list or tuple, times: int = 1): frequencies = math.fft(math.to_complex(grid)) k_squared = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector') fft_laplace = -(2 * np.pi) ** 2 * k_squared result = math.real(math.ifft(math.divide_no_nan(frequencies, math.to_complex(fft_laplace ** times)))) return math.cast(result * wrap(dx) ** 2, grid.dtype) def downsample2x(grid: Tensor, padding: Extrapolation = extrapolation.BOUNDARY, dims: tuple or None = None) -> Tensor: dims = grid.shape.spatial.only(dims).names odd_dimensions = [dim for dim in dims if grid.shape.get_size(dim) % 2 != 0] grid = math.pad(grid, {dim: (0, 1) for dim in odd_dimensions}, padding) for dim in dims: grid = (grid[{dim: slice(1, None, 2)}] + grid[{dim: slice(0, None, 2)}]) / 2 return grid
MIT License
polyai-ldn/conversational-datasets
baselines/vector_based.py
VectorMappingMethod._build_mapping_graph
python
def _build_mapping_graph(self, contexts_train, contexts_dev,
                         responses_train, responses_dev):
    self._session = tf.Session(graph=tf.Graph())
    with self._session.graph.as_default():

        def read_batch(contexts, responses, batch_size):
            dataset = tf.data.Dataset.from_tensor_slices(
                (contexts, responses))
            dataset = dataset.shuffle(batch_size * 8)
            dataset = dataset.batch(batch_size)
            return dataset.make_initializable_iterator()

        self._train_iterator = read_batch(
            contexts_train, responses_train,
            batch_size=self._TRAIN_BATCH_SIZE)
        self._dev_iterator = read_batch(
            contexts_dev, responses_dev, batch_size=100)
        (contexts_batch_train,
         responses_batch_train) = self._train_iterator.get_next()
        (contexts_batch_dev,
         responses_batch_dev) = self._dev_iterator.get_next()

        self._regularizer = tf.placeholder(dtype=tf.float32, shape=None)
        self._create_train_op(
            self._compute_similarities(
                contexts_batch_train, responses_batch_train, is_train=True)
        )

        dev_batch_size = tf.shape(contexts_batch_dev)[0]
        similarities = self._compute_similarities(
            contexts_batch_dev, responses_batch_dev, is_train=False)
        self._accuracy = tf.metrics.accuracy(
            labels=tf.range(dev_batch_size),
            predictions=tf.argmax(similarities, 1)
        )

        encoding_dim = int(contexts_batch_train.shape[1])
        self._fed_context_encodings = tf.placeholder(
            dtype=tf.float32, shape=[None, encoding_dim]
        )
        self._fed_response_encodings = tf.placeholder(
            dtype=tf.float32, shape=[None, encoding_dim]
        )
        self._similarities = self._compute_similarities(
            self._fed_context_encodings, self._fed_response_encodings
        )

        self._local_init_op = tf.local_variables_initializer()
        self._reset_op = tf.global_variables_initializer()
        self._saver = tf.train.Saver(max_to_keep=1)
Build the graph that applies a learned mapping to the vectors.
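The graph relies on the TensorFlow 1.x initializable-iterator pattern for batching; a reduced sketch of just that pattern (the array sizes and the feature dimension of 512 are made up):

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x, matching the tf.Session usage above

contexts = np.random.rand(1000, 512).astype(np.float32)
responses = np.random.rand(1000, 512).astype(np.float32)

dataset = tf.data.Dataset.from_tensor_slices((contexts, responses))
dataset = dataset.shuffle(256 * 8).batch(256)
iterator = dataset.make_initializable_iterator()
contexts_batch, responses_batch = iterator.get_next()

with tf.Session() as sess:
    for epoch in range(3):
        sess.run(iterator.initializer)  # re-initialise (and reshuffle) every epoch
        while True:
            try:
                c, r = sess.run([contexts_batch, responses_batch])
            except tf.errors.OutOfRangeError:
                break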
https://github.com/polyai-ldn/conversational-datasets/blob/50f626ad0d0e825835bd054f6a58006afa95a8e5/baselines/vector_based.py#L352-L411
import abc import itertools import shutil import tempfile import glog import numpy as np import tensorflow as tf import tensorflow_hub import tensorflow_text import tf_sentencepiece from sklearn.model_selection import train_test_split from tqdm import tqdm import bert.run_classifier import bert.tokenization from baselines import method class Encoder(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod def encode_context(self, contexts): pass def encode_response(self, responses): return self.encode_context(responses) class TfHubEncoder(Encoder): def __init__(self, uri): self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): glog.info("Loading %s model from tensorflow hub", uri) embed_fn = tensorflow_hub.Module(uri) self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string) self._context_embeddings = embed_fn(self._fed_texts) init_ops = ( tf.global_variables_initializer(), tf.tables_initializer()) glog.info("Initializing graph.") self._session.run(init_ops) def encode_context(self, contexts): return self._session.run( self._context_embeddings, {self._fed_texts: contexts}) class USEDualEncoder(Encoder): def __init__(self, uri): self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): glog.info("Loading %s model from tensorflow hub", uri) embed_fn = tensorflow_hub.Module(uri) self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string) self._context_embeddings = embed_fn( dict(input=self._fed_texts), signature="question_encoder", as_dict=True, )['outputs'] empty_strings = tf.fill( tf.shape(self._fed_texts), "" ) self._response_embeddings = embed_fn( dict(input=self._fed_texts, context=empty_strings), signature="response_encoder", as_dict=True, )['outputs'] init_ops = ( tf.global_variables_initializer(), tf.tables_initializer()) glog.info("Initializing graph.") self._session.run(init_ops) def encode_context(self, contexts): return self._session.run( self._context_embeddings, {self._fed_texts: contexts}) def encode_response(self, responses): return self._session.run( self._response_embeddings, {self._fed_texts: responses}) class ConveRTEncoder(Encoder): def __init__(self, uri): self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): glog.info("Loading %s model from tensorflow hub", uri) embed_fn = tensorflow_hub.Module(uri) self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string) self._context_embeddings = embed_fn( self._fed_texts, signature="encode_context") self._response_embeddings = embed_fn( self._fed_texts, signature="encode_response") init_ops = ( tf.global_variables_initializer(), tf.tables_initializer()) glog.info("Initializing graph.") self._session.run(init_ops) def encode_context(self, contexts): return self._session.run( self._context_embeddings, {self._fed_texts: contexts}) def encode_response(self, responses): return self._session.run( self._response_embeddings, {self._fed_texts: responses}) class BERTEncoder(Encoder): def __init__(self, uri): if not tf.test.is_gpu_available(): glog.warning( "No GPU detected, BERT will run a lot slower than with a GPU.") self._session = tf.Session(graph=tf.Graph()) with self._session.graph.as_default(): glog.info("Loading %s model from tensorflow hub", uri) embed_fn = tensorflow_hub.Module(uri, trainable=False) self._tokenizer = self._create_tokenizer_from_hub_module(uri) self._input_ids = tf.placeholder( name="input_ids", shape=[None, None], dtype=tf.int32) self._input_mask = tf.placeholder( name="input_mask", shape=[None, None], 
dtype=tf.int32) self._segment_ids = tf.zeros_like(self._input_ids) bert_inputs = dict( input_ids=self._input_ids, input_mask=self._input_mask, segment_ids=self._segment_ids ) embeddings = embed_fn( inputs=bert_inputs, signature="tokens", as_dict=True)[ "sequence_output" ] mask = tf.expand_dims( tf.cast(self._input_mask, dtype=tf.float32), -1) self._embeddings = tf.reduce_sum(mask * embeddings, axis=1) init_ops = ( tf.global_variables_initializer(), tf.tables_initializer()) glog.info("Initializing graph.") self._session.run(init_ops) def encode_context(self, contexts): return self._session.run(self._embeddings, self._feed_dict(contexts)) @staticmethod def _create_tokenizer_from_hub_module(uri): with tf.Graph().as_default(): bert_module = tensorflow_hub.Module(uri, trainable=False) tokenization_info = bert_module( signature="tokenization_info", as_dict=True) with tf.Session() as sess: vocab_file, do_lower_case = sess.run( [ tokenization_info["vocab_file"], tokenization_info["do_lower_case"] ]) return bert.tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) def _feed_dict(self, texts, max_seq_len=128): all_ids = [] for text in texts: tokens = ["[CLS]"] + self._tokenizer.tokenize(text) tokens = tokens[:(max_seq_len - 1)] tokens.append("[SEP]") ids = self._tokenizer.convert_tokens_to_ids(tokens) all_ids.append(ids) max_seq_len = max(map(len, all_ids)) input_ids = [] input_mask = [] for ids in all_ids: mask = [1] * len(ids) while len(ids) < max_seq_len: ids.append(0) mask.append(0) input_ids.append(ids) input_mask.append(mask) return {self._input_ids: input_ids, self._input_mask: input_mask} class VectorSimilarityMethod(method.BaselineMethod): def __init__(self, encoder): self._encoder = encoder def train(self, contexts, responses): pass def rank_responses(self, contexts, responses): contexts_matrix = self._encoder.encode_context(contexts) responses_matrix = self._encoder.encode_response(responses) responses_matrix /= np.linalg.norm( responses_matrix, axis=1, keepdims=True) similarities = np.matmul(contexts_matrix, responses_matrix.T) return np.argmax(similarities, axis=1) class VectorMappingMethod(method.BaselineMethod): def __init__( self, encoder, learning_rates=(10.0, 3.0, 1.0, 0.3, 0.01), regularizers=(0, 0.1, 0.01, 0.001), ): self._encoder = encoder self._learning_rates = learning_rates self._regularizers = regularizers def train(self, contexts, responses): glog.info( "Training on %i contexts and responses.", len(contexts)) (contexts_train, contexts_dev, responses_train, responses_dev ) = self._create_train_and_dev(contexts, responses) glog.info( "Created a training set of size %i, and a dev set of size %i.", contexts_train.shape[0], contexts_dev.shape[0]) self._build_mapping_graph( contexts_train, contexts_dev, responses_train, responses_dev ) self._grid_search() _ENCODING_BATCH_SIZE = 100 _TRAIN_BATCH_SIZE = 256 _MAX_EPOCHS = 100 def _create_train_and_dev(self, contexts, responses): glog.info("Encoding the train set.") context_encodings = [] response_encodings = [] for i in tqdm(range(0, len(contexts), self._ENCODING_BATCH_SIZE)): contexts_batch = contexts[i:i + self._ENCODING_BATCH_SIZE] responses_batch = responses[i:i + self._ENCODING_BATCH_SIZE] context_encodings.append( self._encoder.encode_context(contexts_batch)) response_encodings.append( self._encoder.encode_response(responses_batch)) context_encodings = np.concatenate( context_encodings).astype(np.float32) response_encodings = np.concatenate( response_encodings).astype(np.float32) return 
train_test_split( context_encodings, response_encodings, test_size=0.2)
Apache License 2.0
pmelchior/scarlet
scarlet/operator.py
find_relevant_dim
python
def find_relevant_dim(Y, Q, Vs):
    max_t = 0
    index = -1
    for i in range(len(Vs)):
        Y_p = proj_dist(Y, Vs[i])
        Q_p = proj_dist(Q, Vs[i])
        if Y_p < 0:
            t = -Y_p / (Q_p - Y_p)
        else:
            t = -2
        if t > max_t:
            max_t = t
            index = i
    return index
Finds a dimension relevant to the problem by 'raycasting' from Y to Q
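A small numeric illustration of the 'raycasting' idea (proj_dist is copied from the context below; the vectors are made up): only constraints that Y violates, i.e. has a negative projection onto, are candidates, and the constraint whose boundary is crossed last on the way from Y towards Q (largest t) is returned.

import numpy as np

def proj_dist(A, B):
    # Signed length of A along direction B (same helper as in scarlet/operator.py).
    return (A * B).sum() / (B ** 2).sum() ** 0.5

Y = np.array([-1.0, 2.0])              # current point; violates the first constraint
Q = np.array([3.0, 1.0])               # feasible reference point
Vs = [np.array([1.0, 0.0]),            # constraint directions (hypothetical)
      np.array([0.0, 1.0])]

max_t, index = 0, -1
for i, v in enumerate(Vs):
    Y_p, Q_p = proj_dist(Y, v), proj_dist(Q, v)
    t = -Y_p / (Q_p - Y_p) if Y_p < 0 else -2
    if t > max_t:
        max_t, index = t, i

print(index, max_t)  # 0 0.25 -> only the first constraint is violated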
https://github.com/pmelchior/scarlet/blob/134fac69465c2eea46b6909c6f401e1b17cdd85b/scarlet/operator.py#L425-L439
from functools import partial import numpy as np from proxmin.operators import prox_unity_plus from . import fft from . import interpolation def sort_by_radius(shape, center=None): if center is None: cx = (shape[1] - 1) >> 1 cy = (shape[0] - 1) >> 1 else: cy, cx = int(center[0]), int(center[1]) x = np.arange(shape[1]) y = np.arange(shape[0]) X, Y = np.meshgrid(x, y) X = X - cx Y = Y - cy distance = np.sqrt(X ** 2 + Y ** 2) didx = np.argsort(distance.flatten()) return didx def _prox_weighted_monotonic(X, step, weights, didx, offsets, min_gradient=0.1): from . import operators_pybind11 operators_pybind11.prox_weighted_monotonic( X.reshape(-1), weights, offsets, didx, min_gradient ) return X def prox_weighted_monotonic(shape, neighbor_weight="flat", min_gradient=0.1, center=None): height, width = shape didx = sort_by_radius(shape, center) coords = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)] offsets = np.array([width * y + x for y, x in coords]) weights = getRadialMonotonicWeights( shape, neighbor_weight=neighbor_weight, center=center ) result = partial( _prox_weighted_monotonic, weights=weights, didx=didx[1:], offsets=offsets, min_gradient=min_gradient, ) return result def get_center(image, center, radius=1): cy, cx = int(center[0]), int(center[1]) y0 = np.max([cy - radius, 0]) x0 = np.max([cx - radius, 0]) ySlice = slice(y0, cy + radius+1) xSlice = slice(x0, cx + radius+1) subset = image[ySlice, xSlice] center = np.unravel_index(np.argmax(subset), subset.shape) return center[0]+y0, center[1]+x0 def prox_monotonic_mask(X, step, center, center_radius=1, variance=0.0, max_iter=3): from scarlet.operators_pybind11 import get_valid_monotonic_pixels, linear_interpolate_invalid_pixels if center_radius > 0: i, j = get_center(X, center, center_radius) else: i,j = int(np.round(center[0])), int(np.round(center[1])) unchecked = np.ones(X.shape, dtype=bool) unchecked[i, j] = False orphans = np.zeros(X.shape, dtype=bool) bounds = np.array([i, i, j, j], dtype=np.int32) get_valid_monotonic_pixels(i, j, X, unchecked, orphans, variance, bounds, 0) model = X.copy() it = 0 while np.sum(orphans & unchecked) > 0 and it < max_iter: it += 1 all_i, all_j = np.where(orphans) linear_interpolate_invalid_pixels(all_i, all_j, unchecked, model, orphans, variance, True, bounds) valid = ~unchecked & ~orphans model = model * valid return valid, model, bounds def prox_cone(X, step, G=None): k, n = X.shape for i in range(k): Y = X[i] Vs = [] for j in range(0, n): add = G[j] Vs.append(add) Q = find_Q(Vs, n) for j in range(n): index = find_relevant_dim(Y, Q, Vs) if index != -1: Y, Q, Vs = use_relevant_dim(Y, Q, Vs, index) else: break X[i] = Y return X def uncentered_operator(X, func, center=None, fill=None, **kwargs): if center is None: py, px = np.unravel_index(np.argmax(X), X.shape) else: py, px = center cy, cx = np.array(X.shape) // 2 if py == cy and px == cx: return func(X, **kwargs) dy = int(2 * (py - cy)) dx = int(2 * (px - cx)) if not X.shape[0] % 2: dy += 1 if not X.shape[1] % 2: dx += 1 if dx < 0: xslice = slice(None, dx) else: xslice = slice(dx, None) if dy < 0: yslice = slice(None, dy) else: yslice = slice(dy, None) if fill is not None: _X = np.ones(X.shape, X.dtype) * fill _X[yslice, xslice] = func(X[yslice, xslice], **kwargs) X[:] = _X else: X[yslice, xslice] = func(X[yslice, xslice], **kwargs) return X def prox_sdss_symmetry(X, step): Xs = np.fliplr(np.flipud(X)) X[:] = np.min([X, Xs], axis=0) return X def prox_soft_symmetry(X, step, strength=1): pads = [[0, 0], [0, 0]] slices = 
[slice(None), slice(None)] if X.shape[0] % 2 == 0: pads[0][1] = 1 slices[0] = slice(0, X.shape[0]) if X.shape[1] % 2 == 0: pads[1][1] = 1 slices[1] = slice(0, X.shape[1]) X = fft.fast_zero_pad(X, pads) Xs = np.fliplr(np.flipud(X)) X = 0.5 * strength * (X + Xs) + (1 - strength) * X return X[tuple(slices)] def prox_kspace_symmetry(X, step, shift=None, padding=10): fft_shape = fft._get_fft_shape(X, X, padding=padding) dy, dx = shift X = fft.Fourier(X) X_fft = X.fft(fft_shape, (0, 1)) zeroMask = X.image <= 0 shifter_y, shifter_x = interpolation.mk_shifter(fft_shape) result_fft = X_fft * np.exp(shifter_y[:, np.newaxis] * (-dy)) result_fft *= np.exp(shifter_x[np.newaxis, :] * (-dx)) result_fft = result_fft.real result_fft = result_fft * np.exp(shifter_y[:, np.newaxis] * dy) result_fft = result_fft * np.exp(shifter_x[np.newaxis, :] * dx) result = fft.Fourier.from_fft(result_fft, fft_shape, X.image.shape, [0, 1]) result.image[zeroMask] = 0 return np.real(result.image) def prox_uncentered_symmetry( X, step, center=None, algorithm="kspace", fill=None, shift=None, strength=0.5 ): if algorithm == "kspace" and (shift is None or np.all(shift == 0)): algorithm = "soft" strength = 1 if algorithm == "kspace": return uncentered_operator( X, prox_kspace_symmetry, center, shift=shift, step=step, fill=fill ) if algorithm == "sdss": return uncentered_operator(X, prox_sdss_symmetry, center, step=step, fill=fill) if algorithm == "soft" or algorithm == "kspace" and shift is None: return uncentered_operator( X, prox_soft_symmetry, center, step=step, strength=strength, fill=fill ) msg = "algorithm must be one of 'soft', 'sdss', 'kspace', recieved '{0}''" raise ValueError(msg.format(algorithm)) def proj(A, B): return A - (A * B).sum() * B / (B ** 2).sum() def proj_dist(A, B): return (A * B).sum() / (B ** 2).sum() ** 0.5 def use_relevant_dim(Y, Q, Vs, index): projector = Vs[index] del Vs[index] Y = proj(Y, projector) Q = proj(Y, projector) for i in range(len(Vs)): Vs[i] = proj(Vs[i], projector) return Y, Q, Vs
MIT License
demille/emailhooks
django_nonrel/django/contrib/admin/options.py
ModelAdmin.get_form
python
def get_form(self, request, obj=None, **kwargs):
    if self.declared_fieldsets:
        fields = flatten_fieldsets(self.declared_fieldsets)
    else:
        fields = None
    if self.exclude is None:
        exclude = []
    else:
        exclude = list(self.exclude)
    exclude.extend(self.get_readonly_fields(request, obj))
    if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
        exclude.extend(self.form._meta.exclude)
    exclude = exclude or None
    defaults = {
        "form": self.form,
        "fields": fields,
        "exclude": exclude,
        "formfield_callback": partial(self.formfield_for_dbfield, request=request),
    }
    defaults.update(kwargs)
    return modelform_factory(self.model, **defaults)
Returns a Form class for use in the admin add view. This is used by add_view and change_view.
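A common way this hook is used from a project's admin.py is to customise the generated form class. A sketch follows; the Article model and the internal_notes field are hypothetical, and the explicit super(...) call style matches the Python 2-era Django shown in this record.

from django.contrib import admin
from myapp.models import Article  # hypothetical app and model

class ArticleAdmin(admin.ModelAdmin):
    def get_form(self, request, obj=None, **kwargs):
        form = super(ArticleAdmin, self).get_form(request, obj, **kwargs)
        # Tweak the generated ModelForm class before the admin uses it.
        if 'internal_notes' in form.base_fields:
            form.base_fields['internal_notes'].help_text = 'Visible to staff only.'
        return form

admin.site.register(Article, ArticleAdmin)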
https://github.com/demille/emailhooks/blob/16dc3b295ac9d35a20e8d0db52760db2b7e8a822/django_nonrel/django/contrib/admin/options.py#L471-L499
import copy from functools import update_wrapper, partial import warnings from django import forms from django.conf import settings from django.forms.formsets import all_valid from django.forms.models import (modelform_factory, modelformset_factory, inlineformset_factory, BaseInlineFormSet) from django.contrib.contenttypes.models import ContentType from django.contrib.admin import widgets, helpers from django.contrib.admin.util import unquote, flatten_fieldsets, get_deleted_objects, model_format_dict from django.contrib.admin.templatetags.admin_static import static from django.contrib import messages from django.views.decorators.csrf import csrf_protect from django.core.exceptions import PermissionDenied, ValidationError from django.core.paginator import Paginator from django.core.urlresolvers import reverse from django.db import models, transaction, router from django.db.models.constants import LOOKUP_SEP from django.db.models.related import RelatedObject from django.db.models.fields import BLANK_CHOICE_DASH, FieldDoesNotExist from django.db.models.sql.constants import QUERY_TERMS from django.http import Http404, HttpResponse, HttpResponseRedirect from django.shortcuts import get_object_or_404 from django.template.response import SimpleTemplateResponse, TemplateResponse from django.utils.decorators import method_decorator from django.utils.datastructures import SortedDict from django.utils.html import escape, escapejs from django.utils.safestring import mark_safe from django.utils import six from django.utils.text import capfirst, get_text_list from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from django.utils.encoding import force_text HORIZONTAL, VERTICAL = 1, 2 get_ul_class = lambda x: 'radiolist%s' % ((x == HORIZONTAL) and ' inline' or '') class IncorrectLookupParameters(Exception): pass FORMFIELD_FOR_DBFIELD_DEFAULTS = { models.DateTimeField: { 'form_class': forms.SplitDateTimeField, 'widget': widgets.AdminSplitDateTime }, models.DateField: {'widget': widgets.AdminDateWidget}, models.TimeField: {'widget': widgets.AdminTimeWidget}, models.TextField: {'widget': widgets.AdminTextareaWidget}, models.URLField: {'widget': widgets.AdminURLFieldWidget}, models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget}, models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget}, models.CharField: {'widget': widgets.AdminTextInputWidget}, models.ImageField: {'widget': widgets.AdminFileWidget}, models.FileField: {'widget': widgets.AdminFileWidget}, } csrf_protect_m = method_decorator(csrf_protect) class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)): raw_id_fields = () fields = None exclude = None fieldsets = None form = forms.ModelForm filter_vertical = () filter_horizontal = () radio_fields = {} prepopulated_fields = {} formfield_overrides = {} readonly_fields = () ordering = None def __init__(self): overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy() overrides.update(self.formfield_overrides) self.formfield_overrides = overrides def formfield_for_dbfield(self, db_field, **kwargs): request = kwargs.pop("request", None) if db_field.choices: return self.formfield_for_choice_field(db_field, request, **kwargs) if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): if db_field.__class__ in self.formfield_overrides: kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs) if isinstance(db_field, models.ForeignKey): formfield = self.formfield_for_foreignkey(db_field, request, **kwargs) elif 
isinstance(db_field, models.ManyToManyField): formfield = self.formfield_for_manytomany(db_field, request, **kwargs) if formfield and db_field.name not in self.raw_id_fields: related_modeladmin = self.admin_site._registry.get( db_field.rel.to) can_add_related = bool(related_modeladmin and related_modeladmin.has_add_permission(request)) formfield.widget = widgets.RelatedFieldWidgetWrapper( formfield.widget, db_field.rel, self.admin_site, can_add_related=can_add_related) return formfield for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs) return db_field.formfield(**kwargs) return db_field.formfield(**kwargs) def formfield_for_choice_field(self, db_field, request=None, **kwargs): if db_field.name in self.radio_fields: if 'widget' not in kwargs: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) if 'choices' not in kwargs: kwargs['choices'] = db_field.get_choices( include_blank=db_field.blank, blank_choice=[('', _('None'))] ) return db_field.formfield(**kwargs) def formfield_for_foreignkey(self, db_field, request=None, **kwargs): db = kwargs.get('using') if db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel, self.admin_site, using=db) elif db_field.name in self.radio_fields: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) kwargs['empty_label'] = db_field.blank and _('None') or None return db_field.formfield(**kwargs) def formfield_for_manytomany(self, db_field, request=None, **kwargs): if not db_field.rel.through._meta.auto_created: return None db = kwargs.get('using') if db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, self.admin_site, using=db) kwargs['help_text'] = '' elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)): kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical)) return db_field.formfield(**kwargs) def _declared_fieldsets(self): if self.fieldsets: return self.fieldsets elif self.fields: return [(None, {'fields': self.fields})] return None declared_fieldsets = property(_declared_fieldsets) def get_ordering(self, request): return self.ordering or () def get_readonly_fields(self, request, obj=None): return self.readonly_fields def get_prepopulated_fields(self, request, obj=None): return self.prepopulated_fields def queryset(self, request): qs = self.model._default_manager.get_query_set() ordering = self.get_ordering(request) if ordering: qs = qs.order_by(*ordering) return qs def lookup_allowed(self, lookup, value): model = self.model for l in model._meta.related_fkey_lookups: for k, v in widgets.url_params_from_lookup_dict(l).items(): if k == lookup and v == value: return True parts = lookup.split(LOOKUP_SEP) if len(parts) > 1 and parts[-1] in QUERY_TERMS: parts.pop() rel_name = None for part in parts[:-1]: try: field, _, _, _ = model._meta.get_field_by_name(part) except FieldDoesNotExist: return True if hasattr(field, 'rel'): model = field.rel.to rel_name = field.rel.get_related_field().name elif isinstance(field, RelatedObject): model = field.model rel_name = model._meta.pk.name else: rel_name = None if rel_name and len(parts) > 1 and parts[-1] == rel_name: parts.pop() if len(parts) == 1: return True clean_lookup = LOOKUP_SEP.join(parts) return clean_lookup in self.list_filter 
or clean_lookup == self.date_hierarchy def to_field_allowed(self, request, to_field): opts = self.model._meta try: field = opts.get_field(to_field) except FieldDoesNotExist: return False if opts.many_to_many and field.primary_key: return True registered_models = set() for model, admin in self.admin_site._registry.items(): registered_models.add(model) for inline in admin.inlines: registered_models.add(inline.model) for related_object in (opts.get_all_related_objects(include_hidden=True) + opts.get_all_related_many_to_many_objects()): related_model = related_object.model if (any(issubclass(model, related_model) for model in registered_models) and related_object.field.rel.get_related_field() == field): return True return False def has_add_permission(self, request): opts = self.opts return request.user.has_perm(opts.app_label + '.' + opts.get_add_permission()) def has_change_permission(self, request, obj=None): opts = self.opts return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission()) def has_delete_permission(self, request, obj=None): opts = self.opts return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission()) class ModelAdmin(BaseModelAdmin): list_display = ('__str__',) list_display_links = () list_filter = () list_select_related = False list_per_page = 100 list_max_show_all = 200 list_editable = () search_fields = () date_hierarchy = None save_as = False save_on_top = False paginator = Paginator inlines = [] add_form_template = None change_form_template = None change_list_template = None delete_confirmation_template = None delete_selected_confirmation_template = None object_history_template = None actions = [] action_form = helpers.ActionForm actions_on_top = True actions_on_bottom = False actions_selection_counter = True def __init__(self, model, admin_site): self.model = model self.opts = model._meta self.admin_site = admin_site super(ModelAdmin, self).__init__() def get_inline_instances(self, request, obj=None): inline_instances = [] for inline_class in self.inlines: inline = inline_class(self.model, self.admin_site) if request: if not (inline.has_add_permission(request) or inline.has_change_permission(request, obj) or inline.has_delete_permission(request, obj)): continue if not inline.has_add_permission(request): inline.max_num = 0 inline_instances.append(inline) return inline_instances def get_urls(self): from django.conf.urls import patterns, url def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) return update_wrapper(wrapper, view) info = self.model._meta.app_label, self.model._meta.module_name urlpatterns = patterns('', url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info), url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info), url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info), url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info), url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info), ) return urlpatterns def urls(self): return self.get_urls() urls = property(urls) @property def media(self): extra = '' if settings.DEBUG else '.min' js = [ 'core.js', 'admin/RelatedObjectLookups.js', 'jquery%s.js' % extra, 'jquery.init.js' ] if self.actions is not None: js.append('actions%s.js' % extra) if self.prepopulated_fields: js.extend(['urlify.js', 'prepopulate%s.js' % extra]) if self.opts.get_ordered_objects(): js.extend(['getElementsBySelector.js', 'dom-drag.js' , 'admin/ordering.js']) return 
forms.Media(js=[static('admin/js/%s' % url) for url in js]) def get_model_perms(self, request): return { 'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request), } def get_fieldsets(self, request, obj=None): if self.declared_fieldsets: return self.declared_fieldsets form = self.get_form(request, obj) fields = list(form.base_fields) + list(self.get_readonly_fields(request, obj)) return [(None, {'fields': fields})]
MIT License
jest-community/jest-pytest
src/__tests__/integration/home-assistant/tests/util/test_async.py
RunThreadsafeTests.add_coroutine
python
def add_coroutine(self, a, b, fail, invalid, cancel):
    yield from asyncio.sleep(0.05, loop=self.loop)
    if cancel:
        asyncio.tasks.Task.current_task(self.loop).cancel()
        yield
    return self.add_callback(a, b, fail, invalid)
Wait 0.05 seconds and return a + b.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/tests/util/test_async.py#L143-L149
import asyncio from unittest.mock import MagicMock, patch from unittest import TestCase import pytest from homeassistant.util import async_ as hasync @patch('asyncio.coroutines.iscoroutine') @patch('concurrent.futures.Future') @patch('threading.get_ident') def test_run_coroutine_threadsafe_from_inside_event_loop( mock_ident, _, mock_iscoroutine): coro = MagicMock() loop = MagicMock() loop._thread_ident = None mock_ident.return_value = 5 mock_iscoroutine.return_value = True hasync.run_coroutine_threadsafe(coro, loop) assert len(loop.call_soon_threadsafe.mock_calls) == 1 loop._thread_ident = 5 mock_ident.return_value = 5 mock_iscoroutine.return_value = True with pytest.raises(RuntimeError): hasync.run_coroutine_threadsafe(coro, loop) assert len(loop.call_soon_threadsafe.mock_calls) == 1 loop._thread_ident = 1 mock_ident.return_value = 5 mock_iscoroutine.return_value = False with pytest.raises(TypeError): hasync.run_coroutine_threadsafe(coro, loop) assert len(loop.call_soon_threadsafe.mock_calls) == 1 loop._thread_ident = 1 mock_ident.return_value = 5 mock_iscoroutine.return_value = True hasync.run_coroutine_threadsafe(coro, loop) assert len(loop.call_soon_threadsafe.mock_calls) == 2 @patch('asyncio.coroutines.iscoroutine') @patch('concurrent.futures.Future') @patch('threading.get_ident') def test_fire_coroutine_threadsafe_from_inside_event_loop( mock_ident, _, mock_iscoroutine): coro = MagicMock() loop = MagicMock() loop._thread_ident = None mock_ident.return_value = 5 mock_iscoroutine.return_value = True hasync.fire_coroutine_threadsafe(coro, loop) assert len(loop.call_soon_threadsafe.mock_calls) == 1 loop._thread_ident = 5 mock_ident.return_value = 5 mock_iscoroutine.return_value = True with pytest.raises(RuntimeError): hasync.fire_coroutine_threadsafe(coro, loop) assert len(loop.call_soon_threadsafe.mock_calls) == 1 loop._thread_ident = 1 mock_ident.return_value = 5 mock_iscoroutine.return_value = False with pytest.raises(TypeError): hasync.fire_coroutine_threadsafe(coro, loop) assert len(loop.call_soon_threadsafe.mock_calls) == 1 loop._thread_ident = 1 mock_ident.return_value = 5 mock_iscoroutine.return_value = True hasync.fire_coroutine_threadsafe(coro, loop) assert len(loop.call_soon_threadsafe.mock_calls) == 2 @patch('concurrent.futures.Future') @patch('threading.get_ident') def test_run_callback_threadsafe_from_inside_event_loop(mock_ident, _): callback = MagicMock() loop = MagicMock() loop._thread_ident = None mock_ident.return_value = 5 hasync.run_callback_threadsafe(loop, callback) assert len(loop.call_soon_threadsafe.mock_calls) == 1 loop._thread_ident = 5 mock_ident.return_value = 5 with pytest.raises(RuntimeError): hasync.run_callback_threadsafe(loop, callback) assert len(loop.call_soon_threadsafe.mock_calls) == 1 loop._thread_ident = 1 mock_ident.return_value = 5 hasync.run_callback_threadsafe(loop, callback) assert len(loop.call_soon_threadsafe.mock_calls) == 2 class RunThreadsafeTests(TestCase): def setUp(self): self.loop = asyncio.new_event_loop() def tearDown(self): executor = self.loop._default_executor if executor is not None: executor.shutdown(wait=True) self.loop.close() @staticmethod def run_briefly(loop): @asyncio.coroutine def once(): pass gen = once() t = loop.create_task(gen) try: loop.run_until_complete(t) finally: gen.close() def add_callback(self, a, b, fail, invalid): if fail: raise RuntimeError("Fail!") if invalid: raise ValueError("Invalid!") return a + b @asyncio.coroutine
MIT License
windelbouwman/ppci
ppci/graph/cfg.py
ControlFlowGraph._calculate_post_dominator_info
python
def _calculate_post_dominator_info(self):
    self.validate()
    self._pdom = calculate_post_dominators(self.nodes, self.exit_node)
    self._spdom = {}
    for node in self.nodes:
        self._spdom[node] = self._pdom[node] - {node}
    self._ipdom = calculate_immediate_post_dominators(
        self.nodes, self._pdom, self._spdom
    )
Calculate the post-dominator sets iteratively. Post-domination is the same as domination, but computed starting from the exit node instead of the entry node.
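A self-contained sketch of the iterative (fixed-point) post-dominator computation on a toy CFG; the node names and edges are made up, and this mirrors the general scheme rather than ppci's calculate_post_dominators implementation.

successors = {
    'entry': ['a', 'b'],
    'a': ['c'],
    'b': ['c'],
    'c': ['exit'],
    'exit': [],
}
nodes = list(successors)

# pdom(exit) = {exit}; pdom(n) = {n} | intersection of pdom over successors of n.
pdom = {n: set(nodes) for n in nodes}
pdom['exit'] = {'exit'}

changed = True
while changed:
    changed = False
    for n in nodes:
        if n == 'exit':
            continue
        new = {n} | set.intersection(*(pdom[s] for s in successors[n]))
        if new != pdom[n]:
            pdom[n], changed = new, True

print(pdom['a'])      # {'a', 'c', 'exit'}
print(pdom['entry'])  # {'entry', 'c', 'exit'}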
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/graph/cfg.py#L264-L281
import logging from .digraph import DiGraph, DiNode from . import lt from .algorithm.fixed_point_dominator import calculate_post_dominators from .algorithm.fixed_point_dominator import ( calculate_immediate_post_dominators, ) from collections import namedtuple class DomTreeNode: __slots__ = ("node", "children", "interval") def __init__(self, node, children, interval): self.node = node self.children = children self.interval = interval def below_or_same(self, other): return ( other.interval[0] <= self.interval[0] and self.interval[1] <= other.interval[1] ) def below(self, other): return ( other.interval[0] < self.interval[0] and self.interval[1] < other.interval[1] ) Loop = namedtuple("Loop", ["header", "rest"]) logger = logging.getLogger("cfg") def ir_function_to_graph(ir_function): block_map = {} cfg = ControlFlowGraph() cfg.exit_node = ControlFlowNode(cfg, name=None) block_list = [] worklist = [ir_function.entry] while worklist: block = worklist.pop(0) block_list.append(block) node = ControlFlowNode(cfg, name=block.name) assert block not in block_map block_map[block] = node for successor_block in block.successors: if successor_block not in block_map: if successor_block not in worklist: worklist.append(successor_block) cfg.entry_node = block_map[ir_function.entry] for block in block_list: node = block_map[block] if len(block.successors) == 0: node.add_edge(cfg.exit_node) else: for successor_block in block.successors: successor_node = block_map[successor_block] node.add_edge(successor_node) if len(block.successors) == 2: node.yes = block_map[block.last_instruction.lab_yes] node.no = block_map[block.last_instruction.lab_no] logger.debug( "created cfg for %s with %s nodes", ir_function.name, len(cfg) ) return cfg, block_map class ControlFlowGraph(DiGraph): def __init__(self): super().__init__() self.entry_node = None self.exit_node = None self._idom = None self._pdom = None self._spdom = None self._ipdom = None self._reach = None self.root_tree = None def validate(self): assert self.entry_node assert self.exit_node def dominates(self, one, other): if self._idom is None: self._calculate_dominator_info() return self.tree_map[other].below_or_same(self.tree_map[one]) def strictly_dominates(self, one, other): if self._idom is None: self._calculate_dominator_info() return self.tree_map[other].below(self.tree_map[one]) def post_dominates(self, one, other): if self._pdom is None: self._calculate_post_dominator_info() return one in self._pdom[other] def get_immediate_dominator(self, node): if self._idom is None: self._calculate_dominator_info() return self._idom.get(node, None) def get_immediate_post_dominator(self, node): if self._ipdom is None: self._calculate_post_dominator_info() return self._ipdom[node] def can_reach(self, one, other): if self._reach is None: self.calculate_reach() return other in self._reach[one] def _calculate_dominator_info(self): self.validate() self._idom = lt.calculate_idom(self, self.entry_node) self._calculate_dominator_tree() def _legacy_dom_sets(self): self._dom = {} for parent, t in pre_order(self.root_tree): if parent: self._dom[t.node] = {t.node} | self._dom[parent.node] else: self._dom[t.node] = {t.node} logger.debug("Ugh %s, %s", t.node, len(self._dom[t.node])) logger.debug("calculate sdom") self._sdom = {} for node in self.nodes: if node not in self._dom: self._dom[node] = {node} self._sdom[node] = set() else: self._sdom[node] = self._dom[node] - {node} logger.debug("calculate sdom --> DONE") def _calculate_dominator_tree(self): self.tree_map = {} for node in 
self.nodes: self.tree_map[node] = DomTreeNode(node, list(), None) for node in self.nodes: idom_node = self.get_immediate_dominator(node) if idom_node: parent = self.tree_map[idom_node] node = self.tree_map[node] parent.children.append(node) self.root_tree = self.tree_map[self.entry_node] self._number_dominator_tree() def _number_dominator_tree(self): t = 0 worklist = [self.root_tree] discovered = {} while worklist: node = worklist[-1] if node.node in discovered: node.interval = (discovered[node.node], t) worklist.pop() else: discovered[node.node] = t for child in node.children: worklist.append(child) t += 1
BSD 2-Clause Simplified License
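The context above checks dominance through nesting of (discovery, finish) intervals on the dominator tree. Below is a minimal standalone sketch of that trick, not ppci's implementation; the tree shape and node names are made up for illustration.

# Toy dominance-by-interval-nesting check, assuming a dominator tree given as
# {node: [children]} with a single root. Not ppci's classes.
def number_tree(tree, root):
    intervals = {}
    discovered = {}
    t = 0
    stack = [(root, False)]
    while stack:
        node, done = stack.pop()
        if done:
            intervals[node] = (discovered[node], t)
        else:
            discovered[node] = t
            stack.append((node, True))
            for child in tree.get(node, []):
                stack.append((child, False))
        t += 1
    return intervals

def dominates(intervals, a, b):
    # a dominates b iff b's interval is nested inside a's interval
    return intervals[a][0] <= intervals[b][0] and intervals[b][1] <= intervals[a][1]

tree = {"entry": ["a", "b"], "a": ["c"], "b": [], "c": []}
iv = number_tree(tree, "entry")
assert dominates(iv, "entry", "c")
assert not dominates(iv, "a", "b")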
mraardvark/pyupdi
updi/physical.py
UpdiPhysical.receive
python
def receive(self, size):
    response = bytearray()
    timeout = 1

    while size and timeout:
        character = self.ser.read()
        if character:
            response.append(ord(character))
            size -= 1
        else:
            timeout -= 1

    self._loginfo("receive", response)
    return response
Receives a frame of a known number of chars from UPDI
https://github.com/mraardvark/pyupdi/blob/46cbed546a87947d7bf42f4b182db573175b031e/updi/physical.py#L83-L104
import logging import time import serial import updi.constants as constants class UpdiPhysical(object): def __init__(self, port, baud=115200): self.logger = logging.getLogger("phy") self.port = port self.baud = baud self.ser = None self.initialise_serial(self.port, self.baud) self.send([constants.UPDI_BREAK]) def initialise_serial(self, port, baud): self.logger.info("Opening {} at {} baud".format(port, baud)) self.ser = serial.Serial(port, baud, parity=serial.PARITY_EVEN, timeout=1, stopbits=serial.STOPBITS_TWO) def _loginfo(self, msg, data): if data and isinstance(data[0], str): i_data = [ord(x) for x in data] else: i_data = data data_str = "[" + ", ".join([hex(x) for x in i_data]) + "]" self.logger.info(msg + ' : ' + data_str) def send_double_break(self): self.logger.info("Sending double break") self.ser.close() temporary_serial = serial.Serial(self.port, 300, stopbits=serial.STOPBITS_ONE, timeout=1) temporary_serial.write([constants.UPDI_BREAK, constants.UPDI_BREAK]) temporary_serial.read(2) temporary_serial.close() self.initialise_serial(self.port, self.baud) def send(self, command): self.logger.info("send %d bytes", len(command)) self._loginfo("data: ", command) self.ser.write(command) echo = self.ser.read(len(command))
MIT License
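A hedged usage sketch for UpdiPhysical.receive above; the serial port name is a placeholder and real UPDI-capable hardware is assumed to be attached.

from updi.physical import UpdiPhysical

phy = UpdiPhysical("/dev/ttyUSB0", baud=115200)   # placeholder port name
data = phy.receive(2)                             # read up to 2 bytes, allowing one read timeout
print([hex(b) for b in data])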
sondren1288/simple-maths
Python/norms.py
l1_norm
python
def l1_norm(lst):
    return sum([abs(x) for x in lst])
Calculates the l1 norm of a list of numbers
https://github.com/sondren1288/simple-maths/blob/2f4efdb909195341fb6a1dc233d4a12137542b65/Python/norms.py#L1-L5
MIT License
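A quick check of l1_norm above, which sums absolute values.

def l1_norm(lst):
    return sum([abs(x) for x in lst])

assert l1_norm([3, -4, 0.5]) == 7.5   # |3| + |-4| + |0.5|
assert l1_norm([]) == 0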
opensourcesec/cirtkit
integrations/carbonblack/cbcmd.py
CblrCli._file_path_fixup
python
def _file_path_fixup(self, path):
    if (self._is_path_absolute(path)):
        return path
    elif (self._is_path_drive_relative(path)):
        return self.cwd[:2] + path
    else:
        return ntpath.join(self.cwd + '\\', path)
We have a pseudo-cwd that we use to base off all commands. This means we need to figure out whether a given path is absolute, drive-relative, or file-relative and resolve it against the pseudo-cwd. This function takes a file path argument and performs those fixups.
https://github.com/opensourcesec/cirtkit/blob/58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37/integrations/carbonblack/cbcmd.py#L322-L340
from lib.common.out import * from lib.core.storage import store_sample, get_sample_path from lib.core.session import __sessions__ from lib.core.investigation import __project__ from lib.core.database import Database import os import cmd import requests import threading import simplejson import ntpath import time import subprocess import traceback from re import match from optparse import OptionParser class HttpException(Exception): pass class QuitException(Exception): pass class CmdSensorError(Exception): pass class CmdSensorWindowsError(Exception): def __init__(self, hr): self.hr = hr def __str__(self): return "HRESULT: 0x%x\n" % self.hr class CliArgsException(Exception): pass class CliHelpException(Exception): pass class CliAttachError(Exception): pass class CliCommandCanceled(Exception): pass def split_cli(line): parts = line.split(' ') final = [] inQuotes = False while len(parts) > 0: tok = parts.pop(0) if (tok[:1] == '"'): tok = tok[1:] next = parts.pop(0) while(next[-1:] != '"' and len(parts) > 0): tok += ' ' + next next = parts.pop(0) if (next[-1:] == '"'): tok += ' ' + next[:-1] final.append(tok) return final API_CMD_TO_CLI_CMD = { 'create process' : 'exec', 'create directory' : 'mkdir', 'delete file': 'del', 'put file': 'put', 'directory list' : 'dir', 'get file' : 'get', 'kill' : 'kill', 'process list' : 'ps', 'reg create key' : 'reg', 'reg enum key' : 'reg', 'reg query key' : 'reg', 'reg query value' : 'reg', 'reg delete value' : 'reg', 'reg delete key' : 'reg', 'reg set value' : 'reg', 'memdump' : 'memdump'} class CliArgs (OptionParser): def __init__(self, usage=''): OptionParser.__init__(self, add_help_option=False, usage=usage) self.add_option('-h', '--help', action='store_true', help='Display this help message.') def parse_line(self, line): args = split_cli(line) return self.parse_args(args) def parse_args(self, args, values=None): (opts, args) = OptionParser.parse_args(self, args=args, values=values) if (opts.help): self.print_help() raise CliHelpException() return (opts, args) def error(self, msg): raise CliArgsException(msg) class CblrCli(cmd.Cmd): def __init__(self, url, token, httpLog=None, verbose=False): cmd.Cmd.__init__(self) self.token = token self.url = url self.verbose = verbose if httpLog is None: self.logfile = open('integrations/carbonblack/http_cblr.log', "a+") else: self.logfile = None self.session = None self.cwd = None self.keepaliveThread = None self.keepaliveEvent = None self.keepaliveSec = 0 self.stale_sessions = [] sess = self._session_list(hideStale=False) for s in sess: if s['status'] == 'close' or s['status'] == 'timeout': self.stale_sessions.append(s['id']) def emptyline(self): pass def precmd(self, line): print "" return line def cmdloop(self, intro=None): while True: try: cmd.Cmd.cmdloop(self, intro) except CliHelpException: pass except CliCommandCanceled: pass except CliAttachError as e: print "You must attach to a session" continue except CliArgsException as e: print "Error parsing arguments!\n %s" % e continue except HttpException as e: print "Got an HTTP exception: %s" % e continue except CmdSensorWindowsError as e: print "Command Failed: %s" % e continue except Exception as e: print "Error: %s" % e import traceback traceback.print_exc() return def _keepaliveThread(self): self.keepaliveSec = 0 while(not self.keepaliveEvent.is_set()): self.keepaliveEvent.wait(1) self.keepaliveSec += 1 if (self.keepaliveSec >= 60): self.keepaliveSec = 0 try: url = '%s/api/v1/cblr/session/%d/keepalive' % (self.url, self.session) self._doGet(url) except 
HttpException as e: pass def _needs_attached(self): if (self.session is None): raise CliAttachError() def _loghttp(self, msg): if self.logfile: self.logfile.write(msg + '\n') self.logfile.flush() if self.verbose: print msg def _quit(self): raise KeyboardInterrupt def _doGet(self, url, params=None, retJSON=True): self._loghttp("-------------------------") self._loghttp("GET (url: %s)\n" % url) headers = {'X-Auth-Token': self.token} result = requests.get(url, headers=headers, params=params, verify=False, timeout=120) if result.status_code != 200: raise HttpException("Error processing HTTP get (%s) - %s" % (url, (result.content))) if (retJSON): ret = simplejson.loads(result.content) else: ret = result.content self._loghttp("%r" % ret) self._loghttp("^^^^^^^^^^^^^^^^^^^^^^^^^") return ret def _doPut(self, url, data_dict): self._loghttp("-------------------------") self._loghttp("PUT (url: %s) " % url) self._loghttp("Data: %r\n" % data_dict) headers = {'X-Auth-Token': self.token} result = requests.put(url, data=simplejson.dumps(data_dict), headers=headers, verify=False, timeout=120) if result.status_code != 200: raise HttpException("Error processing HTTP post (%s) - %s" % (url, (result.content))) ret = simplejson.loads(result.content) self._loghttp("%r" % ret) self._loghttp("^^^^^^^^^^^^^^^^^^^^^^^^^") return ret def _doPost(self, url, json_data=None, files=None): data = None headers = {'X-Auth-Token': self.token} self._loghttp("-------------------------") self._loghttp("POST (url: %s) " % url) if (json_data): self._loghttp("Data: %r\n" % json_data) data = simplejson.dumps(json_data) if (files): self._loghttp("Files: %r" % files) result = requests.post(url, data=data, headers=headers, files=files, verify=False, timeout=120) if result.status_code != 200: raise HttpException("Error processing HTTP post (%s) - %s" % (url, (result.content))) ret = simplejson.loads(result.content) self._loghttp("%r" % ret) self._loghttp("^^^^^^^^^^^^^^^^^^^^^^^^^") return ret def _is_path_absolute(self, path): if path.startswith('\\\\'): return True if (path[0].isalpha() and path[1:3] == ':\\'): return True return False def _is_path_drive_relative(self, path): if path == '\\': return True if path[0] == '\\' and path[1] != '\\': return True return False
MIT License
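The three cases _file_path_fixup handles can be shown with a standalone sketch; the pseudo-cwd value 'C:\Users\admin' is made up.

import ntpath

cwd = 'C:\\Users\\admin'   # hypothetical pseudo-cwd

def fixup(path):
    if path.startswith('\\\\') or (path[0].isalpha() and path[1:3] == ':\\'):
        return path                              # already absolute (UNC or drive-qualified)
    if path.startswith('\\'):
        return cwd[:2] + path                    # drive-relative: reuse the cwd's drive letter
    return ntpath.join(cwd + '\\', path)         # file-relative: resolve against the pseudo-cwd

assert fixup('D:\\tools\\run.exe') == 'D:\\tools\\run.exe'
assert fixup('\\Windows\\system32') == 'C:\\Windows\\system32'
assert fixup('notes.txt') == 'C:\\Users\\admin\\notes.txt'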
awemulya/kobo-predict
onadata/libs/serializers/password_reset_serializer.py
PasswordReset.save
python
def save(self,
         subject_template_name='registration/password_reset_subject.txt',
         email_template_name='api_password_reset_email.html',
         token_generator=default_token_generator,
         from_email=None):
    email = self.email
    reset_url = self.reset_url
    active_users = User.objects.filter(email__iexact=email, is_active=True)
    for user in active_users:
        if not user.has_usable_password():
            continue
        subject, email = get_password_reset_email(
            user, reset_url, subject_template_name, email_template_name)
        send_mail(subject, email, from_email, [user.email])
Generates a one-use-only link for resetting the password and sends it to the user.
https://github.com/awemulya/kobo-predict/blob/f302d084e30fb637d43ec638c701e01a3dddc721/onadata/libs/serializers/password_reset_serializer.py#L71-L91
from django.contrib.auth.models import User from django.contrib.auth.tokens import default_token_generator from django.core.mail import send_mail from django.core.validators import ValidationError from django.template import loader from django.utils.encoding import force_bytes from django.utils.http import urlsafe_base64_decode from django.utils.http import urlsafe_base64_encode from django.utils.translation import ugettext_lazy as _ from rest_framework import serializers from urlparse import urlparse def get_password_reset_email(user, reset_url, subject_template_name='registration/password_reset_subject.txt', email_template_name='api_password_reset_email.html', token_generator=default_token_generator): result = urlparse(reset_url) site_name = domain = result.hostname c = { 'email': user.email, 'domain': domain, 'path': result.path, 'site_name': site_name, 'uid': urlsafe_base64_encode(force_bytes(user.pk)), 'username': user.username, 'encoded_username': urlsafe_base64_encode(user.username), 'token': token_generator.make_token(user), 'protocol': result.scheme if result.scheme != '' else 'http', } subject = loader.render_to_string(subject_template_name, c) subject = ''.join(subject.splitlines()) email = loader.render_to_string(email_template_name, c) return subject, email def get_user_from_uid(uid): if uid is None: raise ValidationError(_("uid is required!")) try: uid = urlsafe_base64_decode(uid) user = User.objects.get(pk=uid) except (TypeError, ValueError, OverflowError, User.DoesNotExist): raise ValidationError(_(u"Invalid uid %s") % uid) return user class PasswordResetChange(object): def __init__(self, uid, new_password, token): self.uid = uid self.new_password = new_password self.token = token def save(self): user = get_user_from_uid(self.uid) if user: user.set_password(self.new_password) user.save() class PasswordReset(object): def __init__(self, email, reset_url): self.email = email self.reset_url = reset_url
BSD 2-Clause Simplified License
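A hedged usage sketch of PasswordReset.save above; it assumes a configured Django project, and the email address and reset URL are placeholders.

from onadata.libs.serializers.password_reset_serializer import PasswordReset

reset = PasswordReset(email="user@example.com",
                      reset_url="https://example.com/reset-password")
reset.save()   # mails a one-use reset link to every active user with that email address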
awslabs/dgl-ke
python/dglke/dataloader/sampler.py
WikiEvalSampler.reset
python
def reset(self):
    self.cnt = 0
    return self
Reset the sampler
https://github.com/awslabs/dgl-ke/blob/30558e069c42038cded08bddd26ac75f153aae75/python/dglke/dataloader/sampler.py#L644-L648
import math import numpy as np import scipy as sp import dgl.backend as F import dgl import os import sys import pickle import time from dgl.base import NID, EID def SoftRelationPartition(edges, n, has_importance=False, threshold=0.05): if has_importance: heads, rels, tails, e_impts = edges else: heads, rels, tails = edges print('relation partition {} edges into {} parts'.format(len(heads), n)) uniq, cnts = np.unique(rels, return_counts=True) idx = np.flip(np.argsort(cnts)) cnts = cnts[idx] uniq = uniq[idx] assert cnts[0] > cnts[-1] edge_cnts = np.zeros(shape=(n,), dtype=np.int64) rel_cnts = np.zeros(shape=(n,), dtype=np.int64) rel_dict = {} rel_parts = [] cross_rel_part = [] for _ in range(n): rel_parts.append([]) large_threshold = int(len(rels) * threshold) capacity_per_partition = int(len(rels) / n) large_threshold = capacity_per_partition if capacity_per_partition < large_threshold else large_threshold num_cross_part = 0 for i in range(len(cnts)): cnt = cnts[i] r = uniq[i] r_parts = [] if cnt > large_threshold: avg_part_cnt = (cnt // n) + 1 num_cross_part += 1 for j in range(n): part_cnt = avg_part_cnt if cnt > avg_part_cnt else cnt r_parts.append([j, part_cnt]) rel_parts[j].append(r) edge_cnts[j] += part_cnt rel_cnts[j] += 1 cnt -= part_cnt cross_rel_part.append(r) else: idx = np.argmin(edge_cnts) r_parts.append([idx, cnt]) rel_parts[idx].append(r) edge_cnts[idx] += cnt rel_cnts[idx] += 1 rel_dict[r] = r_parts for i, edge_cnt in enumerate(edge_cnts): print('part {} has {} edges and {} relations'.format(i, edge_cnt, rel_cnts[i])) print('{}/{} duplicated relation across partitions'.format(num_cross_part, len(cnts))) parts = [] for i in range(n): parts.append([]) rel_parts[i] = np.array(rel_parts[i]) for i, r in enumerate(rels): r_part = rel_dict[r][0] part_idx = r_part[0] cnt = r_part[1] parts[part_idx].append(i) cnt -= 1 if cnt == 0: rel_dict[r].pop(0) else: rel_dict[r][0][1] = cnt for i, part in enumerate(parts): parts[i] = np.array(part, dtype=np.int64) shuffle_idx = np.concatenate(parts) heads[:] = heads[shuffle_idx] rels[:] = rels[shuffle_idx] tails[:] = tails[shuffle_idx] if has_importance: e_impts[:] = e_impts[shuffle_idx] off = 0 for i, part in enumerate(parts): parts[i] = np.arange(off, off + len(part)) off += len(part) cross_rel_part = np.array(cross_rel_part) return parts, rel_parts, num_cross_part > 0, cross_rel_part def BalancedRelationPartition(edges, n, has_importance=False): if has_importance: heads, rels, tails, e_impts = edges else: heads, rels, tails = edges print('relation partition {} edges into {} parts'.format(len(heads), n)) uniq, cnts = np.unique(rels, return_counts=True) idx = np.flip(np.argsort(cnts)) cnts = cnts[idx] uniq = uniq[idx] assert cnts[0] > cnts[-1] edge_cnts = np.zeros(shape=(n,), dtype=np.int64) rel_cnts = np.zeros(shape=(n,), dtype=np.int64) rel_dict = {} rel_parts = [] for _ in range(n): rel_parts.append([]) max_edges = (len(rels) // n) + 1 num_cross_part = 0 for i in range(len(cnts)): cnt = cnts[i] r = uniq[i] r_parts = [] while cnt > 0: idx = np.argmin(edge_cnts) if edge_cnts[idx] + cnt <= max_edges: r_parts.append([idx, cnt]) rel_parts[idx].append(r) edge_cnts[idx] += cnt rel_cnts[idx] += 1 cnt = 0 else: cur_cnt = max_edges - edge_cnts[idx] r_parts.append([idx, cur_cnt]) rel_parts[idx].append(r) edge_cnts[idx] += cur_cnt rel_cnts[idx] += 1 num_cross_part += 1 cnt -= cur_cnt rel_dict[r] = r_parts for i, edge_cnt in enumerate(edge_cnts): print('part {} has {} edges and {} relations'.format(i, edge_cnt, rel_cnts[i])) print('{}/{} duplicated 
relation across partitions'.format(num_cross_part, len(cnts))) parts = [] for i in range(n): parts.append([]) rel_parts[i] = np.array(rel_parts[i]) for i, r in enumerate(rels): r_part = rel_dict[r][0] part_idx = r_part[0] cnt = r_part[1] parts[part_idx].append(i) cnt -= 1 if cnt == 0: rel_dict[r].pop(0) else: rel_dict[r][0][1] = cnt for i, part in enumerate(parts): parts[i] = np.array(part, dtype=np.int64) shuffle_idx = np.concatenate(parts) heads[:] = heads[shuffle_idx] rels[:] = rels[shuffle_idx] tails[:] = tails[shuffle_idx] if has_importance: e_impts[:] = e_impts[shuffle_idx] off = 0 for i, part in enumerate(parts): parts[i] = np.arange(off, off + len(part)) off += len(part) return parts, rel_parts, num_cross_part > 0 def RandomPartition(edges, n, has_importance=False): if has_importance: heads, rels, tails, e_impts = edges else: heads, rels, tails = edges print('random partition {} edges into {} parts'.format(len(heads), n)) idx = np.random.permutation(len(heads)) heads[:] = heads[idx] rels[:] = rels[idx] tails[:] = tails[idx] if has_importance: e_impts[:] = e_impts[idx] part_size = int(math.ceil(len(idx) / n)) parts = [] for i in range(n): start = part_size * i end = min(part_size * (i + 1), len(idx)) parts.append(idx[start:end]) print('part {} has {} edges'.format(i, len(parts[-1]))) return parts def ConstructGraph(dataset, args): src = [dataset.train[0]] etype_id = [dataset.train[1]] dst = [dataset.train[2]] num_train = len(dataset.train[0]) if args.dataset == "wikikg90M": valid_dict = dataset.valid num_valid = len(valid_dict['h,r->t']['hr']) elif hasattr(dataset, 'valid') and dataset.valid is not None: src.append(dataset.valid[0]) etype_id.append(dataset.valid[1]) dst.append(dataset.valid[2]) num_valid = len(dataset.valid[0]) else: num_valid = 0 if args.dataset == "wikikg90M": test_dict = dataset.test num_test = len(test_dict['h,r->t']['hr']) elif hasattr(dataset, 'test') and dataset.test is not None: src.append(dataset.test[0]) etype_id.append(dataset.test[1]) dst.append(dataset.test[2]) num_test = len(dataset.test[0]) else: num_test = 0 if args.dataset == "wikikg90M": print('|valid|:', num_valid) print('|test|:', num_test) return src = np.concatenate(src) etype_id = np.concatenate(etype_id) dst = np.concatenate(dst) n_entities = dataset.n_entities coo = sp.sparse.coo_matrix((np.ones(len(src)), (src, dst)), shape=[n_entities, n_entities]) g = dgl.DGLGraph(coo, readonly=True, multigraph=True, sort_csr=True) g.edata['tid'] = F.tensor(etype_id, F.int64) if args.has_edge_importance: e_impts = F.tensor(dataset.train[3], F.float32) e_impts_vt = F.zeros((num_valid + num_test,), dtype=F.float32, ctx=F.cpu()) g.edata['impts'] = F.cat([e_impts, e_impts_vt], dim=0) return g class TrainDataset(object): def __init__(self, g, dataset, args, ranks=64, has_importance=False): triples = dataset.train num_train = len(triples[0]) print('|Train|:', num_train) if ranks > 1 and args.rel_part: self.edge_parts, self.rel_parts, self.cross_part, self.cross_rels = SoftRelationPartition(triples, ranks, has_importance=has_importance) elif ranks > 1: self.edge_parts = RandomPartition(triples, ranks, has_importance=has_importance) self.cross_part = True else: self.edge_parts = [np.arange(num_train)] self.rel_parts = [np.arange(dataset.n_relations)] self.cross_part = False self.g = g def create_sampler(self, batch_size, neg_sample_size=2, neg_chunk_size=None, mode='head', num_workers=32, shuffle=True, exclude_positive=False, rank=0): EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler') assert batch_size % 
neg_sample_size == 0, 'batch_size should be divisible by B' return EdgeSampler(self.g, seed_edges=F.tensor(self.edge_parts[rank]), batch_size=batch_size, neg_sample_size=int(neg_sample_size/neg_chunk_size), chunk_size=neg_chunk_size, negative_mode=mode, num_workers=num_workers, shuffle=shuffle, exclude_positive=exclude_positive, return_false_neg=False) class ChunkNegEdgeSubgraph(dgl.DGLGraph): def __init__(self, subg, num_chunks, chunk_size, neg_sample_size, neg_head): super(ChunkNegEdgeSubgraph, self).__init__(graph_data=subg.sgi.graph, readonly=True, parent=subg._parent) self.ndata[NID] = subg.sgi.induced_nodes.tousertensor() self.edata[EID] = subg.sgi.induced_edges.tousertensor() self.subg = subg self.num_chunks = num_chunks self.chunk_size = chunk_size self.neg_sample_size = neg_sample_size self.neg_head = neg_head @property def head_nid(self): return self.subg.head_nid @property def tail_nid(self): return self.subg.tail_nid def create_neg_subgraph(pos_g, neg_g, chunk_size, neg_sample_size, is_chunked, neg_head, num_nodes): assert neg_g.number_of_edges() % pos_g.number_of_edges() == 0 if (neg_head and len(neg_g.head_nid) == num_nodes) or (not neg_head and len(neg_g.tail_nid) == num_nodes): num_chunks = 1 chunk_size = pos_g.number_of_edges() elif is_chunked: if pos_g.number_of_edges() < chunk_size and neg_g.number_of_edges() % neg_sample_size == 0: num_chunks = 1 chunk_size = pos_g.number_of_edges() elif pos_g.number_of_edges() % chunk_size > 0: return None else: num_chunks = int(pos_g.number_of_edges() / chunk_size) assert num_chunks * chunk_size == pos_g.number_of_edges() else: num_chunks = pos_g.number_of_edges() chunk_size = 1 return ChunkNegEdgeSubgraph(neg_g, num_chunks, chunk_size, neg_sample_size, neg_head) class EvalSampler(object): def __init__(self, g, edges, batch_size, neg_sample_size, neg_chunk_size, mode, num_workers=32, filter_false_neg=True): EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler') self.sampler = EdgeSampler(g, batch_size=batch_size, seed_edges=edges, neg_sample_size=neg_sample_size, chunk_size=neg_chunk_size, negative_mode=mode, num_workers=num_workers, shuffle=False, exclude_positive=False, relations=g.edata['tid'], return_false_neg=filter_false_neg) self.sampler_iter = iter(self.sampler) self.mode = mode self.neg_head = 'head' in mode self.g = g self.filter_false_neg = filter_false_neg self.neg_chunk_size = neg_chunk_size self.neg_sample_size = neg_sample_size def __iter__(self): return self def __next__(self): while True: pos_g, neg_g = next(self.sampler_iter) if self.filter_false_neg: neg_positive = neg_g.edata['false_neg'] neg_g = create_neg_subgraph(pos_g, neg_g, self.neg_chunk_size, self.neg_sample_size, 'chunk' in self.mode, self.neg_head, self.g.number_of_nodes()) if neg_g is not None: break pos_g.ndata['id'] = pos_g.parent_nid neg_g.ndata['id'] = neg_g.parent_nid pos_g.edata['id'] = pos_g._parent.edata['tid'][pos_g.parent_eid] if self.filter_false_neg: neg_g.edata['bias'] = F.astype(-neg_positive, F.float32) return pos_g, neg_g def reset(self): self.sampler_iter = iter(self.sampler) return self class WikiEvalSampler(object): def __init__(self, edges, batch_size, mode): self.edges = edges self.batch_size = batch_size self.mode = mode self.neg_head = 'head' in mode self.cnt = 0 self.mode = 'h,r->t' self.num_edges = len(self.edges['h,r->t']['hr']) def __iter__(self): return self def __next__(self): if self.cnt == self.num_edges: raise StopIteration beg = self.cnt if self.cnt + self.batch_size > self.num_edges: self.cnt = self.num_edges else: 
self.cnt += self.batch_size return F.tensor(self.edges['h,r->t']['hr'][beg:self.cnt], F.int64), F.tensor(self.edges['h,r->t']['t_correct_index'][beg:self.cnt], F.int64), F.tensor(self.edges['h,r->t']['t_candidate'][beg:self.cnt], F.int64)
Apache License 2.0
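A toy re-implementation (not dgl-ke's class) showing the batching-and-reset pattern that WikiEvalSampler.reset enables.

class ToySampler:
    def __init__(self, items, batch_size):
        self.items, self.batch_size, self.cnt = items, batch_size, 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.cnt == len(self.items):
            raise StopIteration
        beg = self.cnt
        self.cnt = min(self.cnt + self.batch_size, len(self.items))
        return self.items[beg:self.cnt]

    def reset(self):
        self.cnt = 0
        return self

s = ToySampler(list(range(5)), batch_size=2)
assert list(s) == [[0, 1], [2, 3], [4]]
assert list(s) == []                              # exhausted until reset
assert list(s.reset()) == [[0, 1], [2, 3], [4]]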
plaid/plaid-python
plaid/model/signal_evaluate_response.py
SignalEvaluateResponse.openapi_types
python
def openapi_types():
    lazy_import()
    return {
        'request_id': (str,),
        'scores': (SignalScores,),
        'core_attributes': (SignalEvaluateCoreAttributes,),
    }
This must be a method because a model may have properties that are of type self; it must run after the class is loaded.

Returns:
    openapi_types (dict): The key is the attribute name and the value is the attribute type.
https://github.com/plaid/plaid-python/blob/950d04d621a5f5b92a7705cc30d14d4004db8543/plaid/model/signal_evaluate_response.py#L76-L90
import re import sys from plaid.model_utils import ( ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) def lazy_import(): from plaid.model.signal_evaluate_core_attributes import SignalEvaluateCoreAttributes from plaid.model.signal_scores import SignalScores globals()['SignalEvaluateCoreAttributes'] = SignalEvaluateCoreAttributes globals()['SignalScores'] = SignalScores class SignalEvaluateResponse(ModelNormal): allowed_values = { } validations = { } @cached_property def additional_properties_type(): lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) _nullable = False @cached_property
MIT License
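A hedged sketch of inspecting the type map above; it assumes plaid-python is installed and that class-level access through the generator's cached_property helper behaves as in the context.

from plaid.model.signal_evaluate_response import SignalEvaluateResponse

types = SignalEvaluateResponse.openapi_types      # assumed class-level access
print(types['request_id'])                        # (str,)
print(sorted(types))                              # ['core_attributes', 'request_id', 'scores']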
dragonfly/dragonfly
dragonfly/exd/cp_domain_utils.py
_unpack_vectorised_domain
python
def _unpack_vectorised_domain(x, dim_ordering):
    ret = [None] * len(dim_ordering)
    counter = 0
    for idx, num_dims in enumerate(dim_ordering):
        if num_dims == '':
            ret[idx] = x[counter]
            counter += 1
        else:
            ret[idx] = x[counter:counter+num_dims]
            counter += num_dims
    assert counter == len(x)
    return ret
Unpacks a vectorised domain.
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/dragonfly/exd/cp_domain_utils.py#L306-L318
from __future__ import print_function from argparse import Namespace from copy import deepcopy import numpy as np from warnings import warn from . import domains from ..parse.config_parser import config_parser from ..utils.general_utils import flatten_list_of_objects_and_iterables, get_original_order_from_reordered_list, transpose_list_of_lists from ..utils.oper_utils import random_sample_from_euclidean_domain, random_sample_from_discrete_euclidean_domain, random_sample_from_integral_domain, random_sample_from_prod_discrete_domain def _process_fidel_to_opt(raw_fidel_to_opt, fidel_space, fidel_space_orderings, config_file): if raw_fidel_to_opt is None: fidel_to_opt = None warn('fidel_to_opt is None for %s.'%(config_file)) else: try: fidel_to_opt = get_processed_point_from_raw_point(raw_fidel_to_opt, fidel_space, fidel_space_orderings.index_ordering, fidel_space_orderings.dim_ordering) assert fidel_space.is_a_member(fidel_to_opt) except: raise ValueError('fidel_to_opt: %s (raw: %s) is not a member of fidel_space %s.'%( fidel_to_opt, raw_fidel_to_opt, fidel_space)) return raw_fidel_to_opt, fidel_to_opt def _preprocess_domain_parameters(domain_parameters, var_prefix='var_'): if domain_parameters is None: return domain_parameters for idx, var_dict in enumerate(domain_parameters): if not 'name' in var_dict.keys(): var_dict['name'] = '%s%02d'%(var_prefix, idx) if not 'dim' in var_dict.keys(): var_dict['dim'] = '' if not 'kernel' in var_dict.keys(): var_dict['kernel'] = '' if var_dict['type'] in ['float', 'int']: if not ('min' in var_dict.keys() and 'max' in var_dict.keys()): if not 'bounds' in var_dict.keys(): raise ValueError('Specify bounds or min and max for Euclidean and Integral ' + 'variables: %s.'%(var_dict)) else: var_dict['min'] = var_dict['bounds'][0] var_dict['max'] = var_dict['bounds'][1] if var_dict['type'] == 'discrete_numeric': if 'items' not in var_dict.keys(): raise ValueError('Specify items for discrete_numeric variables.') if isinstance(var_dict['items'], str): if ':' not in var_dict['items']: _items = [float(x) for x in var_dict['items'].split('-')] else: _range = [float(x) for x in var_dict['items'].split(':')] _items = list(np.arange(_range[0], _range[2], _range[1])) var_dict['items'] = _items if var_dict['type'] == 'discrete_euclidean' and var_dict['dim'] != '': raise ValueError('dim parameter for Discrete Euclidean vectors should be an empty' + ' string or not specified. 
Given %s.'%(var_dict['dim'])) return domain_parameters def _preprocess_domain_constraints(domain_constraints, constraint_prefix): if domain_constraints is None: return domain_constraints for idx, var_dict in enumerate(domain_constraints): if not 'name' in var_dict.keys(): var_dict['name'] = '%s%02d'%(constraint_prefix, idx) return domain_constraints def _preprocess_config_params(config_params): config_params = deepcopy(config_params) if not 'name' in config_params: if 'exp_info' in config_params and 'name' in config_params['exp_info']: config_params['name'] = config_params['exp_info']['name'] else: config_params['name'] = 'no_name' config_params['domain'] = _preprocess_domain_parameters(config_params['domain'], var_prefix='domvar_') if 'domain_constraints' in config_params: config_params['domain_constraints'] = _preprocess_domain_constraints( config_params['domain_constraints'], 'domconstraint_') if 'fidel_space' in config_params: config_params['fidel_space'] = _preprocess_domain_parameters( config_params['fidel_space'], var_prefix='fidelvar_') if 'fidel_space_constraints' in config_params: config_params['fidel_space_constraints'] = _preprocess_domain_constraints( config_params['fidel_space_constraints'], 'fidelconstraint_') return config_params def load_config_file(config_file, *args, **kwargs): parsed_result = config_parser(config_file) return load_config(parsed_result, config_file, *args, **kwargs) def load_config(config_params, config_file=None, *args, **kwargs): config_params = _preprocess_config_params(config_params) domain_params = config_params['domain'] domain_constraints = None if not ('domain_constraints' in config_params.keys()) else config_params['domain_constraints'] domain_info = Namespace(config_file=config_file) domain, domain_orderings = load_domain_from_params(domain_params, domain_constraints=domain_constraints, domain_info=domain_info, *args, **kwargs) config = Namespace(name=config_params['name'], domain=domain, domain_orderings=domain_orderings) if 'fidel_space' in config_params.keys(): fidel_space_params = config_params['fidel_space'] fidel_space_constraints = None if not ('fidel_space_constraints' in config_params.keys()) else config_params['fidel_space_constraints'] fidel_space_info = Namespace(config_file=config_file) fidel_space, fidel_space_orderings = load_domain_from_params( fidel_space_params, domain_constraints=fidel_space_constraints, domain_info=fidel_space_info, *args, **kwargs) if len(fidel_space.list_of_domains) > 0: config.fidel_space = fidel_space config.fidel_space_orderings = fidel_space_orderings config.raw_fidel_to_opt, config.fidel_to_opt = _process_fidel_to_opt( config_params['fidel_to_opt'], fidel_space, fidel_space_orderings, config_file) return config def load_cp_domain_from_config_file(config_file, *args, **kwargs): parsed_result = config_parser(config_file) domain_params = parsed_result['domain'] domain_constraints = None if not ('domain_constraints' in parsed_result.keys()) else parsed_result['domain_constraints'] domain_info = Namespace(config_file=config_file) return load_domain_from_params(domain_params, domain_constraints=domain_constraints, domain_info=domain_info, *args, **kwargs) def load_domain_from_params(domain_params, general_euclidean_kernel='', general_integral_kernel='', general_discrete_kernel='', general_discrete_numeric_kernel='', domain_constraints=None, domain_info=None): list_of_domains = [] general_euclidean_bounds = [] general_euclidean_idxs = [] general_integral_bounds = [] general_integral_idxs = [] 
general_discrete_items_list = [] general_discrete_idxs = [] general_discrete_numeric_items_list = [] general_discrete_numeric_idxs = [] raw_name_ordering = [] index_ordering = [] for idx, param in enumerate(domain_params): raw_name_ordering.append(param['name']) if param['type'] in ['float', 'int']: bound_dim = 1 if param['dim'] == '' else param['dim'] curr_bounds = [[param['min'], param['max']]] * bound_dim elif param['type'] in ['discrete', 'discrete_numeric', 'boolean', 'discrete_euclidean']: items_dim = 1 if param['dim'] == '' else param['dim'] if param['type'] == 'boolean': param_items = [0, 1] else: param_items = param['items'] curr_items = [param_items[:] for _ in range(items_dim)] if param['type'] == 'float': if param['kernel'] == '': general_euclidean_bounds.extend(curr_bounds) general_euclidean_idxs.append(idx) else: list_of_domains.append(domains.EuclideanDomain(curr_bounds)) index_ordering.append(idx) elif param['type'] == 'int': if param['kernel'] == '': general_integral_bounds.extend(curr_bounds) general_integral_idxs.append(idx) else: list_of_domains.append(domains.IntegralDomain(curr_bounds)) index_ordering.append(idx) elif param['type'] in ['boolean', 'discrete']: if param['kernel'] == '': general_discrete_items_list.extend(curr_items) general_discrete_idxs.append(idx) else: list_of_domains.append(domains.ProdDiscreteDomain(curr_items)) index_ordering.append(idx) elif param['type'] == 'discrete_numeric': if param['kernel'] == '': general_discrete_numeric_items_list.extend(curr_items) general_discrete_numeric_idxs.append(idx) else: list_of_domains.append(domains.ProdDiscreteNumericDomain(curr_items)) index_ordering.append(idx) elif param['type'] == 'discrete_euclidean': list_of_domains.append(domains.DiscreteEuclideanDomain(param_items)) index_ordering.append(idx) elif param['type'].startswith(('nn', 'cnn', 'mlp')): from ..nn.nn_domains import get_nn_domain_from_constraints list_of_domains.append(get_nn_domain_from_constraints(param['type'], param)) index_ordering.append(idx) else: raise ValueError('Unknown domain type: %s.'%(param['type'])) kernel_ordering = [domain_params[idx]['kernel'] for idx in index_ordering] name_ordering = [domain_params[idx]['name'] for idx in index_ordering] dim_ordering = [domain_params[idx]['dim'] for idx in index_ordering] if len(general_euclidean_bounds) > 0: list_of_domains.append(domains.EuclideanDomain(general_euclidean_bounds)) general_euclidean_names = [domain_params[idx]['name'] for idx in general_euclidean_idxs] general_euclidean_dims = [domain_params[idx]['dim'] for idx in general_euclidean_idxs] name_ordering.append(general_euclidean_names) dim_ordering.append(general_euclidean_dims) index_ordering.append(general_euclidean_idxs) kernel_ordering.append(general_euclidean_kernel) if len(general_integral_bounds) > 0: list_of_domains.append(domains.IntegralDomain(general_integral_bounds)) general_integral_names = [domain_params[idx]['name'] for idx in general_integral_idxs] general_integral_dims = [domain_params[idx]['dim'] for idx in general_integral_idxs] name_ordering.append(general_integral_names) dim_ordering.append(general_integral_dims) index_ordering.append(general_integral_idxs) kernel_ordering.append(general_integral_kernel) if len(general_discrete_items_list) > 0: list_of_domains.append(domains.ProdDiscreteDomain(general_discrete_items_list)) general_discrete_names = [domain_params[idx]['name'] for idx in general_discrete_idxs] general_discrete_dims = [domain_params[idx]['dim'] for idx in general_discrete_idxs] 
name_ordering.append(general_discrete_names) dim_ordering.append(general_discrete_dims) index_ordering.append(general_discrete_idxs) kernel_ordering.append(general_discrete_kernel) if len(general_discrete_numeric_items_list) > 0: list_of_domains.append( domains.ProdDiscreteNumericDomain(general_discrete_numeric_items_list)) general_discrete_numeric_names = [domain_params[idx]['name'] for idx in general_discrete_numeric_idxs] general_discrete_numeric_dims = [domain_params[idx]['dim'] for idx in general_discrete_numeric_idxs] name_ordering.append(general_discrete_numeric_names) dim_ordering.append(general_discrete_numeric_dims) index_ordering.append(general_discrete_numeric_idxs) kernel_ordering.append(general_discrete_numeric_kernel) orderings = Namespace(index_ordering=index_ordering, kernel_ordering=kernel_ordering, dim_ordering=dim_ordering, name_ordering=name_ordering, raw_name_ordering=raw_name_ordering) if domain_info is None: domain_info = Namespace() domain_info.config_orderings = orderings if domain_constraints is not None: domain_info.constraints = domain_constraints cp_domain = domains.CartesianProductDomain(list_of_domains, domain_info) return cp_domain, orderings def get_num_raw_domains(ordering): num_raw_domains = len(ordering) for elem in ordering: if hasattr(elem, '__iter__'): num_raw_domains += len(elem) - 1 return num_raw_domains
MIT License
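A quick standalone check of _unpack_vectorised_domain above: '' marks a scalar dimension and an integer marks a grouped block.

def _unpack_vectorised_domain(x, dim_ordering):
    ret = [None] * len(dim_ordering)
    counter = 0
    for idx, num_dims in enumerate(dim_ordering):
        if num_dims == '':
            ret[idx] = x[counter]
            counter += 1
        else:
            ret[idx] = x[counter:counter + num_dims]
            counter += num_dims
    assert counter == len(x)
    return ret

# a scalar, then a 2-vector, then another scalar
assert _unpack_vectorised_domain([7, 1, 2, 9], ['', 2, '']) == [7, [1, 2], 9]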
exiahuang/salesforcexytools
xlsxwriter/sharedstrings.py
SharedStringTable._get_strings
python
def _get_strings(self):
    return self.string_array
Return the sorted string list.
https://github.com/exiahuang/salesforcexytools/blob/dde689292bc991c1357ec7479a8e291cb537e8ff/xlsxwriter/sharedstrings.py#L151-L153
import re from . import xmlwriter class SharedStrings(xmlwriter.XMLwriter): def __init__(self): super(SharedStrings, self).__init__() self.string_table = None def _assemble_xml_file(self): self._xml_declaration() self._write_sst() self._write_sst_strings() self._xml_end_tag('sst') self._xml_close() def _write_sst(self): xmlns = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main' attributes = [ ('xmlns', xmlns), ('count', self.string_table.count), ('uniqueCount', self.string_table.unique_count), ] self._xml_start_tag('sst', attributes) def _write_sst_strings(self): for string in (self.string_table._get_strings()): self._write_si(string) def _write_si(self, string): attributes = [] string = re.sub('(_x[0-9a-fA-F]{4}_)', r'_x005F\1', string) string = re.sub(r'([\x00-\x08\x0B-\x1F])', lambda match: "_x%04X_" % ord(match.group(1)), string) if re.search('^\s', string) or re.search('\s$', string): attributes.append(('xml:space', 'preserve')) if re.search('^<r>', string) and re.search('</r>$', string): self._xml_rich_si_element(string) else: self._xml_si_element(string, attributes) class SharedStringTable(object): def __init__(self): self.count = 0 self.unique_count = 0 self.string_table = {} self.string_array = [] def _get_shared_string_index(self, string): if string not in self.string_table: index = self.unique_count self.string_table[string] = index self.count += 1 self.unique_count += 1 return index else: index = self.string_table[string] self.count += 1 return index def _get_shared_string(self, index): return self.string_array[index] def _sort_string_data(self): self.string_array = sorted(self.string_table, key=self.string_table.__getitem__) self.string_table = {}
Apache License 2.0
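A hedged usage sketch of the shared-string bookkeeping around _get_strings; it assumes the vendored xlsxwriter module shown in the context (or an equivalent XlsxWriter install) is importable.

from xlsxwriter.sharedstrings import SharedStringTable

table = SharedStringTable()
table._get_shared_string_index('hello')   # 0
table._get_shared_string_index('world')   # 1
table._get_shared_string_index('hello')   # 0 again; total count is now 3
table._sort_string_data()
print(table._get_strings())               # ['hello', 'world']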
jeeftor/alfredtoday
src/lib/oauth2client/contrib/appengine.py
OAuth2Decorator._create_flow
python
def _create_flow(self, request_handler):
    if self.flow is None:
        redirect_uri = request_handler.request.relative_url(
            self._callback_path)
        self.flow = OAuth2WebServerFlow(
            self._client_id, self._client_secret, self._scope,
            redirect_uri=redirect_uri,
            user_agent=self._user_agent,
            auth_uri=self._auth_uri,
            token_uri=self._token_uri,
            revoke_uri=self._revoke_uri,
            **self._kwargs)
Create the Flow object.

The Flow is calculated lazily since we don't know where this app is running until it receives a request, at which point redirect_uri can be calculated and then the Flow object can be constructed.

Args:
    request_handler: webapp.RequestHandler, the request handler.
https://github.com/jeeftor/alfredtoday/blob/f6e2c2228caa71015e654e1fdbf552e2ca4f90ad/src/lib/oauth2client/contrib/appengine.py#L676-L693
import cgi import json import logging import os import pickle import threading import httplib2 import webapp2 as webapp from google.appengine.api import app_identity from google.appengine.api import memcache from google.appengine.api import users from google.appengine.ext import db from google.appengine.ext.webapp.util import login_required from oauth2client import GOOGLE_AUTH_URI from oauth2client import GOOGLE_REVOKE_URI from oauth2client import GOOGLE_TOKEN_URI from oauth2client import clientsecrets from oauth2client import util from oauth2client.client import AccessTokenRefreshError from oauth2client.client import AssertionCredentials from oauth2client.client import Credentials from oauth2client.client import Flow from oauth2client.client import OAuth2WebServerFlow from oauth2client.client import Storage from oauth2client.contrib import xsrfutil try: from oauth2client.contrib import _appengine_ndb except ImportError: _appengine_ndb = None __author__ = 'jcgregorio@google.com (Joe Gregorio)' logger = logging.getLogger(__name__) OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns' XSRF_MEMCACHE_ID = 'xsrf_secret_key' if _appengine_ndb is None: CredentialsNDBModel = None CredentialsNDBProperty = None FlowNDBProperty = None _NDB_KEY = None _NDB_MODEL = None SiteXsrfSecretKeyNDB = None else: CredentialsNDBModel = _appengine_ndb.CredentialsNDBModel CredentialsNDBProperty = _appengine_ndb.CredentialsNDBProperty FlowNDBProperty = _appengine_ndb.FlowNDBProperty _NDB_KEY = _appengine_ndb.NDB_KEY _NDB_MODEL = _appengine_ndb.NDB_MODEL SiteXsrfSecretKeyNDB = _appengine_ndb.SiteXsrfSecretKeyNDB def _safe_html(s): return cgi.escape(s, quote=1).replace("'", '&#39;') class InvalidClientSecretsError(Exception): class InvalidXsrfTokenError(Exception): class SiteXsrfSecretKey(db.Model): secret = db.StringProperty() def _generate_new_xsrf_secret_key(): return os.urandom(16).encode("hex") def xsrf_secret_key(): secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE) if not secret: model = SiteXsrfSecretKey.get_or_insert(key_name='site') if not model.secret: model.secret = _generate_new_xsrf_secret_key() model.put() secret = model.secret memcache.add(XSRF_MEMCACHE_ID, secret, namespace=OAUTH2CLIENT_NAMESPACE) return str(secret) class AppAssertionCredentials(AssertionCredentials): @util.positional(2) def __init__(self, scope, **kwargs): self.scope = util.scopes_to_string(scope) self._kwargs = kwargs self.service_account_id = kwargs.get('service_account_id', None) self._service_account_email = None super(AppAssertionCredentials, self).__init__(None) @classmethod def from_json(cls, json_data): data = json.loads(json_data) return AppAssertionCredentials(data['scope']) def _refresh(self, http_request): try: scopes = self.scope.split() (token, _) = app_identity.get_access_token( scopes, service_account_id=self.service_account_id) except app_identity.Error as e: raise AccessTokenRefreshError(str(e)) self.access_token = token @property def serialization_data(self): raise NotImplementedError('Cannot serialize credentials ' 'for Google App Engine.') def create_scoped_required(self): return not self.scope def create_scoped(self, scopes): return AppAssertionCredentials(scopes, **self._kwargs) def sign_blob(self, blob): return app_identity.sign_blob(blob) @property def service_account_email(self): if self._service_account_email is None: self._service_account_email = ( app_identity.get_service_account_name()) return self._service_account_email class FlowProperty(db.Property): data_type = Flow def 
get_value_for_datastore(self, model_instance): flow = super(FlowProperty, self).get_value_for_datastore( model_instance) return db.Blob(pickle.dumps(flow)) def make_value_from_datastore(self, value): if value is None: return None return pickle.loads(value) def validate(self, value): if value is not None and not isinstance(value, Flow): raise db.BadValueError('Property %s must be convertible ' 'to a FlowThreeLegged instance (%s)' % (self.name, value)) return super(FlowProperty, self).validate(value) def empty(self, value): return not value class CredentialsProperty(db.Property): data_type = Credentials def get_value_for_datastore(self, model_instance): logger.info("get: Got type " + str(type(model_instance))) cred = super(CredentialsProperty, self).get_value_for_datastore( model_instance) if cred is None: cred = '' else: cred = cred.to_json() return db.Blob(cred) def make_value_from_datastore(self, value): logger.info("make: Got type " + str(type(value))) if value is None: return None if len(value) == 0: return None try: credentials = Credentials.new_from_json(value) except ValueError: credentials = None return credentials def validate(self, value): value = super(CredentialsProperty, self).validate(value) logger.info("validate: Got type " + str(type(value))) if value is not None and not isinstance(value, Credentials): raise db.BadValueError('Property %s must be convertible ' 'to a Credentials instance (%s)' % (self.name, value)) return value class StorageByKeyName(Storage): @util.positional(4) def __init__(self, model, key_name, property_name, cache=None, user=None): super(StorageByKeyName, self).__init__() if key_name is None: if user is None: raise ValueError('StorageByKeyName called with no ' 'key name or user.') key_name = user.user_id() self._model = model self._key_name = key_name self._property_name = property_name self._cache = cache def _is_ndb(self): if isinstance(self._model, type): if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL): return True elif issubclass(self._model, db.Model): return False raise TypeError('Model class not an NDB or DB model: %s.' 
% (self._model,)) def _get_entity(self): if self._is_ndb(): return self._model.get_by_id(self._key_name) else: return self._model.get_by_key_name(self._key_name) def _delete_entity(self): if self._is_ndb(): _NDB_KEY(self._model, self._key_name).delete() else: entity_key = db.Key.from_path(self._model.kind(), self._key_name) db.delete(entity_key) @db.non_transactional(allow_existing=True) def locked_get(self): credentials = None if self._cache: json = self._cache.get(self._key_name) if json: credentials = Credentials.new_from_json(json) if credentials is None: entity = self._get_entity() if entity is not None: credentials = getattr(entity, self._property_name) if self._cache: self._cache.set(self._key_name, credentials.to_json()) if credentials and hasattr(credentials, 'set_store'): credentials.set_store(self) return credentials @db.non_transactional(allow_existing=True) def locked_put(self, credentials): entity = self._model.get_or_insert(self._key_name) setattr(entity, self._property_name, credentials) entity.put() if self._cache: self._cache.set(self._key_name, credentials.to_json()) @db.non_transactional(allow_existing=True) def locked_delete(self): if self._cache: self._cache.delete(self._key_name) self._delete_entity() class CredentialsModel(db.Model): credentials = CredentialsProperty() def _build_state_value(request_handler, user): uri = request_handler.request.url token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(), action_id=str(uri)) return uri + ':' + token def _parse_state_value(state, user): uri, token = state.rsplit(':', 1) if not xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(), action_id=uri): raise InvalidXsrfTokenError() return uri class OAuth2Decorator(object): def set_credentials(self, credentials): self._tls.credentials = credentials def get_credentials(self): return getattr(self._tls, 'credentials', None) credentials = property(get_credentials, set_credentials) def set_flow(self, flow): self._tls.flow = flow def get_flow(self): return getattr(self._tls, 'flow', None) flow = property(get_flow, set_flow) @util.positional(4) def __init__(self, client_id, client_secret, scope, auth_uri=GOOGLE_AUTH_URI, token_uri=GOOGLE_TOKEN_URI, revoke_uri=GOOGLE_REVOKE_URI, user_agent=None, message=None, callback_path='/oauth2callback', token_response_param=None, _storage_class=StorageByKeyName, _credentials_class=CredentialsModel, _credentials_property_name='credentials', **kwargs): self._tls = threading.local() self.flow = None self.credentials = None self._client_id = client_id self._client_secret = client_secret self._scope = util.scopes_to_string(scope) self._auth_uri = auth_uri self._token_uri = token_uri self._revoke_uri = revoke_uri self._user_agent = user_agent self._kwargs = kwargs self._message = message self._in_error = False self._callback_path = callback_path self._token_response_param = token_response_param self._storage_class = _storage_class self._credentials_class = _credentials_class self._credentials_property_name = _credentials_property_name def _display_error_message(self, request_handler): request_handler.response.out.write('<html><body>') request_handler.response.out.write(_safe_html(self._message)) request_handler.response.out.write('</body></html>') def oauth_required(self, method): def check_oauth(request_handler, *args, **kwargs): if self._in_error: self._display_error_message(request_handler) return user = users.get_current_user() if not user: request_handler.redirect(users.create_login_url( request_handler.request.uri)) return 
self._create_flow(request_handler) self.flow.params['state'] = _build_state_value( request_handler, user) self.credentials = self._storage_class( self._credentials_class, None, self._credentials_property_name, user=user).get() if not self.has_credentials(): return request_handler.redirect(self.authorize_url()) try: resp = method(request_handler, *args, **kwargs) except AccessTokenRefreshError: return request_handler.redirect(self.authorize_url()) finally: self.credentials = None return resp return check_oauth
MIT License
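A hedged sketch of the usual App Engine wiring around the decorator above; the client id, secret, and scope are placeholders, and the flow is only built lazily on the first request (via _create_flow).

import webapp2
from oauth2client.contrib.appengine import OAuth2Decorator

decorator = OAuth2Decorator(client_id='PLACEHOLDER_CLIENT_ID',
                            client_secret='PLACEHOLDER_SECRET',
                            scope='https://www.googleapis.com/auth/calendar.readonly')

class MainHandler(webapp2.RequestHandler):
    @decorator.oauth_required       # triggers _create_flow on the first request it handles
    def get(self):
        self.response.write('authorized')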
flatironinstitute/inferelator
inferelator/workflow.py
WorkflowBaseLoader.set_expression_file
python
def set_expression_file(self, tsv=None, hdf5=None, h5ad=None, tenx_path=None,
                        mtx=None, mtx_barcode=None, mtx_feature=None, h5_layer=None):
    nones = [tsv is None, hdf5 is None, h5ad is None, tenx_path is None, mtx is None]

    if all(nones):
        Debug.vprint("No file provided", level=0)
    elif sum(nones) != (len(nones) - 1):
        raise ValueError("Only one type of input expression file can be set")

    if tsv is not None:
        self._set_file_name("expression_matrix_file", tsv)
        self._expression_loader = _TSV
    elif hdf5 is not None:
        self._set_file_name("expression_matrix_file", hdf5)
        self._expression_loader = _HDF5
        self._h5_layer = h5_layer
    elif h5ad is not None:
        self._set_file_name("expression_matrix_file", h5ad)
        self._expression_loader = _H5AD
        self._h5_layer = h5_layer
    elif mtx is not None:
        self._check_file_exists(mtx)
        self._check_file_exists(mtx_barcode)
        self._check_file_exists(mtx_feature)
        self.expression_matrix_file = (mtx, mtx_barcode, mtx_feature)
        self._expression_loader = _MTX
    elif tenx_path is not None:
        self.expression_matrix_file = tenx_path
        self._expression_loader = _TENX
Set the type of expression data file. Current loaders include TSV, hdf5, h5ad (AnnData), and MTX sparse files. Only one of these loaders can be used; passing arguments for multiple loaders will raise a ValueError.

:param tsv: A path to a TSV (or tsv.gz) file which can be loaded by pandas.read_csv()
:type tsv: str, optional
:param hdf5: A path to a hdf5 file which can be loaded by pandas.HDFStore
:type hdf5: str, optional
:param h5ad: A path to an AnnData hd5 file
:type h5ad: str, optional
:param tenx_path: A path to the folder containing the 10x mtx, barcode, and feature files
:type tenx_path: Path, optional
:param mtx: A path to an mtx file
:type mtx: str, optional
:param mtx_barcode: A path to a list of observation names (i.e. barcodes, etc) for the mtx file
:type mtx_barcode: str, optional
:param mtx_feature: A path to a list of gene names for the mtx file
:type mtx_feature: str, optional
:param h5_layer: The layer (in an AnnData h5) or the store key (in an hdf5) file to use. Defaults to using the first key.
:type h5_layer: str, optional
https://github.com/flatironinstitute/inferelator/blob/dd532f428132cfac98c9c7c161632dab2a4e9ea9/inferelator/workflow.py#L182-L233
from __future__ import unicode_literals, print_function import datetime import inspect import os import warnings import copy import numpy as np import pandas as pd from inferelator.utils import (Debug, InferelatorDataLoader, DEFAULT_PANDAS_TSV_SETTINGS, slurm_envs, is_string, DotProduct) from inferelator.distributed.inferelator_mp import MPControl from inferelator.preprocessing import ManagePriors, make_data_noisy from inferelator.regression.base_regression import _RegressionWorkflowMixin from inferelator.postprocessing import ResultsProcessor, InferelatorResults SBATCH_VARS_FOR_WORKFLOW = ["output_dir", "input_dir"] _TENX = "tenx" _TSV = "tsv" _H5AD = "h5ad" _HDF5 = "hdf5" _MTX = "mtx" class WorkflowBaseLoader(object): input_dir = None output_dir = None expression_matrix_file = None tf_names_file = None meta_data_file = None priors_file = None gold_standard_file = None _expression_loader = _TSV _h5_layer = None metadata_handler = "branching" gene_names_file = None gene_metadata_file = None gene_list_index = None tf_names = None gene_names = None priors_data = None gold_standard = None data = None design = None response = None expression_matrix_columns_are_genes = True extract_metadata_from_expression_matrix = False expression_matrix_metadata = None use_no_prior = False use_no_gold_standard = False _file_format_settings = None @property def _num_obs(self): if self.response is not None: return self.response.num_obs elif self.data is not None: return self.data.num_obs else: return None @property def _num_tfs(self): if self.design is not None: return self.design.num_genes elif self.tf_names is not None: return len(self.tf_names) else: return None @property def _num_genes(self): if self.response is not None: return self.response.num_genes elif self.data is not None: return self.data.num_genes else: return None @property def _gene_names(self): if self.data is not None: return self.data.gene_names else: return None def __init__(self): if self._file_format_settings is None: self._file_format_settings = dict() def set_file_paths(self, input_dir=None, output_dir=None, expression_matrix_file=None, tf_names_file=None, meta_data_file=None, priors_file=None, gold_standard_file=None, gene_metadata_file=None, gene_names_file=None): self._set_with_warning("input_dir", input_dir) self._set_with_warning("output_dir", output_dir) self._set_file_name("expression_matrix_file", expression_matrix_file) self._set_file_name("tf_names_file", tf_names_file) self._set_file_name("meta_data_file", meta_data_file) self._set_file_name("priors_file", priors_file) self._set_file_name("gold_standard_file", gold_standard_file) self._set_file_name("gene_metadata_file", gene_metadata_file) self._set_file_name("gene_names_file", gene_names_file) if expression_matrix_file is not None: self._expression_loader = _TSV if ".tsv" not in expression_matrix_file: msg = "`set_file_paths` assumes data is in a TSV. Use `set_expression_file` for other formats" warnings.warn(msg)
BSD 2-Clause Simplified License
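A hedged usage sketch of set_expression_file above; the factory helper and file name are assumptions, and only one loader argument may be set per call.

from inferelator import inferelator_workflow      # assumed top-level factory

wf = inferelator_workflow()
wf.set_expression_file(tsv="expression.tsv.gz")   # placeholder path

# Mixing loaders raises ValueError:
# wf.set_expression_file(tsv="a.tsv", h5ad="a.h5ad")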
databand-ai/dbnd
modules/dbnd/src/dbnd/_vendor/dulwich/pack.py
UnpackedObject._obj
python
def _obj(self):
    if self.pack_type_num in DELTA_TYPES:
        return (self.delta_base, self.decomp_chunks)
    else:
        return self.decomp_chunks
Return the decompressed chunks, or (delta base, delta chunks).
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd/src/dbnd/_vendor/dulwich/pack.py#L167-L172
from collections import defaultdict import binascii from io import BytesIO, UnsupportedOperation from collections import deque import difflib import struct from itertools import chain try: from itertools import imap, izip except ImportError: imap = map izip = zip import os import sys from hashlib import sha1 from os import ( SEEK_CUR, SEEK_END, ) from struct import unpack_from import zlib try: import mmap except ImportError: has_mmap = False else: has_mmap = True if sys.platform == "Plan9": has_mmap = False from dbnd._vendor.dulwich.errors import ( ApplyDeltaError, ChecksumMismatch, ) from dbnd._vendor.dulwich.file import GitFile from dbnd._vendor.dulwich.lru_cache import LRUSizeCache from dbnd._vendor.dulwich.objects import ( ShaFile, hex_to_sha, sha_to_hex, object_header, ) OFS_DELTA = 6 REF_DELTA = 7 DELTA_TYPES = (OFS_DELTA, REF_DELTA) DEFAULT_PACK_DELTA_WINDOW_SIZE = 10 def take_msb_bytes(read, crc32=None): ret = [] while len(ret) == 0 or ret[-1] & 0x80: b = read(1) if crc32 is not None: crc32 = binascii.crc32(b, crc32) ret.append(ord(b[:1])) return ret, crc32 class UnpackedObject(object): __slots__ = [ "offset", "_sha", "obj_type_num", "obj_chunks", "pack_type_num", "delta_base", "comp_chunks", "decomp_chunks", "decomp_len", "crc32", ] def __init__(self, pack_type_num, delta_base, decomp_len, crc32): self.offset = None self._sha = None self.pack_type_num = pack_type_num self.delta_base = delta_base self.comp_chunks = None self.decomp_chunks = [] self.decomp_len = decomp_len self.crc32 = crc32 if pack_type_num in DELTA_TYPES: self.obj_type_num = None self.obj_chunks = None else: self.obj_type_num = pack_type_num self.obj_chunks = self.decomp_chunks self.delta_base = delta_base def sha(self): if self._sha is None: self._sha = obj_sha(self.obj_type_num, self.obj_chunks) return self._sha def sha_file(self): return ShaFile.from_raw_chunks(self.obj_type_num, self.obj_chunks)
Apache License 2.0
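A standalone toy illustration (not dulwich itself) of the distinction _obj draws between delta and non-delta pack entries.

OFS_DELTA, REF_DELTA = 6, 7
DELTA_TYPES = (OFS_DELTA, REF_DELTA)

def obj(pack_type_num, delta_base, decomp_chunks):
    if pack_type_num in DELTA_TYPES:
        return (delta_base, decomp_chunks)     # delta: caller still needs the base object
    return decomp_chunks                       # non-delta: raw chunks are the object

assert obj(3, None, [b'blob data']) == [b'blob data']                          # 3 = blob
assert obj(REF_DELTA, b'base-sha', [b'delta']) == (b'base-sha', [b'delta'])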
dmlc/dgl
python/dgl/nn/tensorflow/conv/densechebconv.py
DenseChebConv.call
python
def call(self, adj, feat, lambda_max=None):
    A = adj
    num_nodes = A.shape[0]

    # Symmetrically normalized Laplacian L = I - D^-1/2 A D^-1/2
    in_degree = 1 / tf.sqrt(tf.clip_by_value(tf.reduce_sum(A, 1),
                                             clip_value_min=1,
                                             clip_value_max=np.inf))
    D_invsqrt = tf.linalg.diag(in_degree)
    I = tf.eye(num_nodes)
    L = I - D_invsqrt @ A @ D_invsqrt

    if lambda_max is None:
        lambda_ = tf.linalg.eig(L)[0][:, 0]
        lambda_max = tf.reduce_max(lambda_)

    # Rescale the spectrum to [-1, 1] and build the Chebyshev basis Z_0..Z_{k-1}
    # via the recurrence T_k = 2x T_{k-1} - T_{k-2}
    L_hat = 2 * L / lambda_max - I
    Z = [tf.eye(num_nodes)]
    for i in range(1, self._k):
        if i == 1:
            Z.append(L_hat)
        else:
            Z.append(2 * L_hat @ Z[-1] - Z[-2])

    Zs = tf.stack(Z, 0)   # (k, n, n)
    Zh = (Zs @ tf.expand_dims(feat, axis=0) @ self.W)
    Zh = tf.reduce_sum(Zh, 0)

    if self.bias is not None:
        Zh = Zh + self.bias
    return Zh
r""" Description ----------- Compute (Dense) Chebyshev Spectral Graph Convolution layer. Parameters ---------- adj : tf.Tensor The adjacency matrix of the graph to apply Graph Convolution on, should be of shape :math:`(N, N)`, where a row represents the destination and a column represents the source. feat : tf.Tensor The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}` is size of input feature, :math:`N` is the number of nodes. lambda_max : float or None, optional A float value indicates the largest eigenvalue of given graph. Default: None. Returns ------- tf.Tensor The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}` is size of output feature.
https://github.com/dmlc/dgl/blob/8341244a2dac850bd0c1153c7641c3b8a2bbfc30/python/dgl/nn/tensorflow/conv/densechebconv.py#L59-L113
import tensorflow as tf from tensorflow.keras import layers import numpy as np class DenseChebConv(layers.Layer): def __init__(self, in_feats, out_feats, k, bias=True): super(DenseChebConv, self).__init__() self._in_feats = in_feats self._out_feats = out_feats self._k = k xinit = tf.keras.initializers.glorot_normal() self.W = tf.Variable(initial_value=xinit( shape=(k, in_feats, out_feats), dtype='float32'), trainable=True) if bias: zeroinit = tf.keras.initializers.zeros() self.bias = tf.Variable(initial_value=zeroinit( shape=(out_feats), dtype='float32'), trainable=True) else: self.bias = None
Apache License 2.0
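A minimal usage sketch for DenseChebConv.call above; the graph sizes and random inputs are illustrative, and it assumes the DenseChebConv class from this record is in scope (in DGL it lives under dgl.nn.tensorflow). Passing lambda_max explicitly sidesteps the eigendecomposition branch.

import numpy as np
import tensorflow as tf

N, D_in, D_out, k = 5, 8, 4, 3                    # hypothetical sizes
adj_np = (np.random.rand(N, N) > 0.5).astype(np.float32)
adj = tf.constant(np.maximum(adj_np, adj_np.T))   # symmetric dense adjacency, shape (N, N)
feat = tf.random.normal((N, D_in))                # node features, shape (N, D_in)

conv = DenseChebConv(D_in, D_out, k)              # constructor shown in the record's context
out = conv(adj, feat, lambda_max=2.0)             # 2.0 is the usual bound for a normalized Laplacian
print(out.shape)                                  # (N, D_out)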
seungback/open3d-ros-helper
open3d_ros_helper/open3d_ros_helper.py
crop_with_2dmask
python
def crop_with_2dmask(o3dpc, mask, K=None): o3dpc = copy.deepcopy(o3dpc) cloud_npy = np.asarray(o3dpc.points) if K is None: mask = np.resize(mask, cloud_npy.shape[0]) cloud_npy = cloud_npy[mask!=0] o3dpc = open3d.geometry.PointCloud() o3dpc.points = open3d.utility.Vector3dVector(cloud_npy) else: cloud_npy = np.asarray(o3dpc.points) x = cloud_npy[:, 0] y = cloud_npy[:, 1] z = cloud_npy[:, 2] px = np.uint16(x * K[0, 0]/z + K[0, 2]) py = np.uint16(y * K[1, 1]/z + K[1, 2]) H, W = mask.shape row_indices = np.logical_and(0 <= px, px < W-1) col_indices = np.logical_and(0 <= py, py < H-1) image_indices = np.logical_and(row_indices, col_indices) cloud_npy = cloud_npy[image_indices] mask_indices = mask[(py[image_indices], px[image_indices])] mask_indices = np.where(mask_indices != 0)[0] o3dpc.points = open3d.utility.Vector3dVector(cloud_npy[mask_indices]) return o3dpc
Crop an open3d point cloud with a given 2D binary mask. Args: o3dpc (open3d.geometry.PointCloud): open3d point cloud mask (np.array): binary mask aligned with the point cloud frame, shape [H, W] K (np.array): camera intrinsic matrix of shape (4x4); if K is not given, the point cloud should be ordered Returns: o3dpc (open3d.geometry.PointCloud): filtered open3d point cloud
https://github.com/seungback/open3d-ros-helper/blob/02f19861f77bb6cf51584306cf64eb2d5e3c97b6/open3d_ros_helper/open3d_ros_helper.py#L409-L444
import ros_numpy import open3d import numpy as np import tf.transformations as t import rospy import copy import image_geometry import cv2 from std_msgs.msg import Header from sensor_msgs.msg import PointCloud2, PointField from geometry_msgs.msg import Point, Pose, PoseStamped, Quaternion, Transform, TransformStamped, Vector3 import numpy as np import numpy.matlib as npm def pose_to_pq(pose): p = np.array([pose.position.x, pose.position.y, pose.position.z]) q = np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]) return p, q def pose_stamped_to_pq(pose_stamped): return pose_to_pq(pose_stamped.pose) def transform_to_pq(transform): p = np.array([transform.translation.x, transform.translation.y, transform.translation.z]) q = np.array([transform.rotation.x, transform.rotation.y, transform.rotation.z, transform.rotation.w]) return p, q def transform_stamped_to_pq(transform_stamped): return transform_to_pq(transform_stamped.transform) def msg_to_se3(msg): if isinstance(msg, Pose): p, q = pose_to_pq(msg) elif isinstance(msg, PoseStamped): p, q = pose_stamped_to_pq(msg) elif isinstance(msg, Transform): p, q = transform_to_pq(msg) elif isinstance(msg, TransformStamped): p, q = transform_stamped_to_pq(msg) else: raise TypeError("Invalid type for conversion to SE(3)") norm = np.linalg.norm(q) if np.abs(norm - 1.0) > 1e-3: raise ValueError( "Received un-normalized quaternion (q = {0:s} ||q|| = {1:3.6f})".format( str(q), np.linalg.norm(q))) elif np.abs(norm - 1.0) > 1e-6: q = q / norm se3 = t.quaternion_matrix(q) se3[0:3, -1] = p return se3 def pq_to_pose_stamped(p, q, source_frame, target_frame, stamp=None): pose_stamped = PoseStamped() pose_stamped.header.frame_id = source_frame if stamp is None: stamp = rospy.Time.now() pose_stamped.header.stamp = stamp pose_stamped.child_frame_id = target_frame pose_stamped.pose = pq_to_pose(p, q) return pose_stamped def pq_to_pose(p, q): pose = Pose() pose.position.x = p[0] pose.position.y = p[1] pose.position.z = p[2] pose.orientation.x = q[0] pose.orientation.y = q[1] pose.orientation.z = q[2] pose.orientation.w = q[3] return pose def pq_to_transform(p, q): transform = Transform() transform.translation.x = p[0] transform.translation.y = p[1] transform.translation.z = p[2] transform.rotation.x = q[0] transform.rotation.y = q[1] transform.rotation.z = q[2] transform.rotation.w = q[3] return transform def pq_to_transform_stamped(p, q, source_frame, target_frame, stamp=None): transform_stamped = TransformStamped() transform_stamped.header.frame_id = source_frame if stamp is None: stamp = rospy.Time.now() transform_stamped.header.stamp = stamp transform_stamped.child_frame_id = target_frame transform_stamped.transform = pq_to_transform(p, q) return transform_stamped def se3_to_transform(transform_nparray): pos = transform_nparray[:3, 3] quat = t.quaternion_from_matrix(transform_nparray) transform = pq_to_transform(pos, quat) return transform def se3_to_transform_stamped(transform_nparray, source_frame, target_frame, stamp=None): pos = transform_nparray[:3, 3] quat = t.quaternion_from_matrix(transform_nparray) if stamp is None: stamp = rospy.Time.now() transform_stamped = pq_to_transform_stamped(pos, quat, source_frame, target_frame, stamp) return transform_stamped def average_q(qs): M = qs.shape[0] A = npm.zeros(shape=(4,4)) for i in range(0,M): q = qs[i,:] A = np.outer(q, q) + A A = (1.0/M)*A eigenValues, eigenVectors = np.linalg.eig(A) eigenVectors = eigenVectors[:,eigenValues.argsort()[::-1]] q_average = 
np.real(eigenVectors[:,0].A1) return q_average def average_pq(ps, qs): p_average = np.mean(np.asarray(ps), axis=0) q_average = average_q(np.asarray(qs)) return p_average, q_average convert_rgbUint32_to_tuple = lambda rgb_uint32: ( (rgb_uint32 & 0x00ff0000)>>16, (rgb_uint32 & 0x0000ff00)>>8, (rgb_uint32 & 0x000000ff) ) def rospc_to_o3dpc(rospc, remove_nans=False): field_names = [field.name for field in rospc.fields] is_rgb = 'rgb' in field_names cloud_array = ros_numpy.point_cloud2.pointcloud2_to_array(rospc).ravel() if remove_nans: mask = np.isfinite(cloud_array['x']) & np.isfinite(cloud_array['y']) & np.isfinite(cloud_array['z']) cloud_array = cloud_array[mask] if is_rgb: cloud_npy = np.zeros(cloud_array.shape + (4,), dtype=np.float) else: cloud_npy = np.zeros(cloud_array.shape + (3,), dtype=np.float) cloud_npy[...,0] = cloud_array['x'] cloud_npy[...,1] = cloud_array['y'] cloud_npy[...,2] = cloud_array['z'] o3dpc = open3d.geometry.PointCloud() if len(np.shape(cloud_npy)) == 3: cloud_npy = np.reshape(cloud_npy[:, :, :3], [-1, 3], 'F') o3dpc.points = open3d.utility.Vector3dVector(cloud_npy[:, :3]) if is_rgb: rgb_npy = cloud_array['rgb'] rgb_npy.dtype = np.uint32 r = np.asarray((rgb_npy >> 16) & 255, dtype=np.uint8) g = np.asarray((rgb_npy >> 8) & 255, dtype=np.uint8) b = np.asarray(rgb_npy & 255, dtype=np.uint8) rgb_npy = np.asarray([r, g, b]) rgb_npy = rgb_npy.astype(np.float)/255 rgb_npy = np.swapaxes(rgb_npy, 0, 1) o3dpc.colors = open3d.utility.Vector3dVector(rgb_npy) return o3dpc BIT_MOVE_16 = 2**16 BIT_MOVE_8 = 2**8 def o3dpc_to_rospc(o3dpc, frame_id=None, stamp=None): cloud_npy = np.asarray(copy.deepcopy(o3dpc.points)) is_color = o3dpc.colors n_points = len(cloud_npy[:, 0]) if is_color: data = np.zeros(n_points, dtype=[ ('x', np.float32), ('y', np.float32), ('z', np.float32), ('rgb', np.uint32) ]) else: data = np.zeros(n_points, dtype=[ ('x', np.float32), ('y', np.float32), ('z', np.float32) ]) data['x'] = cloud_npy[:, 0] data['y'] = cloud_npy[:, 1] data['z'] = cloud_npy[:, 2] if is_color: rgb_npy = np.asarray(copy.deepcopy(o3dpc.colors)) rgb_npy = np.floor(rgb_npy*255) rgb_npy = rgb_npy[:, 0] * BIT_MOVE_16 + rgb_npy[:, 1] * BIT_MOVE_8 + rgb_npy[:, 2] rgb_npy = rgb_npy.astype(np.uint32) data['rgb'] = rgb_npy rospc = ros_numpy.msgify(PointCloud2, data) if frame_id is not None: rospc.header.frame_id = frame_id if stamp is None: rospc.header.stamp = rospy.Time.now() else: rospc.header.stamp = stamp rospc.height = 1 rospc.width = n_points rospc.fields = [] rospc.fields.append(PointField( name="x", offset=0, datatype=PointField.FLOAT32, count=1)) rospc.fields.append(PointField( name="y", offset=4, datatype=PointField.FLOAT32, count=1)) rospc.fields.append(PointField( name="z", offset=8, datatype=PointField.FLOAT32, count=1)) if is_color: rospc.fields.append(PointField( name="rgb", offset=12, datatype=PointField.UINT32, count=1)) rospc.point_step = 16 else: rospc.point_step = 12 rospc.is_bigendian = False rospc.row_step = rospc.point_step * n_points rospc.is_dense = True return rospc def do_transform_point(o3dpc, transform_stamped): H = msg_to_se3(transform_stamped) o3dpc = copy.deepcopy(o3dpc) o3dpc.transform(H) return o3dpc def apply_pass_through_filter(o3dpc, x_range, y_range, z_range): o3dpc = copy.deepcopy(o3dpc) cloud_npy = np.asarray(o3dpc.points) x_range = np.logical_and(cloud_npy[:, 0] >= x_range[0], cloud_npy[:, 0] <= x_range[1]) y_range = np.logical_and(cloud_npy[:, 1] >= y_range[0], cloud_npy[:, 1] <= y_range[1]) z_range = np.logical_and(cloud_npy[:, 2] >= z_range[0], 
cloud_npy[:, 2] <= z_range[1]) pass_through_filter = np.logical_and(x_range, np.logical_and(y_range, z_range)) o3dpc.points = open3d.utility.Vector3dVector(cloud_npy[pass_through_filter]) colors = np.asarray(o3dpc.colors) if len(colors) > 0: o3dpc.colors = open3d.utility.Vector3dVector(colors[pass_through_filter]) return o3dpc
MIT License
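A hedged usage sketch for crop_with_2dmask above. The point data and mask are made up, the K=None branch assumes the cloud is ordered row-major to match the mask, and the import form is inferred from the package layout.

import numpy as np
import open3d
from open3d_ros_helper import open3d_ros_helper as orh   # import path is an assumption

H, W = 480, 640
pcd = open3d.geometry.PointCloud()
pcd.points = open3d.utility.Vector3dVector(np.random.rand(H * W, 3))  # ordered cloud

mask = np.zeros((H, W), dtype=np.uint8)
mask[100:200, 150:300] = 1                        # keep only this rectangular region

cropped = orh.crop_with_2dmask(pcd, mask)         # K=None: mask is flattened to match the points
print(len(cropped.points))                        # number of points inside the mask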
demisto/demisto-py
demisto_client/demisto_api/models/investigation.py
Investigation.child_investigations
python
def child_investigations(self): return self._child_investigations
Gets the child_investigations of this Investigation. # noqa: E501 ChildInvestigations IDs # noqa: E501 :return: The child_investigations of this Investigation. # noqa: E501 :rtype: list[str]
https://github.com/demisto/demisto-py/blob/95d29e07693d27c133f7fe6ef9da13e4b6dbf542/demisto_client/demisto_api/models/investigation.py#L270-L278
import pprint import re import six from demisto_client.demisto_api.models.investigation_status import InvestigationStatus from demisto_client.demisto_api.models.investigation_type import InvestigationType from demisto_client.demisto_api.models.run_status import RunStatus from demisto_client.demisto_api.models.system import System class Investigation(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'shard_id': 'int', 'category': 'str', 'child_investigations': 'list[str]', 'closed': 'datetime', 'closing_user_id': 'str', 'created': 'datetime', 'creating_user_id': 'str', 'details': 'str', 'entitlements': 'list[str]', 'entry_users': 'list[str]', 'has_role': 'bool', 'id': 'str', 'is_child_investigation': 'bool', 'last_open': 'datetime', 'mirror_auto_close': 'dict(str, bool)', 'mirror_types': 'dict(str, str)', 'modified': 'datetime', 'name': 'str', 'open_duration': 'int', 'parent_investigation': 'str', 'persistent_entitlements': 'dict(str, str)', 'previous_roles': 'list[str]', 'raw_category': 'str', 'reason': 'dict(str, str)', 'roles': 'list[str]', 'run_status': 'RunStatus', 'slack_mirror_auto_close': 'bool', 'slack_mirror_type': 'str', 'sort_values': 'list[str]', 'status': 'InvestigationStatus', 'systems': 'list[System]', 'tags': 'list[str]', 'type': 'InvestigationType', 'users': 'list[str]', 'version': 'int' } attribute_map = { 'shard_id': 'ShardID', 'category': 'category', 'child_investigations': 'childInvestigations', 'closed': 'closed', 'closing_user_id': 'closingUserId', 'created': 'created', 'creating_user_id': 'creatingUserId', 'details': 'details', 'entitlements': 'entitlements', 'entry_users': 'entryUsers', 'has_role': 'hasRole', 'id': 'id', 'is_child_investigation': 'isChildInvestigation', 'last_open': 'lastOpen', 'mirror_auto_close': 'mirrorAutoClose', 'mirror_types': 'mirrorTypes', 'modified': 'modified', 'name': 'name', 'open_duration': 'openDuration', 'parent_investigation': 'parentInvestigation', 'persistent_entitlements': 'persistentEntitlements', 'previous_roles': 'previousRoles', 'raw_category': 'rawCategory', 'reason': 'reason', 'roles': 'roles', 'run_status': 'runStatus', 'slack_mirror_auto_close': 'slackMirrorAutoClose', 'slack_mirror_type': 'slackMirrorType', 'sort_values': 'sortValues', 'status': 'status', 'systems': 'systems', 'tags': 'tags', 'type': 'type', 'users': 'users', 'version': 'version' } def __init__(self, shard_id=None, category=None, child_investigations=None, closed=None, closing_user_id=None, created=None, creating_user_id=None, details=None, entitlements=None, entry_users=None, has_role=None, id=None, is_child_investigation=None, last_open=None, mirror_auto_close=None, mirror_types=None, modified=None, name=None, open_duration=None, parent_investigation=None, persistent_entitlements=None, previous_roles=None, raw_category=None, reason=None, roles=None, run_status=None, slack_mirror_auto_close=None, slack_mirror_type=None, sort_values=None, status=None, systems=None, tags=None, type=None, users=None, version=None): self._shard_id = None self._category = None self._child_investigations = None self._closed = None self._closing_user_id = None self._created = None self._creating_user_id = None self._details = None self._entitlements = None self._entry_users = None self._has_role = None self._id = None self._is_child_investigation = None self._last_open = None self._mirror_auto_close = None 
self._mirror_types = None self._modified = None self._name = None self._open_duration = None self._parent_investigation = None self._persistent_entitlements = None self._previous_roles = None self._raw_category = None self._reason = None self._roles = None self._run_status = None self._slack_mirror_auto_close = None self._slack_mirror_type = None self._sort_values = None self._status = None self._systems = None self._tags = None self._type = None self._users = None self._version = None self.discriminator = None if shard_id is not None: self.shard_id = shard_id if category is not None: self.category = category if child_investigations is not None: self.child_investigations = child_investigations if closed is not None: self.closed = closed if closing_user_id is not None: self.closing_user_id = closing_user_id if created is not None: self.created = created if creating_user_id is not None: self.creating_user_id = creating_user_id if details is not None: self.details = details if entitlements is not None: self.entitlements = entitlements if entry_users is not None: self.entry_users = entry_users if has_role is not None: self.has_role = has_role if id is not None: self.id = id if is_child_investigation is not None: self.is_child_investigation = is_child_investigation if last_open is not None: self.last_open = last_open if mirror_auto_close is not None: self.mirror_auto_close = mirror_auto_close if mirror_types is not None: self.mirror_types = mirror_types if modified is not None: self.modified = modified if name is not None: self.name = name if open_duration is not None: self.open_duration = open_duration if parent_investigation is not None: self.parent_investigation = parent_investigation if persistent_entitlements is not None: self.persistent_entitlements = persistent_entitlements if previous_roles is not None: self.previous_roles = previous_roles if raw_category is not None: self.raw_category = raw_category if reason is not None: self.reason = reason if roles is not None: self.roles = roles if run_status is not None: self.run_status = run_status if slack_mirror_auto_close is not None: self.slack_mirror_auto_close = slack_mirror_auto_close if slack_mirror_type is not None: self.slack_mirror_type = slack_mirror_type if sort_values is not None: self.sort_values = sort_values if status is not None: self.status = status if systems is not None: self.systems = systems if tags is not None: self.tags = tags if type is not None: self.type = type if users is not None: self.users = users if version is not None: self.version = version @property def shard_id(self): return self._shard_id @shard_id.setter def shard_id(self, shard_id): self._shard_id = shard_id @property def category(self): return self._category @category.setter def category(self, category): self._category = category @property
Apache License 2.0
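A short sketch of the generated getter above; the IDs are placeholders, and the corresponding setter (standard in these swagger-generated models) is assumed to exist alongside the getter.

from demisto_client.demisto_api.models.investigation import Investigation

inv = Investigation(child_investigations=["101", "102"])
print(inv.child_investigations)   # ['101', '102']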
d-li14/psconv
mmdet/models/backbones/resnext.py
Bottleneck.__init__
python
def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs): super(Bottleneck, self).__init__(inplanes, planes, **kwargs) if groups == 1: width = self.planes else: width = math.floor(self.planes * (base_width / 64)) * groups self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width, postfix=1) self.norm2_name, norm2 = build_norm_layer( self.norm_cfg, width, postfix=2) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False self.with_modulated_dcn = False if self.with_dcn: fallback_on_stride = self.dcn.get('fallback_on_stride', False) self.with_modulated_dcn = self.dcn.get('modulated', False) if not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( dict(type='PSGConv'), width, width, kernel_size=3, stride=self.conv2_stride, padding=1, dilation=2, groups=groups, parts=4, bias=False) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' groups = self.dcn.get('groups', 1) deformable_groups = self.dcn.get('deformable_groups', 1) if not self.with_modulated_dcn: conv_op = DeformConv offset_channels = 18 else: conv_op = ModulatedDeformConv offset_channels = 27 self.conv2_offset = nn.Conv2d( width, deformable_groups * offset_channels, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation) self.conv2 = conv_op( width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, deformable_groups=deformable_groups, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( self.conv_cfg, width, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3)
Bottleneck block for ResNeXt. If style is "pytorch", the stride-two layer is the 3x3 conv layer; if it is "caffe", the stride-two layer is the first 1x1 conv layer.
https://github.com/d-li14/psconv/blob/9afde67f43025bd5ae59e1ff1cc5a04c0744a5bd/mmdet/models/backbones/resnext.py#L14-L92
import math import torch.nn as nn from mmdet.ops import DeformConv, ModulatedDeformConv from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet from ..registry import BACKBONES from ..utils import build_conv_layer, build_norm_layer class Bottleneck(_Bottleneck):
MIT License
pytransitions/transitions
transitions/extensions/states.py
Error.__init__
python
def __init__(self, *args, **kwargs): tags = kwargs.get('tags', []) accepted = kwargs.pop('accepted', False) if accepted: tags.append('accepted') kwargs['tags'] = tags super(Error, self).__init__(*args, **kwargs)
Args: **kwargs: If kwargs contains the keywork `accepted` add the 'accepted' tag to a tag list which will be forwarded to the Tags constructor.
https://github.com/pytransitions/transitions/blob/1893a822763f9266a9691f0b285e1d77ff755082/transitions/extensions/states.py#L43-L54
from threading import Timer import logging import inspect from ..core import MachineError, listify, State _LOGGER = logging.getLogger(__name__) _LOGGER.addHandler(logging.NullHandler()) class Tags(State): def __init__(self, *args, **kwargs): self.tags = kwargs.pop('tags', []) super(Tags, self).__init__(*args, **kwargs) def __getattr__(self, item): if item.startswith('is_'): return item[3:] in self.tags return super(Tags, self).__getattribute__(item) class Error(Tags):
MIT License
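A usage sketch for the Error state above, relying only on the Tags/Error behaviour shown in the record (the import path matches the record's file).

from transitions.extensions.states import Error

accepted_state = Error(name='done', accepted=True)   # adds the 'accepted' tag
print(accepted_state.tags)                           # ['accepted']
print(accepted_state.is_accepted)                    # True, resolved via Tags.__getattr__

plain_state = Error(name='failed')                   # no 'accepted' keyword -> no tag
print(plain_state.is_accepted)                       # False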
flennerhag/warpgrad
src/maze_navigation/run.py
get_suffix
python
def get_suffix(config): return "run_" + "".join( [str(x) + "_" if pair[0] is not 'nbsteps' and pair[0] is not 'rngseed' and pair[0] is not 'save_every' and pair[0] is not 'test_every' and pair[0] is not 'pe' else '' for pair in sorted(zip( config.keys(), config.values()), key=lambda x:x[0]) for x in pair])[:-1] + "_rngseed_" + str(config['rngseed'])
Get experiment name. Args: config (dict): dict with experiment configs. Returns: suffix (str): experiment name.
https://github.com/flennerhag/warpgrad/blob/d9ef72af10eec62ae92bc24595cb1a4a0207e319/src/maze_navigation/run.py#L36-L54
from alstm import aRNN from maze import Maze, ObsSpec import argparse import torch import torch.nn as nn from numpy import random import random import pickle import time import numpy as np np.set_printoptions(precision=4) ADDITIONAL_INPUTS = 4 NUM_ACTIONS = 4 REF_SIZE = 3 TOTAL_INPUTS = REF_SIZE * REF_SIZE + ADDITIONAL_INPUTS + NUM_ACTIONS
Apache License 2.0
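A toy call to get_suffix above (the function lives in a standalone script, so it is assumed to be in scope); all config keys other than the reserved ones are arbitrary here.

config = {'lr': 0.001, 'hidden': 200, 'rngseed': 1, 'nbsteps': 100000}
suffix = get_suffix(config)
print(suffix)   # expected: 'run_hidden_200_lr_0.001_rngseed_1' (reserved keys dropped, rngseed re-appended)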
microsoft/qdk-python
azure-quantum/azure/quantum/storage.py
create_container
python
def create_container( connection_string: str, container_name: str ) -> ContainerClient: blob_service_client = BlobServiceClient.from_connection_string( connection_string ) logger.info( f'{"Initializing storage client for account:"}' + f"{blob_service_client.account_name}" ) container_client = blob_service_client.get_container_client(container_name) create_container_using_client(container_client) return container_client
Creates and initializes a container; returns the client needed to access it.
https://github.com/microsoft/qdk-python/blob/d0a87fda57dc360c96d9ce9772b71406d9b29ebe/azure-quantum/azure/quantum/storage.py#L24-L40
import logging from typing import Any, Dict from azure.core import exceptions from azure.storage.blob import ( BlobServiceClient, ContainerClient, BlobClient, BlobSasPermissions, ContentSettings, generate_blob_sas, generate_container_sas, BlobType, ) from datetime import datetime, timedelta from enum import Enum logger = logging.getLogger(__name__)
MIT License
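A usage sketch for create_container above; the connection string is a placeholder for a real Azure Storage connection string and the container name is arbitrary.

from azure.quantum.storage import create_container

# Placeholder credentials -- substitute a real storage-account connection string.
conn_str = "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>;EndpointSuffix=core.windows.net"

container_client = create_container(conn_str, "quantum-jobs")
blob_client = container_client.get_blob_client("input.json")   # standard ContainerClient API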
kakao/khaiii
train/transform_corpus.py
Sentence.load_train
python
def load_train(cls, rsc_src: str) -> List['Sentence']: restore_dic = Resource.load_restore_dic(f'{rsc_src}/restore.dic') sentences = [] for sent in PosDataset(None, restore_dic, sys.stdin): sentence = Sentence() for word in sent.pos_tagged_words: sentence.words.append(word.raw) sentence.morphs.append(' + '.join([str(m) for m in word.pos_tagged_morphs])) sentences.append(sentence) return sentences
load from khaiii training set Returns: list of sentences
https://github.com/kakao/khaiii/blob/328d5a8af456a5941130383354c07d1cd0e47cf5/train/transform_corpus.py#L86-L100
from argparse import ArgumentParser, Namespace import logging import random import sys from typing import List from khaiii.munjong.sejong_corpus import sents from khaiii.resource.resource import Resource from khaiii.train.dataset import PosDataset class Sentence: def __init__(self): self.words = [] self.morphs = [] def merge_words(self, rate: float = 0.0): if rate <= 0.0: return idx = 0 while idx < len(self.words)-1: if random.random() >= rate: idx += 1 continue self.words[idx] += self.words[idx+1] self.morphs[idx] += ' + ' + self.morphs[idx+1] del self.words[idx+1] del self.morphs[idx+1] def __str__(self): words_str = [f'{w}\t{m}' for w, m in zip(self.words, self.morphs)] return '\n'.join(words_str) + '\n' def raw(self): return ' '.join(self.words) @classmethod def load_sejong(cls) -> List['Sentence']: sentences = [] for sent in sents(sys.stdin): sentence = Sentence() for word in sent.words: sentence.words.append(word.raw) sentence.morphs.append(' + '.join([str(m) for m in word.morphs])) sentences.append(sentence) return sentences @classmethod
Apache License 2.0
waliens/sldc
sldc/logging.py
FileLogger.close
python
def close(self): self._file.close()
Close the logging file
https://github.com/waliens/sldc/blob/b16d28ca223ac686b711ca988f5e76f7cdedbaca/sldc/logging.py#L195-L198
import os import threading from abc import abstractmethod, ABCMeta __author__ = "Romain Mormont <romainmormont@hotmail.com>" __version__ = "0.1" class Logger(object): SILENT = 0 ERROR = 1 WARNING = 2 INFO = 3 DEBUG = 4 def __init__(self, level, prefix=True, pid=True): self._level = level self._prefix = prefix self._pid = pid @property def level(self): return self._level @level.setter def level(self, level): self._level = level def d(self, msg): self.debug(msg) def debug(self, msg): self._log(Logger.DEBUG, msg) def i(self, msg): self.info(msg) def info(self, msg): self._log(Logger.INFO, msg) def w(self, msg): self.warning(msg) def warning(self, msg): self._log(Logger.WARNING, msg) def e(self, msg): self.error(msg) def error(self, msg): self._log(Logger.ERROR, msg) def _log(self, level, msg): if self._level >= level: formatted = self._format_msg(level, msg) self._print(formatted) @abstractmethod def _print(self, formatted_msg): pass def prefix(self, level): from datetime import datetime now = datetime.now().isoformat() if self._pid: pid = "{}".format(os.getpid()).zfill(6) fid = "pid:{}".format(pid) else: tid = "{}".format(threading.current_thread().ident).zfill(6) fid = "tid:{}".format(tid) return "[{}][{}][{}]".format(fid, now, self.level2str(level)) @classmethod def level2str(cls, level): if level == cls.DEBUG: return "DEBUG" elif level == cls.WARNING: return "WARN " elif level == cls.ERROR: return "ERROR" else: return "INFO " def _format_msg(self, level, msg): if self._prefix: rows = ["{} {}".format(self.prefix(level), row) for row in msg.splitlines()] return os.linesep.join(rows) else: return msg class StandardOutputLogger(Logger): def _print(self, formatted_msg): print (formatted_msg) class FileLogger(Logger): def __init__(self, filepath, level, prefix=True): Logger.__init__(self, level, prefix=prefix) self._file = open(filepath, "w+") def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() def _print(self, formatted_msg): self._file.write(formatted_msg)
MIT License
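A usage sketch for FileLogger above, using the context-manager support shown in the record's context so close() runs automatically; the path and messages are illustrative.

from sldc.logging import FileLogger, Logger

with FileLogger("/tmp/analysis.log", Logger.DEBUG) as logger:
    logger.info("workflow started")
    logger.e("segmentation failed on tile 12")
# the log file is closed here by __exit__ -> close()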
zphang/bert_on_stilts
examples/run_classifier.py
RteProcessor._create_examples
python
def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples
Creates examples for the training and dev sets.
https://github.com/zphang/bert_on_stilts/blob/2d9c8c6ba47e06c0f171a7452a916dd3b7a09a6a/examples/run_classifier.py#L315-L327
from __future__ import absolute_import, division, print_function import argparse import csv import logging import os import random import sys import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) class InputExample(object): def __init__(self, guid, text_a, text_b=None, label=None): self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class InputFeatures(object): def __init__(self, input_ids, input_mask, segment_ids, label_id): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id class DataProcessor(object): def get_train_examples(self, data_dir): raise NotImplementedError() def get_dev_examples(self, data_dir): raise NotImplementedError() def get_labels(self): raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): with open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines class MrpcProcessor(DataProcessor): def get_train_examples(self, data_dir): logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): return ["0", "1"] def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliProcessor(DataProcessor): def get_train_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_labels(self): return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[8] text_b = line[9] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): def get_train_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): return ["0", "1"] def _create_examples(self, lines, set_type): examples = [] for (i, line) in 
enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line[3] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class SstProcessor(DataProcessor): def get_train_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): return ["0", "1"] def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class QqpProcessor(DataProcessor): def get_train_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): return ["0", "1"] def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) try: text_a = line[3] text_b = line[4] label = line[5] except IndexError: continue examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QnliProcessor(DataProcessor): def get_train_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") def get_labels(self): return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, 1) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RteProcessor(DataProcessor): def get_train_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): return ["entailment", "not_entailment"]
Apache License 2.0
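A sketch of how the processor is typically driven (run_classifier.py is a script, so the class is assumed to be in scope); the data directory is hypothetical and must contain GLUE-style train.tsv/dev.tsv files.

processor = RteProcessor()
label_list = processor.get_labels()                              # ['entailment', 'not_entailment']
train_examples = processor.get_train_examples("glue_data/RTE")   # hypothetical path
print(len(train_examples), train_examples[0].text_a[:60], train_examples[0].label)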
tqsd/qunetsim
qunetsim/backends/eqsn_backend.py
EQSNBackend.rx
python
def rx(self, qubit, phi): self.eqsn.RX_gate(qubit.qubit, phi)
Perform a rotation around the Pauli-X axis by an angle of phi. Args: qubit (Qubit): Qubit to which the gate should be applied. phi (float): Amount of rotation in radians.
https://github.com/tqsd/qunetsim/blob/dbf1531aac56822f83557e992fcc175e47b223ce/qunetsim/backends/eqsn_backend.py#L308-L316
from eqsn import EQSN import uuid from qunetsim.objects.qubit import Qubit import threading import numpy as np from queue import Queue class RWLock: def __init__(self): self._read_ready = threading.Condition(threading.RLock()) self._num_reader = 0 self._num_writer = 0 self._readerList = [] self._writerList = [] def acquire_read(self): self._read_ready.acquire() try: while self._num_writer > 0: self._read_ready.wait() self._num_reader += 1 finally: self._readerList.append(threading.get_ident()) self._read_ready.release() def release_read(self): self._read_ready.acquire() try: self._num_reader -= 1 if not self._num_reader: self._read_ready.notifyAll() finally: self._readerList.remove(threading.get_ident()) self._read_ready.release() def acquire_write(self): self._read_ready.acquire() self._num_writer += 1 self._writerList.append(threading.get_ident()) while self._num_reader > 0: self._read_ready.wait() def release_write(self): self._num_writer -= 1 self._writerList.remove(threading.get_ident()) self._read_ready.notifyAll() self._read_ready.release() class SafeDict(object): def __init__(self): self.lock = RWLock() self.dict = {} def __str__(self): self.lock.acquire_read() ret = str(self.dict) self.lock.release_read() return ret def add_to_dict(self, key, value): self.lock.acquire_write() self.dict[key] = value self.lock.release_write() def get_from_dict(self, key): ret = None self.lock.acquire_read() if key in self.dict: ret = self.dict[key] self.lock.release_read() return ret class EQSNBackend(object): class Hosts(SafeDict): __instance = None @staticmethod def get_instance(): if EQSNBackend.Hosts.__instance is not None: return EQSNBackend.Hosts.__instance else: return EQSNBackend.Hosts() def __init__(self): if EQSNBackend.Hosts.__instance is not None: raise Exception("Call get instance to get this class!") EQSNBackend.Hosts.__instance = self SafeDict.__init__(self) class EntanglementIDs(SafeDict): __instance = None @staticmethod def get_instance(): if EQSNBackend.EntanglementIDs.__instance is not None: return EQSNBackend.EntanglementIDs.__instance else: return EQSNBackend.EntanglementIDs() def __init__(self): if EQSNBackend.EntanglementIDs.__instance is not None: raise Exception("Call get instance to get this class!") EQSNBackend.EntanglementIDs.__instance = self SafeDict.__init__(self) def __init__(self): self._hosts = EQSNBackend.Hosts.get_instance() self._entaglement_qubits = EQSNBackend.EntanglementIDs.get_instance() self.eqsn = EQSN.get_instance() def start(self, **kwargs): pass def stop(self): self.eqsn.stop_all() def add_host(self, host): self._hosts.add_to_dict(host.host_id, host) def create_qubit(self, host_id): id = str(uuid.uuid4()) self.eqsn.new_qubit(id) return id def send_qubit_to(self, qubit, from_host_id, to_host_id): new_host = self._hosts.get_from_dict(to_host_id) qubit.host = new_host def create_EPR(self, host_a_id, host_b_id, q_id=None, block=False): uid1 = uuid.uuid4() uid2 = uuid.uuid4() host_a = self._hosts.get_from_dict(host_a_id) host_b = self._hosts.get_from_dict(host_b_id) self.eqsn.new_qubit(uid1) self.eqsn.new_qubit(uid2) self.eqsn.H_gate(uid1) self.eqsn.cnot_gate(uid2, uid1) q1 = Qubit(host_a, qubit=uid1, q_id=q_id, blocked=block) q2 = Qubit(host_b, qubit=uid2, q_id=q1.id, blocked=block) self.store_ent_pair(host_a.host_id, host_b.host_id, q2) return q1 def store_ent_pair(self, host_a, host_b, qubit): key = host_a + ':' + host_b ent_queue = self._entaglement_qubits.get_from_dict(key) if ent_queue is not None: ent_queue.put(qubit) else: ent_queue = Queue() 
ent_queue.put(qubit) self._entaglement_qubits.add_to_dict(key, ent_queue) def receive_epr(self, host_id, sender_id, q_id=None, block=False): key = sender_id + ':' + host_id ent_queue = self._entaglement_qubits.get_from_dict(key) if ent_queue is None: raise Exception("Internal Error!") q = ent_queue.get() self._entaglement_qubits.add_to_dict(key, ent_queue) if q_id is not None and q_id != q.id: raise ValueError("Qid doesent match id!") return q def I(self, qubit): pass def X(self, qubit): self.eqsn.X_gate(qubit.qubit) def Y(self, qubit): self.eqsn.Y_gate(qubit.qubit) def Z(self, qubit): self.eqsn.Z_gate(qubit.qubit) def H(self, qubit): self.eqsn.H_gate(qubit.qubit) def K(self, qubit): self.eqsn.K_gate(qubit.qubit) def S(self, qubit): self.eqsn.S_gate(qubit.qubit) def T(self, qubit): self.eqsn.T_gate(qubit.qubit)
MIT License
voxel51/eta
eta/core/events.py
DetectedEvent.has_attributes
python
def has_attributes(self): return bool(self.attrs)
Whether the event has attributes.
https://github.com/voxel51/eta/blob/e51510fda0722ac7cadb17b109bad413a6602ed3/eta/core/events.py#L155-L157
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import * from future.utils import iteritems, itervalues from collections import defaultdict from copy import deepcopy import logging import eta.core.data as etad import eta.core.frameutils as etaf import eta.core.geometry as etag import eta.core.labels as etal import eta.core.objects as etao import eta.core.serial as etas import eta.core.utils as etau logger = logging.getLogger(__name__) class DetectedEvent(etal.Labels, etag.HasBoundingBox): def __init__( self, label=None, bounding_box=None, mask=None, confidence=None, name=None, top_k_probs=None, index=None, frame_number=None, attrs=None, objects=None, ): self.type = etau.get_class_name(self) self.label = label self.bounding_box = bounding_box self.mask = mask self.confidence = confidence self.name = name self.top_k_probs = top_k_probs self.index = index self.frame_number = frame_number self.attrs = attrs or etad.AttributeContainer() self.objects = objects or etao.DetectedObjectContainer() @property def is_empty(self): return not ( self.has_label or self.has_bounding_box or self.has_mask or self.has_attributes or self.has_objects ) @property def has_label(self): return self.label is not None @property def has_bounding_box(self): return self.bounding_box is not None @property def has_mask(self): return self.mask is not None @property def has_confidence(self): return self.confidence is not None @property def has_name(self): return self.name is not None @property def has_top_k_probs(self): return self.top_k_probs is not None @property def has_index(self): return self.index is not None @property def has_frame_number(self): return self.frame_number is not None @property
Apache License 2.0
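A small sketch around has_attributes above. DetectedEvent and etad.AttributeContainer appear in the record; CategoricalAttribute and the container's add method are assumptions about eta.core.data/eta.core.serial that are not shown here.

import eta.core.data as etad
from eta.core.events import DetectedEvent

bare_event = DetectedEvent(label="goal")
print(bare_event.has_attributes)        # expected False: the default container is empty

attrs = etad.AttributeContainer()
attrs.add(etad.CategoricalAttribute("sport", "soccer"))   # assumed API, see note above
event = DetectedEvent(label="goal", attrs=attrs)
print(event.has_attributes)             # True once at least one attribute is present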
open-research/sumatra
sumatra/projects.py
Project.launch
python
def launch(self, parameters={}, input_data=[], script_args="", executable='default', repository='default', main_file='default', version='current', launch_mode='default', diff='', label=None, reason=None, timestamp_format='default', repeats=None): record = self.new_record(parameters, input_data, script_args, executable, repository, main_file, version, launch_mode, diff, label, reason, timestamp_format) record.run(with_label=self.data_label, project=self) if 'matlab' in record.executable.name.lower(): record.register(record.repository.get_working_copy()) if repeats: record.repeats = repeats self.save_record(record) logger.debug("Record saved @ completion.") self.save() return record.label
Launch a new simulation or analysis.
https://github.com/open-research/sumatra/blob/2ff2a359e11712a7d17cf9346a0b676ab33e2074/sumatra/projects.py#L213-L229
from __future__ import print_function from __future__ import unicode_literals from future import standard_library standard_library.install_aliases() from builtins import str from builtins import object import os import re import importlib import pickle from copy import deepcopy import uuid import sumatra import django import sqlite3 import time import shutil import textwrap from datetime import datetime from importlib import import_module from sumatra.records import Record from sumatra import programs, datastore from sumatra.formatting import get_formatter, get_diff_formatter from sumatra.recordstore import DefaultRecordStore from sumatra.versioncontrol import UncommittedModificationsError, get_working_copy, VersionControlError from sumatra.core import TIMESTAMP_FORMAT import mimetypes import json import logging logger = logging.getLogger("Sumatra") DEFAULT_PROJECT_FILE = "project" LABEL_GENERATORS = { 'timestamp': lambda: None, 'uuid': lambda: str(uuid.uuid4()).split('-')[-1] } def _remove_left_margin(s): lines = s.strip().split('\n') return "\n".join(line.strip() for line in lines) def _get_project_file(path): return os.path.join(path, ".smt", DEFAULT_PROJECT_FILE) class Project(object): valid_name_pattern = r'(?P<project>\w+[\w\- ]*)' def __init__(self, name, default_executable=None, default_repository=None, default_main_file=None, default_launch_mode=None, data_store='default', record_store='default', on_changed='error', description='', data_label=None, input_datastore=None, label_generator='timestamp', timestamp_format=TIMESTAMP_FORMAT, allow_command_line_parameters=True, plugins=[]): self.path = os.getcwd() if not os.path.exists(".smt"): os.mkdir(".smt") if os.path.exists(_get_project_file(self.path)): raise Exception("Sumatra project already exists in this directory.") if re.match(Project.valid_name_pattern, name): self.name = name else: raise ValueError("Invalid project name. 
Names may only contain letters, numbers, spaces and hyphens") self.default_executable = default_executable self.default_repository = default_repository self.default_main_file = default_main_file self.default_launch_mode = default_launch_mode if data_store == 'default': data_store = datastore.FileSystemDataStore(None) self.data_store = data_store self.input_datastore = input_datastore or self.data_store if record_store == 'default': record_store = DefaultRecordStore(os.path.abspath(".smt/records")) self.record_store = record_store self.on_changed = on_changed self.description = description self.data_label = data_label self.label_generator = label_generator self.timestamp_format = timestamp_format self.sumatra_version = sumatra.__version__ self.allow_command_line_parameters = allow_command_line_parameters self._most_recent = None self.plugins = [] self.load_plugins(*plugins) self.save() print("Sumatra project successfully set up") def __set_data_label(self, value): assert value in (None, 'parameters', 'cmdline') self._data_label = value def __get_data_label(self): return self._data_label data_label = property(fset=__set_data_label, fget=__get_data_label) def save(self): state = {} for name in ('name', 'default_executable', 'default_repository', 'default_launch_mode', 'data_store', 'record_store', 'default_main_file', 'on_changed', 'description', 'data_label', '_most_recent', 'input_datastore', 'label_generator', 'timestamp_format', 'sumatra_version', 'allow_command_line_parameters', 'plugins'): try: attr = getattr(self, name) except: if name == 'allow_command_line_parameters': print(textwrap.dedent("""\ Upgrading from a Sumatra version which did not have the --plain configuration option. After this upgrade, arguments to 'smt run' of the form 'name=value' will continue to overwrite default parameter values, but this is now configurable. If it is desired that they should be passed straight through to the program, run the command 'smt configure --plain' after this upgrade. """)) attr = True else: attr = None if hasattr(attr, "__getstate__"): state[name] = {'type': attr.__class__.__module__ + "." + attr.__class__.__name__} for key, value in attr.__getstate__().items(): state[name][key] = value else: state[name] = attr f = open(_get_project_file(self.path), 'w') json.dump(state, f, indent=2) f.close() def info(self): template = """ Project name : %(name)s Default executable : %(default_executable)s Default repository : %(default_repository)s Default main file : %(default_main_file)s Default launch mode : %(default_launch_mode)s Data store (output) : %(data_store)s . 
(input) : %(input_datastore)s Record store : %(record_store)s Code change policy : %(on_changed)s Append label to : %(_data_label)s Label generator : %(label_generator)s Timestamp format : %(timestamp_format)s Plug-ins : %(plugins)s Sumatra version : %(sumatra_version)s """ return _remove_left_margin(template % self.__dict__) def new_record(self, parameters={}, input_data=[], script_args="", executable='default', repository='default', main_file='default', version='current', launch_mode='default', diff='', label=None, reason=None, timestamp_format='default'): logger.debug("Creating new record") if executable == 'default': executable = deepcopy(self.default_executable) if repository == 'default': repository = deepcopy(self.default_repository) if main_file == 'default': main_file = self.default_main_file if launch_mode == 'default': launch_mode = deepcopy(self.default_launch_mode) if timestamp_format == 'default': timestamp_format = self.timestamp_format working_copy = repository.get_working_copy() version, diff = self.update_code(working_copy, version, diff) if label is None: label = LABEL_GENERATORS[self.label_generator]() record = Record(executable, repository, main_file, version, launch_mode, self.data_store, parameters, input_data, script_args, label=label, reason=reason, diff=diff, on_changed=self.on_changed, input_datastore=self.input_datastore, timestamp_format=timestamp_format) self.add_record(record) if not isinstance(executable, programs.MatlabExecutable): record.register(working_copy) return record
BSD 2-Clause Simplified License
erdc/proteus
proteus/tests/sandbox/testTimeIntegration.py
dUdtEqMinusLambda.setUnknowns
python
def setUnknowns(self,u): self.q['u'].flat[:] = u.flat[:]
copy over solution values u into dictionary holding unknowns
https://github.com/erdc/proteus/blob/fe4872257aae10b5a686394e78259582e93a39cb/proteus/tests/sandbox/testTimeIntegration.py#L84-L88
from __future__ import print_function from __future__ import division from builtins import range from past.utils import old_div from builtins import object from LinearAlgebra import * from TimeIntegrationTools import * from NonlinearSolvers import * from ScalarTransport import * import numpy class PhiDummy(object): def __init__(self,dim): self.dim=dim self.dof=dim class dUdtEqMinusLambda(object): def __init__(self,u0,TimeIntegrationClass=BackwardEuler, cfl=Numeric.ones(1,Numeric.Float), lam=Numeric.ones(1,Numeric.Float), nstages=1,order=1): self.dim = Numeric.size(lam,0) self.lam= lam self.q = {} self.ebq_global = {} for term in ['u','m','mt','dm','dmt','f','div(f)', 'a','grad(u)','r','dr','cfl']: self.q[term] = Numeric.zeros(self.dim,Numeric.Float) self.q['cfl'].flat[:] = cfl.flat[:] self.phi = PhiDummy(self.dim) self.nFreeDOF_global=self.dim self.q['u'].flat[:] = u0.flat[:] self.q['m'].flat[:] = self.q['u'].flat[:] self.q['r']= lam*self.q['m'] if TimeIntegrationClass == SSPRKintegration: self.timeIntegrator = TimeIntegrationClass(self,order) else: self.timeIntegrator = TimeIntegrationClass(self)
MIT License
leonhard-s/auraxium
auraxium/_rest.py
get_components
python
def get_components(url: yarl.URL) -> Tuple[str, Optional[str]]: components = url.path.split('/')[1:] if components[0].startswith('s:'): _ = components.pop(0) if components[0] in ('xml', 'json'): _ = components.pop(0) _ = components.pop(0) if not components[-1]: _ = components.pop(-1) if len(components) == 1: return components[0], None assert len(components) == 2, 'Unable to parse URL' return components[0], components[1]
Return the namespace and collection of a given query. :param yarl.URL url: The :class:`yarl.URL` to process. Only REST API query URLs in the DBG census API format are allowed. :return: The namespace/game and collection that was accessed. Collection may be :obj:`None` for some queries.
https://github.com/leonhard-s/auraxium/blob/8a1b7fb6e6e1b11334d69875df032ccc6da330bf/auraxium/_rest.py#L131-L151
import asyncio import copy import json import logging import sys import warnings from typing import Any, Dict, Iterator, Literal, List, Optional, Tuple, Type, TypeVar, cast from types import TracebackType import aiohttp import backoff import yarl from .census import Query from .errors import (PayloadError, BadRequestSyntaxError, CensusError, InvalidSearchTermError, InvalidServiceIDError, MaintenanceError, MissingServiceIDError, NotFoundError, ResponseError, ServerError, ServiceUnavailableError, UnknownCollectionError) from .types import CensusData __all__ = [ 'RequestClient', 'extract_payload', 'extract_single', 'run_query' ] _T = TypeVar('_T') _log = logging.getLogger('auraxium.http') class RequestClient: def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None, service_id: str = 's:example', profiling: bool = False ) -> None: self.loop = loop or asyncio.get_event_loop() self.profiling = profiling self.service_id = service_id self.session = aiohttp.ClientSession() self._timing_cache: List[float] = [] async def __aenter__(self: _T) -> _T: return self async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType]) -> Literal[False]: await self.close() return False @property def latency(self) -> float: if not self._timing_cache: return -1.0 return sum(self._timing_cache) / len(self._timing_cache) async def close(self) -> None: _log.info('Shutting down client') await self.session.close() async def request(self, query: Query, verb: str = 'get') -> CensusData: if self.profiling: query = copy.copy(query) query.timing(True) data = await run_query(query, verb=verb, session=self.session) if self.profiling and verb == 'get': timing = cast(CensusData, data.pop('timing')) if _log.level <= logging.DEBUG: url = query.url() _log.debug('Query times for "%s?%s": %s', '/'.join(url.parts[-2:]), url.query_string, ', '.join([f'{k}: {v}' for k, v in timing.items()])) self._timing_cache.append(float(str(timing['total-ms']))) return data
MIT License
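Two illustrative calls to get_components above; the service ID and collection names are examples of the census URL format described in the docstring.

import yarl
from auraxium._rest import get_components

print(get_components(yarl.URL("https://census.daybreakgames.com/s:example/get/ps2/character")))
# expected: ('ps2', 'character')
print(get_components(yarl.URL("https://census.daybreakgames.com/s:example/get/ps2")))
# expected: ('ps2', None)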
google-research/language
language/templama/templama.py
_build_example
python
def _build_example(query): inp = query["query"].encode("utf-8") trg = query["answer"]["name"].encode("utf-8") id_ = query["id"].encode("utf-8") recent = query["most_recent_answer"]["name"].encode("utf-8") frequent = query["most_frequent_answer"]["name"].encode("utf-8") rel = query["relation"].encode("utf-8") feature = { "id": tf.train.Feature(bytes_list=tf.train.BytesList(value=[id_])), "date": tf.train.Feature( int64_list=tf.train.Int64List(value=[int(query["date"])])), "relation": tf.train.Feature(bytes_list=tf.train.BytesList(value=[rel])), "query": tf.train.Feature(bytes_list=tf.train.BytesList(value=[inp])), "answer": tf.train.Feature(bytes_list=tf.train.BytesList(value=[trg])), "most_frequent_answer": tf.train.Feature(bytes_list=tf.train.BytesList(value=[frequent])), "most_recent_answer": tf.train.Feature(bytes_list=tf.train.BytesList(value=[recent])), } return tf.train.Example(features=tf.train.Features(feature=feature))
Creates a tf.Example for prediction with T5 from the input query. Args: query: a dict mapping query features to their values. Returns: a tf.train.Example consisting of the query features.
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/templama/templama.py#L326-L361
import collections import csv import datetime import json import os import random from absl import app from absl import flags from absl import logging import sling import tensorflow as tf from tqdm import tqdm FLAGS = flags.FLAGS flags.DEFINE_string("out_dir", None, "Path to store constructed queries.") flags.DEFINE_string( "facts_file", None, "File containing facts with qualifiers extracted from `sling2facts.py`.") flags.DEFINE_string("sling_kb_file", None, "SLING file containing wikidata KB.") flags.DEFINE_string( "sling_wiki_mapping_file", None, "SLING file containing mapping from QID to english wikipedia pages.") flags.DEFINE_integer( "min_year", 2010, "Starting year to construct queries from. Only facts which have a start / " "end date after this will be considered.") flags.DEFINE_integer("max_year", 2020, "Ending year to construct queries up till.") flags.DEFINE_integer( "max_subject_per_relation", 1000, "Maximum number of subjects to retain per relation. Subjects are sorted " "based on popularity before filtering.") flags.DEFINE_float("train_frac", 0.2, "Fraction of queries to hold out for training set.") flags.DEFINE_float("val_frac", 0.1, "Fraction of queries to hold out for validation set.") random.seed(42) Y_TOK = "_X_" WIKI_PRE = "/wp/en/" def _datetup2int(date): dint = date[0] * 1e4 dint += date[1] * 1e2 if date[1] else 0 dint += date[2] if date[2] else 0 return dint def date_in_interval(date, start, end): date_int = _datetup2int(date) start_int = _datetup2int(start) if start else 0 end_int = _datetup2int(end) if end else 21000000 return date_int >= start_int and date_int <= end_int def parse_date(date_str): date = None try: if len(date_str) == 4: date_obj = datetime.datetime.strptime(date_str, "%Y") date = (date_obj.year, None, None) elif len(date_str) == 6: date_obj = datetime.datetime.strptime(date_str, "%Y%m") date = (date_obj.year, date_obj.month, None) elif len(date_str) == 8: date_obj = datetime.datetime.strptime(date_str, "%Y%m%d") date = (date_obj.year, date_obj.month, date_obj.day) except ValueError: pass if date is not None and date[0] > 2100: date = None return date def load_sling_mappings(sling_kb_file, sling_wiki_mapping_file): logging.info("Extracting entity names and num-facts from SLING KB.") commons = sling.Store() commons.load(sling_kb_file) commons.freeze() qid_names = {} qid_numfacts = {} total = 0 for f in commons: total += 1 if "name" in f: if isinstance(f.name, sling.String): qid_names[f.id] = f.name.text() elif isinstance(f.name, bytes): qid_names[f.id] = f.name.decode("utf-8", errors="ignore") elif isinstance(f.name, str): qid_names[f.id] = f.name else: logging.warn("Could not read name of type %r", type(f.name)) ln = len(f) qid_numfacts[f.id] = ln logging.info("Processed %d QIDs out of %d", len(qid_names), total) logging.info("Extracting entity mapping to Wikipedia from SLING.") commons = sling.Store() commons.load(sling_wiki_mapping_file) commons.freeze() qid_mapping = {} for f in commons: try: if "/w/item/qid" in f: pg = f.id[len(WIKI_PRE):] if f.id.startswith(WIKI_PRE) else f.id qid_mapping[f["/w/item/qid"].id] = pg except UnicodeDecodeError: continue logging.info("Extracted %d mappings", len(qid_mapping)) return qid_names, qid_mapping, qid_numfacts def read_facts(facts_file, qid_mapping, min_year): logging.info("Reading facts from %s", facts_file) all_facts = [] with tf.io.gfile.GFile(facts_file) as f: for line in tqdm(f): fact = line.strip().split("\t") if not fact[0].startswith("P"): continue if fact[0] == "P31": continue if not 
fact[2].startswith("Q"): continue if fact[1] not in qid_mapping or fact[2] not in qid_mapping: continue start, end = None, None for qual in fact[3:]: if not qual: continue elems = qual.split("=") if elems[0].endswith("*"): continue if len(elems) != 2: continue if elems[0].startswith("P580"): start = parse_date(elems[1]) elif elems[0].startswith("P582"): end = parse_date(elems[1]) if start is None and end is None: continue if ((start is None or start[0] < min_year) and (end is None or end[0] < min_year)): continue all_facts.append(fact[:3] + [start, end]) logging.info("Loaded total %d facts", len(all_facts)) return all_facts def read_templates(): my_path = os.path.dirname(os.path.realpath(__file__)) template_file = os.path.join(my_path, "templates.csv") logging.info("Reading templates from %s", template_file) reader = csv.reader(tf.io.gfile.GFile(template_file)) headers = next(reader, None) data = collections.defaultdict(list) for row in reader: for h, v in zip(headers, row): data[h].append(v) templates = dict(zip(data["Wikidata ID"], data["Template"])) logging.info("\n".join("%s: %s" % (k, v) for k, v in templates.items())) return templates def resolve_objects(facts): def _datekey(fact): start = _datetup2int(fact[3]) if fact[3] else 0 end = _datetup2int(fact[4]) if fact[4] else 21000000 return (start, end) sorted_facts = sorted(facts, key=_datekey) out_facts = [sorted_facts[0]] for fact in sorted_facts[1:]: if (fact[2] == out_facts[-1][2] and fact[3] != fact[4] and out_facts[-1][3] != out_facts[-1][4]): out_facts[-1][4] = fact[4] else: out_facts.append(fact) return out_facts def _map_years_to_objects(facts, qid_numfacts, min_year, max_year): year2obj = {} numfacts = lambda x: qid_numfacts.get(x, 0) for f in facts: min_ = f[3][0] if f[3] is not None else min_year max_ = f[4][0] if f[4] is not None else max_year min_ = max(min_, min_year) max_ = min(max_, max_year) for yr in range(min_, max_ + 1): if yr in year2obj: if numfacts(year2obj[yr]) < numfacts(f[2]): year2obj[yr] = f[2] else: year2obj[yr] = f[2] return year2obj
Apache License 2.0
alievk/npbg
npbg/gl/render.py
cpy_texture_to_tensor
python
def cpy_texture_to_tensor(texture, tensor):
    with cuda_activate_array(texture) as src:
        cpy = pycuda.driver.Memcpy2D()
        cpy.set_src_array(src)
        cpy.set_dst_device(tensor.data_ptr())
        cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = tensor.shape[1] * 4 * 4
        cpy.height = tensor.shape[0]
        cpy(aligned=False)

        torch.cuda.synchronize()

    return tensor
Copy GL texture (cuda view) to pytorch tensor
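A minimal usage sketch (not from the repository), assuming create_shared_texture and cpy_texture_to_tensor are importable from npbg.gl.render, that a CUDA-capable GPU is present, and that an OpenGL context plus pycuda GL interop have already been initialized; the buffer size is illustrative:

import numpy as np
import torch

from npbg.gl.render import create_shared_texture, cpy_texture_to_tensor

# Hypothetical RGBA float32 frame buffer shared between OpenGL and CUDA.
color_np = np.zeros((480, 640, 4), dtype=np.float32)
gl_texture, cuda_view = create_shared_texture(color_np)

# Destination tensor on the GPU; the copy pitch is width * 4 channels * 4 bytes.
out = torch.zeros((480, 640, 4), dtype=torch.float32, device='cuda')

# ... render into gl_texture here ...
frame = cpy_texture_to_tensor(cuda_view, out)  # same tensor, now holding the texture contents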
https://github.com/alievk/npbg/blob/c0931a462d1a71d711420603c7447d283a0e8135/npbg/gl/render.py#L128-L141
from glumpy import app, gloo, gl from contextlib import contextmanager import numpy as np try: import pycuda.driver from pycuda.gl import graphics_map_flags, BufferObject _PYCUDA = True except ImportError as err: print('pycuda import error:', err) _PYCUDA = False import torch class OffscreenRender: def __init__(self, viewport_size, out_buffer_location='opengl', clear_color=None): self._init_buffers(viewport_size, out_buffer_location) self.clear_color = clear_color if clear_color is not None else (0., 0., 0., 1.) def _init_buffers(self, viewport_size, out_buffer_location): assert out_buffer_location in ['torch', 'opengl', 'numpy'] if out_buffer_location == 'torch': assert _PYCUDA, 'pycuda is not available' try: import pycuda.gl.autoinit except: raise RuntimeError('PyCUDA init failed, cannot use torch buffer') _ = torch.cuda.FloatTensor(1, 3, 512,512) color_np = np.zeros((viewport_size[1], viewport_size[0], 4), np.float32) self.color_buf, self.color_buf_cuda = create_shared_texture(color_np) self.out_buf = torch.zeros((viewport_size[1], viewport_size[0], 4), dtype=torch.float32).cuda() elif out_buffer_location == 'opengl': self.color_buf = np.zeros((viewport_size[1], viewport_size[0], 4), dtype=np.float32).view(gloo.TextureFloat2D) self.out_buf = self.color_buf elif out_buffer_location == 'numpy': self.color_buf = np.zeros((viewport_size[1], viewport_size[0], 4), dtype=np.float32).view(gloo.TextureFloat2D) self.out_buf = np.zeros((viewport_size[1], viewport_size[0], 3), dtype=np.float32) self.viewport_size = viewport_size self.out_buffer_location = out_buffer_location self.depth_buf = gloo.DepthBuffer(viewport_size[0], viewport_size[1], gl.GL_DEPTH_COMPONENT32) self.fbo = gloo.FrameBuffer(color=self.color_buf, depth=self.depth_buf) def render(self, scene, cull_face=True): self.fbo.activate() gl.glEnable(gl.GL_PROGRAM_POINT_SIZE) gl.glEnable(gl.GL_DEPTH_TEST) gl.glShadeModel(gl.GL_FLAT) if cull_face: gl.glEnable(gl.GL_CULL_FACE) gl.glCullFace(gl.GL_BACK) else: gl.glDisable(gl.GL_CULL_FACE) gl.glClearColor(*self.clear_color) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) gl.glViewport(0, 0, self.viewport_size[0], self.viewport_size[1]) if scene.draw_points: scene.program.draw(gl.GL_POINTS) else: assert scene.index_buffer is not None scene.program.draw(gl.GL_TRIANGLES, scene.index_buffer) if self.out_buffer_location == 'torch': frame = cpy_texture_to_tensor(self.color_buf_cuda, self.out_buf).clone() elif self.out_buffer_location == 'opengl': frame = self.out_buf else: gl.glReadPixels(0, 0, self.viewport_size[0], self.viewport_size[1], gl.GL_RGB, gl.GL_FLOAT, self.out_buf) frame = self.out_buf.copy() self.fbo.deactivate() return frame @contextmanager def cuda_activate_array(img): mapping = img.map() yield mapping.array(0,0) mapping.unmap() @contextmanager def cuda_activate_buffer(buf): mapping = buf.map() yield mapping.device_ptr() mapping.unmap() def create_shared_texture(arr, map_flags=None): if map_flags is None: map_flags = graphics_map_flags.WRITE_DISCARD gl_view = arr.view(gloo.TextureFloat2D) gl_view.activate() gl_view.deactivate() cuda_view = pycuda.gl.RegisteredImage( int(gl_view.handle), gl_view.target, map_flags) return gl_view, cuda_view def create_shared_buffer(arr): gl_view = arr.view(gloo.VertexBuffer) gl_view.activate() gl_view.deactivate() cuda_view = BufferObject(np.long(gl_view.handle)) return gl_view, cuda_view
MIT License
babybuddy/babybuddy
reports/graphs/feeding_duration.py
feeding_duration
python
def feeding_duration(instances):
    totals = instances.annotate(date=TruncDate('start')) \
        .values('date') \
        .annotate(count=Count('id')) \
        .annotate(sum=Sum('duration')) \
        .order_by('-date')
    averages = []
    for total in totals:
        averages.append(total['sum']/total['count'])

    trace_avg = go.Scatter(
        name=_('Average duration'),
        line=dict(shape='spline'),
        x=list(totals.values_list('date', flat=True)),
        y=[td.seconds/60 for td in averages],
        hoverinfo='text',
        text=[_duration_string_ms(td) for td in averages]
    )
    trace_count = go.Scatter(
        name=_('Total feedings'),
        mode='markers',
        x=list(totals.values_list('date', flat=True)),
        y=list(totals.values_list('count', flat=True)),
        yaxis='y2',
        hoverinfo='y'
    )

    layout_args = utils.default_graph_layout_options()
    layout_args['title'] = _('<b>Average Feeding Durations</b>')
    layout_args['xaxis']['title'] = _('Date')
    layout_args['xaxis']['rangeselector'] = utils.rangeselector_date()
    layout_args['yaxis']['title'] = _('Average duration (minutes)')

    layout_args['yaxis2'] = dict(layout_args['yaxis'])
    layout_args['yaxis2']['title'] = _('Number of feedings')
    layout_args['yaxis2']['overlaying'] = 'y'
    layout_args['yaxis2']['side'] = 'right'

    fig = go.Figure({
        'data': [trace_avg, trace_count],
        'layout': go.Layout(**layout_args)
    })
    output = plotly.plot(fig, output_type='div', include_plotlyjs=False)
    return utils.split_graph_output(output)
Create a graph showing average duration of feeding instances over time. This function originally used the Avg() function from django.db.models but for some reason it was returning None any time the exact count of entries was equal to seven. :param instances: a QuerySet of Feeding instances. :returns: a tuple of the graph's html and javascript.
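A hedged view-level sketch (not part of the module); the Feeding model path and the template name are assumptions based on babybuddy's layout:

from django.shortcuts import render

from core.models import Feeding  # assumed model location in babybuddy
from reports.graphs.feeding_duration import feeding_duration


def feeding_duration_report(request):
    # Any Feeding queryset works; no filtering is applied in this example.
    instances = Feeding.objects.all()
    graph_html, graph_js = feeding_duration(instances)
    return render(request, 'reports/report.html',  # hypothetical template
                  {'html': graph_html, 'js': graph_js})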
https://github.com/babybuddy/babybuddy/blob/a361f96e4db1d77ce3e787af15e17eb04698116e/reports/graphs/feeding_duration.py#L14-L67
from django.db.models import Count, Sum
from django.db.models.functions import TruncDate
from django.utils.translation import gettext as _

import plotly.offline as plotly
import plotly.graph_objs as go

from core.utils import duration_parts

from reports import utils
BSD 2-Clause Simplified License
python-provy/provy
provy/more/debian/web/apache.py
ApacheRole.cleanup
python
def cleanup(self):
    super(ApacheRole, self).cleanup()

    if self.must_restart:
        self.restart()
Restarts Apache if any changes have been made. There's no need to call this method manually.
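A hedged provisioning sketch (the server role and its name are hypothetical); it only uses Role.using and the must_restart flag visible in this module, and relies on provy invoking cleanup() on its own once provisioning finishes:

from provy.core.roles import Role
from provy.more.debian import ApacheRole


class FrontendServer(Role):  # hypothetical user-defined role
    def provision(self):
        with self.using(ApacheRole) as apache:
            # Changing Apache configuration would normally flip this flag
            # inside ApacheRole; it is set here only to illustrate that
            # cleanup() (called automatically by provy) then restarts Apache.
            apache.must_restart = True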
https://github.com/python-provy/provy/blob/ca3d5e96a2210daf3c1fd4b96e047efff152db14/provy/more/debian/web/apache.py#L64-L72
from fabric.api import settings

from provy.core.roles import Role
from provy.more.debian import AptitudeRole


class ApacheRole(Role):
    def __available_site_for(self, name):
        return '/etc/apache2/sites-available/%s' % name

    def __enabled_site_for(self, name):
        return '/etc/apache2/sites-enabled/%s' % name

    def __init__(self, prov, context):
        super(ApacheRole, self).__init__(prov, context)
        self.must_restart = False

    def provision(self):
        with self.using(AptitudeRole) as aptitude:
            aptitude.ensure_package_installed('apache2')
MIT License
popsim-consortium/demes-python
demes/__main__.py
ParseCommand.load_and_count_documents
python
def load_and_count_documents(
    self, filename: Union[str, io.TextIOBase]
) -> Tuple[int, Iterator[demes.Graph]]:
    graph_generator = demes.load_all(filename)
    graph_list = []
    for graph in graph_generator:
        graph_list.append(graph)
        if len(graph_list) > 1:
            break
    num_documents = len(graph_list)
    graph_iter = itertools.chain(graph_list, graph_generator)
    return num_documents, graph_iter
Count the documents in the file, returning the count and an iterator over the graphs. The returned document count is: 0 for zero documents, 1 for one document, and 2 for two or more documents.
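The same peek-then-chain idea in isolation (a hedged sketch, not project code); only demes.load_all and itertools are assumed, and the filename is made up:

import itertools

import demes


def count_up_to_two(filename):
    # Pull at most two graphs so a multi-document file is not fully parsed
    # just to learn whether there is more than one document.
    gen = demes.load_all(filename)
    head = list(itertools.islice(gen, 2))
    # 0, 1, or 2 (meaning "two or more"); the chained iterator still yields everything.
    return len(head), itertools.chain(head, gen)


num, graphs = count_up_to_two("models.yaml")  # hypothetical filename
if num == 1:
    graph = next(graphs)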
https://github.com/popsim-consortium/demes-python/blob/29d7ae04a0ffb9de7bd99ff05d73d9897ee11daa/demes/__main__.py#L116-L136
import io import sys import itertools import argparse from typing import Iterator, Tuple, Union import textwrap import demes from . import ms class ParseCommand: def __init__(self, subparsers): parser = subparsers.add_parser( "parse", help="Parse models and write them to stdout in canonical form.", description=textwrap.dedent(self.__doc__), ) parser.set_defaults(func=self) format_group = parser.add_mutually_exclusive_group() format_group.add_argument( "-j", "--json", action="store_true", default=False, help="Output a JSON-formatted model.", ) format_group.add_argument( "--ms", metavar="REFERENCE_SIZE", type=float, default=None, help=( "Output ms command line arguments, using the given reference " "population size (N0) to translate into coalescent units " "(see the 'ms' subcommand for interpretation of this value)." "The sampling configuration in the output will need editing " "prior to simulation. The order of deme IDs matches the " "order of demes in the input model. " ), ) parser.add_argument( "-s", "--simplified", action="store_true", default=False, help=( "Output a simplified model. This is a compact representation " "in which many default values are omitted. As only the " "essential details are retained, this is usually easier for " "humans to read. The simplified output is guaranteed to be a " "valid Demes model that can be resolved identically to the " "input model. But exactly which fields are simplified, " "and how simplification is performed, may change over time. " "Thus users should not rely on details of the output such as " "presence or absence of specific fields, or other details " "that do not alter how the model is resolved into a " "fully-qualified model. " "A fully-qualified model is output by default." ), ) parser.add_argument( "filename", type=argparse.FileType(), help=( "Filename of the model. The special value '-' may be used to " "read from stdin. The file may be in YAML or JSON format, " "but will be parsed as YAML. Multi-document YAML is supported." ), ) def __call__(self, args: argparse.Namespace) -> None: if args.json: output_format = "json" elif args.ms: output_format = "ms" else: output_format = "yaml" if args.ms and args.simplified: pass num_documents, graphs = self.load_and_count_documents(args.filename) if num_documents == 0: pass elif num_documents == 1: graph = next(graphs) if args.ms is not None: print(demes.to_ms(graph, N0=args.ms)) else: demes.dump( graph, sys.stdout, simplified=args.simplified, format=output_format, ) else: if output_format != "yaml": raise RuntimeError( "The input file contains multiple models, which is only " "supported with YAML output. If multi-model output " "would be useful to you with other formats, " "please open an issue on github.", ) demes.dump_all(graphs, sys.stdout, simplified=args.simplified)
ISC License
michaelgale/cq-kit
cqkit/cq_discrete.py
discretize_edge
python
def discretize_edge(edge, resolution=16):
    if isinstance(edge, Edge):
        curve = BRepAdaptor_Curve(edge.wrapped)
    else:
        curve = BRepAdaptor_Curve(edge)
    try:
        gt = GCPnts_QuasiUniformAbscissa(curve, resolution + 1)
    except:
        return []
    pts = []
    for p in range(resolution + 1):
        pt = gt.Parameter(p + 1)
        curve_props = BRepLProp_CLProps(curve, 1, 1e-6)
        curve_props.SetParameter(pt)
        vpt = curve_props.Value()
        pts.append((vpt.X(), vpt.Y(), vpt.Z()))
    return pts
Uniformly samples an edge with specified resolution (number of segments) and returns an array (segments + 1) of discrete (approximated) 3D points.
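A small usage sketch (an assumed workflow, not taken from the library's docs), relying only on standard cadquery calls plus discretize_edge above; the dimensions are arbitrary:

import cadquery as cq

from cqkit.cq_discrete import discretize_edge

# A short cylinder; its circular rim edges are good candidates to sample.
cyl = cq.Workplane("XY").circle(10.0).extrude(2.0)
edge = cyl.edges().vals()[0]          # a cadquery Edge object

pts = discretize_edge(edge, resolution=32)
print(len(pts))                       # resolution + 1 = 33 approximated (x, y, z) points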
https://github.com/michaelgale/cq-kit/blob/e44b54d75e2687fa29cf1ee0f181008521befc3c/cqkit/cq_discrete.py#L57-L75
try:
    from OCC.Core.BRep import BRep_Tool
    from OCC.Core.BRepMesh import BRepMesh_IncrementalMesh
    from OCC.Core.TopAbs import TopAbs_FACE, TopAbs_VERTEX
    from OCC.Core.TopExp import TopExp_Explorer
    from OCC.Core.TopLoc import TopLoc_Location
    from OCC.Core.TopoDS import TopoDS_Face, TopoDS_Vertex, TopoDS_Iterator
    from OCC.Core.BRepAdaptor import BRepAdaptor_Curve
    from OCC.Core.BRepLProp import BRepLProp_CLProps
    from OCC.Core.GCPnts import GCPnts_AbscissaPoint, GCPnts_QuasiUniformAbscissa
    from OCC.Core.gp import gp_Dir
except:
    from OCP.BRep import BRep_Tool
    from OCP.BRepMesh import BRepMesh_IncrementalMesh
    from OCP.TopAbs import TopAbs_FACE, TopAbs_VERTEX, TopAbs_Orientation
    from OCP.TopExp import TopExp_Explorer
    from OCP.TopLoc import TopLoc_Location
    from OCP.TopoDS import TopoDS_Face, TopoDS_Vertex, TopoDS_Iterator
    from OCP.BRepAdaptor import BRepAdaptor_Curve
    from OCP.BRepLProp import BRepLProp_CLProps
    from OCP.GCPnts import GCPnts_AbscissaPoint, GCPnts_QuasiUniformAbscissa
    from OCP.gp import gp_Dir

    BRep_Tool.Triangulation = BRep_Tool.Triangulation_s
    GCPnts_AbscissaPoint.Length = GCPnts_AbscissaPoint.Length_s

import cadquery as cq
from cadquery import *
MIT License
roberodin/ha-samsungtv-custom
custom_components/samsungtv_custom/samsungctl_qled/upnp.py
Upnp.set_current_media
python
def set_current_media(self, url):
    self.SOAPrequest('SetAVTransportURI', "<CurrentURI>{url}</CurrentURI><CurrentURIMetaData></CurrentURIMetaData>".format(url=url), 'AVTransport')
Set media to playback.
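A hedged example (the TV's IP address and the media URL are made up), using only the Upnp class and methods shown in this module's context:

# Upnp is the class defined in this module; host and URLs below are illustrative.
upnp = Upnp({"host": "192.168.1.50"})
upnp.set_volume(12)
upnp.set_current_media("http://192.168.1.2:8123/local/clip.mp4")
print(upnp.get_volume(), upnp.get_mute())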
https://github.com/roberodin/ha-samsungtv-custom/blob/da6bf9349d1e33bf143a115b4f2a3754d6754472/custom_components/samsungtv_custom/samsungctl_qled/upnp.py#L56-L58
import xml.etree.ElementTree as ET
import requests


class Upnp:
    def __init__(self, config):
        self._host = config['host']
        self.mute = False
        self.volume = 0

    def SOAPrequest(self, action, arguments, protocole):
        headers = {'SOAPAction': '"urn:schemas-upnp-org:service:{protocole}:1#{action}"'.format(action=action, protocole=protocole), 'content-type': 'text/xml'}
        body = """<?xml version="1.0" encoding="utf-8"?>
            <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
                <s:Body>
                    <u:{action} xmlns:u="urn:schemas-upnp-org:service:{protocole}:1">
                        <InstanceID>0</InstanceID>
                        {arguments}
                    </u:{action}>
                </s:Body>
            </s:Envelope>""".format(action=action, arguments=arguments, protocole=protocole)
        response = None
        try:
            response = requests.post("http://{host}:9197/upnp/control/{protocole}1".format(host=self._host, protocole=protocole), data=body, headers=headers, timeout=0.2)
            response = response.content
        except:
            pass
        return response

    def get_volume(self):
        self.volume = 0
        response = self.SOAPrequest('GetVolume', "<Channel>Master</Channel>", 'RenderingControl')
        if (response is not None):
            volume_xml = response.decode('utf8')
            tree = ET.fromstring(volume_xml)
            for elem in tree.iter(tag='CurrentVolume'):
                self.volume = elem.text
        return self.volume

    def set_volume(self, volume):
        self.SOAPrequest('SetVolume', "<Channel>Master</Channel><DesiredVolume>{}</DesiredVolume>".format(volume), 'RenderingControl')

    def get_mute(self):
        self.mute = False
        response = self.SOAPrequest('GetMute', "<Channel>Master</Channel>", 'RenderingControl')
        if (response is not None):
            mute_xml = response.decode('utf8')
            tree = ET.fromstring(mute_xml)
            for elem in tree.iter(tag='CurrentMute'):
                mute = elem.text
            if (int(mute) == 0):
                self.mute = False
            else:
                self.mute = True
        return self.mute
Apache License 2.0
xuru/pyvisdk
pyvisdk/do/exit_maintenance_mode_event.py
ExitMaintenanceModeEvent
python
def ExitMaintenanceModeEvent(vim, *args, **kwargs):
    obj = vim.client.factory.create('ns0:ExitMaintenanceModeEvent')

    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 5 arguments got: %d' % len(args))

    required = ['chainId', 'createdTime', 'key', 'userName']
    optional = ['changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
                'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty',
                'dynamicType']

    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
This event records that the host is no longer in maintenance mode.
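A hedged construction sketch; `service` stands in for an already-connected pyvisdk instance exposing client.factory, and every field value below is a placeholder:

from datetime import datetime

from pyvisdk.do.exit_maintenance_mode_event import ExitMaintenanceModeEvent

# `service` is assumed to be a connected pyvisdk object; values are illustrative.
event = ExitMaintenanceModeEvent(
    service,
    chainId=101,
    createdTime=datetime.utcnow(),
    key=101,
    userName='administrator',
    host='esx01.example.org',  # one of the optional fields
)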
https://github.com/xuru/pyvisdk/blob/de24eb4426eb76233dc2e57640d3274ffd304eb3/pyvisdk/do/exit_maintenance_mode_event.py#L11-L33
import logging

from pyvisdk.exceptions import InvalidArgumentError

log = logging.getLogger(__name__)
MIT License
pyobo/pyobo
src/pyobo/sources/dictybase_gene.py
get_obo
python
def get_obo(force: bool = False) -> Obo: return Obo( iter_terms=get_terms, iter_terms_kwargs=dict(force=force), name=NAME, ontology=PREFIX, typedefs=[from_species, has_gene_product], auto_generated_by=f"bio2obo:{PREFIX}", )
Get dictyBase Gene as OBO.
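A small hedged CLI sketch in the style pyobo source modules usually end with; `write_default` is assumed from pyobo's Obo interface and is not shown in this file:

import click
from more_click import verbose_option


@click.command()
@verbose_option
def _main():
    # Build the dictyBase Gene ontology and write it to pyobo's default
    # output location (write_default is assumed, not defined here).
    obo = get_obo(force=True)
    obo.write_default()


if __name__ == "__main__":
    _main()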
https://github.com/pyobo/pyobo/blob/678b4eeb5ba40205ab5ed8315ae0cc1d4ff3199f/src/pyobo/sources/dictybase_gene.py#L34-L43
import logging
from typing import Iterable

import click
import pandas as pd
from more_click import verbose_option
from tqdm import tqdm

from pyobo.struct import Obo, Reference, Synonym, Term, from_species, has_gene_product
from pyobo.utils.io import multisetdict
from pyobo.utils.path import ensure_df

logger = logging.getLogger(__name__)

PREFIX = "dictybase.gene"
NAME = "dictyBase Gene"
URL = (
    "http://dictybase.org/db/cgi-bin/dictyBase/download/"
    "download.pl?area=general&ID=gene_information.txt"
)
UNIPROT_MAPPING = (
    "http://dictybase.org/db/cgi-bin/dictyBase/download/"
    "download.pl?area=general&ID=DDB-GeneID-UniProt.txt"
)
MIT License
amccaugh/phidl
phidl/geometry.py
C
python
def C(width = 1, size = (10, 20), layer = 0):
    D = Device(name = 'C')
    w = width/2
    s1, s2 = size
    points = [(-w, -w), (s1, -w), (s1, w), (w, w), (w, s2-w), (s1, s2-w),
              (s1, s2+w), (-w, s2+w), (-w, -w)]
    D.add_polygon(points, layer = layer)
    D.add_port(name = 1, midpoint = (s1, s2), width = width, orientation = 0)
    D.add_port(name = 2, midpoint = (s1, 0), width = width, orientation = 0)
    return D
Generates a 'C' geometry with ports on both ends. Parameters ---------- width : int or float Thickness of the line forming the C. size : tuple of int or float Lengths of the base + top edges and the height of the C, respectively. layer : int, array-like[2], or set Specific layer(s) to put polygon geometry on. Returns ------- D : Device A Device containing a [-bracket-shaped polygon and two ports (`1` and `2`) on either end of the [ shape.
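A short usage sketch (the dimensions and output filename are arbitrary), assuming only phidl's public Device API used elsewhere in this file:

from phidl import quickplot as qp
import phidl.geometry as pg

# A 0.5-unit-wide 'C' on layer 1, 8 units across and 15 units tall.
D = pg.C(width=0.5, size=(8, 15), layer=1)
print(D.ports)            # ports 1 and 2 sit on the open ends of the C
qp(D)                     # preview the geometry
D.write_gds('c_shape.gds')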
https://github.com/amccaugh/phidl/blob/eae1cea6de172db72207e926c8a6500bc9dfcddf/phidl/geometry.py#L375-L401
from __future__ import division, print_function, absolute_import import os.path import numpy as np import itertools from numpy import sqrt, pi, cos, sin, log, exp, sinh import gdspy from gdspy import clipper from phidl.device_layout import Device, Port, Polygon, CellArray, Group from phidl.device_layout import _parse_layer, DeviceReference, make_device import copy as python_copy from collections import OrderedDict import pickle import json import warnings from functools import update_wrapper from phidl.constants import _glyph,_width,_indent def rectangle(size = (4,2), layer = 0): D = Device(name = 'rectangle') points = [[size[0], size[1]], [size[0], 0], [0, 0], [0, size[1]]] D.add_polygon(points, layer = layer) return D def bbox(bbox = [(-1, -1), (3, 4)], layer = 0): D = Device(name = 'bbox') (a,b), (c,d) = bbox points = ((a,b), (c,b), (c,d), (a,d)) D.add_polygon(points, layer = layer) return D def cross(length = 10, width = 3, layer = 0): D = Device(name = 'cross') R = rectangle(size = (width, length), layer = layer) r1 = D.add_ref(R).rotate(90) r2 = D.add_ref(R) r1.center = (0,0) r2.center = (0,0) return D def ellipse(radii = (10,5), angle_resolution = 2.5, layer = 0): D = Device(name = 'ellipse') a = radii[0] b = radii[1] t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180 r = a*b / (sqrt((b*cos(t))**2 + (a*sin(t))**2)) xpts = r*cos(t) ypts = r*sin(t) D.add_polygon(points = (xpts, ypts), layer = layer) return D def circle(radius = 10, angle_resolution = 2.5, layer = 0): D = Device(name = 'circle') t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180 xpts = (radius*cos(t)).tolist() ypts = (radius*sin(t)).tolist() D.add_polygon(points = (xpts, ypts), layer = layer) return D def ring(radius = 10, width = 0.5, angle_resolution = 2.5, layer = 0): D = Device(name = 'ring') inner_radius = radius - width/2 outer_radius = radius + width/2 n = int(np.round(360/angle_resolution)) t = np.linspace(0, 360, n+1) * pi/180 inner_points_x = (inner_radius*cos(t)).tolist() inner_points_y = (inner_radius*sin(t)).tolist() outer_points_x = (outer_radius*cos(t)).tolist() outer_points_y = (outer_radius*sin(t)).tolist() xpts = inner_points_x + outer_points_x[::-1] ypts = inner_points_y + outer_points_y[::-1] D.add_polygon(points = (xpts, ypts), layer = layer) return D def arc(radius = 10, width = 0.5, theta = 45, start_angle = 0, angle_resolution = 2.5, layer = 0): inner_radius = radius - width/2 outer_radius = radius + width/2 angle1 = (start_angle) * pi/180 angle2 = (start_angle + theta) * pi/180 t = np.linspace(angle1, angle2, int(np.ceil(abs(theta)/angle_resolution))) inner_points_x = (inner_radius*cos(t)).tolist() inner_points_y = (inner_radius*sin(t)).tolist() outer_points_x = (outer_radius*cos(t)).tolist() outer_points_y = (outer_radius*sin(t)).tolist() xpts = inner_points_x + outer_points_x[::-1] ypts = inner_points_y + outer_points_y[::-1] D = Device('arc') D.add_polygon(points = (xpts,ypts), layer = layer) D.add_port(name = 1, midpoint = (radius*cos(angle1), radius*sin(angle1)), width = width, orientation = start_angle - 90 + 180*(theta<0)) D.add_port(name = 2, midpoint = (radius*cos(angle2), radius*sin(angle2)), width = width, orientation = start_angle + theta + 90 - 180*(theta<0)) D.info['length'] = (abs(theta) * pi/180) * radius return D def turn(port, radius = 10, angle = 270, angle_resolution = 2.5, layer = 0): D = arc(radius = radius, width = port.width, theta = angle, start_angle = 0, angle_resolution = angle_resolution, layer = layer) D.rotate(angle = 
180 + port.orientation - D.ports[1].orientation, center = D.ports[1].midpoint) D.move(origin = D.ports[1], destination = port) return D def straight(size = (4, 2), layer = 0): D = Device(name = 'wire') points = [[size[0], size[1]], [size[0], 0], [0, 0], [0, size[1]]] D.add_polygon(points, layer = layer) D.add_port(name = 1, midpoint = (size[0]/2, size[1]), width = size[0], orientation = 90) D.add_port(name = 2, midpoint = (size[0]/2, 0), width = size[0], orientation = -90) return D def L(width = 1, size = (10, 20), layer = 0): D = Device(name = 'L') w = width/2 s1, s2 = size points = [(-w, -w), (s1, -w), (s1, w), (w, w), (w, s2), (-w, s2), (-w, -w)] D.add_polygon(points, layer = layer) D.add_port(name = 1, midpoint = (0, s2), width = width, orientation = 90) D.add_port(name = 2, midpoint = (s1, 0), width = width, orientation = 0) return D
MIT License
googlearchive/simian
src/simian/auth/x509.py
X509Certificate.CheckIssuer
python
def CheckIssuer(self, issuer=None):
    if issuer is None:
        issuer = self._required_issuer

    if issuer is None:
        return

    if self._cert['issuer'] is None or self._cert['issuer'] != issuer:
        raise CertificateValueError(
            'Issuer does not match required issuer: "%s" != required "%s"' % (
                self._cert['issuer'], issuer))
Check that the certificate has a specific issuer. Args: issuer: str, optional, issuer that is required Raises: CertificateValueError: if issuer does not match
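A hedged verification sketch (the certificate path and issuer DN are placeholders), combining the load, validity, and issuer checks defined on this class:

from simian.auth.x509 import X509Certificate

# The DER file and the issuer string below are illustrative only.
with open('client_cert.der', 'rb') as f:
    der_bytes = f.read()

cert = X509Certificate()
cert.LoadFromByteString(der_bytes)
cert.CheckValidity()                        # raises CertificateValueError if expired
cert.CheckIssuer('CN=Simian CA,O=Example')  # raises if the issuer DN differs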
https://github.com/googlearchive/simian/blob/fb9c43946ff7ba29be417068d6447cfc0adfe9ef/src/simian/auth/x509.py#L688-L705
import base64 import datetime import hashlib import re import time from pyasn1.codec.der import decoder as der_decoder from pyasn1.codec.der import encoder as der_encoder import pyasn1.error from pyasn1.type import univ from pyasn1_modules import rfc2459 from simian.auth import tlslite_bridge OID_NAME = { (2, 5, 4, 3): 'CN', (2, 5, 4, 6): 'C', (2, 5, 4, 7): 'L', (2, 5, 4, 8): 'ST', (2, 5, 4, 10): 'O', (2, 5, 4, 11): 'OU', (1, 2, 840, 113549, 1, 9, 1): 'emailAddress', (0, 9, 2342, 19200300, 100, 1, 25): 'DC', } OID_ID = {} for k, v in OID_NAME.iteritems(): OID_ID[v] = k OID_ID['domainComponent'] = OID_ID['DC'] OID_SHA1_WITH_RSA_ENC = (1, 2, 840, 113549, 1, 1, 5) OID_SHA256_WITH_RSA_ENC = (1, 2, 840, 113549, 1, 1, 11) OID_MS_NT_PRINCIPAL_NAME = (1, 3, 6, 1, 4, 1, 311, 20, 2, 3) OID_X509V3_BASIC_CONSTRAINTS = (2, 5, 29, 19) OID_X509V3_KEY_USAGE = (2, 5, 29, 15) X509V3_KEY_USAGE_BIT_FIELDS = ( 'digitalSignature', 'nonRepudiation', 'keyEncipherment', 'dataEncipherment', 'keyAgreement', 'keyCertSign', 'CRLSign', 'encipherOnly', 'decipherOnly', ) OID_X509V3_SUBJECT_ALT_NAME = (2, 5, 29, 17) X509_CERT_VERSION_3 = 0x2 BASE64_RE = re.compile(r'^[0-9A-Za-z/+=]+$') SPACE = str(u'\u0020') NULL = '\x00' RFC4514_ESCAPED_CHARS = frozenset(('"', '+', ',', ';', '<', '>', '\\', NULL)) class Error(Exception): class CertificateError(Error): class FormatError(Error): class CertificateValueError(CertificateError): class CertificateParseError(CertificateError): class CertificateFormatError(CertificateError, FormatError): class PEMFormatError(FormatError): class HeaderMissingPEMFormatError(PEMFormatError): class FooterMissingPEMFormatError(PEMFormatError): class CertificateASN1FormatError(CertificateFormatError): class CertificatePEMFormatError(CertificateFormatError, PEMFormatError): class RSAKeyPEMFormatError(PEMFormatError): class RSAPrivateKeyPEMFormatError(RSAKeyPEMFormatError): class BaseDataObject(object): def _GetDataDict(self): raise NotImplementedError @classmethod def CreateGetMethod(cls, name, key, setattr_=None): if setattr_ is None: setattr_ = setattr setattr_(cls, 'Get%s' % name, lambda self: self._GetDataDict()[key]) class X509Certificate(BaseDataObject): SIGNATURE_ALGORITHMS = [OID_SHA1_WITH_RSA_ENC, OID_SHA256_WITH_RSA_ENC] TIMESTAMP_FMT = '%y%m%d%H%M%SZ' def __init__(self): self.Reset() self._required_issuer = None def Reset(self): self._cert = { 'serial_num': None, 'issuer': None, 'subject': None, 'valid_notbefore': None, 'valid_notafter': None, 'fields_data': None, 'sig_data': None, 'sig_algorithm': None, 'entire_cert_data': None, 'public_key': None, 'may_act_as_ca': None, 'key_usage': None, 'subject_alt_name': None, } def _GetDataDict(self): return self._cert BaseDataObject.CreateGetMethod('Issuer', 'issuer') BaseDataObject.CreateGetMethod('Subject', 'subject') BaseDataObject.CreateGetMethod('DatetimeNotValidBefore', 'valid_notbefore') BaseDataObject.CreateGetMethod('DatetimeNotValidAfter', 'valid_notafter') BaseDataObject.CreateGetMethod('FieldsData', 'fields_data') BaseDataObject.CreateGetMethod('SignatureData', 'sig_data') BaseDataObject.CreateGetMethod('SignatureAlgorithm', 'sig_algorithm') BaseDataObject.CreateGetMethod('SerialNumber', 'serial_num') BaseDataObject.CreateGetMethod('EntireCertData', 'entire_cert_data') BaseDataObject.CreateGetMethod('PublicKey', 'public_key') BaseDataObject.CreateGetMethod('MayActAsCA', 'may_act_as_ca') BaseDataObject.CreateGetMethod('KeyUsage', 'key_usage') BaseDataObject.CreateGetMethod('SubjectAltName', 'subject_alt_name') def 
_CertTimestampToDatetime(self, ts): try: t = time.strptime(str(ts[0]), self.TIMESTAMP_FMT) except ValueError, e: raise CertificateValueError('Timestamp %s: %s' % (ts, str(e))) t = list(t[:6]) t.append(0) d = datetime.datetime(*t) return d def _FindOctetStrings(self, values): return [x for x in values if isinstance(x, univ.OctetString)] def _GetV3ExtensionFieldsFromSequence(self, seq): seq = der_decoder.decode(der_encoder.encode(seq))[0] output = {} cert_key_usage = [] for i in xrange(len(seq)): oid, values = seq[i][0], seq[i][1:] if oid == OID_X509V3_BASIC_CONSTRAINTS: octet_strings = self._FindOctetStrings(values) if 1 < len(values) > 2: raise CertificateParseError('X509V3 Multiple CA/Paths') encaps_seq = der_decoder.decode(octet_strings[0]) if len(encaps_seq): if len(encaps_seq[1]): raise CertificateParseError( 'X509V3 pathLenConstraint unsupported') if len(encaps_seq[0]) and encaps_seq[0][0]: output['may_act_as_ca'] = True elif oid == OID_X509V3_KEY_USAGE: octet_strings = self._FindOctetStrings(values) if octet_strings[0][0] != '\x03': raise CertificateParseError('X509V3 Key Usage encoding') encaps_bitstr = der_decoder.decode(octet_strings[0])[0] n = 0 while n < len(encaps_bitstr): if encaps_bitstr[n]: cert_key_usage.append(X509V3_KEY_USAGE_BIT_FIELDS[n]) n += 1 elif oid == OID_X509V3_SUBJECT_ALT_NAME: octet_strings = self._FindOctetStrings(values) if octet_strings[0][0] != '\x30': raise CertificateParseError('X509V3 Subject Alt Name encoding') encaps_seq = der_decoder.decode(octet_strings[0])[0] if not encaps_seq: continue if encaps_seq[0] == OID_MS_NT_PRINCIPAL_NAME: output['subject_alt_name'] = ( 'X_MS_NT_Principal_Name=%s' % encaps_seq[1]) else: raise CertificateParseError( 'X509V3 SubjectAltName Unknown OID %s' % str(encaps_seq[0])) cert_key_usage = tuple(cert_key_usage) if cert_key_usage: output['key_usage'] = cert_key_usage return output def _AttributeValueToString(self, value): if isinstance(value, rfc2459.AttributeValue): value = der_decoder.decode(value.asOctets())[0] else: value = str(value) tmp = [] i = 0 while i < len(value): if value[i] in RFC4514_ESCAPED_CHARS: if i == 0 or value[i - 1] != '\\': if value[i] == NULL: tmp.append('\\00') else: tmp.append('\\%s' % value[i]) else: tmp.append(value[i]) i += 1 value = ''.join(tmp) if value.startswith(SPACE): value = '\\' + value elif value.startswith('#'): value = '\\' + value if value.endswith(SPACE): value = value[0:-1] + '\\' + SPACE return value def _AssembleDNSequence(self, seq): output = [] delimiter = ',' try: for i in seq[0]: oid, value = i[0]['type'], i[0]['value'] if oid in OID_NAME: new_value = self._AttributeValueToString(value) output.append('%s=%s' % (OID_NAME[oid], new_value)) else: raise CertificateParseError('Unknown OID %s' % str(oid)) except (IndexError, ValueError, TypeError): raise CertificateParseError('Unknown DN sequence structure', seq) return delimiter.join(output) def _GetFieldsFromSequence(self, seq): try: if seq['version'] != X509_CERT_VERSION_3: raise CertificateParseError( 'X509 version %s not supported' % seq['version']) serial_num = int(seq['serialNumber']) cert_sig_algorithm = self._GetSignatureAlgorithmFromSequence( seq['signature']) cert_issuer = self._AssembleDNSequence(seq['issuer']) if (seq['validity']['notBefore'].isSameTypeWith(rfc2459.Time()) and seq['validity']['notAfter'].isSameTypeWith(rfc2459.Time())): cert_valid_notbefore = self._CertTimestampToDatetime( seq['validity']['notBefore']) cert_valid_notafter = self._CertTimestampToDatetime( seq['validity']['notAfter']) else: raise 
CertificateParseError('Validity time structure') cert_subject = self._AssembleDNSequence(seq['subject']) if len(seq) > 7: v3_output = self._GetV3ExtensionFieldsFromSequence(seq['extensions']) else: v3_output = {} fields_data = der_encoder.encode(seq) output = { 'serial_num': serial_num, 'issuer': unicode(cert_issuer), 'subject': unicode(cert_subject), 'valid_notbefore': cert_valid_notbefore, 'valid_notafter': cert_valid_notafter, 'fields_data': fields_data, 'sig_algorithm': cert_sig_algorithm, } output.update(v3_output) except (IndexError, TypeError, AttributeError, ValueError), e: raise CertificateParseError(str(e)) return output def _GetSignatureAlgorithmFromSequence(self, seq): try: if seq['algorithm'] not in self.SIGNATURE_ALGORITHMS: raise CertificateValueError( 'Unsupported signature algorithm %s' % str(seq['algorithm'])) output = {'sig_algorithm': seq['algorithm']} except (IndexError, TypeError, AttributeError), e: raise CertificateParseError(str(e)) return output def _GetSignatureFromSequence(self, seq): try: if len(seq) >= 1024: total = 0 for b in seq: total |= seq[b] if total not in [0, 1]: raise CertificateParseError('Invalid signature format') else: raise CertificateParseError('Signature length must be >=1024') sig_data = der_encoder.encode(seq) sig_data = sig_data[-1 * (len(seq)/8):] output = {'sig_data': sig_data} except (IndexError, TypeError, AttributeError), e: raise CertificateParseError(str(e)) return output def _GetCertSequencesFromTopSequence(self, seq): if type(seq) is not tuple or len(seq) < 1: raise CertificateParseError( 'Top of certificate should consist of 1+ sequences') certificate = seq[0] fields = self._GetFieldsFromSequence(certificate['tbsCertificate']) sigalg = self._GetSignatureAlgorithmFromSequence( certificate['signatureAlgorithm']) sig = self._GetSignatureFromSequence(certificate['signatureValue']) cert = {} cert.update(fields) cert.update(sigalg) cert.update(sig) return cert def _GetPublicKeyFromByteString(self, bytes_str): cert = tlslite_bridge.X509() cert.parseBinary(bytearray(bytes_str)) return { 'public_key': cert.publicKey, } def LoadFromByteString(self, bytes_str): try: c = der_decoder.decode(bytes_str, asn1Spec=rfc2459.Certificate()) except pyasn1.error.PyAsn1Error, e: raise CertificateASN1FormatError('DER decode: %s' % str(e)) cert = { 'entire_byte_string': bytes_str, } cert.update(self._GetCertSequencesFromTopSequence(c)) cert.update(self._GetPublicKeyFromByteString(bytes_str)) self.Reset() self._cert.update(cert) def CheckValidity(self, utcnow=None): if utcnow is None: utcnow = datetime.datetime.utcnow() if utcnow > self._cert['valid_notafter']: raise CertificateValueError( 'Certificate expired on %s' % self._cert['valid_notafter']) if utcnow < self._cert['valid_notbefore']: raise CertificateValueError( 'Certificate not valid until %s' % self._cert['valid_notbefore'])
Apache License 2.0
sonibla/pytorch_keras_converter
pytorch_keras_converter/utility/LayerRepresentation.py
LayerRepresentation.progression
python
def progression(self, framework=None):
    equivalents = self.numberOfEquivalents(framework=framework)
    if framework in self.equivalent.keys():
        equivalents += 1
    total = self.numberOfChildren() + 1
    return (equivalents / total) * 100
Returns the fraction of the model (in %) that has an equivalent available in Keras or PyTorch (selected by the framework argument)
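A hedged conversion-progress sketch (the torchvision model is just an example input, not part of this project):

import torchvision

from pytorch_keras_converter.utility.LayerRepresentation import LayerRepresentation

# Wrap a torch module and report how much of the layer tree already has a
# Keras equivalent attached (0% right after construction from a torch model).
model = LayerRepresentation(torchvision.models.resnet18())
print('%.1f%% converted to keras' % model.progression(framework='keras'))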
https://github.com/sonibla/pytorch_keras_converter/blob/21925b67b6eb3cbbfa8eb6d33f682d57dafd357d/pytorch_keras_converter/utility/LayerRepresentation.py#L923-L933
try: import tensorflow.keras as keras except ImportError: try: import keras except ImportError: keras = None try: import torch except ImportError: torch = None try: import graphviz except ImportError: graphviz = None from . import torch2keras as t2k def UsedFramework(module): moduleClass = str(module.__class__)[8:-2] IsKeras = 'keras' in moduleClass IsTorch = 'torch' in moduleClass if '_backend' in dir(module): IsTorch = IsTorch or 'torch' in str(module._backend) if IsKeras: return 'keras' if IsTorch: return 'torch' return None def normalizeShape(shape): if isinstance(shape, tuple): normalizedShape = shape elif isinstance(shape, int): normalizedShape = (shape,) else: try: normalizedShape = tuple(shape) except TypeError: raise TypeError("Could not convert provided shape to tulpe") return normalizedShape class LayerRepresentation: def __init__(self, module=None): self.parent = None self.children = list() self.originalFramework = None self.name = str(module.__class__.__name__) self.type = str(module.__class__.__name__) self.detailedType = str(module.__class__)[8:-2] self.detailedTypeList = self.detailedType.split('.') self.equivalent = dict() self.equivalentTxt = dict() self.input_shape = None self.output_shape = None self.InputConnectedTo = set() self.OutputConnectedTo = set() framework = UsedFramework(module) if framework == 'torch': self.originalFramework = 'torch' self.equivalent['torch'] = module.eval() children = dict(module.named_children()).items() for name, child in children: self.addChild(child, name=name) elif framework == 'keras': self.originalFramework = 'keras' if keras is None: self.equivalent['keras'] = module else: self.equivalent['keras'] = keras.models.clone_model(module) if 'layers' in dir(module): for child in module.layers: self.addChild(child, name=child.name) self.kerasOutput = None self.kerasInput = None def __setattr__(self, attr, val): object.__setattr__(self, attr, val) if 'kerasInput' in dir(self): if attr == 'input_shape': if not isinstance(self.input_shape, tuple): self.input_shape = normalizeShape(self.input_shape) elif attr == 'output_shape': if not isinstance(self.output_shape, tuple): self.output_shape = normalizeShape(self.output_shape) elif attr == 'kerasInput': inputExist = self.kerasInput is not None outputExist = self.kerasOutput is not None equivExist = 'keras' in self.equivalent.keys() if inputExist and equivExist: output = self.equivalent['keras'](self.kerasInput) self.kerasOutput = output if inputExist and outputExist and not(equivExist): if keras is None: err = "Could not import keras. Conversion failed !" raise ImportError(err) kerasEq = keras.models.Model(inputs=self.kerasInput, outputs=self.kerasOutput, name=self.name) self.equivalent['keras'] = kerasEq if self.kerasInput is not None and self.input_shape is not None: shape = self.kerasInputShape() if shape is not None and shape != self.input_shape: err = "Conversion failed! Details: at layer {}, input \ shape should be {}, but is {}\ ".format(self.name, self.input_shape, shape) raise RuntimeError(err) if self.kerasOutput is not None and self.output_shape is not None: shape = self.kerasOutputShape() if shape is not None and shape != self.output_shape: err = "Conversion failed! 
Details: at layer {}, output \ shape should be {}, but is {}\ ".format(self.name, self.output_shape, shape) raise RuntimeError(err) def __getitem__(self, index): if isinstance(index, str): return self.getChild(name=index) if isinstance(index, int): return self.getChildId(identifier=index) return None def getChildId(self, identifier=None, framework=None): if framework is None: for child in self.children: if id(child) == identifier: return child if id(self) == identifier: return self mainParent = self.firstParent() if id(mainParent) == identifier: return mainParent for child in mainParent.allChildren(): if id(child) == identifier: return child return None else: for child in self.children: if framework in child.equivalent.keys(): equiv = child.equivalent[framework] if id(equiv) == identifier: return child if framework in self.equivalent.keys(): if id(self.equivalent[framework]) == identifier: return self mainParent = self.firstParent() if framework in mainParent.equivalent.keys(): if id(mainParent.equivalent[framework]) == identifier: return mainParent for child in mainParent.allChildren(): if framework in child.equivalent.keys(): equiv = child.equivalent[framework] if id(equiv) == identifier: return child return None def getChild(self, name=None): for child in self.children: if child.name == name: return child return None def addChild(self, childEq, name=None): child = LayerRepresentation(childEq) child.name = str(name) child.parent = self self.children.append(child) return child def delChildren(self): self.children = list() def delChild(self, name=None): if self.getChild(name=name) is not None: del self.children[self.getChild(name=name)] def allChildren(self): if not self.children: return list() else: List = self.children for child in self.children: List = List + child.allChildren() return List def numberOfChildren(self): number = len(self.children) for child in self.children: number += child.numberOfChildren() return number def completeName(self): if self.parent is None: return self.name return self.parent.completeName() + '_' + self.name def firstParent(self): if self.parent is None: return self return self.parent.firstParent() def connectionsAmongChildren(self, attr, reverse=False): connected = set() if isinstance(attr, str): if (attr != 'IN' or reverse) and not(attr == 'OUT' and reverse): return set() else: for child in self.children: if attr == 'IN' and 0 in child.InputConnectedTo: connected.add(child) elif attr == 'OUT' and 0 in child.OutputConnectedTo: connected.add(child) return connected else: child = attr if child not in self.children: return set() if not reverse: for bro in self.children: if child is not bro and child in bro.InputConnectedTo: connected.add(bro) for Output in child.OutputConnectedTo: if not Output == 0: connected.add(Output) else: connected.add('OUT') elif reverse: for bro in self.children: if child is not bro and child in bro.OutputConnectedTo: connected.add(bro) for Input in child.OutputConnectedTo: if not Input == 0: connected.add(Input) else: connected.add('IN') return connected def connectedChildren(self, attr, reverse=False): connected = self.connectionsAmongChildren(attr, reverse=reverse) connectedSimple = set() for layer in connected: if isinstance(layer, str): if self.parent is None: connectedSimple.add(layer) else: parent = self.parent cnctdRecursive = parent.connectedChildren(self, reverse=reverse) for simpleLayer in cnctdRecursive: connectedSimple.add(simpleLayer) elif not layer.children: connectedSimple.add(layer) elif layer.children: if reverse: 
cnctdRecursive = layer.connectedChildren('OUT', reverse=reverse) else: cnctdRecursive = layer.connectedChildren('IN', reverse=reverse) for simpleLayer in cnctdRecursive: connectedSimple.add(simpleLayer) return connectedSimple def numberOfEquivalents(self, framework=None, file=False): number = 0 for child in self.children: if not file: if framework in child.equivalent.keys(): number += 1 elif file and framework in child.equivalentTxt.keys(): if framework in child.equivalent.keys(): number += 1 number += child.numberOfEquivalents(framework=framework) return number def childrenEquivalentsCompleted(self, framework=None, file=False): for child in self.children: if framework not in child.equivalent.keys(): return False if file and framework not in child.equivalentTxt.keys(): return False if (framework is None) and (child.equivalent == {}): return False if (framework is None) and file and (child.equivalentTxt == {}): return False return True def Connect2Layers(self, name0, name1, connectKeras=True): child0 = self.getChild(name=name0) child1 = self.getChild(name=name1) if child0 is None or child1 is None: return None child0.OutputConnectedTo.add(child1) child1.InputConnectedTo.add(child0) if connectKeras: child1.kerasInput = child0.kerasOutput def ConnectLayers(self, *names, **kwargs): if 'connectKeras' in kwargs.keys(): connectKeras = kwargs['connectKeras'] else: connectKeras = True for i in range(len(names)-1): self.Connect2Layers(names[i], names[i+1], connectKeras=connectKeras) def ConnectModelInputToChildren(self, *names, **kwargs): if 'connectKeras' in kwargs.keys(): connectKeras = kwargs['connectKeras'] else: connectKeras = True for name in names: child = self.getChild(name=name) if child is not None: child.InputConnectedTo.add(0) if connectKeras: if self.kerasInput is None: if keras is None: err = "Could not import keras. Conversion failed !" 
raise ImportError(err) Input = keras.layers.Input(shape=self.input_shape) self.kerasInput = Input child.kerasInput = self.kerasInput def ConnectChildrenOutputToModel(self, *names, **kwargs): if 'connectKeras' in kwargs.keys(): connectKeras = kwargs['connectKeras'] else: connectKeras = True if connectKeras: kerasOutputs = list() for name in names: child = self.getChild(name=name) if child is not None: child.OutputConnectedTo.add(0) if connectKeras: kerasOutputs.append(child.kerasOutput) if connectKeras: if None in kerasOutputs: return None elif len(kerasOutputs) == 0: return None elif len(kerasOutputs) == 1: self.kerasOutput = kerasOutputs[0] else: cat = keras.layers.concatenate(kerasOutputs, axis=1) self.kerasOutput = cat def ConnectChildrenOutputToChild(self, *names, **kwargs): if 'connectKeras' in kwargs.keys(): connectKeras = kwargs['connectKeras'] else: connectKeras = True childName = kwargs['childName'] if connectKeras: kerasOutputs = list() child = self.getChild(name=childName) for i in range(len(names)): if isinstance(names[i], str): child_i = self.getChild(name=names[i]) else: child_i = child_i if child_i is not None: child_i.OutputConnectedTo.add(child) child.InputConnectedTo.add(child_i) if connectKeras: kerasOutputs.append(child_i.kerasOutput) if connectKeras: if None in kerasOutputs: return None elif len(kerasOutputs) == 0: return None elif len(kerasOutputs) == 1: self.kerasOutput = kerasOutputs[0] else: cat = keras.layers.concatenate(kerasOutputs, axis=1) self.getChild(name=childName).kerasInput = cat def ConnectChildrenOutputToChildren(self, *names, **kwargs): if 'connectKeras' in kwargs.keys(): connectKeras = kwargs['connectKeras'] else: connectKeras = True childrenNames = kwargs['childrenNames'] if isinstance(childrenNames, str): self.ConnectChildrenOutputToChild(*names, childName=childrenNames, connectKeras=connectKeras) elif isinstance(childrenNames, list): for child in childrenNames: self.ConnectChildrenOutputToChild(*names, childName=child, connectKeras=connectKeras) def isTorchBuiltIn(self): dT = self.detailedType return 'torch' in dT and 'torchvision' not in dT def isContainer(self): return ('container' in self.detailedType) def isTorchLayer(self): return self.isTorchBuiltIn() and not self.isContainer() def isTorchContainer(self): return self.isTorchBuiltIn() and self.isContainer() def kerasInputShape(self): return t2k.kerasShape(self.kerasInput) def kerasOutputShape(self): return t2k.kerasShape(self.kerasOutput) def DOT(self, shapes=True, debug=False): if graphviz is None: return None if debug: shapes = True dot = graphviz.Digraph(name='cluster_{}'.format(str(id(self))), format='svg') label = DOTlabel(model=self, shapes=shapes, debug=debug, name=str(self)) color = DOTcolor(model=self, debug=debug) dot.attr(label=label, fontsize='12', color=color) for child in self.children: if not child.children: label = DOTlabel(model=child, shapes=shapes, debug=debug, name=child.name) color = DOTcolor(model=child, debug=debug) dot.node(str(id(child)), label=label, color=color, shape='box', fontsize='11') else: dot.subgraph(child.DOT(shapes=shapes, debug=debug)) if self.parent is None: Dot = graphviz.Digraph(name='all', format='svg') Dot.subgraph(dot) connectedIN = self.connectedChildren('IN') connectedOUT = self.connectedChildren('OUT', reverse=True) if shapes: if connectedIN: Dot.node('IN', label='IN\n'+str(self.input_shape)) if connectedOUT: Dot.node('OUT', label='OUT\n'+str(self.output_shape)) else: if connectedIN: Dot.node('IN') if connectedOUT: Dot.node('OUT') Dot = 
createDOTedges(self, Dot, debug=debug) return Dot return dot
MIT License
dashcare/irrexplorer
irrexplorer/storage/migrations/env.py
run_migrations_offline
python
def run_migrations_offline():
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
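For context, a typical env.py tail (standard Alembic boilerplate, assumed rather than copied from this repository) that picks between the offline and online paths:

# Standard Alembic dispatch; run_migrations_online() is assumed to be
# defined further down in this env.py.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()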
https://github.com/dashcare/irrexplorer/blob/0c007a82ae0dec685453d1e765bc3e5c46ca1271/irrexplorer/storage/migrations/env.py#L27-L48
from logging.config import fileConfig

from alembic import context
from sqlalchemy import engine_from_config, pool

from irrexplorer.settings import DATABASE_URL
from irrexplorer.storage.tables import metadata

config = context.config
fileConfig(config.config_file_name)
config.set_main_option("sqlalchemy.url", str(DATABASE_URL))

target_metadata = metadata
BSD 2-Clause Simplified License
rpi-distro/thonny
thonny/shared/thonny/backend.py
FancyTracer._try_interpret_as_again_event
python
def _try_interpret_as_again_event(self, frame, original_event, original_args):
    if original_event == "after_expression":
        node_tags = original_args.get("node_tags")
        value = original_args.get("value")

        if (node_tags is not None
                and ("last_child" in node_tags
                     or "or_arg" in node_tags and value
                     or "and_arg" in node_tags and not value)):
            again_args = {"text_range": original_args.get("parent_range"),
                          "node_tags": ""}
            again_event = ("before_expression_again"
                           if "child_of_expression" in node_tags
                           else "before_statement_again")

            self._handle_progress_event(frame, again_event, again_args)
Some after_* events can be interpreted also as "before_*_again" events (eg. when last argument of a call was evaluated, then we are just before executing the final stage of the call)
https://github.com/rpi-distro/thonny/blob/78a289c5948310377aacfe5349cb1a43d75ed7d8/thonny/shared/thonny/backend.py#L903-L927
import sys import io import os.path import inspect import ast import _ast import _io import traceback import types import logging import pydoc import builtins import site import __main__ from thonny import ast_utils from thonny.common import TextRange, parse_message, serialize_message, DebuggerCommand, ToplevelCommand, FrameInfo, InlineCommand, InputSubmission import signal import warnings BEFORE_STATEMENT_MARKER = "_thonny_hidden_before_stmt" BEFORE_EXPRESSION_MARKER = "_thonny_hidden_before_expr" AFTER_STATEMENT_MARKER = "_thonny_hidden_after_stmt" AFTER_EXPRESSION_MARKER = "_thonny_hidden_after_expr" EXCEPTION_TRACEBACK_LIMIT = 100 DEBUG = True logger = logging.getLogger() info = logger.info class VM: def __init__(self): self._main_dir = os.path.dirname(sys.modules["thonny"].__file__) self._heap = {} site.sethelper() pydoc.pager = pydoc.plainpager self._install_fake_streams() self._current_executor = None self._io_level = 0 original_argv = sys.argv.copy() original_path = sys.path.copy() sys.path = [d for d in sys.path if d != ""] if len(sys.argv) > 1: special_names_to_remove = set() sys.argv[:] = sys.argv[1:] sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0]))) __main__.__dict__["__file__"] = sys.argv[0] else: special_names_to_remove = {"__file__", "__cached__"} sys.argv[:] = [""] sys.path.insert(0, "") if "JEDI_LOCATION" in os.environ: sys.path.append(os.environ["JEDI_LOCATION"]) for key in list(__main__.__dict__.keys()): if not key.startswith("__") or key in special_names_to_remove: del __main__.__dict__[key] __main__.__doc__ = None self.send_message(self.create_message("ToplevelResult", main_dir=self._main_dir, original_argv=original_argv, original_path=original_path, argv=sys.argv, path=sys.path, welcome_text="Python " + _get_python_version_string(), executable=sys.executable, in_venv=hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix, python_version=_get_python_version_string(), cwd=os.getcwd())) self._install_signal_handler() def mainloop(self): try: while True: try: cmd = self._fetch_command() self.handle_command(cmd, "waiting_toplevel_command") except KeyboardInterrupt: logger.exception("Interrupt in mainloop") self.send_message(self.create_message("ToplevelResult")) except: logger.exception("Crash in mainloop") def handle_command(self, cmd, command_context): assert isinstance(cmd, ToplevelCommand) or isinstance(cmd, InlineCommand) error_response_type = "ToplevelResult" if isinstance(cmd, ToplevelCommand) else "InlineError" try: handler = getattr(self, "_cmd_" + cmd.command) except AttributeError: response = self.create_message(error_response_type, error="Unknown command: " + cmd.command) else: try: response = handler(cmd) except: response = self.create_message(error_response_type, error="Thonny internal error: {0}".format(traceback.format_exc(EXCEPTION_TRACEBACK_LIMIT))) if response is not None: response["command_context"] = command_context response["command"] = cmd.command if response["message_type"] == "ToplevelResult": response["gui_is_active"] = ( self._get_tkinter_default_root() is not None or self._get_qt_app() is not None ) self.send_message(response) def _install_signal_handler(self): def signal_handler(signal, frame): raise KeyboardInterrupt("Execution interrupted") if os.name == 'nt': signal.signal(signal.SIGBREAK, signal_handler) else: signal.signal(signal.SIGINT, signal_handler) def _cmd_cd(self, cmd): try: os.chdir(cmd.path) return self.create_message("ToplevelResult") except Exception as e: return self.create_message("ToplevelResult", 
error=str(e)) def _cmd_Reset(self, cmd): return self.create_message("ToplevelResult", welcome_text="Python " + _get_python_version_string(), executable=sys.executable) def _cmd_Run(self, cmd): return self._execute_file(cmd, False) def _cmd_run(self, cmd): return self._execute_file(cmd, False) def _cmd_Debug(self, cmd): return self._execute_file(cmd, True) def _cmd_debug(self, cmd): return self._execute_file(cmd, True) def _cmd_execute_source(self, cmd): return self._execute_source(cmd, "ToplevelResult") def _cmd_execute_source_inline(self, cmd): return self._execute_source(cmd, "InlineResult") def _cmd_process_gui_events(self, cmd): try: root = self._get_tkinter_default_root() if root is not None: import tkinter while root.dooneevent(tkinter._tkinter.DONT_WAIT): pass else: app = self._get_qt_app() if app is not None: app.processEvents() except: pass return None def _cmd_get_globals(self, cmd): if not cmd.module_name in sys.modules: raise ThonnyClientError("Module '{0}' is not loaded".format(cmd.module_name)) return self.create_message("Globals", module_name=cmd.module_name, globals=self.export_variables(sys.modules[cmd.module_name].__dict__)) def _cmd_get_locals(self, cmd): for frame in inspect.stack(): if id(frame) == cmd.frame_id: return self.create_message("Locals", locals=self.export_variables(frame.f_locals)) else: raise ThonnyClientError("Frame '{0}' not found".format(cmd.frame_id)) def _cmd_get_heap(self, cmd): result = {} for key in self._heap: result[key] = self.export_value(self._heap[key]) return self.create_message("Heap", heap=result) def _cmd_shell_autocomplete(self, cmd): error = None try: import jedi except ImportError: completions = [] error = "Could not import jedi" else: try: interpreter = jedi.Interpreter(cmd.source, [__main__.__dict__]) completions = self._export_completions(interpreter.completions()) except Exception as e: completions = [] error = "Autocomplete error: " + str(e) except: completions = [] error = "Autocomplete error" return self.create_message("ShellCompletions", source=cmd.source, completions=completions, error=error ) def _cmd_editor_autocomplete(self, cmd): error = None try: import jedi with warnings.catch_warnings(): script = jedi.Script(cmd.source, cmd.row, cmd.column, cmd.filename) completions = self._export_completions(script.completions()) except ImportError: completions = [] error = "Could not import jedi" except Exception as e: completions = [] error = "Autocomplete error: " + str(e) except: completions = [] error = "Autocomplete error" return self.create_message("EditorCompletions", source=cmd.source, row=cmd.row, column=cmd.column, filename=cmd.filename, completions=completions, error=error) def _export_completions(self, jedi_completions): result = [] for c in jedi_completions: if not c.name.startswith("__"): record = {"name":c.name, "complete":c.complete, "type":c.type, "description":c.description} try: except: pass result.append(record) return result def _cmd_get_object_info(self, cmd): if cmd.object_id in self._heap: value = self._heap[cmd.object_id] attributes = {} if cmd.include_attributes: for name in dir(value): if not name.startswith("__") or cmd.all_attributes: try: attributes[name] = getattr(value, name) except: pass self._heap[id(type(value))] = type(value) info = {'id' : cmd.object_id, 'repr' : repr(value), 'type' : str(type(value)), 'type_id' : id(type(value)), 'attributes': self.export_variables(attributes)} if isinstance(value, _io.TextIOWrapper): self._add_file_handler_info(value, info) elif (type(value) in 
(types.BuiltinFunctionType, types.BuiltinMethodType, types.FunctionType, types.LambdaType, types.MethodType)): self._add_function_info(value, info) elif (isinstance(value, list) or isinstance(value, tuple) or isinstance(value, set)): self._add_elements_info(value, info) elif (isinstance(value, dict)): self._add_entries_info(value, info) else: self._try_add_dataframe_info(value, info) self._try_add_matplotlib_info(value, info, cmd) else: info = {'id' : cmd.object_id, "repr": "<object info not found>", "type" : "object", "type_id" : id(object), "attributes" : {}} return self.create_message("ObjectInfo", id=cmd.object_id, info=info) def _get_tkinter_default_root(self): tkinter = sys.modules.get("tkinter") if tkinter is not None: return getattr(tkinter, "_default_root", None) else: return None def _get_qt_app(self): mod = sys.modules.get("PyQt5.QtCore") if mod is None: mod = sys.modules.get("PyQt4.QtCore") if mod is None: mod = sys.modules.get("PySide.QtCore") if mod is None: return None app_class = getattr(mod, "QCoreApplication", None) if app_class is not None: try: return app_class.instance() except: return None else: return None def _add_file_handler_info(self, value, info): try: assert isinstance(value.name, str) assert value.mode in ("r", "rt", "tr", "br", "rb") assert value.errors in ("strict", None) assert value.newlines is None or value.tell() > 0 with open(value.name, encoding=value.encoding) as f: info["file_encoding"] = f.encoding info["file_content"] = f.read() info["file_tell"] = value.tell() except Exception as e: info["file_error"] = "Could not get file content, error:" + str(e) pass def _add_function_info(self, value, info): try: info["source"] = inspect.getsource(value) except: pass def _add_elements_info(self, value, info): info["elements"] = [] for element in value: info["elements"].append(self.export_value(element)) def _add_entries_info(self, value, info): info["entries"] = [] for key in value: info["entries"].append((self.export_value(key), self.export_value(value[key]))) def _try_add_dataframe_info(self, value, info): try: if (type(value).__name__ == "DataFrame" and type(value).__module__ == "pandas.core.frame"): info["columns"] = value.columns.tolist() info["index"] = value.index.tolist() info["values"] = value.values.tolist() info["row_count"] = len(value) info["is_DataFrame"] = True import pandas as pd info["float_format"] = pd.options.display.float_format except: logger.exception("Couldn't add DataFrame info") def _try_add_matplotlib_info(self, value, info, cmd): try: if (type(value).__name__ == "Figure" and type(value).__module__ == "matplotlib.figure"): frame_width = getattr(cmd, "frame_width", None) frame_height = getattr(cmd, "frame_height", None) if frame_width is not None and frame_height is not None: frame_ratio = frame_width / frame_height fig_ratio = value.get_figwidth() / value.get_figheight() if frame_ratio > fig_ratio: dpi = frame_height / value.get_figheight() else: dpi = frame_width / value.get_figwidth() else: dpi = None fp = io.BytesIO() value.savefig(fp, format="png", dpi=dpi) import base64 info["image_data"] = base64.b64encode(fp.getvalue()) fp.close() except: logger.exception("Couldn't add Figure info") def _execute_file(self, cmd, debug_mode): result_attributes = self._execute_source_ex(cmd.source, cmd.full_filename, "exec", debug_mode) return self.create_message("ToplevelResult", **result_attributes) def _execute_source(self, cmd, result_type): filename = "<pyshell>" if hasattr(cmd, "global_vars"): global_vars = cmd.global_vars elif hasattr(cmd, 
"extra_vars"): global_vars = __main__.__dict__.copy() global_vars.update(cmd.extra_vars) else: global_vars = __main__.__dict__ try: root = ast.parse(cmd.source, filename=filename, mode="exec") except SyntaxError as e: return self.create_message(result_type, error="".join(traceback.format_exception_only(SyntaxError, e))) assert isinstance(root, ast.Module) if len(root.body) == 1 and isinstance(root.body[0], ast.Expr): mode = "eval" else: mode = "exec" result_attributes = self._execute_source_ex(cmd.source, filename, mode, hasattr(cmd, "debug_mode") and cmd.debug_mode, global_vars) if "__result__" in global_vars: result_attributes["__result__"] = global_vars["__result__"] if hasattr(cmd, "request_id"): result_attributes["request_id"] = cmd.request_id else: result_attributes["request_id"] = None return self.create_message(result_type, **result_attributes) def _execute_source_ex(self, source, filename, execution_mode, debug_mode, global_vars=None): if debug_mode: self._current_executor = FancyTracer(self) else: self._current_executor = Executor(self) try: return self._current_executor.execute_source(source, filename, execution_mode, global_vars) finally: self._current_executor = None def _install_fake_streams(self): self._original_stdin = sys.stdin self._original_stdout = sys.stdout self._original_stderr = sys.stderr sys.stdin = VM.FakeInputStream(self, sys.stdin) sys.stdout = VM.FakeOutputStream(self, sys.stdout, "stdout") sys.stderr = VM.FakeOutputStream(self, sys.stdout, "stderr") sys.__stdin__ = sys.stdin sys.__stdout__ = sys.stdout sys.__stderr__ = sys.stderr def _fetch_command(self): line = self._original_stdin.readline() if line == "": logger.info("Read stdin EOF") sys.exit() cmd = parse_message(line) return cmd def create_message(self, message_type, **kwargs): kwargs["message_type"] = message_type if "cwd" not in kwargs: kwargs["cwd"] = os.getcwd() return kwargs def send_message(self, msg): self._original_stdout.write(serialize_message(msg) + "\n") self._original_stdout.flush() def export_value(self, value, skip_None=False): if value is None and skip_None: return None self._heap[id(value)] = value try: type_name = value.__class__.__name__ except: type_name = type(value).__name__ result = {'id' : id(value), 'repr' : repr(value), 'type_name' : type_name} return result def export_variables(self, variables): result = {} for name in variables: if not name.startswith("_thonny_hidden_"): result[name] = self.export_value(variables[name]) return result def _debug(self, *args): print("VM:", *args, file=self._original_stderr) def _enter_io_function(self): self._io_level += 1 def _exit_io_function(self): self._io_level -= 1 def is_doing_io(self): return self._io_level > 0 class FakeStream: def __init__(self, vm, target_stream): self._vm = vm self._target_stream = target_stream def isatty(self): return True def __getattr__(self, name): return getattr(self._target_stream, name) class FakeOutputStream(FakeStream): def __init__(self, vm, target_stream, stream_name): VM.FakeStream.__init__(self, vm, target_stream) self._stream_name = stream_name def write(self, data): try: self._vm._enter_io_function() if data != "": self._vm.send_message(self._vm.create_message("ProgramOutput", stream_name=self._stream_name, data=data)) finally: self._vm._exit_io_function() def writelines(self, lines): try: self._vm._enter_io_function() self.write(''.join(lines)) finally: self._vm._exit_io_function() class FakeInputStream(FakeStream): def _generic_read(self, method, limit=-1): try: self._vm._enter_io_function() 
self._vm.send_message(self._vm.create_message("InputRequest", method=method, limit=limit)) while True: cmd = self._vm._fetch_command() if isinstance(cmd, InputSubmission): return cmd.data elif isinstance(cmd, InlineCommand): self._vm.handle_command(cmd, "waiting_input") else: raise ThonnyClientError("Wrong type of command when waiting for input") finally: self._vm._exit_io_function() def read(self, limit=-1): return self._generic_read("read", limit) def readline(self, limit=-1): return self._generic_read("readline", limit) def readlines(self, limit=-1): return self._generic_read("readlines", limit) class Executor: def __init__(self, vm): self._vm = vm def execute_source(self, source, filename, mode, global_vars=None): if global_vars is None: global_vars = __main__.__dict__ try: bytecode = self._compile_source(source, filename, mode) if hasattr(self, "_trace"): sys.settrace(self._trace) if mode == "eval": value = eval(bytecode, global_vars) if value is not None: builtins._ = value return {"value_info" : self._vm.export_value(value)} else: assert mode == "exec" exec(bytecode, global_vars) return {"context_info" : "after normal execution", "source" : source, "filename" : filename, "mode" : mode} except SyntaxError as e: return {"error" : "".join(traceback.format_exception_only(SyntaxError, e))} except ThonnyClientError as e: return {"error" : str(e)} except SystemExit: e_type, e_value, e_traceback = sys.exc_info() self._print_user_exception(e_type, e_value, e_traceback) return {"SystemExit" : True} except: e_type, e_value, e_traceback = sys.exc_info() self._print_user_exception(e_type, e_value, e_traceback) return {"context_info" : "other unhandled exception"} finally: sys.settrace(None) def _print_user_exception(self, e_type, e_value, e_traceback): lines = traceback.format_exception(e_type, e_value, e_traceback) for line in lines: if ("thonny/backend" in line or "thonny\\backend" in line or "remove this line from stacktrace" in line): continue else: sys.stderr.write(line) def _compile_source(self, source, filename, mode): return compile(source, filename, mode) class FancyTracer(Executor): def __init__(self, vm): self._vm = vm self._normcase_thonny_src_dir = os.path.normcase(os.path.dirname(sys.modules["thonny"].__file__)) self._instrumented_files = _PathSet() self._interesting_files = _PathSet() self._current_command = None self._unhandled_exception = None self._install_marker_functions() self._custom_stack = [] def execute_source(self, source, filename, mode, global_vars=None): self._current_command = DebuggerCommand(command="step", state=None, focus=None, frame_id=None, exception=None) return Executor.execute_source(self, source, filename, mode, global_vars) def _install_marker_functions(self): self.marker_function_names = { BEFORE_STATEMENT_MARKER, AFTER_STATEMENT_MARKER, BEFORE_EXPRESSION_MARKER, AFTER_EXPRESSION_MARKER, } for name in self.marker_function_names: if not hasattr(builtins, name): setattr(builtins, name, getattr(self, name)) def _is_interesting_exception(self, frame): cmd = self._current_command return (id(frame) == cmd.frame_id or not self._frame_is_alive(cmd.frame_id)) def _compile_source(self, source, filename, mode): root = ast.parse(source, filename, mode) ast_utils.mark_text_ranges(root, source) self._tag_nodes(root) self._insert_expression_markers(root) self._insert_statement_markers(root) self._instrumented_files.add(filename) return compile(root, filename, mode) def _may_step_in(self, code): return not ( code is None or code.co_filename is None or code.co_flags & 
inspect.CO_GENERATOR or sys.version_info >= (3,5) and code.co_flags & inspect.CO_COROUTINE or sys.version_info >= (3,5) and code.co_flags & inspect.CO_ITERABLE_COROUTINE or sys.version_info >= (3,6) and code.co_flags & inspect.CO_ASYNC_GENERATOR or "importlib._bootstrap" in code.co_filename or os.path.normcase(code.co_filename) not in self._instrumented_files and code.co_name not in self.marker_function_names or os.path.normcase(code.co_filename).startswith(self._normcase_thonny_src_dir) and code.co_name not in self.marker_function_names or self._vm.is_doing_io() ) def _trace(self, frame, event, arg): if not self._may_step_in(frame.f_code): return code_name = frame.f_code.co_name if event == "call": self._unhandled_exception = None if code_name in self.marker_function_names: if code_name == BEFORE_STATEMENT_MARKER: event = "before_statement" elif code_name == AFTER_STATEMENT_MARKER: event = "after_statement" elif code_name == BEFORE_EXPRESSION_MARKER: event = "before_expression" elif code_name == AFTER_EXPRESSION_MARKER: event = "after_expression" else: raise AssertionError("Unknown marker function") marker_function_args = frame.f_locals.copy() del marker_function_args["self"] self._handle_progress_event(frame.f_back, event, marker_function_args) self._try_interpret_as_again_event(frame.f_back, event, marker_function_args) else: self._custom_stack.append(CustomStackFrame(frame, "call")) elif event == "return": if code_name not in self.marker_function_names: self._custom_stack.pop() if len(self._custom_stack) == 0: sys.settrace(None) else: pass elif event == "exception": exc = arg[1] if self._unhandled_exception is None: exc.causing_frame = frame else: exc.causing_frame = self._unhandled_exception.causing_frame self._unhandled_exception = exc if self._is_interesting_exception(frame): self._report_state_and_fetch_next_message(frame) elif event == "line": self._unhandled_exception = None return self._trace def _handle_progress_event(self, frame, event, args): self._debug("Progress event:", event, self._current_command) focus = TextRange(*args["text_range"]) self._custom_stack[-1].last_event = event self._custom_stack[-1].last_event_focus = focus self._custom_stack[-1].last_event_args = args tester = getattr(self, "_cmd_" + self._current_command.command + "_completed") if tester(frame, event, args, focus, self._current_command): if event == "after_expression": value = self._vm.export_value(args["value"]) else: value = None self._report_state_and_fetch_next_message(frame, value) def _report_state_and_fetch_next_message(self, frame, value=None): if self._unhandled_exception is not None: frame_infos = traceback.format_stack(self._unhandled_exception.causing_frame) if frame == self._unhandled_exception.causing_frame: interesting_frame_infos = [] else: _distance = 0 _f = self._unhandled_exception.causing_frame while _f != frame: _distance += 1 _f = _f.f_back if _f == None: break interesting_frame_infos = frame_infos[-_distance:] exception_lower_stack_description = "".join(interesting_frame_infos) exception_msg = str(self._unhandled_exception) else: exception_lower_stack_description = None exception_msg = None self._vm.send_message(self._vm.create_message("DebuggerProgress", command=self._current_command.command, stack=self._export_stack(), exception=self._vm.export_value(self._unhandled_exception, True), exception_msg=exception_msg, exception_lower_stack_description=exception_lower_stack_description, value=value, command_context="waiting_debugger_command" )) self._current_command = 
self._vm._fetch_command() self._debug("got command:", self._current_command) self._respond_to_inline_commands() assert isinstance(self._current_command, DebuggerCommand)
MIT License
angr/cle
cle/address_translator.py
AddressTranslator.to_mva
python
def to_mva(self): return self._rva + self._owner.mapped_base
RVA -> MVA :rtype: int
https://github.com/angr/cle/blob/7996cb1789eccc461cb31ab3c6234a74015489fd/cle/address_translator.py#L71-L76
class AddressTranslator(object): __slots__ = ('_rva', '_owner', ) """ Mediates address translations between typed addresses such as RAW, RVA, LVA, MVA and VA including address owner and its state (linked or mapped) Semantics:: owner - object associated with the address (any object class based on `cle.Backend`) owner mapping state - sparse object can be either mapped or not (actual object's image base VA to be considered valid) RAW - offset (index) inside a file stream VA - address inside process flat virtual memory space RVA - address relative to the object's segment base (segment base normalized virtual address) LVA - linked VA (linker) MVA - mapped VA (loader) """ def __init__(self, rva, owner): self._rva, self._owner = rva, owner @classmethod def from_lva(cls, lva, owner): return cls(lva - owner.linked_base, owner) @classmethod def from_mva(cls, mva, owner): return cls(mva - owner.mapped_base, owner) @classmethod def from_rva(cls, rva, owner): return cls(rva, owner) @classmethod def from_raw(cls, raw, owner): return cls(owner.offset_to_addr(raw) - (owner.mapped_base if owner._is_mapped else owner.linked_base), owner) from_linked_va = from_lva from_va = from_mapped_va = from_mva from_relative_va = from_rva def to_lva(self): return self._rva + self._owner.linked_base
BSD 2-Clause Simplified License
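A minimal usage sketch for the AddressTranslator.to_mva entry above, assuming a cle.Loader is available; the binary path and the 0x1000 offset are illustrative, not taken from the source.

import cle
from cle.address_translator import AddressTranslator

ld = cle.Loader("/bin/ls")                 # hypothetical target binary
obj = ld.main_object
lva = obj.linked_base + 0x1000             # some link-time address inside the object
mva = AddressTranslator.from_lva(lva, obj).to_mva()
# to_mva() returns rva + mapped_base, i.e. (lva - linked_base) + mapped_base
assert mva == lva - obj.linked_base + obj.mapped_base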
pallets/itsdangerous
src/itsdangerous/serializer.py
Serializer.dump_payload
python
def dump_payload(self, obj: _t.Any) -> bytes: return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))
Dumps the encoded object. The return value is always bytes. If the internal serializer returns text, the value will be encoded as UTF-8.
https://github.com/pallets/itsdangerous/blob/0e13d6613acebe99be211d27d55f56819cba9147/src/itsdangerous/serializer.py#L164-L169
import json import typing as _t from .encoding import want_bytes from .exc import BadPayload from .exc import BadSignature from .signer import _make_keys_list from .signer import Signer _t_str_bytes = _t.Union[str, bytes] _t_opt_str_bytes = _t.Optional[_t_str_bytes] _t_kwargs = _t.Dict[str, _t.Any] _t_opt_kwargs = _t.Optional[_t_kwargs] _t_signer = _t.Type[Signer] _t_fallbacks = _t.List[_t.Union[_t_kwargs, _t.Tuple[_t_signer, _t_kwargs], _t_signer]] _t_load_unsafe = _t.Tuple[bool, _t.Any] _t_secret_key = _t.Union[_t.Iterable[_t_str_bytes], _t_str_bytes] def is_text_serializer(serializer: _t.Any) -> bool: return isinstance(serializer.dumps({}), str) class Serializer: default_serializer: _t.Any = json default_signer: _t_signer = Signer default_fallback_signers: _t_fallbacks = [] def __init__( self, secret_key: _t_secret_key, salt: _t_opt_str_bytes = b"itsdangerous", serializer: _t.Any = None, serializer_kwargs: _t_opt_kwargs = None, signer: _t.Optional[_t_signer] = None, signer_kwargs: _t_opt_kwargs = None, fallback_signers: _t.Optional[_t_fallbacks] = None, ): self.secret_keys: _t.List[bytes] = _make_keys_list(secret_key) if salt is not None: salt = want_bytes(salt) self.salt = salt if serializer is None: serializer = self.default_serializer self.serializer: _t.Any = serializer self.is_text_serializer: bool = is_text_serializer(serializer) if signer is None: signer = self.default_signer self.signer: _t_signer = signer self.signer_kwargs: _t_kwargs = signer_kwargs or {} if fallback_signers is None: fallback_signers = list(self.default_fallback_signers or ()) self.fallback_signers: _t_fallbacks = fallback_signers self.serializer_kwargs: _t_kwargs = serializer_kwargs or {} @property def secret_key(self) -> bytes: return self.secret_keys[-1] def load_payload( self, payload: bytes, serializer: _t.Optional[_t.Any] = None ) -> _t.Any: if serializer is None: serializer = self.serializer is_text = self.is_text_serializer else: is_text = is_text_serializer(serializer) try: if is_text: return serializer.loads(payload.decode("utf-8")) return serializer.loads(payload) except Exception as e: raise BadPayload( "Could not load the payload because an exception" " occurred on unserializing the data.", original_error=e, ) from e
BSD 3-Clause New or Revised License
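An illustrative sketch for Serializer.dump_payload above (the secret key and payload are made up): the method always hands back bytes, even though the default json serializer emits str.

from itsdangerous import Serializer

s = Serializer("secret-key")               # hypothetical key
payload = s.dump_payload({"id": 42})
assert isinstance(payload, bytes)          # want_bytes() encodes the JSON text as UTF-8
print(payload)                             # b'{"id": 42}'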
angr/angr
angr/sim_type.py
SimTypeLength.__init__
python
def __init__(self, signed=False, addr=None, length=None, label=None): super().__init__(signed=signed, label=label) self.addr = addr self.length = length
:param signed: Whether the value is signed or not. :param label: The type label. :param addr: The memory address (expression). :param length: The length (expression).
https://github.com/angr/angr/blob/94de0f468df0c0d27428301dae93d94f935ade9b/angr/sim_type.py#L960-L969
from collections import OrderedDict, defaultdict, ChainMap from archinfo import Endness from .misc.ux import deprecated import copy import re import logging from typing import Optional, Dict, Any, Tuple, List, Union import claripy l = logging.getLogger(name=__name__) errorlog = logging.getLogger(name=__name__ + ".yacc") errorlog.setLevel(logging.ERROR) try: import pycparser except ImportError: pycparser = None try: import CppHeaderParser except ImportError: CppHeaderParser = None class SimType: _fields = () _arch = None _size = None _can_refine_int = False _base_name = None base = True def __init__(self, label=None): self.label = label def __eq__(self, other): if type(self) != type(other): return False for attr in self._fields: if attr == 'size' and self._arch is None and other._arch is None: continue if getattr(self, attr) != getattr(other, attr): return False return True def __ne__(self, other): return not self == other def __hash__(self): out = hash(type(self)) for attr in self._fields: out ^= hash(getattr(self, attr)) return out def _refine_dir(self): return [] def _refine(self, view, k): raise KeyError(f"{k} is not a valid refinement") @property def size(self): if self._size is not None: return self._size return NotImplemented @property def alignment(self): if self._arch is None: return NotImplemented if self.size is NotImplemented: return NotImplemented return self.size // self._arch.byte_width def with_arch(self, arch): if arch is None: return self if self._arch is not None and self._arch == arch: return self else: return self._with_arch(arch) def _with_arch(self, arch): cp = copy.copy(self) cp._arch = arch return cp def _init_str(self): return f"NotImplemented({self.__class__.__name__})" def c_repr(self, name=None, full=0, memo=None, indent=0): if name is None: return repr(self) else: return f'{str(self) if self.label is None else self.label} {name}' def copy(self): raise NotImplementedError() class TypeRef(SimType): def __init__(self, name, ty): super().__init__() self.type = ty self._name = name @property def name(self): return self._name def __eq__(self, other): return type(other) is TypeRef and self.type == other.type def __hash__(self): return hash(self.type) def __repr__(self): return self.name @property def _arch(self): return self.type._arch @property def size(self): return self.type.size @property def alignment(self): return self.type.alignment def with_arch(self, arch): self.type = self.type.with_arch(arch) return self def c_repr(self, name=None, full=0, memo=None, indent=0): if not full: if name is not None: return f'{self.name} {name}' else: return self.name else: return self.type.c_repr(name=name, full=full, memo=memo, indent=indent) def copy(self): raise NotImplementedError("copy() for TypeRef is ill-defined. 
What do you want this to do?") class NamedTypeMixin: def __init__(self, *args, name: Optional[str]=None, **kwargs): super().__init__(*args, **kwargs) self._name = name @property def name(self) -> str: if self._name is None: self._name = repr(self) return self._name @name.setter def name(self, v): self._name = v def unqualified_name(self, lang: str = "c++") -> str: if lang == "c++": splitter = "::" n = self.name.split(splitter) return n[-1] raise NotImplementedError(f"Unsupported language {lang}.") class SimTypeBottom(SimType): _base_name = 'bot' def __repr__(self, label=None): return 'BOT' def _init_str(self): return "%s(%s)" % ( self.__class__.__name__, ("label=\"%s\"" % self.label) if self.label else "" ) def copy(self): return SimTypeBottom(self.label) class SimTypeTop(SimType): _fields = ('size',) def __init__(self, size=None, label=None): SimType.__init__(self, label) self._size = size def __repr__(self): return 'TOP' def copy(self): return SimTypeTop(size=self.size, label=self.label) class SimTypeReg(SimType): _fields = ('size',) def __init__(self, size, label=None): SimType.__init__(self, label=label) self._size = size def __repr__(self): return "reg{}_t".format(self.size) def extract(self, state, addr, concrete=False): assert self.size % state.arch.byte_width == 0 out = state.memory.load(addr, self.size // state.arch.byte_width, endness=state.arch.memory_endness) if not concrete: return out return state.solver.eval(out) def store(self, state, addr, value): store_endness = state.arch.memory_endness try: value = value.ast except AttributeError: pass if isinstance(value, claripy.ast.Bits): if value.size() != self.size: raise ValueError("size of expression is wrong size for type") elif isinstance(value, int): value = state.solver.BVV(value, self.size) elif isinstance(value, bytes): store_endness = 'Iend_BE' else: raise TypeError("unrecognized expression type for SimType {}".format(type(self).__name__)) state.memory.store(addr, value, endness=store_endness) def copy(self): return self.__class__(self.size, label=self.label) class SimTypeNum(SimType): _fields = SimType._fields + ('signed', 'size') def __init__(self, size, signed=True, label=None): super().__init__(label) self._size = size self.signed = signed def __repr__(self): return "{}int{}_t".format('' if self.signed else 'u', self.size) def extract(self, state, addr, concrete=False): out = state.memory.load(addr, self.size // state.arch.byte_width, endness=state.arch.memory_endness) if not concrete: return out n = state.solver.eval(out) if self.signed and n >= 1 << (self.size-1): n -= 1 << (self.size) return n def store(self, state, addr, value): store_endness = state.arch.memory_endness if isinstance(value, claripy.ast.Bits): if value.size() != self.size: raise ValueError("size of expression is wrong size for type") elif isinstance(value, int): value = state.solver.BVV(value, self.size) elif isinstance(value, bytes): store_endness = 'Iend_BE' else: raise TypeError("unrecognized expression type for SimType {}".format(type(self).__name__)) state.memory.store(addr, value, endness=store_endness) def copy(self): return SimTypeNum(self.size, signed=self.signed, label=self.label) class SimTypeInt(SimTypeReg): _fields = tuple(x for x in SimTypeReg._fields if x != 'size') + ('signed',) _base_name = 'int' def __init__(self, signed=True, label=None): super().__init__(None, label=label) self.signed = signed def c_repr(self, name=None, full=0, memo=None, indent=0): out = self._base_name if not self.signed: out = 'unsigned ' + out if name is 
None: return out return '%s %s' % (out, name) def __repr__(self): name = self._base_name if not self.signed: name = 'unsigned ' + name try: return name + ' (%d bits)' % self.size except ValueError: return name @property def size(self): if self._arch is None: raise ValueError("Can't tell my size without an arch!") try: return self._arch.sizeof[self._base_name] except KeyError: raise ValueError("Arch %s doesn't have its %s type defined!" % (self._arch.name, self._base_name)) def extract(self, state, addr, concrete=False): out = state.memory.load(addr, self.size // state.arch.byte_width, endness=state.arch.memory_endness) if not concrete: return out n = state.solver.eval(out) if self.signed and n >= 1 << (self.size-1): n -= 1 << self.size return n def _init_str(self): return "%s(signed=%s%s)" % ( self.__class__.__name__, self.signed, (', label="%s"' % self.label) if self.label is not None else "", ) def _refine_dir(self): return ['signed', 'unsigned'] def _refine(self, view, k): if k == 'signed': ty = copy.copy(self) ty.signed = True elif k == 'unsigned': ty = copy.copy(self) ty.signed = False else: raise KeyError(k) return view._deeper(ty=ty) def copy(self): return self.__class__(signed=self.signed, label=self.label) class SimTypeShort(SimTypeInt): _base_name = 'short' class SimTypeLong(SimTypeInt): _base_name = 'long' class SimTypeLongLong(SimTypeInt): _base_name = 'long long' class SimTypeChar(SimTypeReg): _base_name = 'char' def __init__(self, signed=True, label=None): SimTypeReg.__init__(self, 8, label=label) self.signed = signed def __repr__(self): return 'char' def store(self, state, addr, value): self._size = state.arch.byte_width try: super().store(state, addr, value) except TypeError: if isinstance(value, bytes) and len(value) == 1: value = state.solver.BVV(value[0], state.arch.byte_width) super().store(state, addr, value) else: raise def extract(self, state, addr, concrete=False): self._size = state.arch.byte_width out = super().extract(state, addr, concrete) if concrete: return bytes([out]) return out def _init_str(self): return "%s(%s)" % ( self.__class__.__name__, ('label="%s"' % self.label) if self.label is not None else "", ) def copy(self): return self.__class__(signed=self.signed, label=self.label) class SimTypeBool(SimTypeChar): _base_name = "bool" def __repr__(self): return 'bool' def store(self, state, addr, value): return super().store(state, addr, int(value)) def extract(self, state, addr, concrete=False): ver = super().extract(state, addr, concrete) if concrete: return ver != b'\0' return ver != 0 def _init_str(self): return f"{self.__class__.__name__}()" class SimTypeFd(SimTypeReg): _fields = SimTypeReg._fields def __init__(self, label=None): super().__init__(32, label=label) def __repr__(self): return 'fd_t' def copy(self): return SimTypeFd(label=self.label) def _init_str(self): return "%s(%s)" % ( self.__class__.__name__, ('label="%s"' % self.label) if self.label is not None else "", ) class SimTypePointer(SimTypeReg): _fields = SimTypeReg._fields + ('pts_to',) def __init__(self, pts_to, label=None, offset=0): super().__init__(None, label=label) self.pts_to = pts_to self.signed = False self.offset = offset def __repr__(self): return '{}*'.format(self.pts_to) def c_repr(self, name=None, full=0, memo=None, indent=0): deref_chr = '*' if not isinstance(self.pts_to, SimTypeArray) else '' name_with_deref = deref_chr if name is None else '%s%s' % (deref_chr, name) return self.pts_to.c_repr(name_with_deref, full, memo, indent) def make(self, pts_to): new = 
type(self)(pts_to) new._arch = self._arch return new @property def size(self): if self._arch is None: raise ValueError("Can't tell my size without an arch!") return self._arch.bits def _with_arch(self, arch): out = SimTypePointer(self.pts_to.with_arch(arch), self.label) out._arch = arch return out def _init_str(self): return "%s(%s%s, offset=%d)" % ( self.__class__.__name__, self.pts_to._init_str(), (', label="%s"' % self.label) if self.label is not None else "", self.offset ) def copy(self): return SimTypePointer(self.pts_to, label=self.label, offset=self.offset) class SimTypeReference(SimTypeReg): def __init__(self, refs, label=None): super().__init__(None, label=label) self.refs: SimType = refs def __repr__(self): return f"{self.refs}&" def c_repr(self, name=None, full=0, memo=None, indent=0): name = '&' if name is None else '&%s' % name return self.refs.c_repr(name, full, memo, indent) def make(self, refs): new = type(self)(refs) new._arch = self._arch return new @property def size(self): if self._arch is None: raise ValueError("Can't tell my size without an arch!") return self._arch.bits def _with_arch(self, arch): out = SimTypeReference(self.refs.with_arch(arch), label=self.label) out._arch = arch return out def _init_str(self): return "%s(%s%s)" % ( self.__class__.__name__, self.refs._init_str(), (', label="%s"' % self.label) if self.label is not None else "", ) def copy(self): return SimTypeReference(self.refs, label=self.label) class SimTypeFixedSizeArray(SimType): def __init__(self, elem_type, length): super().__init__() self.elem_type = elem_type self.length = length def __repr__(self): return '{}[{}]'.format(self.elem_type, self.length) def c_repr(self, name=None, full=0, memo=None, indent=0): if name is None: return repr(self) name = '%s[%s]' % (name, self.length) return self.elem_type.c_repr(name, full, memo, indent) _can_refine_int = True def _refine(self, view, k): return view._deeper(addr=view._addr + k * (self.elem_type.size//view.state.arch.byte_width), ty=self.elem_type) def extract(self, state, addr, concrete=False): return [self.elem_type.extract(state, addr + i*(self.elem_type.size//state.arch.byte_width), concrete) for i in range(self.length)] def store(self, state, addr, values): for i, val in enumerate(values): self.elem_type.store(state, addr + i * (self.elem_type.size // state.arch.byte_width), val) @property def size(self): return self.elem_type.size * self.length @property def alignment(self): return self.elem_type.alignment def _with_arch(self, arch): out = SimTypeFixedSizeArray(self.elem_type.with_arch(arch), self.length) out._arch = arch return out def _init_str(self): return "%s(%s, %d)" % ( self.__class__.__name__, self.elem_type._init_str(), self.length, ) def copy(self): return SimTypeFixedSizeArray(self.elem_type, self.length) class SimTypeArray(SimType): _fields = ('elem_type', 'length') def __init__(self, elem_type, length=None, label=None): super().__init__(label=label) self.elem_type: SimType = elem_type self.length: Optional[int] = length def __repr__(self): return '{}[{}]'.format(self.elem_type, '' if self.length is None else self.length) def c_repr(self, name=None, full=0, memo=None, indent=0): if name is None: return repr(self) name = '%s[%s]' % (name, self.length if self.length is not None else '') return self.elem_type.c_repr(name, full, memo, indent) @property def size(self): if self._arch is None: raise ValueError("I can't tell my size without an arch!") return self._arch.bits @property def alignment(self): return self.elem_type.alignment 
def _with_arch(self, arch): out = SimTypeArray(self.elem_type.with_arch(arch), self.length, self.label) out._arch = arch return out def copy(self): return SimTypeArray(self.elem_type, length=self.length, label=self.label) class SimTypeString(NamedTypeMixin, SimTypeArray): _fields = SimTypeArray._fields + ('length',) def __init__(self, length=None, label=None, name: Optional[str]=None): super().__init__(SimTypeChar(), label=label, length=length, name=name) def __repr__(self): return 'string_t' def extract(self, state, addr, concrete=False): if self.length is None: out = None last_byte = state.memory.load(addr, 1) if state.solver.symbolic(last_byte): raise ValueError("Trying to extract a symbolic string at %#x" % state.solver.eval(addr)) addr += 1 while not (claripy.is_true(last_byte == 0) or state.solver.symbolic(last_byte)): out = last_byte if out is None else out.concat(last_byte) last_byte = state.memory.load(addr, 1) addr += 1 else: out = state.memory.load(addr, self.length) if not concrete: return out if out is not None else claripy.BVV(0, 0) else: return state.solver.eval(out, cast_to=bytes) if out is not None else '' _can_refine_int = True def _refine(self, view, k): return view._deeper(addr=view._addr + k, ty=SimTypeChar()) @property def size(self): if self.length is None: return 4096 return (self.length + 1) * 8 @property def alignment(self): return 1 def _with_arch(self, arch): return self def copy(self): return SimTypeString(length=self.length, label=self.label, name=self.name) class SimTypeWString(NamedTypeMixin, SimTypeArray): _fields = SimTypeArray._fields + ('length',) def __init__(self, length=None, label=None, name: Optional[str]=None): super().__init__(SimTypeNum(16, False), label=label, length=length, name=name) def __repr__(self): return 'wstring_t' def extract(self, state, addr, concrete=False): if self.length is None: out = None last_byte = state.memory.load(addr, 2) if state.solver.symbolic(last_byte): raise ValueError("Trying to extract a symbolic string at %#x" % state.solver.eval(addr)) addr += 2 while not (claripy.is_true(last_byte == 0) or state.solver.symbolic(last_byte)): out = last_byte if out is None else out.concat(last_byte) last_byte = state.memory.load(addr, 2) addr += 2 else: out = state.memory.load(addr, self.length*2) if out is None: out = claripy.BVV(0, 0) if not concrete: return out else: return u''.join(chr(state.solver.eval(x.reversed if state.arch.memory_endness == 'Iend_LE' else x)) for x in out.chop(16)) _can_refine_int = True def _refine(self, view, k): return view._deeper(addr=view._addr + k * 2, ty=SimTypeNum(16, False)) @property def size(self): if self.length is None: return 4096 return (self.length * 2 + 2) * 8 @property def alignment(self): return 2 def _with_arch(self, arch): return self def copy(self): return SimTypeWString(length=self.length, label=self.label, name=self.name) class SimTypeFunction(SimType): _fields = ('args', 'returnty') base = False def __init__(self, args, returnty, label=None, arg_names=None, variadic=False): super().__init__(label=label) self.args = args self.returnty: Optional[SimType] = returnty self.arg_names = arg_names if arg_names else () self.variadic = variadic def __repr__(self): argstrs = [str(a) for a in self.args] if self.variadic: argstrs.append('...') return '({}) -> {}'.format(', '.join(argstrs), self.returnty) def c_repr(self, name=None, full=0, memo=None, indent=0): name2 = name or '' name3 = '(%s)(%s)' % (name2, ', '.join(a.c_repr(n, full-1, memo, indent) for a, n in zip(self.args, self.arg_names 
if self.arg_names is not None and full else (None,)*len(self.args)))) name4 = self.returnty.c_repr(name3, full, memo, indent) if self.returnty is not None else 'void %s' % name3 return name4 @property def size(self): return 4096 def _with_arch(self, arch): out = SimTypeFunction([a.with_arch(arch) for a in self.args], self.returnty.with_arch(arch) if self.returnty is not None else None, label=self.label, arg_names=self.arg_names, variadic=self.variadic ) out._arch = arch return out def _arg_names_str(self, show_variadic=True): argnames = list(self.arg_names) if self.variadic and show_variadic: argnames.append('...') return ", ".join('"%s"' % arg_name for arg_name in argnames) def _init_str(self): return "%s([%s], %s%s%s%s)" % ( self.__class__.__name__, ", ".join([arg._init_str() for arg in self.args]), self.returnty._init_str(), (", label=%s" % self.label) if self.label else "", (", arg_names=[%s]" % self._arg_names_str(show_variadic=False)) if self.arg_names else "", ", variadic=True" if self.variadic else "", ) def copy(self): return SimTypeFunction(self.args, self.returnty, label=self.label, arg_names=self.arg_names, variadic=self.variadic) class SimTypeCppFunction(SimTypeFunction): def __init__(self, args, returnty, label=None, arg_names: Tuple[str]=None, ctor: bool=False, dtor: bool=False): super().__init__(args, returnty, label=label, arg_names=arg_names, variadic=False) self.ctor = ctor self.dtor = dtor def __repr__(self): argstrs = [str(a) for a in self.args] if self.variadic: argstrs.append('...') return str(self.label)+'({}) -> {}'.format(', '.join(argstrs), self.returnty) def _init_str(self): return "%s([%s], %s%s%s%s)" % ( self.__class__.__name__, ", ".join([arg._init_str() for arg in self.args]), self.returnty, (", label=%s" % self.label) if self.label else "", (", arg_names=[%s]" % self._arg_names_str(show_variadic=False)) if self.arg_names else "", ", variadic=True" if self.variadic else "", ) def copy(self): return SimTypeCppFunction( self.args, self.returnty, label=self.label, arg_names=self.arg_names, ctor=self.ctor, dtor=self.dtor, ) class SimTypeLength(SimTypeLong): _fields = SimTypeNum._fields + ('addr', 'length')
BSD 2-Clause Simplified License
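A small construction sketch for SimTypeLength.__init__ above; the arch choice is illustrative, and on AMD64 the type should report a 64-bit size because SimTypeLength derives from SimTypeLong.

import archinfo
from angr.sim_type import SimTypeLength

length_ty = SimTypeLength(signed=False).with_arch(archinfo.ArchAMD64())
print(length_ty.size)                      # 64 on AMD64 (the size of 'long')
print(length_ty.addr, length_ty.length)    # None None until address/length expressions are set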
ucsbarchlab/pyrtl
pyrtl/wire.py
WireVector.__ge__
python
def __ge__(self, other): return ~ self._two_var_op(other, '<')
Creates LogicNets that calculate whether a wire is greater than or equal to another. :return WireVector: a one-bit result wire of the operation
https://github.com/ucsbarchlab/pyrtl/blob/8b42f566a3c2c23de21f1b534900232219a3b313/pyrtl/wire.py#L380-L385
from __future__ import print_function, unicode_literals import numbers import six import re import sys from . import core from .pyrtlexceptions import PyrtlError, PyrtlInternalError from .core import working_block, LogicNet, _NameIndexer _wvIndexer = _NameIndexer("tmp") _constIndexer = _NameIndexer("const_") def _reset_wire_indexers(): global _wvIndexer, _constIndexer _wvIndexer = _NameIndexer("tmp") _constIndexer = _NameIndexer("const_") def next_tempvar_name(name=""): if name == '': wire_name = _wvIndexer.make_valid_string() callpoint = core._get_useful_callpoint_name() if callpoint: filename, lineno = callpoint safename = re.sub(r'[\W]+', '', filename) wire_name += '_%s_line%d' % (safename, lineno) return wire_name else: if name.lower() in ['clk', 'clock']: raise PyrtlError('Clock signals should never be explicit') return name class WireVector(object): _code = 'W' def __init__(self, bitwidth=None, name='', block=None): self._name = None self._block = working_block(block) self.name = next_tempvar_name(name) self._validate_bitwidth(bitwidth) if core._setting_keep_wirevector_call_stack: import traceback self.init_call_stack = traceback.format_stack() @property def name(self): return self._name @name.setter def name(self, value): if not isinstance(value, six.string_types): raise PyrtlError('WireVector names must be strings') self._block.wirevector_by_name.pop(self._name, None) self._name = value self._block.add_wirevector(self) def __hash__(self): return id(self) def __str__(self): return ''.join([self.name, '/', str(self.bitwidth), self._code]) def _validate_bitwidth(self, bitwidth): if bitwidth is not None: if not isinstance(bitwidth, numbers.Integral): raise PyrtlError('bitwidth must be from type int or unspecified, instead "%s"' ' was passed of type %s' % (str(bitwidth), type(bitwidth))) elif bitwidth == 0: raise PyrtlError('bitwidth must be greater than or equal to 1') elif bitwidth < 0: raise PyrtlError('you are trying a negative bitwidth? awesome but wrong') self.bitwidth = bitwidth def _build(self, other): net = LogicNet( op='w', op_param=None, args=(other,), dests=(self,)) working_block().add_net(net) def _prepare_for_assignment(self, rhs): from .corecircuits import as_wires rhs = as_wires(rhs, bitwidth=self.bitwidth) if self.bitwidth is None: self.bitwidth = rhs.bitwidth return rhs def __ilshift__(self, other): other = self._prepare_for_assignment(other) self._build(other) return self def __ior__(self, other): from .conditional import _build, currently_under_condition if not self.bitwidth: raise PyrtlError('Conditional assignment only defined on ' 'WireVectors with pre-defined bitwidths') other = self._prepare_for_assignment(other) if currently_under_condition(): _build(self, other) else: self._build(other) return self def _two_var_op(self, other, op): from .corecircuits import as_wires, match_bitwidth a, b = self, as_wires(other) a, b = match_bitwidth(a, b) resultlen = len(a) if op in '+-': resultlen += 1 elif op == '*': resultlen = resultlen * 2 elif op in '<>=': resultlen = 1 s = WireVector(bitwidth=resultlen) net = LogicNet( op=op, op_param=None, args=(a, b), dests=(s,)) working_block().add_net(net) return s def __bool__(self): raise PyrtlError('cannot convert wirevector to compile-time boolean. 
This error ' 'often happens when you attempt to use WireVectors with "==" or ' 'something that calls "__eq__", such as when you test if a ' 'wirevector is "in" something') __nonzero__ = __bool__ def __and__(self, other): return self._two_var_op(other, '&') def __rand__(self, other): return self._two_var_op(other, '&') def __iand__(self, other): raise PyrtlError('error, operation not allowed on WireVectors') def __or__(self, other): return self._two_var_op(other, '|') def __ror__(self, other): return self._two_var_op(other, '|') def __xor__(self, other): return self._two_var_op(other, '^') def __rxor__(self, other): return self._two_var_op(other, '^') def __ixor__(self, other): raise PyrtlError('error, operation not allowed on WireVectors') def __add__(self, other): return self._two_var_op(other, '+') def __radd__(self, other): return self._two_var_op(other, '+') def __iadd__(self, other): raise PyrtlError('error, operation not allowed on WireVectors') def __sub__(self, other): return self._two_var_op(other, '-') def __rsub__(self, other): from .corecircuits import as_wires other = as_wires(other) return other._two_var_op(self, '-') def __isub__(self, other): raise PyrtlError('error, operation not allowed on WireVectors') def __mul__(self, other): return self._two_var_op(other, '*') def __rmul__(self, other): return self._two_var_op(other, '*') def __imul__(self, other): raise PyrtlError('error, operation not allowed on WireVectors') def __lt__(self, other): return self._two_var_op(other, '<') def __le__(self, other): return ~ self._two_var_op(other, '>') def __eq__(self, other): return self._two_var_op(other, '=') def __ne__(self, other): return ~ self._two_var_op(other, '=') def __gt__(self, other): return self._two_var_op(other, '>')
BSD 3-Clause New or Revised License
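A brief simulation sketch for WireVector.__ge__ above (wire names and widths are arbitrary): a >= b synthesizes as ~(a < b) and yields a one-bit result wire.

import pyrtl

a = pyrtl.Input(bitwidth=4, name='a')
b = pyrtl.Input(bitwidth=4, name='b')
ge = pyrtl.Output(bitwidth=1, name='ge')
ge <<= a >= b                              # calls WireVector.__ge__, i.e. ~(a < b)

sim = pyrtl.Simulation()
sim.step({'a': 7, 'b': 3})
print(sim.inspect('ge'))                   # 1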
austinoboyle/scrape-linkedin-selenium
scrape_linkedin/utils.py
get_info
python
def get_info(element, mapping, default=None): return {key: text_or_default(element, mapping[key], default=default) for key in mapping}
Turn a beautifulsoup element and a key->selector dict into a key->value dict Args: - element: A beautifulsoup element - mapping: a dictionary mapping key(str)->css selector(str) - default: The default value to be given for any key whose css selector matches no elements Returns: A dict mapping each key to the text content of the first element that matched the css selector in the element. If no matching element is found, the key's value will be the default param.
https://github.com/austinoboyle/scrape-linkedin-selenium/blob/120900a6973b60d7e1d641f38faf921213bc76bd/scrape_linkedin/utils.py#L104-L118
import logging import re from datetime import datetime from typing import List, Optional import bs4 from selenium.webdriver.chrome.options import Options options = Options() options.add_argument('--headless') HEADLESS_OPTIONS = {'chrome_options': options} logger = logging.getLogger(__name__) def _find_element(driver, by): return driver.find_element(*by) def flatten_list(l): return [item for sublist in l for item in sublist] def split_lists(lst, num): k, m = divmod(len(lst), num) return [lst[i * k + min(i, m): (i+1) * k + min(i + 1, m)] for i in range(num)] class TextChanged(object): def __init__(self, locator, text): self.locator = locator self.text = text def __call__(self, driver): actual_text = _find_element(driver, self.locator).text return actual_text != self.text class AnyEC(object): def __init__(self, *args): self.ecs = args def __call__(self, driver): for fn in self.ecs: try: if fn(driver): return True except: pass return False def one_or_default(element: Optional[bs4.Tag], selector: str, default=None) -> Optional[bs4.Tag]: try: el = element.select_one(selector) if not el: return default return element.select_one(selector) except Exception as e: return default def text_or_default(element, selector, default=None): try: return element.select_one(selector).get_text().strip() except Exception as e: return default def all_or_default(element, selector, default=[]): try: elements = element.select(selector) if len(elements) == 0: return default return element.select(selector) except Exception as e: return default
MIT License
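A usage sketch for get_info above with an invented HTML snippet; the markup and selectors are illustrative only.

from bs4 import BeautifulSoup
from scrape_linkedin.utils import get_info

html = '<div><span class="name">Ada</span><span class="title">Engineer</span></div>'
card = BeautifulSoup(html, 'html.parser').select_one('div')
mapping = {'name': '.name', 'title': '.title', 'company': '.company'}
print(get_info(card, mapping, default='N/A'))
# {'name': 'Ada', 'title': 'Engineer', 'company': 'N/A'}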
theqrl/qrl
src/qrl/cli.py
tx_transfer
python
def tx_transfer(ctx, src, master, dsts, amounts, message_data, fee, ots_key_index): address_src_pk = None master_addr = None addresses_dst = [] shor_amounts = [] fee_shor = [] signing_object = None message_data = message_data.encode() try: selected_wallet = _select_wallet(ctx, src) if selected_wallet is None or len(selected_wallet) != 2: click.echo("A wallet was not found") quit(1) _, src_xmss = selected_wallet if not src_xmss: click.echo("A local wallet is required to sign the transaction") quit(1) address_src_pk = src_xmss.pk ots_key_index = validate_ots_index(ots_key_index, src_xmss) src_xmss.set_ots_index(ots_key_index) signing_object = src_xmss if master: master_addr = parse_qaddress(master) addresses_dst, shor_amounts = _parse_dsts_amounts(dsts, amounts, check_multi_sig_address=True) fee_shor = _quanta_to_shor(fee) except Exception as e: click.echo("Error validating arguments: {}".format(e)) quit(1) try: tx = TransferTransaction.create(addrs_to=addresses_dst, amounts=shor_amounts, message_data=message_data, fee=fee_shor, xmss_pk=address_src_pk, master_addr=master_addr) tx.sign(signing_object) txjson = tx_unbase64(tx.to_json()) print(txjson) if not tx.validate(): print("It was not possible to validate the signature") quit(1) print("\nTransaction Blob (signed): \n") txblob = tx.pbdata.SerializeToString() txblobhex = hexlify(txblob).decode() print(txblobhex) print("Sending to a QRL Node...") stub = ctx.obj.get_stub_public_api() push_transaction_req = qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata) push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT) print(push_transaction_resp) except Exception as e: print("Error {}".format(str(e)))
Transfer coins from src to dsts
https://github.com/theqrl/qrl/blob/5803997005afceaeffbd17145c2ffb14c937bce6/src/qrl/cli.py#L700-L777
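A hedged invocation sketch for tx_transfer above using click's test runner; the subcommand and option flags are inferred from the function signature, the destination address is a placeholder, and a reachable node plus a decrypted local wallet are assumed.

from click.testing import CliRunner
from qrl.cli import qrl

runner = CliRunner()
result = runner.invoke(qrl, [
    'tx_transfer',
    '--src', '0',                          # wallet index (or a Q-address)
    '--master', '',
    '--dsts', 'Q010500...placeholder',     # destination QRL address (placeholder)
    '--amounts', '1.5',
    '--message_data', '',
    '--fee', '0.01',
    '--ots_key_index', '1',
])
print(result.output)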
import os from binascii import hexlify, a2b_base64 from collections import namedtuple from decimal import Decimal from typing import List import click import grpc import simplejson as json from google.protobuf.json_format import MessageToJson from pyqrllib.pyqrllib import mnemonic2bin, hstr2bin, bin2hstr from qrl.core import config from qrl.core.Wallet import Wallet, WalletDecryptionError from qrl.core.misc.helper import parse_hexblob, parse_qaddress from qrl.core.MultiSigAddressState import MultiSigAddressState from qrl.core.txs.MessageTransaction import MessageTransaction from qrl.core.txs.SlaveTransaction import SlaveTransaction from qrl.core.txs.TokenTransaction import TokenTransaction from qrl.core.txs.Transaction import Transaction from qrl.core.txs.TransferTokenTransaction import TransferTokenTransaction from qrl.core.txs.TransferTransaction import TransferTransaction from qrl.core.txs.multisig.MultiSigCreate import MultiSigCreate from qrl.core.txs.multisig.MultiSigSpend import MultiSigSpend from qrl.crypto.xmss import XMSS, hash_functions from qrl.generated import qrl_pb2_grpc, qrl_pb2 ENV_QRL_WALLET_DIR = 'ENV_QRL_WALLET_DIR' OutputMessage = namedtuple('OutputMessage', 'error address_items balance_items') BalanceItem = namedtuple('BalanceItem', 'address balance') CONNECTION_TIMEOUT = 5 class CLIContext(object): def __init__(self, verbose, host, port_public, wallet_dir, output_json): self.verbose = verbose self.host = host self.port_public = port_public self.wallet_dir = os.path.abspath(wallet_dir) self.wallet_path = os.path.join(self.wallet_dir, 'wallet.json') self.output_json = output_json def get_stub_public_api(self): node_public_address = '{}:{}'.format(self.host, self.port_public) channel = grpc.insecure_channel(node_public_address) return qrl_pb2_grpc.PublicAPIStub(channel) def _print_error(ctx, error_descr, wallets=None): if ctx.obj.output_json: if wallets is None: wallets = [] msg = {'error': error_descr, 'wallets': wallets} click.echo(json.dumps(msg)) else: print("ERROR: {}".format(error_descr)) def _serialize_output(ctx, addresses: List[OutputMessage], source_description) -> dict: if len(addresses) == 0: msg = {'error': 'No wallet found at {}'.format(source_description), 'wallets': []} return msg msg = {'error': None, 'wallets': []} for pos, item in enumerate(addresses): try: balance_unshored = Decimal(_public_get_address_balance(ctx, item.qaddress)) / config.dev.shor_per_quanta balance = '{:5.8f}'.format(balance_unshored) except Exception as e: msg['error'] = str(e) balance = '?' 
msg['wallets'].append({ 'number': pos, 'address': item.qaddress, 'balance': balance, 'hash_function': item.hashFunction }) return msg def validate_ots_index(ots_key_index, src_xmss, prompt=True): while not (0 <= ots_key_index < src_xmss.number_signatures): if prompt: ots_key_index = click.prompt('OTS key Index [{}..{}]'.format(0, src_xmss.number_signatures - 1), type=int) prompt = False else: click.echo("OTS key index must be between {} and {} (inclusive)".format(0, src_xmss.number_signatures - 1)) quit(1) return ots_key_index def get_item_from_wallet(wallet, wallet_idx): if 0 <= wallet_idx < len(wallet.address_items): return wallet.address_items[wallet_idx] click.echo('Wallet index not found {}'.format(wallet_idx), color='yellow') return None def _print_addresses(ctx, addresses: List[OutputMessage], source_description): def _normal(wallet): return "{:<8}{:<83}{:<13}".format(wallet['number'], wallet['address'], wallet['balance']) def _verbose(wallet): return "{:<8}{:<83}{:<13}{}".format( wallet['number'], wallet['address'], wallet['balance'], wallet['hash_function'] ) output = _serialize_output(ctx, addresses, source_description) if ctx.obj.output_json: output["location"] = source_description click.echo(json.dumps(output)) else: if output['error'] and output['wallets'] == []: click.echo(output['error']) else: click.echo("Wallet at : {}".format(source_description)) if ctx.obj.verbose: header = "{:<8}{:<83}{:<13}{:<8}".format('Number', 'Address', 'Balance', 'Hash') divider = ('-' * 112) else: header = "{:<8}{:<83}{:<13}".format('Number', 'Address', 'Balance') divider = ('-' * 101) click.echo(header) click.echo(divider) for wallet in output['wallets']: if ctx.obj.verbose: click.echo(_verbose(wallet)) else: click.echo(_normal(wallet)) def _public_get_address_balance(ctx, address): stub = ctx.obj.get_stub_public_api() get_address_state_req = qrl_pb2.GetAddressStateReq(address=parse_qaddress(address)) get_optimized_address_state_resp = stub.GetOptimizedAddressState(get_address_state_req, timeout=CONNECTION_TIMEOUT) return get_optimized_address_state_resp.state.balance def _select_wallet(ctx, address_or_index): try: wallet = Wallet(wallet_path=ctx.obj.wallet_path) if not wallet.addresses: click.echo('This command requires a local wallet') return if wallet.encrypted: secret = click.prompt('The wallet is encrypted. 
Enter password', hide_input=True) wallet.decrypt(secret) if address_or_index.isdigit(): address_or_index = int(address_or_index) addr_item = get_item_from_wallet(wallet, address_or_index) if addr_item: xmss = wallet.get_xmss_by_index(address_or_index) return wallet.addresses[address_or_index], xmss elif address_or_index.startswith('Q'): for i, addr_item in enumerate(wallet.address_items): if address_or_index == addr_item.qaddress: xmss = wallet.get_xmss_by_address(wallet.addresses[i]) return wallet.addresses[i], xmss click.echo('Source address not found in your wallet', color='yellow') quit(1) return parse_qaddress(address_or_index), None except Exception as e: click.echo("Error selecting wallet") click.echo(str(e)) quit(1) def _quanta_to_shor(x: Decimal, base=Decimal(config.dev.shor_per_quanta)) -> int: return int(Decimal(x * base).to_integral_value()) def _parse_dsts_amounts(addresses: str, amounts: str, token_decimals: int = 0, check_multi_sig_address=False): addresses_split = [parse_qaddress(addr, check_multi_sig_address) for addr in addresses.split(' ')] if token_decimals != 0: multiplier = Decimal(10 ** int(token_decimals)) shor_amounts = [_quanta_to_shor(Decimal(amount), base=multiplier) for amount in amounts.split(' ')] else: shor_amounts = [_quanta_to_shor(Decimal(amount)) for amount in amounts.split(' ')] if len(addresses_split) != len(shor_amounts): raise Exception("dsts and amounts should be the same length") return addresses_split, shor_amounts @click.version_option(version=config.dev.version, prog_name='QRL Command Line Interface') @click.group() @click.option('--verbose', '-v', default=False, is_flag=True, help='verbose output whenever possible') @click.option('--host', default='127.0.0.1', help='remote host address [127.0.0.1]') @click.option('--port_pub', default=19009, help='remote port number (public api) [19009]') @click.option('--wallet_dir', default='.', help='local wallet dir', envvar=ENV_QRL_WALLET_DIR) @click.option('--json', default=False, is_flag=True, help='output in json') @click.pass_context def qrl(ctx, verbose, host, port_pub, wallet_dir, json): ctx.obj = CLIContext(verbose=verbose, host=host, port_public=port_pub, wallet_dir=wallet_dir, output_json=json) @qrl.command(name='wallet_ls') @click.pass_context def wallet_ls(ctx): wallet = Wallet(wallet_path=ctx.obj.wallet_path) _print_addresses(ctx, wallet.address_items, ctx.obj.wallet_dir) @qrl.command(name='wallet_gen') @click.pass_context @click.option('--height', default=config.dev.xmss_tree_height, help='XMSS tree height. 
The resulting tree will be good for 2^height signatures') @click.option('--hash_function', type=click.Choice(list(hash_functions.keys())), default='shake128', help='Hash function used to build the XMSS tree [default=shake128]') @click.option('--encrypt', default=False, is_flag=True, help='Encrypts important fields with AES') def wallet_gen(ctx, height, hash_function, encrypt): wallet = Wallet(wallet_path=ctx.obj.wallet_path) if len(wallet.address_items) > 0: click.echo("Wallet already exists") return wallet.add_new_address(height, hash_function) _print_addresses(ctx, wallet.address_items, ctx.obj.wallet_path) if encrypt: secret = click.prompt('Enter password to encrypt wallet with', hide_input=True, confirmation_prompt=True) wallet.encrypt(secret) wallet.save() @qrl.command(name='wallet_add') @click.option('--height', type=int, default=config.dev.xmss_tree_height, prompt=False) @click.option('--hash_function', type=click.Choice(list(hash_functions.keys())), default='shake128', help='Hash function used to build the XMSS tree [default=shake128]') @click.pass_context def wallet_add(ctx, height, hash_function): secret = None wallet = Wallet(wallet_path=ctx.obj.wallet_path) wallet_was_encrypted = wallet.encrypted if wallet.encrypted: secret = click.prompt('The wallet is encrypted. Enter password', hide_input=True) wallet.decrypt(secret) wallet.add_new_address(height, hash_function) _print_addresses(ctx, wallet.address_items, config.user.wallet_dir) if wallet_was_encrypted: wallet.encrypt(secret) wallet.save() @qrl.command(name='wallet_recover') @click.option('--seed-type', type=click.Choice(['hexseed', 'mnemonic']), default='hexseed') @click.pass_context def wallet_recover(ctx, seed_type): seed = click.prompt('Please enter your %s' % (seed_type,)) seed = seed.lower().strip() if seed_type == 'mnemonic': words = seed.split() if len(words) != 34: print('You have entered %s words' % (len(words),)) print('Mnemonic seed must contain only 34 words') return bin_seed = mnemonic2bin(seed) else: if len(seed) != 102: print('You have entered hexseed of %s characters' % (len(seed),)) print('Hexseed must be of only 102 characters.') return bin_seed = hstr2bin(seed) wallet = Wallet(wallet_path=ctx.obj.wallet_path) recovered_xmss = XMSS.from_extended_seed(bin_seed) print('Recovered Wallet Address : %s' % (Wallet._get_Qaddress(recovered_xmss.address),)) for addr in wallet.address_items: if recovered_xmss.qaddress == addr.qaddress: print('Wallet Address is already in the wallet list') return if click.confirm('Do you want to save the recovered wallet?'): click.echo('Saving...') wallet.append_xmss(recovered_xmss) wallet.save() click.echo('Done') _print_addresses(ctx, wallet.address_items, config.user.wallet_dir) @qrl.command(name='wallet_secret') @click.option('--wallet-idx', default=1, prompt=True) @click.pass_context def wallet_secret(ctx, wallet_idx): wallet = Wallet(wallet_path=ctx.obj.wallet_path) if wallet.encrypted: secret = click.prompt('The wallet is encrypted. 
Enter password', hide_input=True) wallet.decrypt(secret) address_item = get_item_from_wallet(wallet, wallet_idx) if address_item: click.echo('Wallet Address : {}'.format(address_item.qaddress)) click.echo('Mnemonic : {}'.format(address_item.mnemonic)) click.echo('Hexseed : {}'.format(address_item.hexseed)) @qrl.command(name='wallet_rm') @click.option('--wallet-idx', type=int, prompt=True, help='index of address in wallet') @click.option('--skip-confirmation', default=False, is_flag=True, prompt=False, help='skip the confirmation prompt') @click.pass_context def wallet_rm(ctx, wallet_idx, skip_confirmation): wallet = Wallet(wallet_path=ctx.obj.wallet_path) address_item = get_item_from_wallet(wallet, wallet_idx) if address_item: if not skip_confirmation: click.echo( 'You are about to remove address [{0}]: {1} from the wallet.'.format(wallet_idx, address_item.qaddress)) click.echo( 'Warning! By continuing, you risk complete loss of access to this address if you do not have a ' 'recovery Mnemonic/Hexseed.') click.confirm('Do you want to continue?', abort=True) wallet.remove(address_item.qaddress) _print_addresses(ctx, wallet.address_items, config.user.wallet_dir) @qrl.command(name='wallet_encrypt') @click.pass_context def wallet_encrypt(ctx): wallet = Wallet(wallet_path=ctx.obj.wallet_path) click.echo('Encrypting wallet at {}'.format(wallet.wallet_path)) secret = click.prompt('Enter password', hide_input=True, confirmation_prompt=True) wallet.encrypt(secret) wallet.save() @qrl.command(name='wallet_decrypt') @click.pass_context def wallet_decrypt(ctx): wallet = Wallet(wallet_path=ctx.obj.wallet_path) click.echo('Decrypting wallet at {}'.format(wallet.wallet_path)) secret = click.prompt('Enter password', hide_input=True) try: wallet.decrypt(secret) except WalletDecryptionError as e: click.echo(str(e)) quit(1) except Exception as e: click.echo(str(e)) quit(1) try: wallet.save() except Exception as e: click.echo(str(e)) quit(1) @qrl.command(name='tx_inspect') @click.option('--txblob', type=str, default='', prompt=True, help='transaction blob') @click.pass_context def tx_inspect(ctx, txblob): tx = None try: txbin = parse_hexblob(txblob) pbdata = qrl_pb2.Transaction() pbdata.ParseFromString(txbin) tx = Transaction.from_pbdata(pbdata) except Exception as e: click.echo("tx blob is not valid") quit(1) tmp_json = tx.to_json() print(tmp_json) @qrl.command(name='tx_push') @click.option('--txblob', type=str, default='', help='transaction blob (unsigned)') @click.pass_context def tx_push(ctx, txblob): tx = None try: txbin = parse_hexblob(txblob) pbdata = qrl_pb2.Transaction() pbdata.ParseFromString(txbin) tx = Transaction.from_pbdata(pbdata) except Exception as e: click.echo("tx blob is not valid") quit(1) tmp_json = tx.to_json() print(tmp_json) if len(tx.signature) == 0: click.echo('Signature missing') quit(1) stub = ctx.obj.get_stub_public_api() pushTransactionReq = qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata) pushTransactionResp = stub.PushTransaction(pushTransactionReq, timeout=CONNECTION_TIMEOUT) print(pushTransactionResp.error_code) @qrl.command(name='tx_message') @click.option('--src', type=str, default='', prompt=True, help='signer QRL address') @click.option('--master', type=str, default='', prompt=True, help='master QRL address') @click.option('--addr_to', type=str, default='', prompt=True, help='QRL Address receiving this message (optional)') @click.option('--message', type=str, prompt=True, help='Message (max 80 bytes)') @click.option('--fee', type=Decimal, default=0.0, prompt=True, 
help='fee in Quanta') @click.option('--ots_key_index', default=1, prompt=True, help='OTS key Index (1..XMSS num signatures)') @click.pass_context def tx_message(ctx, src, master, addr_to, message, fee, ots_key_index): try: _, src_xmss = _select_wallet(ctx, src) if not src_xmss: click.echo("A local wallet is required to sign the transaction") quit(1) address_src_pk = src_xmss.pk ots_key_index = validate_ots_index(ots_key_index, src_xmss) src_xmss.set_ots_index(ots_key_index) message = message.encode() addr_to = parse_qaddress(addr_to, False) master_addr = None if master: master_addr = parse_qaddress(master) fee_shor = _quanta_to_shor(fee) except Exception as e: click.echo("Error validating arguments: {}".format(e)) quit(1) try: stub = ctx.obj.get_stub_public_api() tx = MessageTransaction.create(message_hash=message, addr_to=addr_to, fee=fee_shor, xmss_pk=address_src_pk, master_addr=master_addr) tx.sign(src_xmss) push_transaction_req = qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata) push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT) print(push_transaction_resp) except Exception as e: print("Error {}".format(str(e))) @qrl.command(name='tx_multi_sig_create') @click.option('--src', type=str, default='', prompt=True, help='source QRL address') @click.option('--master', type=str, default='', prompt=True, help='master QRL address') @click.option('--threshold', default=0, prompt=True, help='Threshold') @click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta') @click.option('--ots_key_index', default=1, prompt=True, help='OTS key Index (1..XMSS num signatures)') @click.pass_context def tx_multi_sig_create(ctx, src, master, threshold, fee, ots_key_index): signatories = [] weights = [] while True: address = click.prompt('Address of Signatory ', default='') if address == '': break weight = int(click.prompt('Weight ')) signatories.append(parse_qaddress(address)) weights.append(weight) try: _, src_xmss = _select_wallet(ctx, src) if not src_xmss: click.echo("A local wallet is required to sign the transaction") quit(1) address_src_pk = src_xmss.pk ots_key_index = validate_ots_index(ots_key_index, src_xmss) src_xmss.set_ots_index(ots_key_index) master_addr = None if master: master_addr = parse_qaddress(master) fee_shor = _quanta_to_shor(fee) except KeyboardInterrupt: click.echo("Terminated by user") quit(1) except Exception as e: click.echo("Error validating arguments: {}".format(e)) quit(1) try: stub = ctx.obj.get_stub_public_api() tx = MultiSigCreate.create(signatories=signatories, weights=weights, threshold=threshold, fee=fee_shor, xmss_pk=address_src_pk, master_addr=master_addr) tx.sign(src_xmss) push_transaction_req = qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata) push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT) print(push_transaction_resp.error_code) print('Multi sig Address Q{}'.format(bin2hstr(MultiSigAddressState.generate_multi_sig_address(tx.txhash)))) except Exception as e: print("Error {}".format(str(e))) @qrl.command(name='tx_multi_sig_spend') @click.option('--src', type=str, default='', prompt=True, help='signer QRL address') @click.option('--master', type=str, default='', help='master QRL address') @click.option('--multi_sig_address', type=str, default='', prompt=True, help='signer Multi Sig Address') @click.option('--dsts', type=str, prompt=True, help='List of destination addresses') @click.option('--amounts', type=str, prompt=True, help='List of amounts to 
transfer (Quanta)') @click.option('--expiry_block_number', type=int, prompt=True, help='Expiry Blocknumber') @click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta') @click.option('--ots_key_index', default=1, help='OTS key Index (1..XMSS num signatures)') @click.pass_context def tx_multi_sig_spend(ctx, src, master, multi_sig_address, dsts, amounts, expiry_block_number, fee, ots_key_index): address_src_pk = None master_addr = None addresses_dst = [] shor_amounts = [] fee_shor = [] signing_object = None try: selected_wallet = _select_wallet(ctx, src) if selected_wallet is None or len(selected_wallet) != 2: click.echo("A wallet was not found") quit(1) _, src_xmss = selected_wallet if not src_xmss: click.echo("A local wallet is required to sign the transaction") quit(1) address_src_pk = src_xmss.pk ots_key_index = validate_ots_index(ots_key_index, src_xmss) src_xmss.set_ots_index(ots_key_index) signing_object = src_xmss if master: master_addr = parse_qaddress(master) addresses_dst, shor_amounts = _parse_dsts_amounts(dsts, amounts, check_multi_sig_address=True) fee_shor = _quanta_to_shor(fee) except Exception as e: click.echo("Error validating arguments: {}".format(e)) quit(1) multi_sig_address = bytes(hstr2bin(multi_sig_address[1:])) try: tx = MultiSigSpend.create(multi_sig_address=multi_sig_address, addrs_to=addresses_dst, amounts=shor_amounts, expiry_block_number=expiry_block_number, fee=fee_shor, xmss_pk=address_src_pk, master_addr=master_addr) tx.sign(signing_object) if not tx.validate(): print("It was not possible to validate the signature") quit(1) print("\nTransaction Blob (signed): \n") txblob = tx.pbdata.SerializeToString() txblobhex = hexlify(txblob).decode() print(txblobhex) print() print("Sending to a QRL Node...") stub = ctx.obj.get_stub_public_api() push_transaction_req = qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata) push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT) print(push_transaction_resp) except Exception as e: print("Error {}".format(str(e))) def base64tohex(data): return hexlify(a2b_base64(data)) def tx_unbase64(tx_json_str): tx_json = json.loads(tx_json_str) tx_json["publicKey"] = base64tohex(tx_json["publicKey"]) tx_json["signature"] = base64tohex(tx_json["signature"]) tx_json["transactionHash"] = base64tohex(tx_json["transactionHash"]) tx_json["transfer"]["addrsTo"] = [base64tohex(v) for v in tx_json["transfer"]["addrsTo"]] return json.dumps(tx_json, indent=True, sort_keys=True) @qrl.command(name='tx_transfer') @click.option('--src', type=str, default='', prompt=True, help='signer QRL address') @click.option('--master', type=str, default='', help='master QRL address') @click.option('--dsts', type=str, prompt=True, help='List of destination addresses') @click.option('--amounts', type=str, prompt=True, help='List of amounts to transfer (Quanta)') @click.option('--message_data', type=str, prompt=True, help='Message (Optional)') @click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta') @click.option('--ots_key_index', default=1, help='OTS key Index (1..XMSS num signatures)') @click.pass_context
MIT License
drexly/openhgsenti
lib/django/contrib/admin/checks.py
ModelAdminChecks._check_search_fields
python
def _check_search_fields(self, obj):
    if not isinstance(obj.search_fields, (list, tuple)):
        return must_be('a list or tuple', option='search_fields',
                       obj=obj, id='admin.E126')
    else:
        return []
Check search_fields is a sequence.
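A minimal sketch of how this check behaves, assuming Django is installed and settings are configured; FakeAdmin is a hypothetical stand-in, since only its search_fields attribute matters to this check:

from django.contrib.admin.checks import ModelAdminChecks

class FakeAdmin(object):
    search_fields = 'title'   # wrong: a plain string instead of a list or tuple

errors = ModelAdminChecks()._check_search_fields(FakeAdmin())
# errors holds one checks.Error with id='admin.E126'; with
# search_fields = ['title'] the method returns [] instead.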
https://github.com/drexly/openhgsenti/blob/d7806f58c81127d32091d9875a99ac13aef94a8a/lib/django/contrib/admin/checks.py#L824-L830
from __future__ import unicode_literals from itertools import chain from django.contrib.admin.utils import ( NotRelationField, flatten, get_fields_from_path, ) from django.core import checks from django.core.exceptions import FieldDoesNotExist from django.db import models from django.forms.models import ( BaseModelForm, BaseModelFormSet, _get_foreign_key, ) def check_admin_app(**kwargs): from django.contrib.admin.sites import system_check_errors return system_check_errors class BaseModelAdminChecks(object): def check(self, admin_obj, **kwargs): errors = [] errors.extend(self._check_raw_id_fields(admin_obj)) errors.extend(self._check_fields(admin_obj)) errors.extend(self._check_fieldsets(admin_obj)) errors.extend(self._check_exclude(admin_obj)) errors.extend(self._check_form(admin_obj)) errors.extend(self._check_filter_vertical(admin_obj)) errors.extend(self._check_filter_horizontal(admin_obj)) errors.extend(self._check_radio_fields(admin_obj)) errors.extend(self._check_prepopulated_fields(admin_obj)) errors.extend(self._check_view_on_site_url(admin_obj)) errors.extend(self._check_ordering(admin_obj)) errors.extend(self._check_readonly_fields(admin_obj)) return errors def _check_raw_id_fields(self, obj): if not isinstance(obj.raw_id_fields, (list, tuple)): return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001') else: return list(chain(*[ self._check_raw_id_fields_item(obj, obj.model, field_name, 'raw_id_fields[%d]' % index) for index, field_name in enumerate(obj.raw_id_fields) ])) def _check_raw_id_fields_item(self, obj, model, field_name, label): try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E002') else: if not isinstance(field, (models.ForeignKey, models.ManyToManyField)): return must_be('a ForeignKey or ManyToManyField', option=label, obj=obj, id='admin.E003') else: return [] def _check_fields(self, obj): if obj.fields is None: return [] elif not isinstance(obj.fields, (list, tuple)): return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004') elif obj.fieldsets: return [ checks.Error( "Both 'fieldsets' and 'fields' are specified.", hint=None, obj=obj.__class__, id='admin.E005', ) ] fields = flatten(obj.fields) if len(fields) != len(set(fields)): return [ checks.Error( "The value of 'fields' contains duplicate field(s).", hint=None, obj=obj.__class__, id='admin.E006', ) ] return list(chain(*[ self._check_field_spec(obj, obj.model, field_name, 'fields') for field_name in obj.fields ])) def _check_fieldsets(self, obj): if obj.fieldsets is None: return [] elif not isinstance(obj.fieldsets, (list, tuple)): return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007') else: return list(chain(*[ self._check_fieldsets_item(obj, obj.model, fieldset, 'fieldsets[%d]' % index) for index, fieldset in enumerate(obj.fieldsets) ])) def _check_fieldsets_item(self, obj, model, fieldset, label): if not isinstance(fieldset, (list, tuple)): return must_be('a list or tuple', option=label, obj=obj, id='admin.E008') elif len(fieldset) != 2: return must_be('of length 2', option=label, obj=obj, id='admin.E009') elif not isinstance(fieldset[1], dict): return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010') elif 'fields' not in fieldset[1]: return [ checks.Error( "The value of '%s[1]' must contain the key 'fields'." 
% label, hint=None, obj=obj.__class__, id='admin.E011', ) ] elif not isinstance(fieldset[1]['fields'], (list, tuple)): return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008') fields = flatten(fieldset[1]['fields']) if len(fields) != len(set(fields)): return [ checks.Error( "There are duplicate field(s) in '%s[1]'." % label, hint=None, obj=obj.__class__, id='admin.E012', ) ] return list(chain(*[ self._check_field_spec(obj, model, fieldset_fields, '%s[1]["fields"]' % label) for fieldset_fields in fieldset[1]['fields'] ])) def _check_field_spec(self, obj, model, fields, label): if isinstance(fields, tuple): return list(chain(*[ self._check_field_spec_item(obj, model, field_name, "%s[%d]" % (label, index)) for index, field_name in enumerate(fields) ])) else: return self._check_field_spec_item(obj, model, fields, label) def _check_field_spec_item(self, obj, model, field_name, label): if field_name in obj.readonly_fields: return [] else: try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return [] else: if (isinstance(field, models.ManyToManyField) and not field.remote_field.through._meta.auto_created): return [ checks.Error( ("The value of '%s' cannot include the ManyToManyField '%s', " "because that field manually specifies a relationship model.") % (label, field_name), hint=None, obj=obj.__class__, id='admin.E013', ) ] else: return [] def _check_exclude(self, obj): if obj.exclude is None: return [] elif not isinstance(obj.exclude, (list, tuple)): return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014') elif len(obj.exclude) > len(set(obj.exclude)): return [ checks.Error( "The value of 'exclude' contains duplicate field(s).", hint=None, obj=obj.__class__, id='admin.E015', ) ] else: return [] def _check_form(self, obj): if hasattr(obj, 'form') and not issubclass(obj.form, BaseModelForm): return must_inherit_from(parent='BaseModelForm', option='form', obj=obj, id='admin.E016') else: return [] def _check_filter_vertical(self, obj): if not hasattr(obj, 'filter_vertical'): return [] elif not isinstance(obj.filter_vertical, (list, tuple)): return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017') else: return list(chain(*[ self._check_filter_item(obj, obj.model, field_name, "filter_vertical[%d]" % index) for index, field_name in enumerate(obj.filter_vertical) ])) def _check_filter_horizontal(self, obj): if not hasattr(obj, 'filter_horizontal'): return [] elif not isinstance(obj.filter_horizontal, (list, tuple)): return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018') else: return list(chain(*[ self._check_filter_item(obj, obj.model, field_name, "filter_horizontal[%d]" % index) for index, field_name in enumerate(obj.filter_horizontal) ])) def _check_filter_item(self, obj, model, field_name, label): try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E019') else: if not isinstance(field, models.ManyToManyField): return must_be('a ManyToManyField', option=label, obj=obj, id='admin.E020') else: return [] def _check_radio_fields(self, obj): if not hasattr(obj, 'radio_fields'): return [] elif not isinstance(obj.radio_fields, dict): return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021') else: return list(chain(*[ self._check_radio_fields_key(obj, obj.model, field_name, 'radio_fields') + self._check_radio_fields_value(obj, val, 
'radio_fields["%s"]' % field_name) for field_name, val in obj.radio_fields.items() ])) def _check_radio_fields_key(self, obj, model, field_name, label): try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E022') else: if not (isinstance(field, models.ForeignKey) or field.choices): return [ checks.Error( "The value of '%s' refers to '%s', which is not an " "instance of ForeignKey, and does not have a 'choices' definition." % ( label, field_name ), hint=None, obj=obj.__class__, id='admin.E023', ) ] else: return [] def _check_radio_fields_value(self, obj, val, label): from django.contrib.admin.options import HORIZONTAL, VERTICAL if val not in (HORIZONTAL, VERTICAL): return [ checks.Error( "The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label, hint=None, obj=obj.__class__, id='admin.E024', ) ] else: return [] def _check_view_on_site_url(self, obj): if hasattr(obj, 'view_on_site'): if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool): return [ checks.Error( "The value of 'view_on_site' must be a callable or a boolean value.", hint=None, obj=obj.__class__, id='admin.E025', ) ] else: return [] else: return [] def _check_prepopulated_fields(self, obj): if not hasattr(obj, 'prepopulated_fields'): return [] elif not isinstance(obj.prepopulated_fields, dict): return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026') else: return list(chain(*[ self._check_prepopulated_fields_key(obj, obj.model, field_name, 'prepopulated_fields') + self._check_prepopulated_fields_value(obj, obj.model, val, 'prepopulated_fields["%s"]' % field_name) for field_name, val in obj.prepopulated_fields.items() ])) def _check_prepopulated_fields_key(self, obj, model, field_name, label): forbidden_field_types = ( models.DateTimeField, models.ForeignKey, models.ManyToManyField ) try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E027') else: if isinstance(field, forbidden_field_types): return [ checks.Error( "The value of '%s' refers to '%s', which must not be a DateTimeField, " "ForeignKey or ManyToManyField." % ( label, field_name ), hint=None, obj=obj.__class__, id='admin.E028', ) ] else: return [] def _check_prepopulated_fields_value(self, obj, model, val, label): if not isinstance(val, (list, tuple)): return must_be('a list or tuple', option=label, obj=obj, id='admin.E029') else: return list(chain(*[ self._check_prepopulated_fields_value_item(obj, model, subfield_name, "%s[%r]" % (label, index)) for index, subfield_name in enumerate(val) ])) def _check_prepopulated_fields_value_item(self, obj, model, field_name, label): try: model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E030') else: return [] def _check_ordering(self, obj): if obj.ordering is None: return [] elif not isinstance(obj.ordering, (list, tuple)): return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031') else: return list(chain(*[ self._check_ordering_item(obj, obj.model, field_name, 'ordering[%d]' % index) for index, field_name in enumerate(obj.ordering) ])) def _check_ordering_item(self, obj, model, field_name, label): if field_name == '?' 
and len(obj.ordering) != 1: return [ checks.Error( ("The value of 'ordering' has the random ordering marker '?', " "but contains other fields as well."), hint='Either remove the "?", or remove the other fields.', obj=obj.__class__, id='admin.E032', ) ] elif field_name == '?': return [] elif '__' in field_name: return [] else: if field_name.startswith('-'): field_name = field_name[1:] try: model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E033') else: return [] def _check_readonly_fields(self, obj): if obj.readonly_fields == (): return [] elif not isinstance(obj.readonly_fields, (list, tuple)): return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034') else: return list(chain(*[ self._check_readonly_fields_item(obj, obj.model, field_name, "readonly_fields[%d]" % index) for index, field_name in enumerate(obj.readonly_fields) ])) def _check_readonly_fields_item(self, obj, model, field_name, label): if callable(field_name): return [] elif hasattr(obj, field_name): return [] elif hasattr(model, field_name): return [] else: try: model._meta.get_field(field_name) except FieldDoesNotExist: return [ checks.Error( "The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % ( label, obj.__class__.__name__, model._meta.app_label, model._meta.object_name ), hint=None, obj=obj.__class__, id='admin.E035', ) ] else: return [] class ModelAdminChecks(BaseModelAdminChecks): def check(self, admin_obj, **kwargs): errors = super(ModelAdminChecks, self).check(admin_obj) errors.extend(self._check_save_as(admin_obj)) errors.extend(self._check_save_on_top(admin_obj)) errors.extend(self._check_inlines(admin_obj)) errors.extend(self._check_list_display(admin_obj)) errors.extend(self._check_list_display_links(admin_obj)) errors.extend(self._check_list_filter(admin_obj)) errors.extend(self._check_list_select_related(admin_obj)) errors.extend(self._check_list_per_page(admin_obj)) errors.extend(self._check_list_max_show_all(admin_obj)) errors.extend(self._check_list_editable(admin_obj)) errors.extend(self._check_search_fields(admin_obj)) errors.extend(self._check_date_hierarchy(admin_obj)) return errors def _check_save_as(self, obj): if not isinstance(obj.save_as, bool): return must_be('a boolean', option='save_as', obj=obj, id='admin.E101') else: return [] def _check_save_on_top(self, obj): if not isinstance(obj.save_on_top, bool): return must_be('a boolean', option='save_on_top', obj=obj, id='admin.E102') else: return [] def _check_inlines(self, obj): if not isinstance(obj.inlines, (list, tuple)): return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103') else: return list(chain(*[ self._check_inlines_item(obj, obj.model, item, "inlines[%d]" % index) for index, item in enumerate(obj.inlines) ])) def _check_inlines_item(self, obj, model, inline, label): inline_label = '.'.join([inline.__module__, inline.__name__]) from django.contrib.admin.options import BaseModelAdmin if not issubclass(inline, BaseModelAdmin): return [ checks.Error( "'%s' must inherit from 'BaseModelAdmin'." % inline_label, hint=None, obj=obj.__class__, id='admin.E104', ) ] elif not inline.model: return [ checks.Error( "'%s' must have a 'model' attribute." 
% inline_label, hint=None, obj=obj.__class__, id='admin.E105', ) ] elif not issubclass(inline.model, models.Model): return must_be('a Model', option='%s.model' % inline_label, obj=obj, id='admin.E106') else: return inline(model, obj.admin_site).check() def _check_list_display(self, obj): if not isinstance(obj.list_display, (list, tuple)): return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107') else: return list(chain(*[ self._check_list_display_item(obj, obj.model, item, "list_display[%d]" % index) for index, item in enumerate(obj.list_display) ])) def _check_list_display_item(self, obj, model, item, label): if callable(item): return [] elif hasattr(obj, item): return [] elif hasattr(model, item): try: field = model._meta.get_field(item) except FieldDoesNotExist: try: field = getattr(model, item) except AttributeError: field = None if field is None: return [ checks.Error( "The value of '%s' refers to '%s', which is not a " "callable, an attribute of '%s', or an attribute or method on '%s.%s'." % ( label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name ), hint=None, obj=obj.__class__, id='admin.E108', ) ] elif isinstance(field, models.ManyToManyField): return [ checks.Error( "The value of '%s' must not be a ManyToManyField." % label, hint=None, obj=obj.__class__, id='admin.E109', ) ] else: return [] else: try: model._meta.get_field(item) except FieldDoesNotExist: return [ checks.Error( "The value of '%s' refers to '%s', which is not a callable, " "an attribute of '%s', or an attribute or method on '%s.%s'." % ( label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name ), hint=None, obj=obj.__class__, id='admin.E108', ) ] else: return [] def _check_list_display_links(self, obj): if obj.list_display_links is None: return [] elif not isinstance(obj.list_display_links, (list, tuple)): return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110') else: return list(chain(*[ self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index) for index, field_name in enumerate(obj.list_display_links) ])) def _check_list_display_links_item(self, obj, field_name, label): if field_name not in obj.list_display: return [ checks.Error( "The value of '%s' refers to '%s', which is not defined in 'list_display'." % ( label, field_name ), hint=None, obj=obj.__class__, id='admin.E111', ) ] else: return [] def _check_list_filter(self, obj): if not isinstance(obj.list_filter, (list, tuple)): return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112') else: return list(chain(*[ self._check_list_filter_item(obj, obj.model, item, "list_filter[%d]" % index) for index, item in enumerate(obj.list_filter) ])) def _check_list_filter_item(self, obj, model, item, label): from django.contrib.admin import ListFilter, FieldListFilter if callable(item) and not isinstance(item, models.Field): if not issubclass(item, ListFilter): return must_inherit_from(parent='ListFilter', option=label, obj=obj, id='admin.E113') elif issubclass(item, FieldListFilter): return [ checks.Error( "The value of '%s' must not inherit from 'FieldListFilter'." 
% label, hint=None, obj=obj.__class__, id='admin.E114', ) ] else: return [] elif isinstance(item, (tuple, list)): field, list_filter_class = item if not issubclass(list_filter_class, FieldListFilter): return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label, obj=obj, id='admin.E115') else: return [] else: field = item try: get_fields_from_path(model, field) except (NotRelationField, FieldDoesNotExist): return [ checks.Error( "The value of '%s' refers to '%s', which does not refer to a Field." % (label, field), hint=None, obj=obj.__class__, id='admin.E116', ) ] else: return [] def _check_list_select_related(self, obj): if not isinstance(obj.list_select_related, (bool, list, tuple)): return must_be('a boolean, tuple or list', option='list_select_related', obj=obj, id='admin.E117') else: return [] def _check_list_per_page(self, obj): if not isinstance(obj.list_per_page, int): return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118') else: return [] def _check_list_max_show_all(self, obj): if not isinstance(obj.list_max_show_all, int): return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119') else: return [] def _check_list_editable(self, obj): if not isinstance(obj.list_editable, (list, tuple)): return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120') else: return list(chain(*[ self._check_list_editable_item(obj, obj.model, item, "list_editable[%d]" % index) for index, item in enumerate(obj.list_editable) ])) def _check_list_editable_item(self, obj, model, field_name, label): try: field = model._meta.get_field(field_name) except FieldDoesNotExist: return refer_to_missing_field(field=field_name, option=label, model=model, obj=obj, id='admin.E121') else: if field_name not in obj.list_display: return [ checks.Error( "The value of '%s' refers to '%s', which is not " "contained in 'list_display'." % (label, field_name), hint=None, obj=obj.__class__, id='admin.E122', ) ] elif obj.list_display_links and field_name in obj.list_display_links: return [ checks.Error( "The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name, hint=None, obj=obj.__class__, id='admin.E123', ) ] elif (obj.list_display[0] in obj.list_editable and obj.list_display[0] != obj.list_editable[0] and obj.list_display_links is not None): return [ checks.Error( "The value of '%s' refers to the first field in 'list_display' ('%s'), " "which cannot be used unless 'list_display_links' is set." % ( label, obj.list_display[0] ), hint=None, obj=obj.__class__, id='admin.E124', ) ] elif not field.editable: return [ checks.Error( "The value of '%s' refers to '%s', which is not editable through the admin." % ( label, field_name ), hint=None, obj=obj.__class__, id='admin.E125', ) ] else: return []
Apache License 2.0
dragonfly/dragonfly
dragonfly/gp/cartesian_product_gp.py
CPGP._get_training_kernel_matrix
python
def _get_training_kernel_matrix(self):
    n = len(self.X)
    ret = self.kernel.hyperparams['scale'] * np.ones((n, n))
    for idx, kern in enumerate(self.kernel.kernel_list):
        if self.domain_lists_of_dists[idx] is not None:
            ret *= kern.evaluate_from_dists(self.domain_lists_of_dists[idx])
        else:
            curr_X = get_idxs_from_list_of_lists(self.X, idx)
            ret *= kern(curr_X, curr_X)
    return ret
Returns the training kernel matrix.
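The method above forms the Gram matrix as an element-wise product of one kernel matrix per sub-domain, scaled by a single factor. A standalone sketch of that idea (not dragonfly's API; the names are illustrative):

import numpy as np

def product_kernel(scale, per_domain_kernels):
    # per_domain_kernels: list of (n, n) matrices, one per sub-domain
    ret = scale * np.ones_like(per_domain_kernels[0])
    for K in per_domain_kernels:
        ret *= K
    return ret

# e.g. two sub-domains over three training points:
K1 = np.array([[1.0, 0.5, 0.2], [0.5, 1.0, 0.4], [0.2, 0.4, 1.0]])
K2 = np.array([[1.0, 0.9, 0.1], [0.9, 1.0, 0.3], [0.1, 0.3, 1.0]])
K = product_kernel(2.0, [K1, K2])   # (3, 3) training kernel matrix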
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/dragonfly/gp/cartesian_product_gp.py#L238-L248
from __future__ import print_function from argparse import Namespace import numpy as np from ..exd import domains from ..exd.cp_domain_utils import load_cp_domain_from_config_file from . import gp_core, mf_gp from .euclidean_gp import get_euclidean_integral_gp_kernel_with_scale, prep_euclidean_integral_kernel_hyperparams from .kernel import CartesianProductKernel, HammingKernel from ..utils.general_utils import get_idxs_from_list_of_lists from ..utils.option_handler import get_option_specs, load_options from ..utils.reporters import get_reporter _DFLT_DOMAIN_EUC_KERNEL_TYPE = 'matern' _DFLT_DOMAIN_INT_KERNEL_TYPE = 'matern' _DFLT_DOMAIN_DISCRETE_NUMERIC_KERNEL_TYPE = 'matern' _DFLT_DOMAIN_DISCRETE_KERNEL_TYPE = 'hamming' _DFLT_DOMAIN_NN_KERNEL_TYPE = 'otmann' _DFLT_DOMAIN_MATERN_NU = 2.5 _DFLT_FIDEL_EUC_KERNEL_TYPE = 'se' _DFLT_FIDEL_INT_KERNEL_TYPE = 'se' _DFLT_FIDEL_DISCRETE_NUMERIC_KERNEL_TYPE = 'se' _DFLT_FIDEL_DISCRETE_KERNEL_TYPE = 'hamming' _DFLT_FIDEL_MATERN_NU = 2.5 basic_cart_product_gp_args = [ get_option_specs('dom_euc_kernel_type', False, 'default', 'Kernel type for euclidean domains. '), get_option_specs('dom_euc_use_same_bandwidth', False, False, ('If true, will use same bandwidth on all dimensions. Applies only ' 'when kernel_type is se or matern. Default=False.')), get_option_specs('dom_euc_matern_nu', False, 'default', 'Specify nu value for matern kernel. If negative, will fit.'), get_option_specs('dom_euc_poly_order', False, 1, 'Order of the polynomial kernle to be used for Euclidean domains. ' + 'Default is 1 (linear kernel).'), get_option_specs('dom_euc_use_additive_gp', False, False, 'Whether or not to use an additive GP. '), get_option_specs('dom_euc_add_max_group_size', False, 6, 'The maximum number of groups in the additive grouping. '), get_option_specs('dom_euc_add_grouping_criterion', False, 'randomised_ml', 'Specify the grouping algorithm, should be one of {randomised_ml}'), get_option_specs('dom_euc_num_groups_per_group_size', False, -1, 'The number of groups to try per group size.'), get_option_specs('dom_euc_add_group_size_criterion', False, 'sampled', 'Specify how to pick the group size, should be one of {max,sampled}.'), get_option_specs('dom_euc_esp_order', False, -1, 'Order of the esp kernel. '), get_option_specs('dom_euc_esp_kernel_type', False, 'se', 'Specify type of kernel. This depends on the application.'), get_option_specs('dom_euc_esp_matern_nu', False, 'default', ('Specify the nu value for matern kernel. If negative, will fit.')), get_option_specs('dom_int_kernel_type', False, 'default', 'Kernel type for integral domains. '), get_option_specs('dom_int_use_same_bandwidth', False, False, ('If true, will use same bandwidth on all dimensions. Applies only ' 'when kernel_type is se or matern. Default=False.')), get_option_specs('dom_int_matern_nu', False, 'default', 'Specify nu value for matern kernel. If negative, will fit.'), get_option_specs('dom_int_poly_order', False, 1, 'Order of the polynomial kernle to be used for Integral domains. ' + 'Default is 1 (linear kernel).'), get_option_specs('dom_int_use_additive_gp', False, False, 'Whether or not to use an additive GP. '), get_option_specs('dom_int_add_max_group_size', False, 6, 'The maximum number of groups in the additive grouping. 
'), get_option_specs('dom_int_add_grouping_criterion', False, 'randomised_ml', 'Specify the grouping algorithm, should be one of {randomised_ml}'), get_option_specs('dom_int_num_groups_per_group_size', False, -1, 'The number of groups to try per group size.'), get_option_specs('dom_int_add_group_size_criterion', False, 'sampled', 'Specify how to pick the group size, should be one of {max,sampled}.'), get_option_specs('dom_int_esp_order', False, -1, 'Order of the esp kernel. '), get_option_specs('dom_int_esp_kernel_type', False, 'se', 'Specify type of kernel. This depends on the application.'), get_option_specs('dom_int_esp_matern_nu', False, 'default', ('Specify the nu value for matern kernel. If negative, will fit.')), get_option_specs('dom_disc_num_kernel_type', False, 'default', 'Kernel type for discrete numeric domains. '), get_option_specs('dom_disc_num_use_same_bandwidth', False, False, ('If true, will use same bandwidth on all dimensions. Applies only ' 'when kernel_type is se or matern. Default=False.')), get_option_specs('dom_disc_num_matern_nu', False, 'default', 'Specify nu value for matern kernel. If negative, will fit.'), get_option_specs('dom_disc_num_poly_order', False, 1, 'Order of the polynomial kernle to be used for Integral domains. ' + 'Default is 1 (linear kernel).'), get_option_specs('dom_disc_num_esp_order', False, -1, 'Order of the esp kernel. '), get_option_specs('dom_disc_num_esp_kernel_type', False, 'se', 'Specify type of kernel. This depends on the application.'), get_option_specs('dom_disc_num_esp_matern_nu', False, 'default', ('Specify the nu value for matern kernel. If negative, will fit.')), get_option_specs('dom_disc_kernel_type', False, 'default', 'Kernel type for discrete domains.'), get_option_specs('dom_disc_hamming_use_same_weight', False, False, 'If true, use same weight for all dimensions of the hamming kernel.'), get_option_specs('dom_nn_kernel_type', False, 'default', 'Kernel type for NN Domains.'), get_option_specs('otmann_dist_type', False, 'lp-emd', 'The type of distance. Should be lp, emd or lp-emd.'), get_option_specs('otmann_kernel_type', False, 'lpemd_sum', 'The Otmann kernel type. Should be one of lp, emd, lpemd_sum, or lpemd_prod.'), get_option_specs('otmann_choose_mislabel_struct_coeffs', False, 'use_given', ('How to choose the mislabel and struct coefficients. Should be one of ' + 'tune_coeffs or use_given. In the latter case, otmann_mislabel_coeffs and ' + 'otmann_struct_coeffs should be non-empty.')), get_option_specs('otmann_mislabel_coeffs', False, '1.0-1.0-1.0-1.0', 'The mislabel coefficients specified as a string. If -1, it means we will tune.'), get_option_specs('otmann_struct_coeffs', False, '0.1-0.25-0.61-1.5', 'The struct coefficients specified as a string. If -1, it means we will tune.'), get_option_specs('otmann_lp_power', False, 1, 'The powers to use in the LP distance for the kernel.'), get_option_specs('otmann_emd_power', False, 2, 'The powers to use in the EMD distance for the kernel.'), get_option_specs('otmann_non_assignment_penalty', False, 1.0, 'The non-assignment penalty for the OTMANN distance.'), ] cartesian_product_gp_args = gp_core.mandatory_gp_args + basic_cart_product_gp_args basic_mf_cart_product_gp_args = [ get_option_specs('fidel_euc_kernel_type', False, 'se', ('Type of kernel for the Euclidean part of the fidelity space. Should be se, ' + 'matern, poly or expdecay')), get_option_specs('fidel_euc_matern_nu', False, 2.5, ('Specify the nu value for the matern kernel. 
If negative, will fit.')), get_option_specs('fidel_euc_use_same_bandwidth', False, False, ('If true, will use same bandwidth on all Euclidean fidelity dimensions. Applies ' + 'only when fidel_kernel_type is se or matern. Default=False.')), get_option_specs('fidel_int_kernel_type', False, 'se', 'Type of kernel for the fidelity space. Should be se, matern, poly or expdecay'), get_option_specs('fidel_int_matern_nu', False, 2.5, ('Specify the nu value for the matern kernel. If negative, will fit.')), get_option_specs('fidel_int_use_same_bandwidth', False, False, ('If true, will use same bandwidth on all integral fidelity dimensions. Applies ' + 'only when fidel_kernel_type is se or matern. Default=False.')), get_option_specs('fidel_disc_num_kernel_type', False, 'se', 'Type of kernel for the fidelity space. Should be se, matern, poly or expdecay'), get_option_specs('fidel_disc_num_matern_nu', False, 2.5, ('Specify the nu value for the matern kernel. If negative, will fit.')), get_option_specs('fidel_disc_num_use_same_bandwidth', False, False, ('If true, will use same bandwidth on all integral fidelity dimensions. Applies ' + 'only when fidel_kernel_type is se or matern. Default=False.')), get_option_specs('fidel_disc_kernel_type', False, 'default', 'Kernel type for discrete domains.'), get_option_specs('fidel_disc_hamming_use_same_weight', False, False, 'If true, use same weight for all dimensions of the hamming kernel.'), ] cartesian_product_mf_gp_args = cartesian_product_gp_args + basic_mf_cart_product_gp_args def get_default_kernel_type(domain_type): if domain_type == 'euclidean': return _DFLT_DOMAIN_EUC_KERNEL_TYPE elif domain_type == 'discrete_euclidean': return _DFLT_DOMAIN_EUC_KERNEL_TYPE elif domain_type == 'integral': return _DFLT_DOMAIN_INT_KERNEL_TYPE elif domain_type == 'prod_discrete': return _DFLT_DOMAIN_DISCRETE_KERNEL_TYPE elif domain_type == 'prod_discrete_numeric': return _DFLT_DOMAIN_DISCRETE_NUMERIC_KERNEL_TYPE elif domain_type == 'neural_network': return _DFLT_DOMAIN_NN_KERNEL_TYPE else: raise ValueError('Unknown domain_type: %s.'%(domain_type)) class CPGP(gp_core.GP): def __init__(self, X, Y, kernel, mean_func, noise_var, domain_lists_of_dists=None, build_posterior=True, reporter=None, handle_non_psd_kernels='project_first'): if domain_lists_of_dists is None: domain_lists_of_dists = [None] * kernel.num_kernels self.domain_lists_of_dists = domain_lists_of_dists super(CPGP, self).__init__(X, Y, kernel, mean_func, noise_var, build_posterior, reporter, handle_non_psd_kernels) def set_domain_lists_of_dists(self, domain_lists_of_dists): self.domain_lists_of_dists = domain_lists_of_dists def _child_str(self): if len(self.X) > 0: mean_str = 'mu[#0]=%0.4f, '%(self.mean_func([self.X[0]])[0]) else: mean_str = '' return mean_str + str(self.kernel)
MIT License
henriquemiranda/yambopy
qepy/projwfcxml.py
ProjwfcXML.write_proj
python
def write_proj(self, filename='proj'):
    np.savez(filename, proj=self.proj, weights=self.weights)
Write the projection array and the k-point weights to a numpy .npz file
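A hypothetical usage sketch; the prefix, log file name and import path are assumptions based on the file location, and a completed projwfc.x run must exist on disk:

import numpy as np
from qepy.projwfcxml import ProjwfcXML   # import path assumed from qepy/projwfcxml.py

xml = ProjwfcXML(prefix='pw', output_filename='projwfc.log', path='.')
xml.write_proj('proj')                   # np.savez writes proj.npz
data = np.load('proj.npz')
proj, weights = data['proj'], data['weights']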
https://github.com/henriquemiranda/yambopy/blob/41b860c47e95a0d65be2a138b0043278508caee9/qepy/projwfcxml.py#L185-L189
from __future__ import print_function, division import re import xml.etree.ElementTree as ET from numpy import array, zeros from .lattice import Path, calculate_distances from .auxiliary import * RytoeV = 13.605698066 class ProjwfcXML(object): _proj_file = 'atomic_proj.xml' def __init__(self,prefix,output_filename='projwfc.log',path='.'): self.prefix = prefix self.path = path self.datafile_xml = ET.parse( "%s/%s.save/%s"%(path, prefix, self._proj_file)).getroot() self.nkpoints = int(self.datafile_xml.findall("HEADER/NUMBER_OF_K-POINTS")[0].text.strip()) self.nbands = int(self.datafile_xml.find("HEADER/NUMBER_OF_BANDS").text) self.fermi = float(self.datafile_xml.find("HEADER/FERMI_ENERGY").text)*RytoeV self.nproj = int(self.datafile_xml.find("HEADER/NUMBER_OF_ATOMIC_WFC").text) self.weights = list(map(float,self.datafile_xml.find("WEIGHT_OF_K-POINTS").text.split())) kpoints_lines = self.datafile_xml.find("K-POINTS").text.strip().split('\n') kpoints_float = [ list(map(float, kline.split())) for kline in kpoints_lines ] self.kpoints = np.array(kpoints_float) self.eigen = self.get_eigen() self.proj = self.get_proj() try: f = open("%s/%s"%(path,output_filename),'r') except: print("The output file of projwfc.x: %s was not found"%output_filename) exit(1) states = [] for line in re.findall('state\s+\#\s+([0-9]+):\s+atom\s+([0-9]+)\s+\(([a-zA-Z]+)\s+\),\s+wfc\s+([0-9])\s+\((?:j=([0-9.]+))? ?(?:l=([0-9.]+))? ?(?:m=\s+([0-9.]+))? ?(?:m_j=([ \-0-9.]+))?',f.read()): _, iatom, atype, wfc, j, l, m, m_j = line state = {'iatom':int(iatom), 'atype':atype, 'wfc':int(wfc)} if j: j = float(j) if l: l = int(l) if m: m = int(m) if m_j: m_j = float(m_j) states.append({'iatom':int(iatom), 'atype':atype, 'wfc':int(wfc), 'j':j, 'l':l, 'm':m, 'm_j':m_j}) self.states = states f.close() def __str__(self): s = "" s += "nkpoints: %d\n"%self.nkpoints s += "nbands: %d\n"%self.nbands return s def get_indexes(self): proj = zeros([self.nkpoints,self.nproj],dtype=int) for ik in range(self.nkpoints): for ip in range(self.nproj): proj[ik,ip] = np.argmax(np.absolute(self.proj[ik,ip,:])**2) return proj def plot_eigen(self, ax, size=20, cmap=None, color='r', path=[], label_1=None, selected_orbitals=[], selected_orbitals_2=[],bandmin=0,bandmax=None,alpha=1,size_projection=False): import matplotlib.pyplot as plt import matplotlib as mpl if path: if isinstance(path,Path): path = path.get_indexes() if bandmax is None or bandmax > self.nbands: bandmax = self.nbands if cmap: color_map = plt.get_cmap(cmap) kpoints_dists = calculate_distances(self.kpoints) ticks, labels = list(zip(*path)) ax.set_xticks([kpoints_dists[t] for t in ticks]) ax.set_xticklabels(labels) ax.set_ylabel('E (eV)') for t in ticks: ax.axvline(kpoints_dists[t],c='k',lw=2) ax.axhline(0,c='k') if selected_orbitals_2: w_rel = self.get_relative_weight(selected_orbitals=selected_orbitals, selected_orbitals_2=selected_orbitals_2) for ib in range(bandmin,bandmax): eig = self.eigen[:,ib] - self.fermi if size_projection==True: cax = ax.scatter(kpoints_dists,eig,s=size[:,ib],c=w_rel[:,ib],cmap=color_map,vmin=0,vmax=1,edgecolors='none',label=label_1) else: cax = ax.scatter(kpoints_dists,eig,s=size,c=w_rel[:,ib],cmap=color_map,vmin=0,vmax=1,edgecolors='none',label=label_1) else: w_proj = self.get_weights(selected_orbitals=selected_orbitals) for ib in range(bandmin,bandmax): eig = self.eigen[:,ib] - self.fermi cax = ax.scatter(kpoints_dists,eig,s=w_proj[:,ib]*size,c=color,edgecolors='none',alpha=alpha,label=label_1) ax.set_xlim(0, max(kpoints_dists)) return cax def 
get_weights(self,selected_orbitals=[],bandmin=0,bandmax=None): if bandmax is None: bandmax = self.nbands w_proj = zeros([self.nkpoints,self.nbands]) for ik in range(self.nkpoints): for ib in range(bandmin,bandmax): w_proj[ik,ib] = sum(abs(self.proj[ik,selected_orbitals,ib])**2) return w_proj def get_relative_weight(self,selected_orbitals=[],selected_orbitals_2=[],bandmin=0,bandmax=None): if bandmax is None: bandmax = self.nbands w_rel = zeros([self.nkpoints,self.nbands]) for ik in range(self.nkpoints): for ib in range(bandmin,bandmax): a = sum(abs(self.proj[ik,selected_orbitals,ib])**2) b = sum(abs(self.proj[ik,selected_orbitals_2,ib])**2) w_rel[ik,ib] = a/(a+b) return w_rel def get_eigen(self): datafile_xml = self.datafile_xml eigen = [] for ik in range(self.nkpoints): eigen.append( list(map(float, self.datafile_xml.find("EIGENVALUES/K-POINT.%d/EIG"%(ik+1)).text.split() ))) self.eigen = np.array(eigen)*RytoeV return self.eigen
BSD 3-Clause New or Revised License
adalca/neurite
neurite/tf/utils/seg.py
next_label
python
def next_label(model, data_generator):
    batch_proc = next_pred_label(model, data_generator)
    return (batch_proc[2], batch_proc[3])
Predict the next sample batch from the generator, compute the max labels, and return them.
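A minimal usage sketch, assuming `model` is a trained Keras segmentation model and `gen` yields (input, one-hot label) batches; the import path is inferred from the file location:

from neurite.tf.utils.seg import next_label   # assumed import path

# Consumes one batch from the generator and returns the argmax label maps
# for the ground truth and the model prediction.
true_max_labels, pred_max_labels = next_label(model, gen)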
https://github.com/adalca/neurite/blob/c7bb05d5dae47d2a79e0fe5a8284f30b2304d335/neurite/tf/utils/seg.py#L276-L282
import itertools import numpy as np from tqdm import tqdm_notebook as tqdm from pprint import pformat import tensorflow as tf from tensorflow import keras import tensorflow.keras.backend as K import neurite as ne import neurite.py.utils import pystrum.pynd.ndutils as nd import pystrum.pynd.patchlib as pl import pystrum.pytools.timer as timer def predict_volumes(models, data_generator, batch_size, patch_size, patch_stride, grid_size, nan_func=np.nanmedian, do_extra_vol=False, do_prob_of_true=False, verbose=False): if not isinstance(models, (list, tuple)): models = (models,) with timer.Timer('predict_volume_stack', verbose): vol_stack = predict_volume_stack(models, data_generator, batch_size, grid_size, verbose) if len(models) == 1: do_prior = len(vol_stack) == 4 else: do_prior = len(vol_stack[0]) == 4 ret = () for midx, _ in enumerate(models): stack = vol_stack if len(models) == 1 else vol_stack[midx] if do_prior: all_true, all_pred, all_vol, all_prior = stack else: all_true, all_pred, all_vol = stack all_true_label, all_pred_label = pred_to_label(all_true, all_pred) args = [patch_size, grid_size, patch_stride] label_kwargs = {'nan_func_layers': nan_func, 'nan_func_K': nan_func, 'verbose': verbose} vol_true_label = _quilt(all_true_label, *args, **label_kwargs).astype('int') vol_pred_label = _quilt(all_pred_label, *args, **label_kwargs).astype('int') ret_set = (vol_true_label, vol_pred_label) if do_extra_vol: vol_input = _quilt(all_vol, *args) ret_set += (vol_input, ) if do_prior: all_prior_label, = pred_to_label(all_prior) vol_prior_label = _quilt(all_prior_label, *args, **label_kwargs).astype('int') ret_set += (vol_prior_label, ) if do_extra_vol and do_prob_of_true: all_pp = prob_of_label(all_pred, all_true_label) pred_prob_of_true = _quilt(all_pp, *args, **label_kwargs) ret_set += (pred_prob_of_true, ) if do_prior: all_pp = prob_of_label(all_prior, all_true_label) prior_prob_of_true = _quilt(all_pp, *args, **label_kwargs) ret_set += (prior_prob_of_true, ) ret += (ret_set, ) if len(models) == 1: ret = ret[0] return ret def predict_volume_stack(models, data_generator, batch_size, grid_size, verbose=False): if not isinstance(models, (list, tuple)): models = (models,) nb_patches = np.prod(grid_size) nb_batches = ((nb_patches - 1) // batch_size) + 1 batch_gen = tqdm(range(nb_batches)) if verbose else range(nb_batches) for batch_idx in batch_gen: sample = next(data_generator) nb_vox = np.prod(sample[1].shape[1:-1]) do_prior = isinstance(sample[0], (list, tuple)) if batch_idx == 0: nb_labels = sample[1].shape[-1] all_vol = [np.zeros((nb_patches, nb_vox)) for f in models] all_true = [np.zeros((nb_patches, nb_vox * nb_labels)) for f in models] all_pred = [np.zeros((nb_patches, nb_vox * nb_labels)) for f in models] all_prior = [np.zeros((nb_patches, nb_vox * nb_labels)) for f in models] for idx, model in enumerate(models): pred = model.predict(sample[0]) assert pred.shape[0] == batch_size, "batch size mismatch. 
sample has batch size %d, given batch size is %d" % ( pred.shape[0], batch_size) input_batch = sample[0] if not do_prior else sample[0][0] batch_start = batch_idx * batch_size batch_end = np.minimum(batch_start + batch_size, nb_patches) batch_range = np.arange(batch_start, batch_end) batch_vox_idx = batch_end - batch_start all_vol[idx][batch_range, :] = K.batch_flatten(input_batch)[0:batch_vox_idx, :] all_true[idx][batch_range, :] = K.batch_flatten(sample[1])[0:batch_vox_idx, :] all_pred[idx][batch_range, :] = K.batch_flatten(pred)[0:batch_vox_idx, :] if do_prior: all_prior[idx][batch_range, :] = K.batch_flatten(sample[0][1])[0:batch_vox_idx, :] for idx, _ in enumerate(models): all_true[idx] = np.reshape(all_true[idx], [nb_patches, nb_vox, nb_labels]) all_pred[idx] = np.reshape(all_pred[idx], [nb_patches, nb_vox, nb_labels]) if do_prior: all_prior[idx] = np.reshape(all_prior[idx], [nb_patches, nb_vox, nb_labels]) ret = () for midx, _ in enumerate(models): if do_prior: ret += ((all_true[midx], all_pred[midx], all_vol[midx], all_prior[midx]), ) else: ret += ((all_true[midx], all_pred[midx], all_vol[midx]), ) if len(models) == 1: ret = ret[0] return ret def prob_of_label(vol, labelvol): nb_dims = np.ndim(labelvol) assert np.ndim(vol) == nb_dims + 1, "vol dimensions do not match [%d] vs [%d]" % (np.ndim(vol) - 1, nb_dims) shp = vol.shape nb_voxels = np.prod(shp[0:nb_dims]) nb_labels = shp[-1] flat_vol = np.reshape(vol, (nb_voxels, nb_labels)) rows_sums = flat_vol.sum(axis=1) flat_vol_norm = flat_vol / rows_sums[:, np.newaxis] idx = list(range(nb_voxels)) v = flat_vol_norm[idx, labelvol.flat] return np.reshape(v, labelvol.shape) def next_pred_label(model, data_generator, verbose=False): sample = next(data_generator) with timer.Timer('prediction', verbose): pred = model.predict(sample[0]) sample_input = sample[0] if not isinstance(sample[0], (list, tuple)) else sample[0][0] max_labels = pred_to_label(sample_input, pred) return (sample, pred) + max_labels
Apache License 2.0
mikevoets/jama16-retina-replication
lib/preprocess.py
resize
python
def resize(images_paths, size=299):
    for image_path in images_paths:
        image = cv2.imread(image_path)
        image = cv2.resize(image, (size, size))
        cv2.imwrite(image_path, image,
                    [int(cv2.IMWRITE_JPEG_QUALITY), 100])
Function for resizing images in place.

:param images_paths: Required. Paths to images.
:param size: Optional. Size to resize to. Defaults to 299.
:return: Nothing.
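A hypothetical usage sketch (the glob pattern is a placeholder); note that the images are overwritten in place:

from glob import glob
from lib.preprocess import resize   # import path assumed from the repository layout

image_paths = glob('data/images/*.jpg')
resize(image_paths, size=299)       # each file is re-saved at 299x299, JPEG quality 100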
https://github.com/mikevoets/jama16-retina-replication/blob/15f2af375c5a581a2002f346efc9a72f4b47f667/lib/preprocess.py#L239-L259
import tensorflow as tf import os import sys import cv2 import matplotlib matplotlib.use('agg') from pylab import array, arange, uint8 def _increase_contrast(image): copy = image.copy() maxIntensity = 255.0 x = arange(maxIntensity) phi = 1.3 theta = 1.5 y = (maxIntensity/phi)*(x/(maxIntensity/theta))**0.5 copy = (maxIntensity/phi)*(copy/(maxIntensity/theta))**2 copy = array(copy, dtype=uint8) return copy def _find_contours(image): processed = _increase_contrast(image) gray = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY) cnts = cv2.findContours( gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] center = None if len(cnts) > 0: c = max(cnts, key=cv2.contourArea) ((x, y), radius) = cv2.minEnclosingCircle(c) if radius > 100: M = cv2.moments(c) center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) return (center, radius) def _get_filename(file_path): return file_path.split("/")[-1] def _resize_and_center_fundus(image, diameter): copy = image.copy() contours = _find_contours(image) if contours is None: return None center, radius = contours x_min = max(0, int(center[0] - radius)) y_min = max(0, int(center[1] - radius)) z = int(radius*2) x_max = x_min + z y_max = y_min + z copy = copy[y_min:y_max, x_min:x_max] fx = fy = (diameter / 2) / radius copy = cv2.resize(copy, (0, 0), fx=fx, fy=fy) shape = copy.shape top = bottom = int((diameter - shape[0])/2) left = right = int((diameter - shape[1])/2) if shape[0] + top + bottom == diameter - 1: top += 1 if shape[1] + left + right == diameter - 1: left += 1 border = [top, bottom, left, right] copy = cv2.copyMakeBorder(copy, *border, borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0]) return copy def _get_image_paths(images_path): return [os.path.join(images_path, fn) for fn in os.listdir(images_path)] def _resize_and_center_fundus_all(image_paths, save_path, diameter, verbosity): num_images = len(image_paths) success = 0 for i, image_path in enumerate(image_paths): if verbosity > 0: msg = "\r- Preprocessing image: {0:>6} / {1}".format( i+1, num_images) sys.stdout.write(msg) sys.stdout.flush() try: image = cv2.imread(os.path.abspath(image_path), -1) processed = _resize_and_center_fundus(image, diameter=diameter) if processed is None: print("Could not preprocess {}...".format(image_path)) else: image_filename = _get_filename(image_path) image_jpeg_filename = "{0}.jpg".format(os.path.splitext( os.path.basename(image_filename))[0]) output_path = os.path.join(save_path, image_jpeg_filename) cv2.imwrite(output_path, processed, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) success += 1 except AttributeError as e: print(e) print("Could not preprocess {}...".format(image_path)) return success def resize_and_center_fundus(save_path=None, images_path=None, image_paths=None, image_path=None, diameter=299, verbosity=1): if save_path is None: raise ValueError("Save path not specified!") save_path = os.path.abspath(save_path) if image_paths is not None: return _resize_and_center_fundus_all(image_paths=image_paths, save_path=save_path, diameter=diameter, verbosity=verbosity) elif images_path is not None: image_paths = _get_image_paths(images_path) return _resize_and_center_fundus_all(image_paths=image_paths, save_path=save_path, diameter=diameter, verbosity=verbosity) elif image_path is not None: return _resize_and_center_fundus_all(image_paths=[image_path], save_path=save_path, diameter=diameter, verbosity=verbosity)
MIT License
fxtd-odyssey/qbinder
research/pyqtConfig/config.py
_set_QPushButton
python
def _set_QPushButton(self, v):
    self.setChecked(v)
Set checked state of QPushButton
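A minimal sketch of the handler's effect, assuming a running QApplication; called here as a plain function with the widget as the first argument (the config framework presumably binds it onto the widget via types_MethodType):

from Qt.QtWidgets import QApplication, QPushButton

app = QApplication([])
button = QPushButton('toggle me')
button.setCheckable(True)          # the checked state only applies to checkable buttons
_set_QPushButton(button, True)     # equivalent to button.setChecked(True)
assert _get_QPushButton(button)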
https://github.com/fxtd-odyssey/qbinder/blob/734fc2aaf80a495c1216b2c27530ab752279d103/research/pyqtConfig/config.py#L289-L293
from __future__ import unicode_literals from __future__ import print_function __author__ = 'timmyliang' __email__ = '820472580@qq.com' __date__ = '2020-03-09 16:19:41' from Qt.QtGui import * from Qt.QtCore import * from Qt.QtWidgets import * import os import sys import types from collections import defaultdict, OrderedDict import logging try: import xml.etree.cElementTree as et except ImportError: import xml.etree.ElementTree as et try: QVariant except NameError: QVariant = None RECALCULATE_ALL = 1 RECALCULATE_VIEW = 2 def types_MethodType(fn, handler): try: return types.MethodType(fn, handler, type(handler)) except TypeError: return types.MethodType(fn, handler) def _convert_list_type_from_XML(vs): vlist = vs.findall('ListItem') + vs.findall('ConfigListItem') l = [] for xconfig in vlist: v = xconfig.text if xconfig.get('type') in CONVERT_TYPE_FROM_XML: v = CONVERT_TYPE_FROM_XML[xconfig.get('type')](xconfig) l.append(v) return l def _convert_list_type_to_XML(co, vs): for cv in vs: c = et.SubElement(co, "ListItem") t = type(cv).__name__ c.set("type", t) c = CONVERT_TYPE_TO_XML[t](c, cv) return co def _convert_dict_type_from_XML(vs): vlist = vs.findall('DictItem') d = {} for xconfig in vlist: v = xconfig.text if xconfig.get('type') in CONVERT_TYPE_FROM_XML: v = CONVERT_TYPE_FROM_XML[xconfig.get('type')](xconfig) d[xconfig.get('key')] = v return d def _convert_dict_type_to_XML(co, vs): for k, v in vs.items(): c = et.SubElement(co, "DictItem") t = type(v).__name__ c.set("type", t) c.set("key", k) c = CONVERT_TYPE_TO_XML[t](c, v) return co def _apply_text_str(co, s): co.text = str(s) return co CONVERT_TYPE_TO_XML = { 'str': _apply_text_str, 'unicode': _apply_text_str, 'int': _apply_text_str, 'float': _apply_text_str, 'bool': _apply_text_str, 'list': _convert_list_type_to_XML, 'tuple': _convert_list_type_to_XML, 'dict': _convert_dict_type_to_XML, 'NoneType': _apply_text_str, } CONVERT_TYPE_FROM_XML = { 'str': lambda x: str(x.text), 'unicode': lambda x: str(x.text), 'int': lambda x: int(x.text), 'float': lambda x: float(x.text), 'bool': lambda x: bool(x.text.lower() == 'true'), 'list': _convert_list_type_from_XML, 'tuple': _convert_list_type_from_XML, 'dict': _convert_dict_type_from_XML, 'NoneType': lambda x: None, } def build_dict_mapper(mdict): rdict = {v: k for k, v in mdict.items()} return ( lambda x: mdict[x] if x in mdict else x, lambda x: rdict[x] if x in rdict else x, ) try: unicode except: def unicode(s): if isinstance(s, bytes): return s.decode('utf-8') else: return s try: basestring except: basestring = str def build_tuple_mapper(mlist): mdict = {k: v for k, v in mlist} rdict = {v: k for k, v in mlist} return ( lambda x: mdict[x] if x in mdict else x, lambda x: rdict[x] if x in rdict else x, ) def _get_QComboBox(self): return self._get_map(self.currentText()) def _set_QComboBox(self, v): self.setCurrentIndex(self.findText(unicode(self._set_map(v)))) def _event_QComboBox(self): return self.currentIndexChanged def _get_QCheckBox(self): return self.isChecked() def _set_QCheckBox(self, v): self.setChecked(v) def _event_QCheckBox(self): return self.stateChanged def _get_QAction(self): return self.isChecked() def _set_QAction(self, v): self.setChecked(v) def _event_QAction(self): return self.toggled def _get_QActionGroup(self): if self.checkedAction(): return self.actions().index(self.checkedAction()) else: return None def _set_QActionGroup(self, v): self.actions()[v].setChecked(True) def _event_QActionGroup(self): return self.triggered def _get_QPushButton(self): return self.isChecked()
MIT License
beanbaginc/django-evolution
django_evolution/utils/migrations.py
create_pre_migrate_state
python
def create_pre_migrate_state(executor):
    assert supports_migrations, \
        'This cannot be called on Django 1.6 or earlier.'

    if django_version >= (1, 10):
        return executor._create_project_state(with_applied_migrations=True)

    return None
Create state needed before migrations are applied.

The return value is dependent on the version of Django.

Args:
    executor (django.db.migrations.executor.MigrationExecutor):
        The migration executor that will handle the migrations.

Returns:
    django.db.migrations.state.ProjectState:
    The state needed for applying migrations.
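A hypothetical usage sketch, assuming a configured Django project on a version with migrations support; MigrationExecutor here is django_evolution's subclass from the same module:

from django.db import connection
from django_evolution.utils.migrations import (MigrationExecutor,
                                               create_pre_migrate_state)

executor = MigrationExecutor(connection)
state = create_pre_migrate_state(executor)
# a ProjectState on Django >= 1.10, None on older versions with migrations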
https://github.com/beanbaginc/django-evolution/blob/fb76e44a2361a69a440dca086c0cc67ac6a4300d/django_evolution/utils/migrations.py#L788-L809
from __future__ import unicode_literals from importlib import import_module import django try: from django.core.management.sql import (emit_post_migrate_signal, emit_pre_migrate_signal) from django.db.migrations import Migration from django.db.migrations.executor import (MigrationExecutor as DjangoMigrationExecutor) from django.db.migrations.loader import (MigrationLoader as DjangoMigrationLoader) from django.db.migrations.recorder import MigrationRecorder from django.db.migrations.state import ModelState emit_post_sync_signal = None emit_pre_sync_signal = None except ImportError: from django.core.management.sql import (emit_post_sync_signal, emit_pre_sync_signal) DjangoMigrationExecutor = object DjangoMigrationLoader = object Migration = None MigrationRecorder = None ModelState = None emit_post_migrate_signal = None emit_pre_migrate_signal = None from django_evolution.compat import six from django_evolution.compat.models import get_model from django_evolution.errors import (DjangoEvolutionSupportError, MigrationConflictsError, MigrationHistoryError) from django_evolution.signals import applied_migration, applying_migration from django_evolution.support import supports_migrations from django_evolution.utils.apps import get_app_name django_version = django.VERSION[:2] class MigrationList(object): @classmethod def from_app_sig(cls, app_sig): return cls.from_names(app_label=app_sig.app_id, migration_names=app_sig.applied_migrations) @classmethod def from_names(cls, app_label, migration_names): migration_list = cls() if migration_names: for name in migration_names: migration_list.add_migration_info(app_label=app_label, name=name) return migration_list @classmethod def from_database(cls, connection, app_label=None): recorder = MigrationRecorder(connection) recorder.ensure_schema() migration_list = cls() queryset = recorder.migration_qs if app_label: queryset = queryset.filter(app=app_label) for recorded_migration in queryset.all(): migration_list.add_recorded_migration(recorded_migration) return migration_list def __init__(self): self._by_app_label = {} self._by_id = {} def has_migration_info(self, app_label, name): return (app_label, name) in self._by_id def add_migration_targets(self, targets): for app_label, name in targets: self.add_migration_info(app_label=app_label, name=name) def add_migration(self, migration): assert Migration is not None assert isinstance(migration, Migration) self.add_migration_info(app_label=migration.app_label, name=migration.name, migration=migration) def add_recorded_migration(self, recorded_migration): assert MigrationRecorder is not None assert isinstance(recorded_migration, MigrationRecorder.Migration) self.add_migration_info(app_label=recorded_migration.app, name=recorded_migration.name, recorded_migration=recorded_migration) def add_migration_info(self, app_label, name, migration=None, recorded_migration=None): info = { 'app_label': app_label, 'migration': migration, 'name': name, 'recorded_migration': recorded_migration, } self._by_app_label.setdefault(app_label, []).append(info) self._by_id[(app_label, name)] = info def update(self, other): for other_info in other: app_label = other_info['app_label'] name = other_info['name'] info = self._by_id.get((app_label, name)) if info is None: self.add_migration_info(app_label=app_label, name=name) else: for key in ('migration', 'recorded_migration'): if info[key] is None: info[key] = other_info[key] def to_targets(self): return set( (info['app_label'], info['name']) for info in self ) def get_app_labels(self): 
return list(sorted(six.iterkeys(self._by_app_label))) def clone(self): new_migration_list = MigrationList() for info in self: new_migration_list.add_migration_info(**info) return new_migration_list def __bool__(self): return bool(self._by_id) def __len__(self): return len(self._by_id) def __eq__(self, other): if other is None or not isinstance(other, MigrationList): return False return self._by_id == other._by_id def __iter__(self): for app_label, info_list in sorted(six.iteritems(self._by_app_label), key=lambda pair: pair[0]): for info in info_list: yield info def __add__(self, other): new_migration_list = self.clone() new_migration_list.update(other) return new_migration_list def __sub__(self, other): new_migration_list = MigrationList() for info in self: if not other.has_migration_info(app_label=info['app_label'], name=info['name']): new_migration_list.add_migration_info(**info) return new_migration_list def __repr__(self): return '<MigrationList%s>' % list(self) class MigrationLoader(DjangoMigrationLoader): def __init__(self, connection, custom_migrations=None, *args, **kwargs): self._custom_migrations = custom_migrations or MigrationList() self._applied_migrations = None self._lock_migrations = False self.extra_applied_migrations = MigrationList() super(MigrationLoader, self).__init__(connection, *args, **kwargs) @property def applied_migrations(self): extra_migrations = self.extra_applied_migrations if isinstance(self._applied_migrations, dict): applied_migrations = self._applied_migrations.copy() for info in extra_migrations: app_label = info['app_label'] name = info['name'] recorded_migration = info['recorded_migration'] if recorded_migration is None: recorded_migration = MigrationRecorder.Migration( app=app_label, name=name, applied=True) applied_migrations[(app_label, name)] = recorded_migration elif isinstance(self._applied_migrations, set): applied_migrations = self._applied_migrations | set( (info['app_label'], info['name']) for info in extra_migrations ) else: raise DjangoEvolutionSupportError( 'Migration.applied_migrations is an unexpected type (%s)' % type(self._applied_migrations)) return applied_migrations @applied_migrations.setter def applied_migrations(self, value): if value is not None and not isinstance(value, (dict, set)): raise DjangoEvolutionSupportError( 'Migration.applied_migrations was set to an unexpected type ' '(%s)' % type(value)) if value is None: self._applied_migrations = None else: if django_version >= (3, 0): self._applied_migrations = dict(value) else: self._applied_migrations = value def build_graph(self, reload_migrations=True): if not reload_migrations: self._lock_migrations = True try: super(MigrationLoader, self).build_graph() finally: self._lock_migrations = False def load_disk(self): if self._lock_migrations: return super(MigrationLoader, self).load_disk() for info in self._custom_migrations: migration = info['migration'] assert migration is not None app_label = info['app_label'] name = info['name'] self.migrated_apps.add(app_label) self.unmigrated_apps.discard(app_label) self.disk_migrations[(app_label, name)] = migration class MigrationExecutor(DjangoMigrationExecutor): def __init__(self, connection, custom_migrations=None, signal_sender=None): self._signal_sender = signal_sender or self super(MigrationExecutor, self).__init__( connection=connection, progress_callback=self._on_progress) self.loader = MigrationLoader(connection=connection, custom_migrations=custom_migrations) def run_checks(self): if hasattr(self.loader, 
'check_consistent_history'): from django.db.migrations.exceptions import InconsistentMigrationHistory try: self.loader.check_consistent_history(self.connection) except InconsistentMigrationHistory as e: raise MigrationHistoryError(six.text_type(e)) conflicts = self.loader.detect_conflicts() if conflicts: raise MigrationConflictsError(conflicts) def _on_progress(self, action, migration=None, *args, **kwargs): if action == 'apply_start': applying_migration.send(sender=self._signal_sender, migration=migration) elif action == 'apply_success': applied_migration.send(sender=self._signal_sender, migration=migration) def has_migrations_module(app): app_name = get_app_name(app) try: import_module('%s.migrations' % app_name) return True except ImportError: return False def record_applied_migrations(connection, migrations): assert supports_migrations, 'This cannot be called on Django 1.6 or earlier.' recorder = MigrationRecorder(connection) recorder.ensure_schema() recorder.migration_qs.bulk_create( recorder.Migration(app=info['app_label'], name=info['name']) for info in migrations ) def unrecord_applied_migrations(connection, app_label, migration_names=None): assert supports_migrations, 'This cannot be called on Django 1.6 or earlier.' recorder = MigrationRecorder(connection) recorder.ensure_schema() queryset = recorder.migration_qs.filter(app=app_label) if migration_names: queryset = queryset.filter(name__in=migration_names) queryset.delete() def filter_migration_targets(targets, app_labels=None, exclude=None): if app_labels is not None: if not isinstance(app_labels, set): app_labels = set(app_labels) targets = ( target for target in targets if target[0] in app_labels ) if exclude: if not isinstance(exclude, set): exclude = set(exclude) targets = ( target for target in targets if target not in exclude ) return list(targets) def is_migration_initial(migration): initial = getattr(migration, 'initial', None) if initial is False: return False elif initial is None: for dep_app_label, dep_app_name in migration.dependencies: if dep_app_label == migration.app_label: return False return True
BSD 3-Clause New or Revised License
jeffersonheard/sondra
sondra/document/__init__.py
Document.suite
python
def suite(self):
    return self.application.suite
The suite instance this document's application is attached to.
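For context, a minimal hypothetical sketch of how the property is reached; `some_collection` stands in for a real Sondra Collection bound to an application and suite and is not defined in this snippet:

# `some_collection` is assumed to be a Collection whose application belongs to a suite.
doc = Document({}, collection=some_collection)
assert doc.suite is doc.collection.application.suite  # the property only delegates upward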
https://github.com/jeffersonheard/sondra/blob/da9159924824aeb2dd3db7b72cefa40c197bc7cb/sondra/document/__init__.py#L214-L216
import json import logging from abc import ABCMeta from collections import OrderedDict from collections.abc import MutableMapping from copy import deepcopy import jsonschema from sondra.api.expose import method_schema, expose_method_explicit from sondra.document.schema_parser import ListHandler, ForeignKey try: from shapely.geometry import mapping, shape from shapely.geometry.base import BaseGeometry except: logging.warning("Shapely not imported. Geometry objects will not be supported directly.") from sondra import help from sondra.utils import mapjson, split_camelcase, deprecated from sondra.schema import S, merge from sondra.api.ref import Reference __all__ = ( "Document", "DocumentMetaclass" ) def _reference(v): if isinstance(v, Document): if not v.id: v.save() return v.url else: return v class DocumentMetaclass(ABCMeta): def __new__(mcs, name, bases, attrs): definitions = {} schema = attrs.get('schema', S.object()) for base in bases: if hasattr(base, "definitions") and base.definitions is not None: definitions = merge(deepcopy(base.definitions), definitions) if hasattr(base, "schema") and base.schema is not None: schema = merge(deepcopy(base.schema), schema) if hasattr(base, "__doc__"): docstring = base.__doc__ if "definitions" in attrs: merge(attrs['definitions'], definitions) else: attrs['definitions'] = definitions if 'title' not in attrs or (attrs['title'] is None): if 'title' in schema: attrs['title'] = schema['title'] else: attrs['title'] = split_camelcase(name) attrs['schema'] = schema return super().__new__(mcs, name, bases, attrs) def __init__(cls, name, bases, nmspc): cls.exposed_methods = {} for base in bases: if hasattr(base, 'exposed_methods'): cls.exposed_methods.update(base.exposed_methods) for name, method in (n for n in nmspc.items() if hasattr(n[1], 'exposed')): cls.exposed_methods[name] = method cls.schema['methods'] = [m.slug for m in cls.exposed_methods.values()] cls.schema['definitions'] = nmspc.get('definitions', {}) cls.schema['template'] = nmspc.get('template','{id}') cls.defaults = {k: cls.schema['properties'][k]['default'] for k in cls.schema['properties'] if 'default' in cls.schema['properties'][k]} super(DocumentMetaclass, cls).__init__(name, bases, nmspc) class Document(MutableMapping, metaclass=DocumentMetaclass): title = None defaults = {} template = "${id}" display_name_template = "{id}" processors = [] specials = {} store_nulls = set() debug_validate_on_retrieval = False def constructor(self, obj): if self.collection.primary_key in obj: self._url = '/'.join((self.collection.url, _reference(obj[self.collection.primary_key]))) if '_url' in obj: del obj['_url'] if '_display_name' in obj: del obj['_display_name'] if obj: for k, v in obj.items(): try: self[k] = v except Exception as e: raise KeyError(k, str(e)) for k in self.defaults: if k not in self: if callable(self.defaults[k]): try: self[k] = self.defaults[k]() except: self[k] = self.defaults[k](self.suite) else: self[k] = self.defaults[k] for k, vh in self.specials.items(): if k not in self: if vh.has_default: self[k] = vh.default_value() for p in self.processors: p.run_on_constructor(self) if self.debug_validate_on_retrieval and self.saved and self.suite.debug: self.validate() def __init__(self, obj, collection=None, from_db=False, metadata=None): self.collection = collection self.saved = from_db self.metadata = metadata or {} self.obj = OrderedDict() if self.collection is not None: self.schema = self.collection.schema else: self.schema = mapjson(lambda x: x(context=self) if callable(x) else x, 
self.schema) self._url = None self.constructor(obj) def __str__(self): return self.template.format(**self.obj) def refresh(self): new = self.collection[self.id] self.obj = new.obj return self @property def application(self): return self.collection.application @property
Apache License 2.0
ucam-smt/sgnmt
cam/sgnmt/predictors/tokenization.py
CombinedState._get_token_score
python
def _get_token_score(self, token, predictor):
    return utils.common_get(self.posterior,
                            token,
                            predictor.get_unk_probability(self.posterior))
Look up ``token`` in ``self.posterior``.
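A small illustration of the fallback behaviour this lookup relies on, assuming a dict-backed posterior (``utils.common_get`` also accepts array-like posteriors); the scores below are made up:

# Plain-Python sketch of what the lookup amounts to for a dict posterior.
posterior = {4: -0.2, 7: -1.6}    # token id -> log score (made-up values)
unk_score = -10.0                 # stand-in for predictor.get_unk_probability(posterior)
assert posterior.get(7, unk_score) == -1.6     # known token: its posterior score
assert posterior.get(99, unk_score) == -10.0   # unseen token: falls back to the UNK score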
https://github.com/ucam-smt/sgnmt/blob/c663ec7b251552e36b6b4f992f0ac21aad87cb7b/cam/sgnmt/predictors/tokenization.py#L148-L152
import copy import logging from cam.sgnmt import utils from cam.sgnmt.misc.trie import SimpleTrie from cam.sgnmt.predictors.core import UnboundedVocabularyPredictor, Predictor from cam.sgnmt.utils import NEG_INF, common_get EPS_ID = 0 class CombinedState(object): def __init__(self, fst_node, pred_state, posterior, unconsumed = [], pending_score = 0.0): self.fst_node = fst_node self.pred_state = pred_state self.posterior = posterior self.unconsumed = list(unconsumed) self.pending_score = pending_score def traverse_fst(self, trans_fst, char): ret = [] self._dfs(trans_fst, ret, self.fst_node, char, self.unconsumed) return ret def _dfs(self, trans_fst, acc, root_node, char, cur_unconsumed): for arc in trans_fst.arcs(root_node): next_unconsumed = list(cur_unconsumed) if arc.olabel != EPS_ID: next_unconsumed.append(arc.olabel) if arc.ilabel == EPS_ID: self._dfs(trans_fst, acc, arc.nextstate, char, next_unconsumed) elif arc.ilabel == char: acc.append(CombinedState(arc.nextstate, self.pred_state, self.posterior, next_unconsumed, self.pending_score)) def score(self, token, predictor): if token and self.unconsumed: self.consume_all(predictor) s = self.pending_score if token: s += self._get_token_score(token, predictor) return s def consume_all(self, predictor): if not self.unconsumed: return if self.posterior is None: self.update_posterior(predictor) predictor.set_state(copy.deepcopy(self.pred_state)) for token in self.unconsumed: self.pending_score += self._get_token_score(token, predictor) predictor.consume(token) self.posterior = predictor.predict_next() self.pred_state = copy.deepcopy(predictor.get_state()) self.unconsumed = [] def consume_single(self, predictor): if not self.unconsumed: return if not self.posterior is None: self.pending_score += self._get_token_score(self.unconsumed[0], predictor) self.posterior = None
Apache License 2.0
sarugaku/vistir
tasks/__init__.py
full_release
python
def full_release(ctx, type_, repo, prebump=PREBUMP, yes=False):
    if prebump not in REL_TYPES:
        raise ValueError(f"{prebump} not in {REL_TYPES}")
    prebump = REL_TYPES.index(prebump)

    version = bump_version(ctx, type_, log=True)

    tag_release(version, yes=yes)

    ctx.run("python setup.py sdist bdist_wheel")

    dist_pattern = f'{PACKAGE_NAME.replace("-", "[-_]")}-*'
    artifacts = list(ROOT.joinpath("dist").glob(dist_pattern))
    filename_display = "\n".join(f" {a}" for a in artifacts)
    print(f"[release] Will upload:\n{filename_display}")
    if not yes:
        try:
            input("[release] Release ready. ENTER to upload, CTRL-C to abort: ")
        except KeyboardInterrupt:
            print("\nAborted!")
            return

    arg_display = " ".join(f'"{n}"' for n in artifacts)
    ctx.run(f'twine upload --repository="{repo}" {arg_display}')

    version = _prebump(version, prebump)
    _write_version(version)

    ctx.run(f'git commit -am "Prebump to {version}"')
Make a new release.
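A hedged sketch of driving the first steps of the same flow by hand with helpers defined in this module, using an invoke ``Context`` created directly instead of the ``invoke full-release ...`` command line:

from invoke import Context

ctx = Context()
new_version = bump_version(ctx, "minor", log=True)  # same helper the task calls first
ctx.run("python setup.py sdist bdist_wheel")         # same build step the task runs
print(f"[release] built dist artifacts for {new_version}")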
https://github.com/sarugaku/vistir/blob/834be8ea29d032e07763ff3fce020de0ed49997e/tasks/__init__.py#L222-L254
import datetime import pathlib import re import shutil import subprocess import time import invoke import parver from towncrier._builder import find_fragments, render_fragments, split_fragments from towncrier._settings import load_config def _get_git_root(ctx): return pathlib.Path( ctx.run("git rev-parse --show-toplevel", hide=True).stdout.strip() ) def _get_branch(ctx): return ctx.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() ROOT = pathlib.Path(__file__).resolve().parent.parent PACKAGE_NAME = "vistir" INIT_PY = ROOT.joinpath("src", PACKAGE_NAME, "__init__.py") @invoke.task() def clean(ctx): dist = ROOT.joinpath("dist") build = ROOT.joinpath("build") print("[clean] Removing dist and build dirs") if dist.exists(): shutil.rmtree(dist.as_posix()) if build.exists(): shutil.rmtree(build.as_posix()) def _read_version(): out = subprocess.check_output(["git", "tag"], encoding="ascii") try: version = max( parver.Version.parse(v).normalize() for v in (line.strip() for line in out.split("\n")) if v ) except ValueError: version = parver.Version.parse("0.0.0") return version def _read_text_version(): lines = INIT_PY.read_text().splitlines() match = next(iter(line for line in lines if line.startswith("__version__")), None) if match is not None: _, _, version_text = match.partition("=") version_text = version_text.strip().strip('"').strip("'") version = parver.Version.parse(version_text).normalize() return version else: return _read_version() def _write_version(v): lines = [] with INIT_PY.open() as f: for line in f: if line.startswith("__version__ = "): line = f"__version__ = {repr(str(v))}\n".replace("'", '"') lines.append(line) with INIT_PY.open("w", newline="\n") as f: f.write("".join(lines)) def _render_log(): config = load_config(ROOT) definitions = config["types"] fragments, fragment_filenames = find_fragments( pathlib.Path(config["directory"]).absolute(), config["sections"], None, definitions, ) rendered = render_fragments( pathlib.Path(config["template"]).read_text(encoding="utf-8"), config["issue_format"], split_fragments(fragments, definitions), definitions, config["underlines"][1:], False, ) return rendered REL_TYPES = ("major", "minor", "patch") def _bump_release(version, type_, log=False): if type_ not in REL_TYPES: raise ValueError(f"{type_} not in {REL_TYPES}") index = REL_TYPES.index(type_) current_version = version.base_version() if version.is_prerelease and type_ == "patch": next_version = current_version else: next_version = current_version.bump_release(index=index) if log: print(f"[bump] {version} -> {next_version}") print(f"{next_version}") return next_version def _prebump(version, prebump, log=False): next_version = version.bump_release(index=prebump).bump_dev() if log: print(f"[bump] {version} -> {next_version}") print(f"{next_version}") return next_version PREBUMP = "patch" @invoke.task(pre=[clean]) def build(ctx): ctx.run("python setup.py sdist bdist_wheel") @invoke.task() def get_next_version(ctx, type_="patch", log=False): version = _read_version() if type_ in ("dev", "pre"): idx = REL_TYPES.index("patch") new_version = _prebump(version, idx, log=log) else: new_version = _bump_release(version, type_, log=log) return new_version @invoke.task() def bump_version(ctx, type_="patch", log=False, dry_run=False): new_version = get_next_version(ctx, type_, log=log) if not dry_run: _write_version(new_version) return new_version @invoke.task() def generate_news(ctx, yes=False, dry_run=False): command = "towncrier" if dry_run: command = f"{command} --draft" elif yes: 
command = f"{command} --yes" ctx.run(command) @invoke.task() def get_changelog(ctx): changelog = _render_log() print(changelog) return changelog @invoke.task(optional=["version", "type_"]) def tag_release(ctx, version=None, type_="patch", yes=False, dry_run=False): if version is None: version = bump_version(ctx, type_, log=not dry_run, dry_run=dry_run) else: _write_version(version) tag_content = get_changelog(ctx) generate_news(ctx, yes=yes, dry_run=dry_run) git_commit_cmd = f'git commit -am "Release {version}"' tag_content = tag_content.replace('"', '\\"') git_tag_cmd = f'git tag -a {version} -m "Version {version}\n\n{tag_content}"' if dry_run: print("Would run commands:") print(f" {git_commit_cmd}") print(f" {git_tag_cmd}") else: ctx.run(git_commit_cmd) ctx.run(git_tag_cmd) @invoke.task(optional=["version", "type_"]) def release(ctx, version=None, type_="patch", yes=False, dry_run=False): if version is None: version = bump_version(ctx, type_, log=not dry_run, dry_run=dry_run) else: _write_version(version) tag_content = get_changelog(ctx) current_branch = _get_branch(ctx) generate_news(ctx, yes=yes, dry_run=dry_run) git_commit_cmd = f'git commit -am "Release {version}"' git_tag_cmd = f'git tag -a {version} -m "Version {version}\n\n{tag_content}"' git_push_cmd = f"git push origin {current_branch}" git_push_tags_cmd = "git push --tags" if dry_run: print("Would run commands:") print(f" {git_commit_cmd}") print(f" {git_tag_cmd}") print(f" {git_push_cmd}") print(f" {git_push_tags_cmd}") else: ctx.run(git_commit_cmd) ctx.run(git_tag_cmd) ctx.run(git_push_cmd) print("Waiting 5 seconds before pushing tags...") time.sleep(5) ctx.run(git_push_tags_cmd) @invoke.task(pre=[clean])
ISC License
futureag/mvp
MVP/python/__pycache__/LogSensorsExtra.py
LogSensorExtra.getNDIRCO2Obsv
python
def getNDIRCO2Obsv(self, test=False):
    status_qualifier = 'Success'
    co2 = 0
    temp = 0
    rh = 0
    try:
        from NDIR import Sensor
        sensor = Sensor()
        sensor.begin(test)
        co2 = sensor.getCO2(test)
        if test:
            status_qualifier = 'Test'
        saveList([self._activity_type, '', 'Canopy', 'Air', 'CO2',
                  "{:3.1f}".format(co2), 'ppm', 'NDIR', status_qualifier, ''])
        self._logger.debug("{}, {}, {:10.1f}".format("NDIR Canopy CO2", status_qualifier, co2))
    except Exception as e:
        status_qualifier = 'Failure'
        if test:
            status_qualifier = 'Test'
        saveList([self._activity_type, '', 'Canopy', 'Air', 'CO2',
                  '', 'ppm', 'NDIR', status_qualifier, str(e)])
        self._logger.error("{}, {}".format("NDIR Canopy CO2", e))
CO2 from the canopy (NDIR sensor)

Args:
    self:
    test:

Returns:
    None

Raises:
    None
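A hypothetical call site, assuming the NDIR sensor library and hardware are present; ``test=True`` records the observation with a 'Test' qualifier instead of 'Success':

# Requires the NDIR package and an attached sensor; otherwise the Failure branch is logged.
sensors = LogSensorExtra()
sensors.getNDIRCO2Obsv(test=True)  # reads CO2 in ppm and saves one observation row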
https://github.com/futureag/mvp/blob/e190927abb22eb9ad4e8d0ef9a2a35e71ebab7f5/MVP/python/__pycache__/LogSensorsExtra.py#L268-L300
from oneWireTemp import * from TSL2561 import TSL2561 from EC import EC from CCS811 import CCS811, SLAVE from CouchUtil import saveList from LogUtil import get_logger from scd30 import SCD30 class LogSensorExtra(object): def __init__(self): self._logger = get_logger('LogSensorExtra') self._activity_type = "Environment_Observation" def getAirAmbientTempObsv(self, test=False): try: temp = getTempC(ambientTemp) status_qualifier = 'Success' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Ambient', 'Air', 'Temperature', "{:10.1f}".format(temp), 'Centigrade', 'DS18B20_1', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Ambient Temp", status_qualifier, temp)) except Exception as e: status_qualifier = 'Failure' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Ambient', 'Air', 'Temperature', '', 'Centigrade', 'DS18B20_1', status_qualifier, str(e)]) self._logger.error("{}, {}".format("Ambient Temp", e)) def getAirBoxTempObsv(self, test=False): try: temp = getTempC(boxTemp) status_qualifier = 'Success' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Ambient', 'Air', 'Temperature', "{:10.1f}".format(temp), 'Centigrade', 'DS18B20_2', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Box Air Temp", status_qualifier, temp)) except Exception as e: status_qualifier = 'Failure' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Ambient', 'Air', 'Temperature', '', 'Centigrade', 'DS18B20_2', status_qualifier, str(e)]) self._logger.error("{}, {}".format("Box Air Temp", e)) def getAirTopTempObsv(self, test=False): try: temp = getTempC(topTemp) status_qualifier = 'Success' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Top', 'Air', 'Temperature', "{:10.1f}".format(temp), 'Centigrade', 'DS18B20_3', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Top Air Temp", status_qualifier, temp)) except Exception as e: status_qualifier = 'Failure' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Top', 'Air', 'Temperature', '', 'Centigrade', 'DS18B20_3', status_qualifier, str(e)]) self._logger.error("{}, {}".format("Top Air Temp", e)) def getNutrientReservoirTempObsv(self, test=False): try: temp = getTempC(reservoirTemp) status_qualifier = 'Success' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Reservoir', 'Air', 'Temperature', "{:10.1f}".format(temp), 'Centigrade', 'DS18B20_4', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Reservoir Temp", status_qualifier, temp)) except Exception as e: status_qualifier = 'Failure' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Reservoir', 'Air', 'Temperature', '', 'Centigrade', 'DS18B20_4', status_qualifier, str(e)]) self._logger.error("{}, {}".format("Reservoir Temp", e)) def getLightCanopyLUXObsv(self, test=False): lx = TSL2561() try: lux = lx.getLux() status_qualifier = 'Success' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Canopy', 'Light', 'LUX', "{:3.1f}".format(lux), 'lux', 'TSL2561', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Canopy LUX", status_qualifier, lux)) except Exception as e: status_qualifier = 'Failure' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Canopy', 'Light', 'LUX', '', 'lux', 'TSL2561', status_qualifier,str(e)]) self._logger.error("{}, {}".format("Canopy LUX", e)) def getNutrientReservoirECObsv(self, test=False): try: s = EC() ec = 
s.getEC() status_qualifier = 'Success' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Reservoir', 'Nutrient', 'EC', "{:3.1f}".format(ec), 'EC', 'EC', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Reservoir EC", status_qualifier, ec)) except Exception as e: status_qualifier = 'Failure' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Reservoir', 'Nutrient', 'EC', '', 'EC', 'EC', status_qualifier,str(e)]) self._logger.error("{}, {}".format("Reservoir Depth", e)) def getAirCanopyCO2Obsv(self, test=False): status_qualifier = 'Success' co2 = 0 temp = 0 rh = 0 try: sensor = SCD30() sensor.start_periodic_measurement(test) co2, temp, rh = sensor.get_data(test) if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Canopy', 'Air', 'CO2', "{:3.1f}".format(co2), 'ppm', 'SCD30', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Canopy CO2", status_qualifier, co2)) saveList([self._activity_type, '', 'Canopy', 'Air', 'Temperature', "{:3.1f}".format(temp), 'ppm', 'SCD30', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Canopy Temperature", status_qualifier, temp)) saveList([self._activity_type, '', 'Canopy', 'Air', 'Humidity', "{:3.1f}".format(rh), 'ppm', 'SCD30', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Canopy Humidity", status_qualifier, rh)) except Exception as e: status_qualifier = 'Failure' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Canopy', 'Air', 'CO2', '', 'ppm', 'SCD30', status_qualifier,str(e)]) self._logger.error("{}, {}".format("Canopy CO2", e)) saveList([self._activity_type, '', 'Canopy', 'Air', 'Temperature', '', 'c', 'SCD30', status_qualifier,str(e)]) self._logger.error("{}, {}".format("Canopy CO2", e)) saveList([self._activity_type, '', 'Canopy', 'Air', 'Humidity', '', 'percent', 'SCD30', status_qualifier,str(e)]) self._logger.error("{}, {}".format("Canopy Humidity", e)) def getSecondCO2(self, test=False): try: sensor = CCS811(SLAVE) co2 = sensor.get_co2() status_qualifier = 'Success' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Canopy', 'Air', 'CO2', "{:3.1f}".format(co2), 'ppm', 'CCS811', status_qualifier,'']) self._logger.debug("{}, {}, {:10.1f}".format("Alt CO2", status_qualifier, co2)) except Exception as e: status_qualifier = 'Failure' if test: status_qualifier = 'Test' saveList([self._activity_type, '', 'Canopy', 'Air', 'CO2','', 'ppm', 'CCS811', status_qualifier,str(e)]) self._logger.error("{}, {}".format("Alt CO2", e) )
MIT License
reactivex/rxpy
rx/disposable/multipleassignmentdisposable.py
MultipleAssignmentDisposable.dispose
python
def dispose(self):
    old = None

    with self.lock:
        if not self.is_disposed:
            self.is_disposed = True
            old = self.current
            self.current = None

    if old is not None:
        old.dispose()
Disposes the underlying disposable as well as all future replacements.
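A short usage sketch; the ``Disposable`` action wrapper and the package-level imports are assumptions based on the RxPY 3.x layout rather than anything shown in this file:

from rx.disposable import Disposable, MultipleAssignmentDisposable

mad = MultipleAssignmentDisposable()
mad.disposable = Disposable(lambda: print("first released"))
mad.disposable = Disposable(lambda: print("second released"))  # replacement alone does not dispose
mad.dispose()                                       # prints "second released"
mad.disposable = Disposable(lambda: print("late"))  # assigned after dispose: disposed immediately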
https://github.com/reactivex/rxpy/blob/e920d4cb33a8c4ba046ec01c1dd753c23944b213/rx/disposable/multipleassignmentdisposable.py#L34-L47
from threading import RLock

from rx.core.typing import Disposable


class MultipleAssignmentDisposable(Disposable):

    def __init__(self):
        self.current = None
        self.is_disposed = False
        self.lock = RLock()
        super().__init__()

    def get_disposable(self):
        return self.current

    def set_disposable(self, value):
        with self.lock:
            should_dispose = self.is_disposed
            if not should_dispose:
                self.current = value

        if should_dispose and value is not None:
            value.dispose()

    disposable = property(get_disposable, set_disposable)
MIT License
frank-zyw/chinese-text-classification
bert/transformers/tokenization_utils.py
PreTrainedTokenizer._prepare_for_model
python
def _prepare_for_model(
    self,
    ids: List[int],
    pair_ids: Optional[List[int]] = None,
    add_special_tokens: bool = True,
    padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
    truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
    max_length: Optional[int] = None,
    stride: int = 0,
    return_tensors: Optional[str] = None,
    prepend_batch_axis: bool = False,
    return_token_type_ids: Optional[bool] = None,
    return_attention_mask: Optional[bool] = None,
    return_overflowing_tokens: bool = False,
    return_special_tokens_mask: bool = False,
    return_lengths: bool = False,
    verbose: bool = True,
) -> BatchEncoding:
    pair = bool(pair_ids is not None)
    len_ids = len(ids)
    len_pair_ids = len(pair_ids) if pair else 0

    if return_token_type_ids is None:
        return_token_type_ids = "token_type_ids" in self.model_input_names
    if return_attention_mask is None:
        return_attention_mask = "attention_mask" in self.model_input_names

    encoded_inputs = {}

    total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)

    if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
        ids, pair_ids, overflowing_tokens = self.truncate_sequences(
            ids,
            pair_ids=pair_ids,
            num_tokens_to_remove=total_len - max_length,
            truncation_strategy=truncation_strategy,
            stride=stride,
        )
        if return_overflowing_tokens:
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["num_truncated_tokens"] = total_len - max_length

    if add_special_tokens:
        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
        token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
    else:
        sequence = ids + pair_ids if pair else ids
        token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])

    encoded_inputs["input_ids"] = sequence
    if return_token_type_ids:
        encoded_inputs["token_type_ids"] = token_type_ids
    if return_special_tokens_mask:
        if add_special_tokens:
            encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
        else:
            encoded_inputs["special_tokens_mask"] = [0] * len(sequence)

    if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose:
        logger.warning(
            "Token indices sequence length is longer than the specified maximum sequence length "
            "for this model ({} > {}). Running this sequence through the model will result in "
            "indexing errors".format(len(ids), self.model_max_length)
        )

    encoded_inputs = self.pad(
        encoded_inputs,
        max_length=max_length,
        padding=padding_strategy.value,
        return_attention_mask=return_attention_mask,
    )

    if return_lengths:
        encoded_inputs["length"] = len(encoded_inputs["input_ids"])

    batch_outputs = BatchEncoding(
        encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
    )

    return batch_outputs
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking the special tokens into account, and manages a moving window (with user-defined stride) for overflowing tokens.

Args:
    ids: List of tokenized input ids. Can be obtained from a string by chaining the
        `tokenize` and `convert_tokens_to_ids` methods.
    pair_ids: Optional second list of input ids. Can be obtained from a string by chaining
        the `tokenize` and `convert_tokens_to_ids` methods.
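A hedged usage sketch; the concrete tokenizer class and checkpoint name are assumptions (this vendored module only defines the base class), and the private method is called directly purely to illustrate the signature above:

from transformers import BertTokenizer  # assumed to match the vendored version in this repo

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
encoded = tokenizer._prepare_for_model(ids, add_special_tokens=True)
print(encoded["input_ids"])  # token ids wrapped with the model's special tokens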
https://github.com/frank-zyw/chinese-text-classification/blob/546addea8d96d93b182cffcdcb7dda8ff9d53d55/bert/transformers/tokenization_utils.py#L530-L625
import itertools import logging import re from typing import List, Optional, Tuple, Union from .file_utils import add_end_docstrings from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, BatchEncoding, EncodedInput, EncodedInputPair, PaddingStrategy, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TensorType, TextInput, TextInputPair, TruncationStrategy, ) logger = logging.getLogger(__name__) class PreTrainedTokenizer(PreTrainedTokenizerBase): def __init__(self, **kwargs): super().__init__(**kwargs) self.added_tokens_encoder = {} self.unique_added_tokens_encoder = set() self.added_tokens_decoder = {} @property def is_fast(self) -> bool: return False @property def vocab_size(self) -> int: raise NotImplementedError def get_vocab(self): raise NotImplementedError() def __len__(self): return self.vocab_size + len(self.added_tokens_encoder) def add_tokens(self, new_tokens: Union[str, List[str]]) -> int: if not new_tokens: return 0 if not isinstance(new_tokens, list): new_tokens = [new_tokens] tokens_to_add = [] for token in new_tokens: assert isinstance(token, str) if self.init_kwargs.get("do_lower_case", False) and token not in self.all_special_tokens: token = token.lower() if ( token != self.unk_token and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token) and token not in tokens_to_add ): tokens_to_add.append(token) if self.verbose: logger.info("Adding %s to the vocabulary", token) added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add)) added_tok_decoder = {v: k for k, v in added_tok_encoder.items()} self.added_tokens_encoder.update(added_tok_encoder) self.unique_added_tokens_encoder = set(self.added_tokens_encoder.keys()).union(set(self.all_special_tokens)) self.added_tokens_decoder.update(added_tok_decoder) return len(tokens_to_add) def num_special_tokens_to_add(self, pair=False): token_ids_0 = [] token_ids_1 = [] return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None)) def tokenize(self, text: TextInput, **kwargs): all_special_tokens = self.all_special_tokens text = self.prepare_for_tokenization(text, **kwargs) def lowercase_text(t): escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens] pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)" return re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), t) if self.init_kwargs.get("do_lower_case", False): text = lowercase_text(text) def split_on_token(tok, text): result = [] split_text = text.split(tok) for i, sub_text in enumerate(split_text): sub_text = sub_text.rstrip() if i == 0 and not sub_text: result += [tok] elif i == len(split_text) - 1: if sub_text: result += [sub_text] else: pass else: if sub_text: result += [sub_text] result += [tok] return result def split_on_tokens(tok_list, text): if not text.strip(): return [] if not tok_list: return self._tokenize(text) tokenized_text = [] text_list = [text] for tok in tok_list: tokenized_text = [] for sub_text in text_list: if sub_text not in self.unique_added_tokens_encoder: tokenized_text += split_on_token(tok, sub_text) else: tokenized_text += [sub_text] text_list = tokenized_text return list( itertools.chain.from_iterable( ( self._tokenize(token) if token not in self.unique_added_tokens_encoder else [token] for token in tokenized_text ) ) ) added_tokens = self.unique_added_tokens_encoder tokenized_text = split_on_tokens(added_tokens, text) return tokenized_text def _tokenize(self, 
text, **kwargs): raise NotImplementedError def convert_tokens_to_ids(self, tokens): if tokens is None: return None if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in self.added_tokens_encoder: return self.added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError def _encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_pretokenized: tokens = list( itertools.chain( *( self.tokenize(t, add_special_tokens=False, add_prefix_space=True, **kwargs) for t in text ) ) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError( f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers." "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
"More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) first_ids = get_input_ids(text) second_ids = get_input_ids(text_pair) if text_pair is not None else None return self._prepare_for_model( first_ids, pair_ids=second_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, verbose=verbose, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_masks: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_masks: bool = False, return_offsets_mapping: bool = False, return_lengths: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_pretokenized: tokens = list( itertools.chain( *( self.tokenize(t, add_special_tokens=False, add_prefix_space=True, **kwargs) for t in text ) ) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError( "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers." "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) input_ids = [] for ids_or_pair_ids in batch_text_or_text_pairs: if not isinstance(ids_or_pair_ids, (list, tuple)): ids, pair_ids = ids_or_pair_ids, None elif is_pretokenized and not isinstance(ids_or_pair_ids[0], (list, tuple)): ids, pair_ids = ids_or_pair_ids, None else: ids, pair_ids = ids_or_pair_ids first_ids = get_input_ids(ids) second_ids = get_input_ids(pair_ids) if pair_ids is not None else None input_ids.append((first_ids, second_ids)) batch_outputs = self._batch_prepare_for_model( input_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, return_attention_masks=return_attention_masks, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_masks=return_special_tokens_masks, return_lengths=return_lengths, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_masks: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_masks: bool = False, return_lengths: bool = False, verbose: bool = True, ) -> BatchEncoding: if padding_strategy == PaddingStrategy.LONGEST: def total_sequence_length(input_pairs): first_ids, second_ids = input_pairs return len(first_ids) + ( self.num_special_tokens_to_add() if second_ids is None else (len(second_ids) + self.num_special_tokens_to_add(pair=True)) ) max_length = max([total_sequence_length(input_pairs) for input_pairs in batch_ids_pairs]) padding_strategy = PaddingStrategy.MAX_LENGTH batch_outputs = {} for first_ids, second_ids in batch_ids_pairs: outputs = self._prepare_for_model( first_ids, second_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, return_attention_mask=return_attention_masks, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_masks, return_lengths=return_lengths, return_tensors=None, prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
MIT License
boto/botoflow
botoflow/context/decision_context.py
DecisionContext._replaying
python
def _replaying(self):
    return self.__replaying
Do not use directly, instead please use ``botoflow.workflow_time.is_replaying``
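A sketch of the access path the docstring recommends; it assumes ``is_replaying`` is exposed as a callable in ``botoflow.workflow_time`` and only works inside workflow code where a decision context is active, so treat it as illustrative:

from botoflow import workflow_time

# Inside workflow code: skip replay-unsafe side effects (e.g. logging, metrics).
if not workflow_time.is_replaying():
    print("executing for the first time, not replaying history")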
https://github.com/boto/botoflow/blob/49d8ed3bc9c57294504be82e933a051e1901b76e/botoflow/context/decision_context.py#L35-L39
from .context_base import ContextBase
from ..workflow_execution import WorkflowExecution


class DecisionContext(ContextBase):

    def __init__(self, decider):
        self.decider = decider
        self._workflow_time = 0
        self._replaying = True

        self._activity_options_overrides = dict()
        self._workflow_options_overrides = dict()
        self._workflow_instance = None
        self._workflow_execution = WorkflowExecution(None, None)

    @property
Apache License 2.0
kmayerb/tcrdist3
tcrdist/vdjtools_funcs.py
import_vdjtools
python
def import_vdjtools(
        vdj_tools_file,
        chain='beta',
        organism='human',
        db_file='alphabeta_gammadelta_db.tsv',
        validate=True):
    assert chain in ['alpha', 'beta', 'gamma', 'delta']
    assert organism in ['human', 'mouse']

    _chain = {
        'alpha': {'cdr3aa': 'cdr3_a_aa', 'v': 'v_a_gene', 'j': 'j_a_gene', 'cdr3nt': 'cdr3_a_nucseq', 'AB': 'A'},
        'beta':  {'cdr3aa': 'cdr3_b_aa', 'v': 'v_b_gene', 'j': 'j_b_gene', 'cdr3nt': 'cdr3_b_nucseq', 'AB': 'B'},
        'gamma': {'cdr3aa': 'cdr3_g_aa', 'v': 'v_g_gene', 'j': 'j_g_gene', 'cdr3nt': 'cdr3_g_nucseq', 'AB': 'A'},
        'delta': {'cdr3aa': 'cdr3_d_aa', 'v': 'v_d_gene', 'j': 'j_d_gene', 'cdr3nt': 'cdr3_d_nucseq', 'AB': 'B'}
    }.get(chain)

    df = pd.read_csv(vdj_tools_file, sep="\t")
    df = df[['count', 'freq', 'cdr3aa', 'v', 'j', 'cdr3nt']].rename(
        columns={
            'cdr3aa': _chain['cdr3aa'],
            'v': _chain['v'],
            'j': _chain['j'],
            'cdr3nt': _chain['cdr3nt']
        })

    df[_chain['v']] = df[_chain['v']].apply(lambda x: f"{x}*01")
    df[_chain['j']] = df[_chain['j']].apply(lambda x: f"{x}*01")

    all_genes = RefGeneSet(db_file=db_file).all_genes[organism]
    all_valid_genes = [x for x in all_genes.keys() if all_genes[x].chain == _chain['AB']]

    warnings.warn(f"ADDING *01 allele to each V- and J-gene")

    df['valid_v'] = df[_chain['v']].apply(lambda x: all_genes.get(x) is not None)
    df['valid_j'] = df[_chain['j']].apply(lambda x: all_genes.get(x) is not None)
    df['valid_cdr3'] = df[_chain['cdr3aa']].apply(lambda x: _valid_cdr3(x) and len(x) >= 5)

    x1 = df.shape[0]
    invalid_df = df[~(df['valid_v'] & df['valid_j'] & df['valid_cdr3'])].copy()
    valid_df = df[(df['valid_v'] & df['valid_j'] & df['valid_cdr3'])].reset_index(drop=True).copy()
    xiv = invalid_df.shape[0]
    warnings.warn(f"{xiv} of {x1} had invalid {_chain['v']}, {_chain['j']}, or {_chain['cdr3aa']}")

    if validate:
        warnings.warn(f"REMOVED {xiv} of {x1} with invalid {_chain['v']}, {_chain['j']}, or {_chain['cdr3aa']}")
        return valid_df
    else:
        warnings.warn(f"Invalid clones were not removed, do so manually before proceeding")
        return df
Import a VDJtools-formatted input .tsv or .tsv.gz file.

The input file must have the columns ['count', 'freq', 'cdr3aa', 'v', 'j', 'cdr3nt'];
see https://vdjtools-doc.readthedocs.io/en/master/input.html#vdjtools-format for examples of how
VDJtools can directly import and convert from the following: MiTCR, MiGEC, IgBlast (MIGMAP),
ImmunoSEQ, VDJdb, Vidjil, MiXCR.

Parameters
----------
vdj_tools_file : str
    e.g., os.path.join(path_to_base, 'tcrdist', 'data', 'formats', 'vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz')
chain : str
    'alpha', 'beta', 'gamma', or 'delta'
organism : str
    'human' or 'mouse'
db_file : str
    'alphabeta_gammadelta_db.tsv'
validate : bool
    If True, only clones with a valid CDR3 amino acid sequence, V-gene, and J-gene are returned;
    the CDR3 length must be >= 5 for tcrdist3.
include_nucseq : bool
    If True, retain the nucleotide sequence.

Returns
-------
df or valid_df : DataFrame
    With columns: ['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'cdr3_b_nucseq',
    'valid_v', 'valid_j', 'valid_cdr3']
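A usage sketch built from the docstring's own example path; the file name comes straight from the docstring and must resolve locally for this to run:

import os
from tcrdist.paths import path_to_base
from tcrdist.vdjtools_funcs import import_vdjtools

fn = os.path.join(path_to_base, 'tcrdist', 'data', 'formats',
                  'vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz')
df_beta = import_vdjtools(fn, chain='beta', organism='human', validate=True)
print(df_beta[['cdr3_b_aa', 'v_b_gene', 'j_b_gene']].head())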
https://github.com/kmayerb/tcrdist3/blob/04b0b8c2573d04a9d2cb77f7a3aeeed3a0eab167/tcrdist/vdjtools_funcs.py#L8-L86
import pandas as pd

from tcrdist.adpt_funcs import _valid_cdr3
from tcrdist.paths import path_to_base
from tcrdist.repertoire_db import RefGeneSet
import warnings
import os
MIT License