Dataset columns (per-column dtype and value statistics):

| column | dtype | value statistics |
|---|---|---|
| repository_name | string | lengths 7–107 |
| function_path | string | lengths 4–190 |
| function_identifier | string | lengths 1–236 |
| language | string | 1 distinct value |
| function | string | lengths 9–647k |
| docstring | string | lengths 5–488k |
| function_url | string | lengths 71–285 |
| context | string | lengths 0–2.51M |
| license | string | 5 distinct values |
clusterhq/flocker
flocker/provision/_install.py
apt_get_update
python
def apt_get_update(sudo=False):
    return _from_args(sudo)(["apt-get", "update"])
Update apt's package metadata cache.
https://github.com/clusterhq/flocker/blob/eaa586248986d7cd681c99c948546c2b507e44de/flocker/provision/_install.py#L169-L173
from pipes import quote import posixpath from textwrap import dedent from urlparse import urljoin, urlparse from effect import Func, Effect, Constant, parallel from effect.retry import retry from time import time import yaml from zope.interface import implementer from eliot import write_failure from characteristic import attributes from pyrsistent import PClass, field from txeffect import perform from twisted.internet.error import ProcessTerminated from ._libcloud import INode from ._common import PackageSource, Variants from ._ssh import ( Run, Sudo, run_network_interacting_from_args, sudo_network_interacting_from_args, run, run_from_args, sudo_from_args, put, run_remotely, ) from ._ssh._conch import make_dispatcher from ._effect import sequence, http_get from ..common import retry_effect_with_timeout from flocker import __version__ as version from flocker.cli import configure_ssh from flocker.common.version import ( get_installable_version, get_package_key_suffix, is_release, ) START = "restart" ZFS_REPO = { 'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/" "epel/zfs-release.el7.noarch.rpm", } ARCHIVE_BUCKET = 'clusterhq-archive' class UnknownAction(Exception): def __init__(self, action): Exception.__init__(self, action) def tag_as_test_install(flocker_version, distribution, package_name): repository_url = get_repository_url( distribution=distribution, flocker_version=flocker_version) repository_host = urlparse(repository_url).hostname tag_url = bytes( "https://{host}/clusterhq-internal-acceptance-test/{distribution}/" "{package}/{version}".format( host=repository_host, distribution=distribution, package=package_name, version=flocker_version ) ) return http_get(tag_url) def is_rhel(distribution): return distribution.startswith("rhel-") def is_centos_or_rhel(distribution): return (distribution.startswith(("centos-", "rhel-"))) def is_systemd_distribution(distribution): return ( is_centos_or_rhel(distribution) or distribution == "ubuntu-16.04" ) def _from_args(sudo): if sudo: return sudo_network_interacting_from_args else: return run_network_interacting_from_args def yum_install(args, package_manager="yum", sudo=False): return _from_args(sudo)([package_manager, "install", "-y"] + args) def apt_get_install(args, sudo=False): return _from_args(sudo)( ["apt-get", "-y", "install", ] + args )
Apache License 2.0
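The `apt_get_update` row above illustrates flocker's sudo/non-sudo dispatch: `_from_args(sudo)` picks a command runner once, and the command itself stays the same. Below is a minimal standalone sketch of that pattern using plain `subprocess` instead of flocker's Effect-based runners; all names in it are illustrative, not flocker's API.

```python
# Sketch only: mirrors the _from_args(sudo) dispatch shown above with subprocess.
import subprocess


def run_from_args(args):
    # Run the command as the current user.
    return subprocess.run(args, check=True)


def sudo_from_args(args):
    # Prefix the same argument list with sudo.
    return subprocess.run(["sudo"] + args, check=True)


def _from_args(sudo):
    # Choose the runner once; callers never branch on privileges themselves.
    return sudo_from_args if sudo else run_from_args


def apt_get_update(sudo=False):
    return _from_args(sudo)(["apt-get", "update"])


if __name__ == "__main__":
    apt_get_update(sudo=True)  # executes: sudo apt-get update
```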
amz-driverless/rbb_core
rbb_client/src/rbb_client/models/bag_detailed.py
BagDetailed.store_data
python
def store_data(self):
    return self._store_data
Gets the store_data of this BagDetailed.
Data that is specific to the bag store type.

:return: The store_data of this BagDetailed.
:rtype: object
https://github.com/amz-driverless/rbb_core/blob/618617270314af5335de30179072244e1f440c4c/rbb_client/src/rbb_client/models/bag_detailed.py#L142-L150
from pprint import pformat from six import iteritems class BagDetailed(object): def __init__(self): self.swagger_types = { 'detail_type': 'str', 'name': 'str', 'store_data': 'object', 'discovered': 'datetime', 'extraction_failure': 'bool', 'in_trash': 'bool', 'is_extracted': 'bool', 'meta_available': 'bool', 'size': 'int', 'start_time': 'datetime', 'end_time': 'datetime', 'duration': 'float', 'messages': 'int', 'tags': 'list[Tag]', 'topics': 'list[Topic]', 'products': 'list[Product]', 'comment': 'str' } self.attribute_map = { 'detail_type': 'detail_type', 'name': 'name', 'store_data': 'store_data', 'discovered': 'discovered', 'extraction_failure': 'extraction_failure', 'in_trash': 'in_trash', 'is_extracted': 'is_extracted', 'meta_available': 'meta_available', 'size': 'size', 'start_time': 'start_time', 'end_time': 'end_time', 'duration': 'duration', 'messages': 'messages', 'tags': 'tags', 'topics': 'topics', 'products': 'products', 'comment': 'comment' } self._detail_type = None self._name = None self._store_data = None self._discovered = None self._extraction_failure = None self._in_trash = None self._is_extracted = None self._meta_available = None self._size = None self._start_time = None self._end_time = None self._duration = None self._messages = None self._tags = None self._topics = None self._products = None self._comment = None @property def detail_type(self): return self._detail_type @detail_type.setter def detail_type(self, detail_type): self._detail_type = detail_type @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property
MIT License
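The `store_data` getter above is the usual swagger-codegen model pattern: a public property backed by a private `_store_data` attribute declared in `swagger_types`. A cut-down sketch of that pattern follows; `BagLike` is a hypothetical stand-in, since the real `BagDetailed` carries many more fields.

```python
# Minimal property-with-backing-attribute sketch; not the full generated model.
class BagLike(object):
    def __init__(self):
        self._store_data = None  # backing attribute, as in BagDetailed.__init__

    @property
    def store_data(self):
        # Getter simply exposes the private attribute, exactly as in the row above.
        return self._store_data

    @store_data.setter
    def store_data(self, store_data):
        self._store_data = store_data


bag = BagLike()
bag.store_data = {"bucket": "example-bucket"}  # store-type specific payload
print(bag.store_data)
```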
drexly/openhgsenti
lib/django/contrib/gis/db/backends/postgis/operations.py
PostGISOperations.spatial_version
python
def spatial_version(self):
    if hasattr(settings, 'POSTGIS_VERSION'):
        version = settings.POSTGIS_VERSION
    else:
        self._get_postgis_func('version')
        try:
            vtup = self.postgis_version_tuple()
        except ProgrammingError:
            raise ImproperlyConfigured(
                'Cannot determine PostGIS version for database "%s" '
                'using command "SELECT postgis_lib_version()". '
                'GeoDjango requires at least PostGIS version 2.0. '
                'Was the database created from a spatial database '
                'template?' % self.connection.settings_dict['NAME']
            )
        version = vtup[1:]
    return version
Determine the version of the PostGIS library.
https://github.com/drexly/openhgsenti/blob/d7806f58c81127d32091d9875a99ac13aef94a8a/lib/django/contrib/gis/db/backends/postgis/operations.py#L145-L172
import re from django.conf import settings from django.contrib.gis.db.backends.base.operations import BaseSpatialOperations from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter from django.contrib.gis.db.backends.postgis.pgraster import ( from_pgraster, to_pgraster, ) from django.contrib.gis.db.backends.utils import SpatialOperator from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.measure import Distance from django.core.exceptions import ImproperlyConfigured from django.db.backends.postgresql.operations import DatabaseOperations from django.db.utils import ProgrammingError from django.utils.functional import cached_property from .models import PostGISGeometryColumns, PostGISSpatialRefSys from .pgraster import get_pgraster_srid class PostGISOperator(SpatialOperator): def __init__(self, geography=False, **kwargs): self.geography = geography super(PostGISOperator, self).__init__(**kwargs) def as_sql(self, connection, lookup, *args): if lookup.lhs.output_field.geography and not self.geography: raise ValueError('PostGIS geography does not support the "%s" ' 'function/operator.' % (self.func or self.op,)) return super(PostGISOperator, self).as_sql(connection, lookup, *args) class PostGISDistanceOperator(PostGISOperator): sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s' def as_sql(self, connection, lookup, template_params, sql_params): if not lookup.lhs.output_field.geography and lookup.lhs.output_field.geodetic(connection): sql_template = self.sql_template if len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid': template_params.update({'op': self.op, 'func': 'ST_Distance_Spheroid'}) sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s) %(op)s %%s' else: template_params.update({'op': self.op, 'func': 'ST_Distance_Sphere'}) return sql_template % template_params, sql_params return super(PostGISDistanceOperator, self).as_sql(connection, lookup, template_params, sql_params) class PostGISOperations(BaseSpatialOperations, DatabaseOperations): name = 'postgis' postgis = True geography = True geom_func_prefix = 'ST_' version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)') Adapter = PostGISAdapter Adaptor = Adapter gis_operators = { 'bbcontains': PostGISOperator(op='~'), 'bboverlaps': PostGISOperator(op='&&', geography=True), 'contained': PostGISOperator(op='@'), 'contains': PostGISOperator(func='ST_Contains'), 'overlaps_left': PostGISOperator(op='&<'), 'overlaps_right': PostGISOperator(op='&>'), 'overlaps_below': PostGISOperator(op='&<|'), 'overlaps_above': PostGISOperator(op='|&>'), 'left': PostGISOperator(op='<<'), 'right': PostGISOperator(op='>>'), 'strictly_below': PostGISOperator(op='<<|'), 'strictly_above': PostGISOperator(op='|>>'), 'same_as': PostGISOperator(op='~='), 'exact': PostGISOperator(op='~='), 'contains_properly': PostGISOperator(func='ST_ContainsProperly'), 'coveredby': PostGISOperator(func='ST_CoveredBy', geography=True), 'covers': PostGISOperator(func='ST_Covers', geography=True), 'crosses': PostGISOperator(func='ST_Crosses'), 'disjoint': PostGISOperator(func='ST_Disjoint'), 'equals': PostGISOperator(func='ST_Equals'), 'intersects': PostGISOperator(func='ST_Intersects', geography=True), 'overlaps': PostGISOperator(func='ST_Overlaps'), 'relate': PostGISOperator(func='ST_Relate'), 'touches': PostGISOperator(func='ST_Touches'), 'within': PostGISOperator(func='ST_Within'), 'dwithin': PostGISOperator(func='ST_DWithin', geography=True), 'distance_gt': PostGISDistanceOperator(func='ST_Distance', op='>', 
geography=True), 'distance_gte': PostGISDistanceOperator(func='ST_Distance', op='>=', geography=True), 'distance_lt': PostGISDistanceOperator(func='ST_Distance', op='<', geography=True), 'distance_lte': PostGISDistanceOperator(func='ST_Distance', op='<=', geography=True), } unsupported_functions = set() function_names = { 'BoundingCircle': 'ST_MinimumBoundingCircle', 'MemSize': 'ST_Mem_Size', 'NumPoints': 'ST_NPoints', } def __init__(self, connection): super(PostGISOperations, self).__init__(connection) prefix = self.geom_func_prefix self.area = prefix + 'Area' self.bounding_circle = prefix + 'MinimumBoundingCircle' self.centroid = prefix + 'Centroid' self.collect = prefix + 'Collect' self.difference = prefix + 'Difference' self.distance = prefix + 'Distance' self.distance_sphere = prefix + 'distance_sphere' self.distance_spheroid = prefix + 'distance_spheroid' self.envelope = prefix + 'Envelope' self.extent = prefix + 'Extent' self.extent3d = prefix + '3DExtent' self.force_rhr = prefix + 'ForceRHR' self.geohash = prefix + 'GeoHash' self.geojson = prefix + 'AsGeoJson' self.gml = prefix + 'AsGML' self.intersection = prefix + 'Intersection' self.kml = prefix + 'AsKML' self.length = prefix + 'Length' self.length3d = prefix + '3DLength' self.length_spheroid = prefix + 'length_spheroid' self.makeline = prefix + 'MakeLine' self.mem_size = prefix + 'mem_size' self.num_geom = prefix + 'NumGeometries' self.num_points = prefix + 'npoints' self.perimeter = prefix + 'Perimeter' self.perimeter3d = prefix + '3DPerimeter' self.point_on_surface = prefix + 'PointOnSurface' self.polygonize = prefix + 'Polygonize' self.reverse = prefix + 'Reverse' self.scale = prefix + 'Scale' self.snap_to_grid = prefix + 'SnapToGrid' self.svg = prefix + 'AsSVG' self.sym_difference = prefix + 'SymDifference' self.transform = prefix + 'Transform' self.translate = prefix + 'Translate' self.union = prefix + 'Union' self.unionagg = prefix + 'Union' @cached_property
Apache License 2.0
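As `spatial_version` above shows, GeoDjango first checks `settings.POSTGIS_VERSION` and only falls back to querying `postgis_lib_version()` when that setting is absent. A hedged settings sketch that skips the database probe; the version tuple below is a placeholder and should match the PostGIS actually installed.

```python
# settings.py (excerpt) -- pinning the PostGIS version avoids the
# "SELECT postgis_lib_version()" round-trip made by spatial_version().
POSTGIS_VERSION = (2, 1, 8)  # placeholder (major, minor, micro)
```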
subutux/rmapy
rmapy/document.py
from_request_stream
python
def from_request_stream(_id: str, stream: Response) -> ZipDocument:
    tmp = BytesIO()
    for chunk in stream.iter_content(chunk_size=8192):
        tmp.write(chunk)
    zd = ZipDocument(_id=_id)
    zd.load(tmp)
    return zd
Return a ZipDocument from a request stream containing a zipfile.

This is used with the BlobGETUrl from a :class:`rmapy.document.Document`.

Args:
    _id: The object ID this zipfile represents.
    stream: a stream containing the zipfile.

Returns:
    the object of the downloaded zipfile.
https://github.com/subutux/rmapy/blob/8916261afd9452751dd822aab0c8a3325d02624a/rmapy/document.py#L410-L427
import os from io import BytesIO from zipfile import ZipFile, ZIP_DEFLATED import shutil from uuid import uuid4 import json from typing import TypeVar, List, Tuple from logging import getLogger from requests import Response from .meta import Meta log = getLogger("rmapy") BytesOrString = TypeVar("BytesOrString", BytesIO, str) class RmPage(object): def __init__(self, page, metadata=None, order=0, thumbnail=None, _id=None): self.page = page if metadata: self.metadata = metadata else: self.metadata = {"layers": [{"name": "Layer 1"}]} self.order = order if thumbnail: self.thumbnail = thumbnail if _id: self.ID = _id else: self.ID = str(uuid4()) def __str__(self) -> str: return f"<rmapy.document.RmPage {self.order} for {self.ID}>" def __repr__(self) -> str: return self.__str__() class Highlight(object): def __init__(self, page_id: str, highlight_data: str): self.page_id = page_id self.highlight_data = json.loads(highlight_data) def __str__(self) -> str: return f"<rmapy.document.Highlight {self.page_id}>" def __repr__(self) -> str: return self.__str__() class Document(Meta): def __init__(self, **kwargs): super(Document, self).__init__(**kwargs) self.Type = "DocumentType" def __str__(self): return f"<rmapy.document.Document {self.ID}>" def __repr__(self): return self.__str__() class ZipDocument(object): def __init__(self, _id=None, doc=None, file=None): self.content = { "extraMetadata": { }, "lastOpenedPage": 0, "lineHeight": -1, "margins": 180, "pageCount": 0, "textScale": 1, "transform": { } } self.metadata = { "deleted": False, "lastModified": "1568368808000", "metadatamodified": False, "modified": False, "parent": "", "pinned": False, "synced": True, "type": "DocumentType", "version": 1, "VissibleName": "New Document" } self.pagedata = "b''" self.zipfile = BytesIO() self.pdf = None self.epub = None self.rm: List[RmPage] = [] self.ID = None self.highlights: List[Highlight] = [] if not _id: _id = str(uuid4()) self.ID = _id if doc: ext = doc[-4:] if ext.endswith("pdf"): self.content["fileType"] = "pdf" self.pdf = BytesIO() with open(doc, 'rb') as fb: self.pdf.write(fb.read()) self.pdf.seek(0) if ext.endswith("epub"): self.content["fileType"] = "epub" self.epub = BytesIO() with open(doc, 'rb') as fb: self.epub.write(fb.read()) self.epub.seek(0) elif ext.endswith("rm"): self.content["fileType"] = "notebook" with open(doc, 'rb') as fb: self.rm.append(RmPage(page=BytesIO(fb.read()))) name = os.path.splitext(os.path.basename(doc))[0] self.metadata["VissibleName"] = name if file: self.load(file) def __str__(self) -> str: return f"<rmapy.document.ZipDocument {self.ID}>" def __repr__(self) -> str: return self.__str__() def create_request(self) -> Tuple[BytesIO, dict]: return self.zipfile, { "ID": self.ID, "Type": "DocumentType", "Version": self.metadata["version"] } def dump(self, file: BytesOrString) -> None: with ZipFile(file, "w", ZIP_DEFLATED) as zf: zf.writestr(f"{self.ID}.content", json.dumps(self.content)) zf.writestr(f"{self.ID}.pagedata", self.pagedata) if self.pdf: zf.writestr(f"{self.ID}.pdf", self.pdf.read()) if self.epub: zf.writestr(f"{self.ID}.epub", self.epub.read()) for highlight in self.highlights: zf.writestr(f"{self.ID}.highlights/{highlight.page_id}.json", json.dumps(highlight.highlight_data)) for page in self.rm: zf.writestr(f"{self.ID}/{page.order}.rm", page.page.read()) zf.writestr(f"{self.ID}/{page.order}-metadata.json", json.dumps(page.metadata)) page.page.seek(0) try: zf.writestr(f"{self.ID}.thumbnails/{page.order}.jpg", page.thumbnail.read()) except AttributeError: 
log.debug(f"missing thumbnail during dump: {self.ID}: {page.order}") pass if isinstance(file, BytesIO): file.seek(0) def load(self, file: BytesOrString) -> None: self.zipfile = BytesIO() self.zipfile.seek(0) if isinstance(file, str): with open(file, 'rb') as f: shutil.copyfileobj(f, self.zipfile) elif isinstance(file, BytesIO): self.zipfile = file self.zipfile.seek(0) else: raise Exception("Unsupported file type.") with ZipFile(self.zipfile, 'r') as zf: with zf.open(f"{self.ID}.content", 'r') as content: self.content = json.load(content) try: with zf.open(f"{self.ID}.metadata", 'r') as metadata: self.metadata = json.load(metadata) except KeyError: pass try: with zf.open(f"{self.ID}.pagedata", 'r') as pagedata: self.pagedata = str(pagedata.read()) except KeyError: pass try: with zf.open(f"{self.ID}.pdf", 'r') as pdf: self.pdf = BytesIO(pdf.read()) except KeyError: pass try: with zf.open(f"{self.ID}.epub", 'r') as epub: self.epub = BytesIO(epub.read()) except KeyError: pass highlights = [x for x in zf.namelist() if x.startswith(f"{self.ID}.highlights/") and x.endswith('.json')] for highlight in highlights: with zf.open(highlight, 'r') as highlight_fp: page_id = highlight.replace(f"{self.ID}.highlights/", "").replace(".json", "") self.highlights.append(Highlight(page_id, highlight_fp.read())) pages = [x for x in zf.namelist() if x.startswith(f"{self.ID}/") and x.endswith('.rm')] for p in pages: page_number = int(p.replace(f"{self.ID}/", "") .replace(".rm", "")) with zf.open(p, 'r') as rm: page = BytesIO(rm.read()) page.seek(0) p_meta = p.replace(".rm", "-metadata.json") try: with zf.open(p_meta, 'r') as md: metadata = json.load(md) except KeyError: log.debug(f"missing metadata: {p_meta}") metadata = None thumbnail_name = p.replace(".rm", ".jpg") thumbnail_name = thumbnail_name.replace("/", ".thumbnails/") try: with zf.open(thumbnail_name, 'r') as tn: thumbnail = BytesIO(tn.read()) thumbnail.seek(0) except KeyError: log.debug(f"missing thumbnail: {thumbnail_name}") thumbnail = None self.rm.append(RmPage(page, metadata, page_number, thumbnail, self.ID)) self.zipfile.seek(0) def from_zip(_id: str, file: str) -> ZipDocument: return ZipDocument(_id, file=file)
MIT License
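A hedged usage sketch for `from_request_stream` above: it expects a streamed `requests.Response` for a zip blob plus the document ID. The URL and ID below are placeholders; in practice the blob URL comes from a `rmapy.document.Document`.

```python
import requests
from rmapy.document import from_request_stream

blob_url = "https://example.invalid/blob.zip"        # placeholder BlobGETUrl
doc_id = "00000000-0000-0000-0000-000000000000"      # placeholder document ID

response = requests.get(blob_url, stream=True)       # keep the body as a stream
zip_doc = from_request_stream(doc_id, response)      # chunks are copied into a BytesIO
print(zip_doc)                                       # <rmapy.document.ZipDocument ...>
```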
partho-maple/coding-interview-gym
leetcode.com/python/21_Merge_Two_Sorted_Lists.py
MyLinkedList.addAtTail
python
def addAtTail(self, val):
    self.addAtIndex(self.size, val)
:type val: int
:rtype: None
https://github.com/partho-maple/coding-interview-gym/blob/f11c78b6e42d1014296fc0f360aa6fc530600493/leetcode.com/python/21_Merge_Two_Sorted_Lists.py#L43-L48
class ListNode(object): def __init__(self, x, next=None): self.val = x self.next = next class MyLinkedList(object): INVALID = -1 def __init__(self): self.first = None self.size = 0 def get(self, index): node = self.getNode(index) return node.val if node else self.INVALID def getNode(self, index): if index >= self.size or index < 0: return None node = self.first while index > 0: node = node.next index -= 1 return node def addAtHead(self, val): self.addAtIndex(0, val)
MIT License
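`addAtTail` above simply delegates to `addAtIndex(self.size, val)`, so appending reuses the general insertion routine. A self-contained sketch of that delegation follows; `TinyLinkedList` and its `addAtIndex` are illustrative stand-ins, since the original file defines `addAtIndex` on `MyLinkedList`.

```python
# Standalone sketch: appending is insertion at position `size`.
class ListNode:
    def __init__(self, x, next=None):
        self.val = x
        self.next = next


class TinyLinkedList:  # hypothetical stand-in for MyLinkedList
    def __init__(self):
        self.first = None
        self.size = 0

    def addAtIndex(self, index, val):
        if index < 0 or index > self.size:
            return
        if index == 0:
            self.first = ListNode(val, self.first)
        else:
            prev = self.first
            for _ in range(index - 1):
                prev = prev.next
            prev.next = ListNode(val, prev.next)
        self.size += 1

    def addAtTail(self, val):
        # Append == insert at position `size`, exactly as in the row above.
        self.addAtIndex(self.size, val)


lst = TinyLinkedList()
lst.addAtTail(1)
lst.addAtTail(2)
print(lst.first.val, lst.first.next.val)  # 1 2
```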
python-openxml/python-docx
docx/oxml/text/font.py
CT_RPr._new_color
python
def _new_color(self):
    return parse_xml('<w:color %s w:val="000000"/>' % nsdecls('w'))
Override metaclass method to set `w:color/@val` to RGB black on create.
https://github.com/python-openxml/python-docx/blob/36cac78de080d412e9e50d56c2784e33655cad59/docx/oxml/text/font.py#L94-L99
from .. import parse_xml from ...enum.dml import MSO_THEME_COLOR from ...enum.text import WD_COLOR, WD_UNDERLINE from ..ns import nsdecls, qn from ..simpletypes import ( ST_HexColor, ST_HpsMeasure, ST_String, ST_VerticalAlignRun ) from ..xmlchemy import ( BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrOne ) class CT_Color(BaseOxmlElement): val = RequiredAttribute('w:val', ST_HexColor) themeColor = OptionalAttribute('w:themeColor', MSO_THEME_COLOR) class CT_Fonts(BaseOxmlElement): ascii = OptionalAttribute('w:ascii', ST_String) hAnsi = OptionalAttribute('w:hAnsi', ST_String) class CT_Highlight(BaseOxmlElement): val = RequiredAttribute('w:val', WD_COLOR) class CT_HpsMeasure(BaseOxmlElement): val = RequiredAttribute('w:val', ST_HpsMeasure) class CT_RPr(BaseOxmlElement): _tag_seq = ( 'w:rStyle', 'w:rFonts', 'w:b', 'w:bCs', 'w:i', 'w:iCs', 'w:caps', 'w:smallCaps', 'w:strike', 'w:dstrike', 'w:outline', 'w:shadow', 'w:emboss', 'w:imprint', 'w:noProof', 'w:snapToGrid', 'w:vanish', 'w:webHidden', 'w:color', 'w:spacing', 'w:w', 'w:kern', 'w:position', 'w:sz', 'w:szCs', 'w:highlight', 'w:u', 'w:effect', 'w:bdr', 'w:shd', 'w:fitText', 'w:vertAlign', 'w:rtl', 'w:cs', 'w:em', 'w:lang', 'w:eastAsianLayout', 'w:specVanish', 'w:oMath' ) rStyle = ZeroOrOne('w:rStyle', successors=_tag_seq[1:]) rFonts = ZeroOrOne('w:rFonts', successors=_tag_seq[2:]) b = ZeroOrOne('w:b', successors=_tag_seq[3:]) bCs = ZeroOrOne('w:bCs', successors=_tag_seq[4:]) i = ZeroOrOne('w:i', successors=_tag_seq[5:]) iCs = ZeroOrOne('w:iCs', successors=_tag_seq[6:]) caps = ZeroOrOne('w:caps', successors=_tag_seq[7:]) smallCaps = ZeroOrOne('w:smallCaps', successors=_tag_seq[8:]) strike = ZeroOrOne('w:strike', successors=_tag_seq[9:]) dstrike = ZeroOrOne('w:dstrike', successors=_tag_seq[10:]) outline = ZeroOrOne('w:outline', successors=_tag_seq[11:]) shadow = ZeroOrOne('w:shadow', successors=_tag_seq[12:]) emboss = ZeroOrOne('w:emboss', successors=_tag_seq[13:]) imprint = ZeroOrOne('w:imprint', successors=_tag_seq[14:]) noProof = ZeroOrOne('w:noProof', successors=_tag_seq[15:]) snapToGrid = ZeroOrOne('w:snapToGrid', successors=_tag_seq[16:]) vanish = ZeroOrOne('w:vanish', successors=_tag_seq[17:]) webHidden = ZeroOrOne('w:webHidden', successors=_tag_seq[18:]) color = ZeroOrOne('w:color', successors=_tag_seq[19:]) sz = ZeroOrOne('w:sz', successors=_tag_seq[24:]) highlight = ZeroOrOne('w:highlight', successors=_tag_seq[26:]) u = ZeroOrOne('w:u', successors=_tag_seq[27:]) vertAlign = ZeroOrOne('w:vertAlign', successors=_tag_seq[32:]) rtl = ZeroOrOne('w:rtl', successors=_tag_seq[33:]) cs = ZeroOrOne('w:cs', successors=_tag_seq[34:]) specVanish = ZeroOrOne('w:specVanish', successors=_tag_seq[38:]) oMath = ZeroOrOne('w:oMath', successors=_tag_seq[39:]) del _tag_seq
MIT License
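`_new_color` above makes a newly created `w:color` element default to black. Through python-docx's public API that element is created indirectly when a run's font color is assigned; a hedged sketch (the output file name is a placeholder):

```python
from docx import Document
from docx.shared import RGBColor

doc = Document()
run = doc.add_paragraph().add_run("hello")
# Assigning a color creates the run's <w:color> child on demand and sets w:val.
run.font.color.rgb = RGBColor(0x00, 0x00, 0x00)
doc.save("example.docx")  # placeholder output path
```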
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_2/api/support_api.py
SupportApi.api22_support_get_with_http_info
python
def api22_support_get_with_http_info(
    self,
    authorization=None,
    x_request_id=None,
    continuation_token=None,
    filter=None,
    limit=None,
    offset=None,
    sort=None,
    total_item_count=None,
    async_req=False,
    _return_http_data_only=False,
    _preload_content=True,
    _request_timeout=None,
):
    if sort is not None:
        if not isinstance(sort, list):
            sort = [sort]
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]
    if 'limit' in params and params['limit'] < 1:
        raise ValueError("Invalid value for parameter `limit` when calling `api22_support_get`, must be a value greater than or equal to `1`")
    if 'offset' in params and params['offset'] < 0:
        raise ValueError("Invalid value for parameter `offset` when calling `api22_support_get`, must be a value greater than or equal to `0`")
    collection_formats = {}
    path_params = {}
    query_params = []
    if 'continuation_token' in params:
        query_params.append(('continuation_token', params['continuation_token']))
    if 'filter' in params:
        query_params.append(('filter', params['filter']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'offset' in params:
        query_params.append(('offset', params['offset']))
    if 'sort' in params:
        query_params.append(('sort', params['sort']))
        collection_formats['sort'] = 'csv'
    if 'total_item_count' in params:
        query_params.append(('total_item_count', params['total_item_count']))
    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']
    form_params = []
    local_var_files = {}
    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])
    auth_settings = []
    return self.api_client.call_api(
        '/api/2.2/support', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SupportGetResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
List connection paths

Displays connection paths between the current array and each connected array.

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.api22_support_get_with_http_info(async_req=True)
>>> result = thread.get()

:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts.
:return: SupportGetResponse
         If the method is called asynchronously, returns the request thread.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_2/api/support_api.py#L29-L139
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class SupportApi(object): def __init__(self, api_client): self.api_client = api_client
BSD 2-Clause Simplified License
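A hedged call sketch for `api22_support_get_with_http_info` above. Constructing the underlying `api_client` is array-specific and omitted here; the token and sort values are placeholders, and the response handling follows the generic generated-client behavior rather than anything specific to this row.

```python
from pypureclient.flasharray.FA_2_2.api.support_api import SupportApi

# api_client: a configured pypureclient ApiClient for the target array (construction omitted).
support_api = SupportApi(api_client)
resp = support_api.api22_support_get_with_http_info(
    authorization="Bearer <JWT>",  # placeholder access token
    limit=5,                       # must be >= 1, per the validation above
    sort=["name"],                 # serialized as csv via collection_formats
)
```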
janasunrise/hypixelio
hypixelio/models/skyblock/active_auctions/active_auctions.py
SkyblockActiveAuction.__init__
python
def __init__(self, data: dict) -> None:
    self.PAGE_NUMBER = data["page"]
    self.TOTAL_PAGES = data["totalPages"]
    self.TOTAL_AUCTION = data["totalAuctions"]
    self.AUCTIONS = [SkyblockAuction(auction) for auction in data["auctions"]]
Parameters
----------
data: dict
    The data from the Hypixel API endpoint.
https://github.com/janasunrise/hypixelio/blob/ec34fb664e7f3bc114624195a48f3e374d6e53c5/hypixelio/models/skyblock/active_auctions/active_auctions.py#L7-L17
import typing as t from hypixelio.models.skyblock.auction import SkyblockAuction class SkyblockActiveAuction:
MIT License
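A hedged construction sketch for `SkyblockActiveAuction` above, using a hand-written payload shaped like the keys the constructor reads (`page`, `totalPages`, `totalAuctions`, `auctions`); real payloads come from the Hypixel skyblock auctions endpoint, and the empty auction list just keeps the sketch simple.

```python
from hypixelio.models.skyblock.active_auctions.active_auctions import (
    SkyblockActiveAuction,
)

payload = {  # minimal fake response with the fields read by __init__
    "page": 0,
    "totalPages": 1,
    "totalAuctions": 0,
    "auctions": [],
}
active = SkyblockActiveAuction(payload)
print(active.PAGE_NUMBER, active.TOTAL_PAGES, active.TOTAL_AUCTION)  # 0 1 0
```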
myriadrf/pylms7002soapy
pyLMS7002Soapy/LMS7002_SX.py
LMS7002_SX.CMPLO_CTRL
python
def CMPLO_CTRL(self, value):
    if self.chip.chipID == self.chip.chipIDMR3:
        if value not in [0, 1]:
            raise ValueError("Value must be [0,1]")
        self._writeReg('PFDCP', 'CMPLO_CTRL', value)
    else:
        raise ValueError("Bitfield CMPLO_CTRL is not supported on chip version " + str(self.chip.chipID))
Set the value of CMPLO_CTRL
https://github.com/myriadrf/pylms7002soapy/blob/4f828eb9282c302dc6b187d91df5e77c8a6f2d61/pyLMS7002Soapy/LMS7002_SX.py#L722-L731
from pyLMS7002Soapy.LMS7002_base import LMS7002_base from time import sleep from math import floor class LMS7002_SX(LMS7002_base): __slots__ = [] def __init__(self, chip, Channel): if Channel not in ['R', 'T']: raise ValueError("Parameter Channel must be 'R' or 'T'") self.chip = chip self.channel = Channel self.prefix = "SXT_SXR_" @property def EN_DIR(self): prefix = self.prefix self.prefix = "" en_dir = self._readReg('TRX_EN_DIR', 'EN_DIR') self.prefix = prefix return en_dir @EN_DIR.setter def EN_DIR(self, value): if value not in [0, 1]: raise ValueError("Value must be [0,1]") prefix = self.prefix self.prefix = "" self._writeReg('TRX_EN_DIR', 'EN_DIR', value) self.prefix = prefix @property def RESET_N(self): return self._readReg('CFG0', 'RESET_N') @RESET_N.setter def RESET_N(self, value): if value not in [0, 1, 'RESET', 'NORMAL']: raise ValueError("Value must be [0,1,'RESET', 'NORMAL']") if value == 0 or value == 'RESET': val = 0 else: val = 1 self._writeReg('CFG0', 'RESET_N', val) @property def SPDUP_VCO(self): return self._readReg('CFG0', 'SPDUP_VCO') @SPDUP_VCO.setter def SPDUP_VCO(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'SPDUP_VCO', val) @property def BYPLDO_VCO(self): return self._readReg('CFG0', 'BYPLDO_VCO') @BYPLDO_VCO.setter def BYPLDO_VCO(self, value): if value not in [0, 1, 'BYP', 'ACT']: raise ValueError("Value must be [0,1,'BYP', 'ACT']") if value == 0 or value == 'ACT': val = 0 else: val = 1 self._writeReg('CFG0', 'BYPLDO_VCO', val) @property def EN_COARSEPLL(self): return self._readReg('CFG0', 'EN_COARSEPLL') @EN_COARSEPLL.setter def EN_COARSEPLL(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'EN_COARSEPLL', val) @property def CURLIM_VCO(self): return self._readReg('CFG0', 'CURLIM_VCO') @CURLIM_VCO.setter def CURLIM_VCO(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'CURLIM_VCO', val) @property def EN_DIV2_DIVPROG(self): return self._readReg('CFG0', 'EN_DIV2_DIVPROG') @EN_DIV2_DIVPROG.setter def EN_DIV2_DIVPROG(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'EN_DIV2_DIVPROG', val) @property def EN_INTONLY_SDM(self): return self._readReg('CFG0', 'EN_INTONLY_SDM') @EN_INTONLY_SDM.setter def EN_INTONLY_SDM(self, value): if value not in [0, 1, 'FRACN', 'INTN']: raise ValueError("Value must be [0,1,'FRACN', 'INTN']") if value == 0 or value == 'FRACN': val = 0 else: val = 1 self._writeReg('CFG0', 'EN_INTONLY_SDM', val) @property def EN_SDM_CLK(self): return self._readReg('CFG0', 'EN_SDM_CLK') @EN_SDM_CLK.setter def EN_SDM_CLK(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'EN_SDM_CLK', val) @property def PD_FBDIV(self): return self._readReg('CFG0', 'PD_FBDIV') @PD_FBDIV.setter def PD_FBDIV(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'ON': val = 0 else: val = 1 self._writeReg('CFG0', 'PD_FBDIV', val) @property def 
PD_LOCH_T2RBUF(self): return self._readReg('CFG0', 'PD_LOCH_T2RBUF') @PD_LOCH_T2RBUF.setter def PD_LOCH_T2RBUF(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'ON': val = 0 else: val = 1 self._writeReg('CFG0', 'PD_LOCH_T2RBUF', val) @property def PD_CP(self): return self._readReg('CFG0', 'PD_CP') @PD_CP.setter def PD_CP(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'ON': val = 0 else: val = 1 self._writeReg('CFG0', 'PD_CP', val) @property def PD_FDIV(self): return self._readReg('CFG0', 'PD_FDIV') @PD_FDIV.setter def PD_FDIV(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'ON': val = 0 else: val = 1 self._writeReg('CFG0', 'PD_FDIV', val) @property def PD_SDM(self): return self._readReg('CFG0', 'PD_SDM') @PD_SDM.setter def PD_SDM(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'ON': val = 0 else: val = 1 self._writeReg('CFG0', 'PD_SDM', val) @property def PD_VCO_COMP(self): return self._readReg('CFG0', 'PD_VCO_COMP') @PD_VCO_COMP.setter def PD_VCO_COMP(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'ON': val = 0 else: val = 1 self._writeReg('CFG0', 'PD_VCO_COMP', val) @property def PD_VCO(self): return self._readReg('CFG0', 'PD_VCO') @PD_VCO.setter def PD_VCO(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'ON': val = 0 else: val = 1 self._writeReg('CFG0', 'PD_VCO', val) @property def EN_G(self): return self._readReg('CFG0', 'EN_G') @EN_G.setter def EN_G(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'EN_G', val) @property def FRAC_SDM(self): lsb = self._readReg('FRACL', 'FRAC_SDM_L<15:0>') msb = self._readReg('FRACH', 'FRAC_SDM_H<3:0>') return (msb << 16) + lsb @FRAC_SDM.setter def FRAC_SDM(self, value): if not (0 <= value <= 2 ** 20 - 1): raise ValueError("Value must be [0,2*20-1]") lsb = value & 0xFFFF msb = (value >> 16) & 0xF self._writeReg('FRACL', 'FRAC_SDM_L<15:0>', lsb) self._writeReg('FRACH', 'FRAC_SDM_H<3:0>', msb) @property def INT_SDM(self): return self._readReg('FRACH', 'INT_SDM<9:0>') @INT_SDM.setter def INT_SDM(self, value): if not (0 <= value <= 1023): raise ValueError("Value must be [0..1023]") self._writeReg('FRACH', 'INT_SDM<9:0>', value) @property def PW_DIV2_LOCH(self): return self._readReg('CFG1', 'PW_DIV2_LOCH<2:0>') @PW_DIV2_LOCH.setter def PW_DIV2_LOCH(self, value): if not (0 <= value <= 7): raise ValueError("Value must be [0..7]") self._writeReg('CFG1', 'PW_DIV2_LOCH<2:0>', value) @property def PW_DIV4_LOCH(self): return self._readReg('CFG1', 'PW_DIV4_LOCH<2:0>') @PW_DIV4_LOCH.setter def PW_DIV4_LOCH(self, value): if not (0 <= value <= 7): raise ValueError("Value must be [0..7]") self._writeReg('CFG1', 'PW_DIV4_LOCH<2:0>', value) @property def DIV_LOCH(self): return self._readReg('CFG1', 'DIV_LOCH<2:0>') @DIV_LOCH.setter def DIV_LOCH(self, value): if not (0 <= value <= 7): raise ValueError("Value must be [0..7]") self._writeReg('CFG1', 'DIV_LOCH<2:0>', value) @property def TST_SX(self): return self._readReg('CFG1', 'TST_SX<2:0>') @TST_SX.setter def 
TST_SX(self, value): if not (0 <= value <= 7): raise ValueError("Value must be [0..7]") self._writeReg('CFG1', 'TST_SX<2:0>', value) @property def SEL_SDMCLK(self): return self._readReg('CFG1', 'SEL_SDMCLK') @SEL_SDMCLK.setter def SEL_SDMCLK(self, value): if value not in [0, 1, 'CLK_DIV', 'CLK_REF']: raise ValueError("Value must be [0,1,'CLK_DIV', 'CLK_REF']") if value == 0 or value == 'CLK_DIV': val = 0 else: val = 1 self._writeReg('CFG1', 'SEL_SDMCLK', val) @property def SX_DITHER_EN(self): return self._readReg('CFG1', 'SX_DITHER_EN') @SX_DITHER_EN.setter def SX_DITHER_EN(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG1', 'SX_DITHER_EN', val) @property def REV_SDMCLK(self): return self._readReg('CFG1', 'REV_SDMCLK') @REV_SDMCLK.setter def REV_SDMCLK(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG1', 'REV_SDMCLK', val) @property def VDIV_VCO(self): return self._readReg('VCO_BIAS', 'VDIV_VCO<7:0>') @VDIV_VCO.setter def VDIV_VCO(self, value): if not (0 <= value <= 255): raise ValueError("Value must be [0..255]") self._writeReg('VCO_BIAS', 'VDIV_VCO<7:0>', value) @property def ICT_VCO(self): return self._readReg('VCO_BIAS', 'ICT_VCO<7:0>') @ICT_VCO.setter def ICT_VCO(self, value): if not (0 <= value <= 255): raise ValueError("Value must be [0..255]") self._writeReg('VCO_BIAS', 'ICT_VCO<7:0>', value) @property def RSEL_LDO_VCO(self): return self._readReg('VCO_CFG', 'RSEL_LDO_VCO<4:0>') @RSEL_LDO_VCO.setter def RSEL_LDO_VCO(self, value): if not (0 <= value <= 31): raise ValueError("Value must be [0..31]") self._writeReg('VCO_CFG', 'RSEL_LDO_VCO<4:0>', value) @property def CSW_VCO(self): return self._readReg('VCO_CFG', 'CSW_VCO<7:0>') @CSW_VCO.setter def CSW_VCO(self, value): if not (0 <= value <= 255): raise ValueError("Value must be [0..255]") self._writeReg('VCO_CFG', 'CSW_VCO<7:0>', value) @property def SEL_VCO(self): return self._readReg('VCO_CFG', 'SEL_VCO<1:0>') @SEL_VCO.setter def SEL_VCO(self, value): if value not in [0, 1, 2, 'VCOL', 'VCOM', 'VCOH']: raise ValueError("Value must be [0,1,2, 'VCOL', 'VCOM', 'VCOH']") if value == 0 or value == 'VCOL': val = 0 elif value == 1 or value == 'VCOM': val = 1 else: val = 2 self._writeReg('VCO_CFG', 'SEL_VCO<1:0>', val) @property def COARSE_START(self): return self._readReg('VCO_CFG', 'COARSE_START') @COARSE_START.setter def COARSE_START(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('VCO_CFG', 'COARSE_START', val) @property def RZ_CTRL(self): if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('PFDCP', 'RZ_CTRL<1:0>') else: raise ValueError("Bitfield RZ_CTRL<1:0> is not supported on chip version " + str(self.chip.chipID)) @RZ_CTRL.setter def RZ_CTRL(self, value): if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1, 2, 3]: raise ValueError("Value must be [0,1, 2, 3]") self._writeReg('PFDCP', 'RZ_CTRL<1:0>', value) else: raise ValueError("Bitfield RZ_CTRL<1:0> is not supported on chip version " + str(self.chip.chipID)) @property def CMPLO_CTRL(self): if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('PFDCP', 'CMPLO_CTRL') else: raise ValueError("Bitfield CMPLO_CTRL is not supported on chip version " + str(self.chip.chipID)) 
@CMPLO_CTRL.setter
Apache License 2.0
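A hedged sketch for the `CMPLO_CTRL` setter above. It assumes `chip` is an already-initialized LMS7002 handle (obtaining one is board-specific and omitted); the `LMS7002_SX(chip, 'T')` call follows the constructor signature shown in this row's context.

```python
from pyLMS7002Soapy.LMS7002_SX import LMS7002_SX

sx_t = LMS7002_SX(chip, 'T')   # TX-side SX block; `chip` is assumed to exist already
try:
    sx_t.CMPLO_CTRL = 1        # setter writes bitfield PFDCP.CMPLO_CTRL on MR3 silicon
except ValueError as err:
    # Raised when the chip revision does not expose CMPLO_CTRL.
    print(err)
```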
google-research/language
language/search_agents/muzero/env.py
NQEnv._final_reward
python
def _final_reward(self) -> float:
    if common_flags.INACTION_PENALTY.value < 0:
        if not len(self.state.history) > 1:
            return common_flags.INACTION_PENALTY.value
    current_documents_list = self.get_final_document_list()
    return self._compute_reward(
        current_documents=current_documents_list) - self._compute_reward(
            current_documents=self.state.history[0].documents)
Computes the final step reward, r_T = S(d_T | q) - S(d_0 | q).

Returns:
    The final reward.
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/search_agents/muzero/env.py#L783-L798
import collections import functools import pickle import random import string from typing import Any, Collection, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Union from absl import logging import dataclasses import grpc import gym from language.search_agents import environment_pb2 from language.search_agents.muzero import bert_state_lib from language.search_agents.muzero import common_flags from language.search_agents.muzero import grammar_lib from language.search_agents.muzero import server from language.search_agents.muzero import state_tree from language.search_agents.muzero import types from language.search_agents.muzero import utils import numpy as np import pygtrie from seed_rl.common import common_flags as seed_common_flags import tensorflow as tf import transformers from muzero import core as mzcore from muzero import learner_flags from official.nlp.bert import configs @dataclasses.dataclass class ValidWords: tfidf_tokens: Dict[int, int] word_piece_actions: Sequence[int] words: List[str] full_words: Set[str] @dataclasses.dataclass class ValidWordsByType: all_valid_words: ValidWords question_valid_words: ValidWords answer_valid_words: ValidWords document_valid_words: ValidWords title_valid_words: ValidWords diff_valid_words: ValidWords intersect_valid_words: ValidWords @dataclasses.dataclass class Observations: observation: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] valid_words: ValidWordsByType def new_valid_words() -> ValidWords: return ValidWords( tfidf_tokens=collections.defaultdict(int), word_piece_actions=[], words=[], full_words=set()) def get_descriptor() -> mzcore.EnvironmentDescriptor: sequence_length = common_flags.BERT_SEQUENCE_LENGTH.value bert_config = configs.BertConfig.from_json_file( common_flags.BERT_CONFIG.value) grammar_config = grammar_lib.grammar_config_from_flags() max_len_type_vocab = max(map(len, bert_state_lib.TYPE_VOCABS.values())) tokenizer = bert_state_lib.get_tokenizer() grammar = grammar_lib.construct_grammar( grammar_config=grammar_config, vocab=list(tokenizer.get_vocab().keys())) observation_space = gym.spaces.Tuple([ gym.spaces.Box(0, bert_config.vocab_size, (sequence_length,), np.int32), gym.spaces.Box(0, max_len_type_vocab, (sequence_length, len(bert_state_lib.TYPE_VOCABS)), np.int32), gym.spaces.Box(-np.inf, np.inf, (sequence_length, len(bert_state_lib.FLOAT_NAMES)), np.float32), gym.spaces.Box(0, len(grammar.productions()) + 1, (common_flags.N_ACTIONS_ENCODING.value,), np.int32), ]) max_episode_length = common_flags.MAX_NUM_ACTIONS.value min_possible_score, max_possible_score = { 'curiosity+dcg': (-1, 1), }.get(common_flags.REWARD.value, (-1, 1)) min_possible_cumulative_score, max_possible_cumulative_score = { 'curiosity+dcg': (-2, 2), }.get(common_flags.REWARD.value, (min_possible_score, max_possible_score)) logging.info('Max episode length: %d; Score range: [%.2f, %.2f]', max_episode_length, min_possible_score, max_possible_score) learner_stats = ( ('ndcg_score', tf.float32), ('ndcg_score_improvement', tf.float32), ('em_at_1', tf.float32), ('em_at_1_improvement', tf.float32), (f'em_at_{int(common_flags.K.value)}', tf.float32), (f'em_at_{int(common_flags.K.value)}_improvement', tf.float32), (f'recall_at_{int(common_flags.K.value)}', tf.float32), (f'recall_at_{int(common_flags.K.value)}_improvement', tf.float32), ('recall_at_1', tf.float32), ('recall_at_1_improvement', tf.float32), ('documents_explored', tf.float32), ) return mzcore.EnvironmentDescriptor( observation_space=observation_space, 
action_space=gym.spaces.Discrete(len(grammar.productions())), reward_range=mzcore.Range(min_possible_score, max_possible_score), value_range=mzcore.Range(min_possible_cumulative_score, max_possible_cumulative_score), pretraining_space=gym.spaces.Tuple([ observation_space, gym.spaces.Box(0, len(grammar.productions()), (), np.int32), gym.spaces.Box(0., 1., (), np.float32), gym.spaces.Box(0., 1., (), np.float32), gym.spaces.Box(0., 1., (), np.float32), ] * common_flags.PRETRAINING_NUM_UNROLL_STEPS.value), extras={ 'bert_config': bert_config, 'sequence_length': sequence_length, 'float_names': bert_state_lib.FLOAT_NAMES, 'type_vocabs': bert_state_lib.TYPE_VOCABS, 'num_float_features': len(bert_state_lib.FLOAT_NAMES), 'type_vocab_sizes': [ len(v) for v in bert_state_lib.TYPE_VOCABS.values() ], 'grammar': grammar, 'max_episode_length': max_episode_length + 5, 'learner_stats': learner_stats, 'bert_init_ckpt': common_flags.BERT_INIT_CKPT.value if learner_flags.INIT_CHECKPOINT.value is None else None, 'action_encoder_hidden_size': common_flags.ACTION_ENCODER_HIDDEN_SIZE.value, 'tokenizer': tokenizer, 'grammar_config': grammar_config, 'pretraining_num_unroll_steps': common_flags.PRETRAINING_NUM_UNROLL_STEPS.value, }) def to_action_tuple( word: str, grammar: state_tree.NQCFG, tokenizer: transformers.BertTokenizer, valid_actions: Optional[Collection[int]] = None) -> Tuple[int, ...]: if (common_flags.EXCLUDE_PUNCTUATION_FROM_TRIE.value == 1 and word in string.punctuation): return () tokens = tokenizer.tokenize(word) actions = [] for i, token in enumerate(tokens): if i > 0 and not token.startswith('##'): break token = state_tree.NQStateTree.clean_escape_characters(token) if token not in grammar.terminal_to_action: return () token_action = grammar.terminal_to_action[token] if valid_actions is not None: if token_action not in valid_actions: return () actions.append(token_action) return tuple(actions) class NQEnv(gym.Env): def __init__(self, nq_server: server.NQServer, state: Optional[types.EnvState] = None, random_state: Optional[np.random.RandomState] = None, training: bool = True, stop_after_seeing_new_results: bool = False): super().__init__() self.nq_server = nq_server self.training = training self.first_time = True self.stop_after_seeing_new_results = stop_after_seeing_new_results self.descriptor = get_descriptor() self.grammar = self.descriptor.extras['grammar'] self.tokenizer = self.descriptor.extras['tokenizer'] self.action_space = len(self.grammar.productions()) self.idf_lookup = utils.IDFLookup.get_instance( path=common_flags.IDF_LOOKUP_PATH.value) trie_start_time = tf.timestamp() if common_flags.GLOBAL_TRIE_PATH.value is None: self.global_trie = pygtrie.Trie.fromkeys((x for x in map( functools.partial( to_action_tuple, grammar=self.grammar, tokenizer=self.tokenizer), self.idf_lookup.lookup) if x)) self._logging_info('Built trie of size %s in %s s', len(self.global_trie), tf.timestamp() - trie_start_time) else: with tf.io.gfile.GFile(common_flags.GLOBAL_TRIE_PATH.value, 'rb') as trie_f: self.global_trie = pickle.load(trie_f) self._logging_info('Restored trie of size %s in %s s', len(self.global_trie), tf.timestamp() - trie_start_time) self.training_steps = 0 self.known_word_tries = None self.valid_word_actions = None self.use_rf_restrict = False self.state = state if state and state.tree is None: self.state.tree = state_tree.NQStateTree(grammar=self.grammar) self.bert_config: configs.BertConfig = self.descriptor.extras['bert_config'] self.sequence_length: int = 
self.descriptor.extras['sequence_length'] self.action_history = [] self.n_episode = 0 self._rand = np.random.RandomState() if random_state: self._rand.set_state(random_state) def _logging_info(self, prefix, *args, **kwargs): if self.training: prefix = 'TRAIN: ' + prefix else: prefix = 'TEST : ' + prefix logging.info(prefix, *args, **kwargs) def _get_query(self, index: Optional[int] = None): try: query = self.nq_server.get_query( index=index, dataset_type='TRAIN' if self.training else 'DEV') except grpc.RpcError as rpc_exception: raise mzcore.RLEnvironmentError from rpc_exception return query def _get_env_output( self, query: str, original_query: environment_pb2.GetQueryResponse, documents: Optional[Sequence[environment_pb2.Document]] = None ) -> types.HistoryEntry: if documents is None: try: response = self.nq_server.get_documents( query, original_query=original_query.query, num_documents=common_flags.NUM_DOCUMENTS_TO_RETRIEVE.value, num_ir_documents=common_flags.NUM_IR_DOCUMENTS_TO_RETRIEVE.value) except grpc.RpcError as rpc_exception: raise mzcore.RLEnvironmentError from rpc_exception docs = response.documents else: docs = documents ranked_docs = sorted( docs, key=lambda doc: doc.answer.mr_score, reverse=True) entry = types.HistoryEntry( query=query, original_query=original_query, documents=ranked_docs) return entry def _obs(self) -> Observations: def special_convert_tokens_to_ids(*args, **kwargs): ids = self.tokenizer.convert_tokens_to_ids(*args, **kwargs) original_vocab_size = self.tokenizer.vocab_size return [ id_ if id_ < original_vocab_size else (id_ % original_vocab_size) + 1 for id_ in ids ] bert_state = make_bert_state( environment_state=self.state, tokenizer=self.tokenizer, idf_lookup=self.idf_lookup) token_ids, type_ids, float_values = utils.ObsFragment.combine_and_expand( fragments=bert_state, length=self.sequence_length, type_vocabs=self.descriptor.extras['type_vocabs'], float_names=self.descriptor.extras['float_names'], tokens_to_id_fn=special_convert_tokens_to_ids, ) def _add_token(token: str, tfidf_score: float, valid_words: ValidWords): valid_words.words.append(token) target_tok_id = self.grammar.terminal_to_action[ state_tree.NQStateTree.clean_escape_characters(token)] valid_words.tfidf_tokens[target_tok_id] += tfidf_score def _convert_word_tokens_to_full_words(valid_words_list: List[ValidWords]): for valid_words in valid_words_list: for full_word in ' '.join(valid_words.words).replace(' ##', '').split(): valid_words.full_words.add(full_word) def _convert_tokens_to_actions(valid_words_list: List[ValidWords]): for valid_words in valid_words_list: tfidf_tokens = valid_words.tfidf_tokens valid_words.word_piece_actions = sorted( tfidf_tokens, key=lambda x: tfidf_tokens[x], reverse=True) def _discard_full_word(full_word: str, valid_words_list: List[ValidWords]): for valid_words in valid_words_list: valid_words.full_words.discard(full_word) def _add_tokens(valid_words: ValidWords): for full_word in valid_words.full_words: tfidf_score = self.idf_lookup[full_word] tokens = self.tokenizer.tokenize(full_word) for token in tokens: _add_token(token, tfidf_score, valid_words) def _extract_vocabulary(bert_state, token_ids): all_valid_words = new_valid_words() question_valid_words = new_valid_words() answer_valid_words = new_valid_words() document_valid_words = new_valid_words() title_valid_words = new_valid_words() for obs in bert_state: for (token, label, tfidf_score) in zip(obs.token_list(), obs.type_values['state_part'], obs.float_values['idf_score']): if token == '[UNK]': continue 
source_tok_id = self.tokenizer.convert_tokens_to_ids(token) if source_tok_id in token_ids and tfidf_score > 0: if label == 'history_context': _add_token(token, tfidf_score, all_valid_words) _add_token(token, tfidf_score, document_valid_words) elif label == 'history_answer': _add_token(token, tfidf_score, answer_valid_words) elif label == 'history_title': _add_token(token, tfidf_score, all_valid_words) _add_token(token, tfidf_score, title_valid_words) elif label == 'original_query': _add_token(token, tfidf_score, question_valid_words) return ValidWordsByType( all_valid_words=all_valid_words, question_valid_words=question_valid_words, answer_valid_words=answer_valid_words, document_valid_words=document_valid_words, title_valid_words=title_valid_words, diff_valid_words=new_valid_words(), intersect_valid_words=new_valid_words()) valid_words_by_type = _extract_vocabulary(bert_state, token_ids) valid_words_list = [ valid_words_by_type.all_valid_words, valid_words_by_type.question_valid_words, valid_words_by_type.answer_valid_words, valid_words_by_type.document_valid_words, valid_words_by_type.title_valid_words ] target_valid_words_by_type = None if self.use_rf_restrict and self.state.target_documents: target_bert_state = make_bert_state( environment_state=self.state, tokenizer=self.tokenizer, idf_lookup=self.idf_lookup, use_target_documents=True) target_token_ids, _, _ = utils.ObsFragment.combine_and_expand( fragments=target_bert_state, length=self.sequence_length, type_vocabs=self.descriptor.extras['type_vocabs'], float_names=self.descriptor.extras['float_names'], tokens_to_id_fn=special_convert_tokens_to_ids, ) target_valid_words_by_type = _extract_vocabulary(target_bert_state, target_token_ids) valid_words_list.append(target_valid_words_by_type.all_valid_words) _convert_word_tokens_to_full_words(valid_words_list) for history_entry in self.state.history: query_substr = history_entry.query[len(self.state.original_query.query):] _, adjustments = state_tree.from_lucene_str(query_substr) if adjustments: for adjustment, _, _ in adjustments: _discard_full_word(adjustment.term, valid_words_list) for word in query_substr.split(): _discard_full_word(word, valid_words_list) if target_valid_words_by_type: v_t = target_valid_words_by_type.all_valid_words.full_words v_current = valid_words_by_type.all_valid_words.full_words valid_words_by_type.diff_valid_words.full_words = v_current - v_t valid_words_by_type.intersect_valid_words.full_words = ( v_current.intersection(v_t)) _add_tokens(valid_words_by_type.diff_valid_words) _add_tokens(valid_words_by_type.intersect_valid_words) valid_words_list.append(valid_words_by_type.diff_valid_words) valid_words_list.append(valid_words_by_type.intersect_valid_words) _convert_tokens_to_actions(valid_words_list) token_ids = np.array(token_ids, np.int32) type_ids = np.array(type_ids, np.int32).T float_values = np.array(float_values, np.float32).T return Observations( observation=( token_ids, type_ids, float_values, np.array( self.action_history[-common_flags.N_ACTIONS_ENCODING.value:] + [self.action_space + 1] * (common_flags.N_ACTIONS_ENCODING.value - len(self.action_history)), np.int32)), valid_words=valid_words_by_type, ) def seed(self, seed=None) -> None: self._rand = np.random.RandomState(seed) def get_final_document_list(self): if common_flags.USE_AGGREGATED_DOCUMENTS.value == 1: if self.stop_after_seeing_new_results: return get_aggregated_documents(self.state.history[:-1]) else: return get_aggregated_documents(self.state.history) else: if 
self.stop_after_seeing_new_results and len(self.state.history) > 1: return self.state.history[-2].documents else: return self.state.history[-1].documents def special_episode_statistics_learner(self, return_as_dict=False): stats = {} documents_list = self.get_final_document_list() original_documents_list = self.state.history[0].documents stats['ndcg_score'] = self.state.score( 'ndcg', documents_list=documents_list, k=int(common_flags.K.value)) stats['ndcg_score_improvement'] = stats['ndcg_score'] - self.state.score( 'ndcg', documents_list=original_documents_list, k=int(common_flags.K.value)) stats['em_at_1'] = self.state.score( 'em_at_k', documents_list=documents_list, k=1) stats['em_at_1_improvement'] = stats['em_at_1'] - self.state.score( 'em_at_k', documents_list=original_documents_list, k=1) stats[f'em_at_{int(common_flags.K.value)}'] = self.state.score( 'em_at_k', documents_list=documents_list, k=int(common_flags.K.value)) stats[f'em_at_{int(common_flags.K.value)}_improvement'] = stats[ f'em_at_{int(common_flags.K.value)}'] - self.state.score( 'em_at_k', documents_list=original_documents_list, k=int(common_flags.K.value)) stats[f'recall_at_{int(common_flags.K.value)}'] = self.state.score( 'recall_at_k', documents_list=documents_list, k=int(common_flags.K.value)) stats[f'recall_at_{int(common_flags.K.value)}_improvement'] = stats[ f'recall_at_{int(common_flags.K.value)}'] - self.state.score( 'recall_at_k', documents_list=original_documents_list, k=int(common_flags.K.value)) stats['recall_at_1'] = self.state.score( 'recall_at_k', documents_list=documents_list, k=1) stats['recall_at_1_improvement'] = stats['recall_at_1'] - self.state.score( 'recall_at_k', documents_list=original_documents_list, k=1) stats['documents_explored'] = len( self.state.sorted_unique_documents( step=self.state.num_completed_requests)) if return_as_dict: return stats return ( np.float32(stats['ndcg_score']), np.float32(stats['ndcg_score_improvement']), np.float32(stats['em_at_1']), np.float32(stats['em_at_1_improvement']), np.float32(stats[f'em_at_{int(common_flags.K.value)}']), np.float32(stats[f'em_at_{int(common_flags.K.value)}_improvement']), np.float32(stats[f'recall_at_{int(common_flags.K.value)}']), np.float32( stats[f'recall_at_{int(common_flags.K.value)}_improvement']), np.float32(stats['recall_at_1']), np.float32(stats['recall_at_1_improvement']), np.float32(stats['documents_explored']), ) def special_episode_statistics(self): stats = {} stats['num_queries'] = len(self.state.history) stats['queries'] = ('\n' + '-' * 20 + '\n').join( [str(entry) for entry in self.state.history]) return stats def reset( self, index: Optional[Union[int, environment_pb2.GetQueryResponse]] = None, documents: Optional[Sequence[environment_pb2.Document]] = None ) -> Tuple[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], Mapping[ str, Any]]: if index is None or isinstance(index, int): query = self._get_query(index) else: query = index self._logging_info('Original Query [%d/%d]: %s | gold answers: %s.', query.index, query.total, query.query, query.gold_answer) self.state = types.EnvState(original_query=query, k=common_flags.K.value) self.state.add_history_entry( self._get_env_output( query=utils.escape_for_lucene(query.query), original_query=query, documents=documents)) if (common_flags.RELEVANCE_FEEDBACK_RESTRICT.value == 1 and self.training and query.gold_answer): target_query = ( f'{utils.escape_for_lucene(query.query)} ' f'+(contents:"{utils.escape_for_lucene(query.gold_answer[0])}")') self.state.target_documents = 
self._get_env_output( query=target_query, original_query=query).documents if self._target_reward() < self._initial_reward(): self.state.target_documents = None self.known_word_tries = None self.valid_word_actions = None self.use_rf_restrict = False documents = self.state.history[-1].documents if not documents: raise mzcore.SkipEpisode(f'No documents for original query {query.query}') self.state.tree = state_tree.NQStateTree(grammar=self.grammar) self._logging_info('Initial result score: %s', self._compute_reward(self.state.history[0].documents)) self.action_history = [] self.n_episode += 1 obs = self._obs() info = self._info_dict(obs=obs) return obs.observation, info def _apply_action(self, action) -> None: self.state.tree.apply_action(action) def _episode_reward(self, metric: str) -> float: stats = self.special_episode_statistics_learner(return_as_dict=True) self._logging_info('Episode Statistics: %s', ' '.join([f'{k}: {v}' for k, v in stats.items()])) if metric not in stats: raise NotImplementedError( f'Final episode reward for type {metric} is not implemented.') return stats[metric] def _get_current_documents_list(self, step: int) -> List[environment_pb2.Document]: if common_flags.USE_AGGREGATED_DOCUMENTS.value == 1: return get_aggregated_documents(self.state.history[:step]) else: return self.state.history[step - 1].documents def _get_previous_documents_list(self, step: int) -> List[environment_pb2.Document]: if common_flags.USE_AGGREGATED_DOCUMENTS.value == 1: return get_aggregated_documents(self.state.history[:step - 1]) else: return self.state.history[step - 2].documents def _intermediate_reward(self, step: int) -> float: assert step >= 2, (f'Intermediate reward computation requires at least 2 ' f'history entries. Requested was "{step}".') current_documents_list = self._get_current_documents_list(step) previous_documents_list = self._get_previous_documents_list(step) if common_flags.REWARD.value == 'curiosity+dcg': curiosity = len( set([d.content for d in current_documents_list]) - set([d.content for d in previous_documents_list])) / float( common_flags.NUM_DOCUMENTS_TO_RETRIEVE.value) dcg_current = self.state.score( identifier='dcg', documents_list=current_documents_list, k=common_flags.K.value) dcg_previous = self.state.score( identifier='dcg', documents_list=previous_documents_list, k=common_flags.K.value) ideal_dcg = utils.dcg_score(relevances=[1.] * common_flags.K.value) ndcg_improvement = (dcg_current - dcg_previous) / ideal_dcg return common_flags.REWARD_INTERPOLATION_VALUE.value * curiosity + ( 1 - common_flags.REWARD_INTERPOLATION_VALUE.value) * ndcg_improvement else: reward = common_flags.REWARD.value raise NotImplementedError( f'Intermediate episode reward for type {reward} is not implemented.') def _compute_reward( self, current_documents: List[environment_pb2.Document]) -> float: if common_flags.REWARD.value == 'curiosity+dcg': curiosity = len( set([d.content for d in current_documents]) - set([d.content for d in self.state.history[0].documents])) / float( common_flags.NUM_DOCUMENTS_TO_RETRIEVE.value) dcg_current = self.state.score( identifier='dcg', documents_list=current_documents, k=common_flags.K.value) ideal_dcg = utils.dcg_score(relevances=[1.] * common_flags.K.value) ndcg = dcg_current / ideal_dcg return common_flags.REWARD_INTERPOLATION_VALUE.value * curiosity + ( 1 - common_flags.REWARD_INTERPOLATION_VALUE.value) * ndcg else: reward = common_flags.REWARD.value raise NotImplementedError( f'Episode reward for type {reward} is not implemented.')
Apache License 2.0
dswah/pygam
pygam/links.py
InvSquaredLink.__init__
python
def __init__(self):
    super(InvSquaredLink, self).__init__(name='inv_squared')
creates an instance of an InvSquaredLink object

Parameters
----------
name : str, default: None

Returns
-------
self
https://github.com/dswah/pygam/blob/b57b4cf8783a90976031e1857e748ca3e6ec650b/pygam/links.py#L276-L288
from __future__ import division, absolute_import import numpy as np from pygam.core import Core class Link(Core): def __init__(self, name=None): super(Link, self).__init__(name=name) class IdentityLink(Link): def __init__(self): super(IdentityLink, self).__init__(name='identity') def link(self, mu, dist): return mu def mu(self, lp, dist): return lp def gradient(self, mu, dist): return np.ones_like(mu) class LogitLink(Link): def __init__(self): super(LogitLink, self).__init__(name='logit') def link(self, mu, dist): return np.log(mu) - np.log(dist.levels - mu) def mu(self, lp, dist): elp = np.exp(lp) return dist.levels * elp / (elp + 1) def gradient(self, mu, dist): return dist.levels/(mu*(dist.levels - mu)) class LogLink(Link): def __init__(self): super(LogLink, self).__init__(name='log') def link(self, mu, dist): return np.log(mu) def mu(self, lp, dist): return np.exp(lp) def gradient(self, mu, dist): return 1. / mu class InverseLink(Link): def __init__(self): super(InverseLink, self).__init__(name='inverse') def link(self, mu, dist): return mu ** -1. def mu(self, lp, dist): return lp ** -1. def gradient(self, mu, dist): return -1 * mu**-2. class InvSquaredLink(Link):
Apache License 2.0
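A minimal usage sketch for the InvSquaredLink constructor in the dswah/pygam record above. It only exercises what the record shows (the constructor forwards the fixed name 'inv_squared' to the base Link/Core initializer); the link transform itself is defined further down links.py and is not assumed here.

from pygam.links import InvSquaredLink

link = InvSquaredLink()   # registers the link under the name 'inv_squared' via Link/Core.__init__
# The actual inverse-squared transform (link/mu/gradient methods) lives outside this record,
# so it is not called here.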
cohesity/management-sdk-python
cohesity_management_sdk/models/object_search_results.py
ObjectSearchResults.__init__
python
def __init__(self, object_snapshot_info=None, total_count=None):
    self.object_snapshot_info = object_snapshot_info
    self.total_count = total_count
Constructor for the ObjectSearchResults class
https://github.com/cohesity/management-sdk-python/blob/1c085d5a10f5f1a87b700e7ad1fc1dcabda41ae5/cohesity_management_sdk/models/object_search_results.py#L31-L38
import cohesity_management_sdk.models.object_snapshot_info class ObjectSearchResults(object): _names = { "object_snapshot_info":'objectSnapshotInfo', "total_count":'totalCount' }
Apache License 2.0
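A short usage sketch for the ObjectSearchResults constructor in the cohesity/management-sdk-python record above. Both arguments are optional; object_snapshot_info is left as None because the ObjectSnapshotInfo fields are not shown in this record.

from cohesity_management_sdk.models.object_search_results import ObjectSearchResults

# Minimal construction; object_snapshot_info would normally hold ObjectSnapshotInfo models.
results = ObjectSearchResults(object_snapshot_info=None, total_count=0)
print(results.total_count)   # -> 0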
pytorchlightning/metrics
torchmetrics/functional/pairwise/helpers.py
_reduce_distance_matrix
python
def _reduce_distance_matrix(distmat: Tensor, reduction: Optional[str] = None) -> Tensor:
    if reduction == "mean":
        return distmat.mean(dim=-1)
    if reduction == "sum":
        return distmat.sum(dim=-1)
    if reduction is None or reduction == "none":
        return distmat
    raise ValueError(f"Expected reduction to be one of `['mean', 'sum', None]` but got {reduction}")
Final reduction of distance matrix.

Args:
    distmat: a ``[N,M]`` matrix
    reduction: string determining how to reduce along the last dimension
https://github.com/pytorchlightning/metrics/blob/8242b0bdee70432b932084111e365b075d8f404f/torchmetrics/functional/pairwise/helpers.py#L46-L59
from typing import Optional, Tuple from torch import Tensor def _check_input( x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None ) -> Tuple[Tensor, Tensor, bool]: if x.ndim != 2: raise ValueError(f"Expected argument `x` to be a 2D tensor of shape `[N, d]` but got {x.shape}") if y is not None: if y.ndim != 2 or y.shape[1] != x.shape[1]: raise ValueError( "Expected argument `y` to be a 2D tensor of shape `[M, d]` where" " `d` should be same as the last dimension of `x`" ) zero_diagonal = False if zero_diagonal is None else zero_diagonal else: y = x.clone() zero_diagonal = True if zero_diagonal is None else zero_diagonal return x, y, zero_diagonal
Apache License 2.0
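A usage sketch for _reduce_distance_matrix from the torchmetrics record above. The function is a private helper (underscore-prefixed), so importing and calling it directly as below is for illustration only; the import path follows the record's function_path.

import torch
from torchmetrics.functional.pairwise.helpers import _reduce_distance_matrix

distmat = torch.rand(4, 6)                                        # a [N, M] pairwise distance matrix
row_means = _reduce_distance_matrix(distmat, reduction="mean")    # reduced to shape [4]
row_sums = _reduce_distance_matrix(distmat, reduction="sum")      # reduced to shape [4]
unchanged = _reduce_distance_matrix(distmat, reduction=None)      # returned as-is, shape [4, 6]
print(row_means.shape, row_sums.shape, unchanged.shape)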
nuagenetworks/vspk-python
vspk/v5_0/nuapplication.py
NUApplication.read_only
python
def read_only(self, value):
    self._read_only = value
Set read_only value.

Notes:
    Determines whether this entity is read only. Read-only objects cannot be modified or deleted.

    This attribute is named `readOnly` in the VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v5_0/nuapplication.py#L301-L311
from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from .fetchers import NUMonitorscopesFetcher from .fetchers import NUApplicationBindingsFetcher from bambou import NURESTObject class NUApplication(NURESTObject): __rest_name__ = "application" __resource_name__ = "applications" CONST_POST_CLASSIFICATION_PATH_ANY = "ANY" CONST_PROTOCOL_NONE = "NONE" CONST_PERFORMANCE_MONITOR_TYPE_FIRST_PACKET = "FIRST_PACKET" CONST_PRE_CLASSIFICATION_PATH_PRIMARY = "PRIMARY" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_PRE_CLASSIFICATION_PATH_SECONDARY = "SECONDARY" CONST_PERFORMANCE_MONITOR_TYPE_CONTINUOUS = "CONTINUOUS" CONST_OPTIMIZE_PATH_SELECTION_PACKETLOSS = "PACKETLOSS" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" CONST_OPTIMIZE_PATH_SELECTION_LATENCY = "LATENCY" CONST_OPTIMIZE_PATH_SELECTION_JITTER = "JITTER" CONST_PROTOCOL_UDP = "UDP" CONST_POST_CLASSIFICATION_PATH_PRIMARY = "PRIMARY" CONST_POST_CLASSIFICATION_PATH_SECONDARY = "SECONDARY" CONST_PERFORMANCE_MONITOR_TYPE_FIRST_PACKET_AND_CONTINUOUS = "FIRST_PACKET_AND_CONTINUOUS" CONST_PROTOCOL_TCP = "TCP" CONST_PRE_CLASSIFICATION_PATH_DEFAULT = "DEFAULT" def __init__(self, **kwargs): super(NUApplication, self).__init__() self._dscp = None self._name = None self._bandwidth = None self._last_updated_by = None self._read_only = None self._performance_monitor_type = None self._certificate_common_name = None self._description = None self._destination_ip = None self._destination_port = None self._network_symmetry = None self._enable_pps = None self._one_way_delay = None self._one_way_jitter = None self._one_way_loss = None self._entity_scope = None self._post_classification_path = None self._source_ip = None self._source_port = None self._app_id = None self._optimize_path_selection = None self._pre_classification_path = None self._protocol = None self._associated_l7_application_signature_id = None self._ether_type = None self._external_id = None self._symmetry = None self.expose_attribute(local_name="dscp", remote_name="DSCP", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="bandwidth", remote_name="bandwidth", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="read_only", remote_name="readOnly", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="performance_monitor_type", remote_name="performanceMonitorType", attribute_type=str, is_required=False, is_unique=False, choices=[u'CONTINUOUS', u'FIRST_PACKET', u'FIRST_PACKET_AND_CONTINUOUS']) self.expose_attribute(local_name="certificate_common_name", remote_name="certificateCommonName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="destination_ip", remote_name="destinationIP", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="destination_port", remote_name="destinationPort", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="network_symmetry", remote_name="networkSymmetry", attribute_type=bool, is_required=False, is_unique=False) 
self.expose_attribute(local_name="enable_pps", remote_name="enablePPS", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="one_way_delay", remote_name="oneWayDelay", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="one_way_jitter", remote_name="oneWayJitter", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="one_way_loss", remote_name="oneWayLoss", attribute_type=float, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="post_classification_path", remote_name="postClassificationPath", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'PRIMARY', u'SECONDARY']) self.expose_attribute(local_name="source_ip", remote_name="sourceIP", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="source_port", remote_name="sourcePort", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="app_id", remote_name="appId", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="optimize_path_selection", remote_name="optimizePathSelection", attribute_type=str, is_required=False, is_unique=False, choices=[u'JITTER', u'LATENCY', u'PACKETLOSS']) self.expose_attribute(local_name="pre_classification_path", remote_name="preClassificationPath", attribute_type=str, is_required=False, is_unique=False, choices=[u'DEFAULT', u'PRIMARY', u'SECONDARY']) self.expose_attribute(local_name="protocol", remote_name="protocol", attribute_type=str, is_required=False, is_unique=False, choices=[u'NONE', u'TCP', u'UDP']) self.expose_attribute(local_name="associated_l7_application_signature_id", remote_name="associatedL7ApplicationSignatureID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="ether_type", remote_name="etherType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name="symmetry", remote_name="symmetry", attribute_type=bool, is_required=False, is_unique=False) self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.monitorscopes = NUMonitorscopesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.application_bindings = NUApplicationBindingsFetcher.fetcher_with_object(parent_object=self, relationship="member") self._compute_args(**kwargs) @property def dscp(self): return self._dscp @dscp.setter def dscp(self, value): self._dscp = value @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def bandwidth(self): return self._bandwidth @bandwidth.setter def bandwidth(self, value): self._bandwidth = value @property def last_updated_by(self): return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): self._last_updated_by = value @property def read_only(self): return self._read_only @read_only.setter
BSD 3-Clause New or Revised License
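A hedged sketch of using the read_only setter from the nuagenetworks/vspk-python record above. Importing NUApplication from the vspk.v5_0 package and passing name as a keyword to the constructor follow the usual bambou/vspk conventions and are assumptions; persisting the change requires an authenticated VSD session and is left commented out.

from vspk.v5_0 import NUApplication   # assumption: NUApplication is re-exported at the package level

app = NUApplication(name='demo-app')
app.read_only = True      # invokes the setter above; serialized as `readOnly` in the VSD API
print(app.read_only)      # -> True
# app.save()              # would push the change to VSD; needs an authenticated session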
cmu-delphi/delphi-epidata
src/acquisition/covidcast/csv_importer.py
CsvImporter.load_csv
python
def load_csv(filepath, geo_type, pandas=pandas):
    logger = get_structured_logger('load_csv')
    try:
        table = pandas.read_csv(filepath, dtype=CsvImporter.DTYPES)
    except ValueError as e:
        logger.warning(event='Failed to open CSV with specified dtypes, switching to str', detail=str(e), file=filepath)
        table = pandas.read_csv(filepath, dtype='str')

    if not CsvImporter.is_header_valid(table.columns):
        logger.warning(event='invalid header', detail=table.columns, file=filepath)
        yield None
        return

    table.rename(columns={"val": "value", "se": "stderr",
                          "missing_val": "missing_value",
                          "missing_se": "missing_stderr"}, inplace=True)

    for row in table.itertuples(index=False):
        row_values, error = CsvImporter.extract_and_check_row(row, geo_type, filepath)
        if error:
            logger.warning(event='invalid value for row', detail=(str(row), error), file=filepath)
            yield None
            continue
        yield row_values
Load, validate, and yield data as `RowValues` from a CSV file.

filepath: the CSV file to be loaded
geo_type: the geographic resolution (e.g. county)

In case of a validation error, `None` is yielded for the offending row, including the header.
https://github.com/cmu-delphi/delphi-epidata/blob/e0c4cf81c940b55ee7efbd2ed335deb3062e54ec/src/acquisition/covidcast/csv_importer.py#L341-L371
from datetime import date import glob import math import os import re import pandas import epiweeks as epi from delphi_utils import Nans from delphi.utils.epiweek import delta_epiweeks from delphi.epidata.acquisition.covidcast.logger import get_structured_logger class CsvImporter: PATTERN_DAILY = re.compile(r'^.*/([^/]*)/(\d{8})_(\w+?)_(\w+)\.csv$') PATTERN_WEEKLY = re.compile(r'^.*/([^/]*)/weekly_(\d{6})_(\w+?)_(\w+)\.csv$') PATTERN_ISSUE_DIR = re.compile(r'^.*/([^/]*)/issue_(\d{8})$') GEOGRAPHIC_RESOLUTIONS = {'county', 'hrr', 'msa', 'dma', 'state', 'hhs', 'nation'} REQUIRED_COLUMNS = {'geo_id', 'val', 'se', 'sample_size'} MIN_YEAR = 2019 MAX_YEAR = 2030 DTYPES = { "geo_id": str, "val": float, "se": float, "sample_size": float, "missing_val": int, "missing_se": int, "missing_sample_size": int } class RowValues: def __init__(self, geo_value, value, stderr, sample_size, missing_value, missing_stderr, missing_sample_size): self.geo_value = geo_value self.value = value self.stderr = stderr self.sample_size = sample_size self.missing_value = missing_value self.missing_stderr = missing_stderr self.missing_sample_size = missing_sample_size @staticmethod def is_sane_day(value): year, month, day = value // 10000, (value % 10000) // 100, value % 100 nearby_year = CsvImporter.MIN_YEAR <= year <= CsvImporter.MAX_YEAR valid_month = 1 <= month <= 12 sensible_day = 1 <= day <= 31 if not (nearby_year and valid_month and sensible_day): return False return date(year=year,month=month,day=day) @staticmethod def is_sane_week(value): year, week = value // 100, value % 100 nearby_year = CsvImporter.MIN_YEAR <= year <= CsvImporter.MAX_YEAR sensible_week = 1 <= week <= 53 if not (nearby_year and sensible_week): return False return value @staticmethod def find_issue_specific_csv_files(scan_dir, glob=glob): logger = get_structured_logger('find_issue_specific_csv_files') for path in sorted(glob.glob(os.path.join(scan_dir, '*'))): issuedir_match = CsvImporter.PATTERN_ISSUE_DIR.match(path.lower()) if issuedir_match and os.path.isdir(path): issue_date_value = int(issuedir_match.group(2)) issue_date = CsvImporter.is_sane_day(issue_date_value) if issue_date: logger.info(event='processing csv files from issue', detail=issue_date, file=path) yield from CsvImporter.find_csv_files(path, issue=(issue_date, epi.Week.fromdate(issue_date)), glob=glob) else: logger.warning(event='invalid issue directory day', detail=issue_date_value, file=path) @staticmethod def find_csv_files(scan_dir, issue=(date.today(), epi.Week.fromdate(date.today())), glob=glob): logger = get_structured_logger('find_csv_files') issue_day,issue_epiweek=issue issue_day_value=int(issue_day.strftime("%Y%m%d")) issue_epiweek_value=int(str(issue_epiweek)) issue_value=-1 lag_value=-1 for path in sorted(glob.glob(os.path.join(scan_dir, '*', '*'))): if not path.lower().endswith('.csv'): continue daily_match = CsvImporter.PATTERN_DAILY.match(path.lower()) weekly_match = CsvImporter.PATTERN_WEEKLY.match(path.lower()) if not daily_match and not weekly_match: logger.warning(event='invalid csv path/filename', detail=path, file=path) yield (path, None) continue if daily_match: time_type = 'day' time_value = int(daily_match.group(2)) match = daily_match time_value_day = CsvImporter.is_sane_day(time_value) if not time_value_day: logger.warning(event='invalid filename day', detail=time_value, file=path) yield (path, None) continue issue_value=issue_day_value lag_value=(issue_day-time_value_day).days else: time_type = 'week' time_value = int(weekly_match.group(2)) match = 
weekly_match time_value_week=CsvImporter.is_sane_week(time_value) if not time_value_week: logger.warning(event='invalid filename week', detail=time_value, file=path) yield (path, None) continue issue_value=issue_epiweek_value lag_value=delta_epiweeks(time_value_week, issue_epiweek_value) geo_type = match.group(3).lower() if geo_type not in CsvImporter.GEOGRAPHIC_RESOLUTIONS: logger.warning(event='invalid geo_type', detail=geo_type, file=path) yield (path, None) continue source = match.group(1).lower() signal = match.group(4).lower() if len(signal) > 64: logger.warning(event='invalid signal name (64 char limit)',detail=signal, file=path) yield (path, None) continue yield (path, (source, signal, time_type, geo_type, time_value, issue_value, lag_value)) @staticmethod def is_header_valid(columns): return set(columns) >= CsvImporter.REQUIRED_COLUMNS @staticmethod def floaty_int(value): float_value = float(value) if not float_value.is_integer(): raise ValueError('not an int: "%s"' % str(value)) return int(float_value) @staticmethod def maybe_apply(func, quantity): if str(quantity).lower() in ('inf', '-inf'): raise ValueError("Quantity given was an inf.") elif str(quantity).lower() in ('', 'na', 'nan', 'none'): return None else: return func(quantity) @staticmethod def validate_quantity(row, attr_quantity): try: quantity = CsvImporter.maybe_apply(float, getattr(row, attr_quantity)) return quantity except (ValueError, AttributeError) as e: return "Error" @staticmethod def validate_missing_code(row, attr_quantity, attr_name, filepath=None, logger=None): logger = get_structured_logger('load_csv') if logger is None else logger missing_entry = getattr(row, "missing_" + attr_name, None) try: missing_entry = CsvImporter.floaty_int(missing_entry) except (ValueError, TypeError): missing_entry = None if missing_entry is None and attr_quantity is not None: return Nans.NOT_MISSING.value if missing_entry is None and attr_quantity is None: return Nans.OTHER.value if missing_entry != Nans.NOT_MISSING.value and attr_quantity is not None: logger.warning(event = f"missing_{attr_name} column contradicting {attr_name} presence.", detail = (str(row)), file = filepath) return Nans.NOT_MISSING.value if missing_entry == Nans.NOT_MISSING.value and attr_quantity is None: logger.warning(event = f"missing_{attr_name} column contradicting {attr_name} presence.", detail = (str(row)), file = filepath) return Nans.OTHER.value return missing_entry @staticmethod def extract_and_check_row(row, geo_type, filepath=None): try: geo_id = row.geo_id.lower() except AttributeError as e: return (None, 'geo_id') if geo_type in ('hrr', 'msa', 'dma', 'hhs'): try: geo_id = str(CsvImporter.floaty_int(geo_id)) except ValueError: return (None, 'geo_id') if geo_type == 'county': if len(geo_id) != 5 or not '01000' <= geo_id <= '80000': return (None, 'geo_id') elif geo_type == 'hrr': if not 1 <= int(geo_id) <= 500: return (None, 'geo_id') elif geo_type == 'msa': if len(geo_id) != 5 or not '10000' <= geo_id <= '99999': return (None, 'geo_id') elif geo_type == 'dma': if not 450 <= int(geo_id) <= 950: return (None, 'geo_id') elif geo_type == 'state': if len(geo_id) != 2 or not 'aa' <= geo_id <= 'zz': return (None, 'geo_id') elif geo_type == 'hhs': if not 1 <= int(geo_id) <= 10: return (None, 'geo_id') elif geo_type == 'nation': if len(geo_id) != 2 or not 'aa' <= geo_id <= 'zz': return (None, 'geo_id') else: return (None, 'geo_type') value = CsvImporter.validate_quantity(row, "value") if value == "Error": return (None, 'value') stderr = 
CsvImporter.validate_quantity(row, "stderr") if stderr == "Error" or (stderr is not None and stderr < 0): return (None, 'stderr') sample_size = CsvImporter.validate_quantity(row, "sample_size") if sample_size == "Error" or (sample_size is not None and sample_size < 0): return (None, 'sample_size') missing_value = CsvImporter.validate_missing_code(row, value, "value", filepath) missing_stderr = CsvImporter.validate_missing_code(row, stderr, "stderr", filepath) missing_sample_size = CsvImporter.validate_missing_code(row, sample_size, "sample_size", filepath) row_values = CsvImporter.RowValues( geo_id, value, stderr, sample_size, missing_value, missing_stderr, missing_sample_size ) return (row_values, None) @staticmethod
MIT License
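A hedged usage sketch for CsvImporter.load_csv from the cmu-delphi/delphi-epidata record above, assuming the delphi codebase is importable. The CSV path and its contents are hypothetical; only the required columns (geo_id, val, se, sample_size) and the yield-None-on-error behaviour come from the record.

# Hypothetical file 'covid/20200401_state_signal.csv' with header: geo_id,val,se,sample_size
from delphi.epidata.acquisition.covidcast.csv_importer import CsvImporter

for row_values in CsvImporter.load_csv('covid/20200401_state_signal.csv', 'state'):
    if row_values is None:
        continue   # None is yielded for an invalid header or an invalid row
    print(row_values.geo_value, row_values.value, row_values.stderr, row_values.sample_size)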
mars-project/mars
mars/tensor/arithmetic/arctan2.py
arctan2
python
def arctan2(x1, x2, out=None, where=None, **kwargs):
    op = TensorArctan2(**kwargs)
    return op(x1, x2, out=out, where=where)
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.

The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).

This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.

Parameters
----------
x1 : array_like, real-valued
    `y`-coordinates.
x2 : array_like, real-valued
    `x`-coordinates. `x2` must be broadcastable to match the shape of
    `x1` or vice versa.
out : Tensor, None, or tuple of Tensor and None, optional
    A location into which the result is stored. If provided, it must have
    a shape that the inputs broadcast to. If not provided or `None`,
    a freshly-allocated tensor is returned. A tuple (possible only as a
    keyword argument) must have length equal to the number of outputs.
where : array_like, optional
    Values of True indicate to calculate the ufunc at that position, values
    of False indicate to leave the value in the output alone.
**kwargs

Returns
-------
angle : Tensor
    Array of angles in radians, in the range ``[-pi, pi]``.

See Also
--------
arctan, tan, angle

Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_

====== ====== ================
`x1`   `x2`   `arctan2(x1,x2)`
====== ====== ================
+/- 0  +0     +/- 0
+/- 0  -0     +/- pi
 > 0   +/-inf +0 / +pi
 < 0   +/-inf -0 / -pi
+/-inf +inf   +/- (pi/4)
+/-inf -inf   +/- (3*pi/4)
====== ====== ================

Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.

References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."

Examples
--------
Consider four points in different quadrants:

>>> import mars.tensor as mt

>>> x = mt.array([-1, +1, +1, -1])
>>> y = mt.array([-1, -1, +1, +1])
>>> (mt.arctan2(y, x) * 180 / mt.pi).execute()
array([-135., -45., 45., 135.])

Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:

>>> mt.arctan2([1., -1.], [0., 0.]).execute()
array([ 1.57079633, -1.57079633])
>>> mt.arctan2([0., 0., mt.inf], [+0., -0., mt.inf]).execute()
array([ 0. , 3.14159265, 0.78539816])
https://github.com/mars-project/mars/blob/d50d9f8d8e966756e8b9dc80aca53a3e4607e7e0/mars/tensor/arithmetic/arctan2.py#L42-L128
import numpy as np from ... import opcodes as OperandDef from ..utils import infer_dtype from .core import TensorBinOp from .utils import arithmetic_operand @arithmetic_operand class TensorArctan2(TensorBinOp): _op_type_ = OperandDef.ARCTAN2 _func_name = "arctan2" @classmethod def _is_sparse(cls, x1, x2): if hasattr(x1, "issparse") and x1.issparse(): return True elif np.isscalar(x1) and x1 == 0: return x2.issparse() if hasattr(x2, "issparse") else False return False @infer_dtype(np.arctan2)
Apache License 2.0
ivy-dl/ivy
ivy/core/general.py
array_equal
python
def array_equal(x0: Union[ivy.Array, ivy.NativeArray],
                x1: Union[ivy.Array, ivy.NativeArray],
                f: ivy.Framework = None) -> bool:
    return _cur_framework(x0, f=f).array_equal(x0, x1)
Determines whether two input arrays are equal across all elements.

:param x0: The first input array to compare.
:type x0: array
:param x1: The second input array to compare.
:type x1: array
:param f: Machine learning framework. Inferred from inputs if None.
:type f: ml_framework, optional
:return: Boolean, whether or not the input arrays are equal across all elements.
https://github.com/ivy-dl/ivy/blob/52cc1dddba0638f25e48fb03c0b3a20d637d4ce3/ivy/core/general.py#L78-L91
import math import einops import numpy as np from numbers import Number from typing import Callable, Any, Union, List, Tuple, Dict, Iterable import ivy from ivy.core.device import dev_str from ivy.framework_handler import current_framework as _cur_framework FN_CACHE = dict() INF = float('inf') TIMEOUT = 10.0 def array(object_in: Union[List, np.ndarray, ivy.Array, ivy.NativeArray], dtype_str: str = None, dev_str: str = None, f: ivy.Framework = None) -> Union[ivy.Array, ivy.NativeArray]: return _cur_framework(object_in, f=f).array(object_in, dtype_str, dev_str) def is_array(x: Any, exclusive: bool = False, f: ivy.Framework = None) -> bool: try: return _cur_framework(x, f=f).is_array(x, exclusive) except ValueError: return False def copy_array(x: Union[ivy.Array, ivy.NativeArray], f: ivy.Framework = None) -> Union[ivy.Array, ivy.NativeArray]: return _cur_framework(x, f=f).copy_array(x)
Apache License 2.0
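A brief usage sketch for array_equal from the ivy-dl/ivy record above. Exposing the function as ivy.array_equal at the top level is an assumption (the record defines it in ivy/core/general.py); the backend framework is inferred from the numpy inputs, as the docstring states.

import numpy as np
import ivy

x0 = np.array([1., 2., 3.])
x1 = np.array([1., 2., 3.])
print(ivy.array_equal(x0, x1))                      # -> True
print(ivy.array_equal(x0, np.array([1., 2., 4.])))  # -> False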
hsdp/python-cf-api
cf_api/deploy_manifest.py
Deploy._create_app_params
python
def _create_app_params(self):
    self._assert_org_and_space()
    self._get_stack()
    params = dict(
        name=self.name,
        space_guid=self._space.guid,
    )
    if self.stack:
        params['stack_guid'] = self._stack.guid
    if self.buildpack:
        params['buildpack'] = self.buildpack
    if self.env:
        params['environment_json'] = self.env
    if self.command:
        params['command'] = self.command
    if self.docker_image:
        username = self.docker_image.get('username')
        password = self.docker_image.get(
            'password', os.getenv('CF_DOCKER_PASSWORD'))
        params['docker_image'] = self.docker_image.get('image')
        params['docker_credentials'] = {
            'username': username,
            'password': password,
        }
    if not self.no_route:
        params['health_check_type'] = self.health_check_type or 'port'
        params['health_check_timeout'] = self.health_check_timeout or 60
    else:
        if self.health_check_type:
            params['health_check_type'] = self.health_check_type
        if self.health_check_timeout:
            params['health_check_timeout'] = self.health_check_timeout
    params['instances'] = self.instances or 2
    params['memory'] = self.memory or 512
    params['disk_quota'] = self.disk_quota or 512
    return params
Assembles a POST body (dict) of values to be sent in creating the app based on the app manifest dictionary.
https://github.com/hsdp/python-cf-api/blob/13fc605e2ea3b5c09cc8a556c58e8c36ae290c8c/cf_api/deploy_manifest.py#L385-L425
from __future__ import print_function import argparse import fnmatch import hashlib import json import os import random import re import string import zipfile import sys import time import yaml import logging import signal import traceback from getpass import getpass from uuid import uuid4 import cf_api from . import logs_util from . import routes_util from . import dropsonde_util from . import exceptions as exc manifest_map = { 'health_check_timeout': 'timeout', 'health_check_type': 'health-check-type', 'no_hostname': 'no-hostname', 'random_route': 'random-route', 'no_route': 'no-route', 'docker_image': 'docker', } logger = logging.getLogger('cf_api.deploy_manifest') logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) class Deploy(object): _manifest_dict = None _manifest_filename = None _existing_app = None _existing_services = None _upload_job = None _space = None _org = None _app = None _stack = None _domain = None _service_instances = None _domains = None _source_dirname = None _tailing_thread = None is_debug = False def __init__(self, cc, manifest_filename, **manifest): self._cc = cc self._manifest_filename = manifest_filename self._manifest_dict = manifest self._service_instances = {} def __getattr__(self, item): item = manifest_map.get(item, item) value = self._manifest_dict.get(item, None) if 'memory' == item: value = to_mb(value) elif 'disk_quota' == item: value = to_mb(value) return value def clone(self, new_name): cl = Deploy(self._cc, self._manifest_filename, **self._manifest_dict) cl._manifest_dict['name'] = new_name cl._space = self._space cl._org = self._org cl.is_debug = self.is_debug return cl def set_org_and_space(self, org_name, space_name): res = self._cc.organizations().get_by_name(org_name) self._assert_no_error(res) self._org = res.resource res = self._cc.request(self._org.spaces_url).get_by_name(space_name) self._assert_no_error(res) self._space = res.resource return self def set_org_and_space_dicts(self, org_dict, space_dict): self._space = space_dict self._org = org_dict return self def set_source_dirname(self, dirname): self._source_dirname = dirname return self def set_debug(self, is_debug): self.is_debug = is_debug return self def log(self, *args, **kwargs): if self.is_debug: return log(*args, **kwargs) def _assert_org_and_space(self): if not self._org: raise exc.InvalidArgsException( 'Org is required to get the app', 500) if not self._space: raise exc.InvalidArgsException( 'Space is required to get the app', 500) def _get_source_dirname(self): if self._source_dirname: return self._source_dirname manifest_dirname = os.path.dirname(self._manifest_filename) if self.path: path = self.path if not path.startswith(os.path.sep): path = os.path.join(manifest_dirname, path) if os.path.isdir(path): self._source_dirname = os.path.normpath(path) if not self._source_dirname: self._source_dirname = manifest_dirname if not self._source_dirname.endswith(os.path.sep): self._source_dirname += os.path.sep return self._source_dirname def _get_archive_filename(self): if self._archive_filename: return self._archive_filename if self.path: path = self.path if not path.startswith(os.path.sep): manifest_dirname = os.path.dirname(self._manifest_filename) path = os.path.join(manifest_dirname, path) if os.path.isfile(path): self._archive_filename = os.path.normpath(path) return path filename = os.path.basename(self.name) + '-' + str(uuid4()) + '.zip' 
self._archive_filename = os.path.normpath(os.path.join(os.path.sep, 'tmp', filename)) return self._archive_filename def _cleanup_archive(self): if not self._archive_filename: raise exc.InvalidStateException( 'Archive has not been created!', 500) filename = self._get_archive_filename() if os.path.isfile(filename): os.unlink(filename) def _get_stack(self): self.log('get stack', self.stack) if self._stack is not None: return self._stack self._stack = self._cc.stacks().get_by_name(self.stack).resource return self._stack def _get_primary_domain(self): if self._domain is not None: return self._domain self._domain = self._cc.shared_domains().get().resource return self._domain def _get_domain(self, name=None, guid=None): if self._domains is None: self._domains = {} if name in self._domains: return self._domains[name] else: for domain in self._domains.values(): if domain.guid == guid: return domain try: domain = self._cc.shared_domains().get_by_name(name).resource except exc.ResponseException as e: print(str(e)) domain = None if not domain: domain = self._cc.request('private_domains') .get_by_name(name).resource self._domains[name] = domain return domain def _get_app(self, use_cache=True): self.log('get app', self.name) self._assert_org_and_space() if self._app is not None and use_cache: return self._app res = self._cc.request(self._space.apps_url).get_by_name(self.name) if res.has_error: if 'not found' in res.error_message: return None else: res.raise_error() self._app = res.resource return res.resource def _search_routes(self, routes_url, host=None, domain_guid=None, domain_name=None, port=None, path=None, **kwargs): if domain_name is not None and domain_guid is not None: raise exc.InvalidArgsException('domain_name and domain_guid may ' 'not both be set') if domain_guid is None: if domain_name is not None: domain = self._get_domain(name=domain_name) else: domain = self._get_primary_domain() domain_name = domain.name domain_guid = domain.guid args = ['domain_guid', domain_guid, 'host', host] if port is not None: args.extend(['port', port]) if path is not None: args.extend(['path', path]) self.log('searching routes', format_route( host=host, domain_name=domain_name, port=port, path=path, )) return self._cc.request(routes_url).search(*args).resources def _get_service_instances(self, name): if self._service_instances is not None and name in self._service_instances: return self._service_instances[name] res = self._cc.request(self._space.service_instances_url) .get_by_name(name) self._service_instances[name] = res.resource return self._service_instances[name]
Apache License 2.0
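_create_app_params from the hsdp/python-cf-api record above is an internal helper that needs a fully configured Deploy instance, so instead of calling it, here is a hedged sketch of the POST body it assembles for a minimal manifest with no stack, buildpack, docker image or route options. The app name and space guid are hypothetical; the defaults (port health check, 60 s timeout, 2 instances, 512 MB memory and disk) come from the function body.

params = {
    'name': 'my-app',                      # hypothetical manifest name
    'space_guid': '0000-aaaa-1111-bbbb',   # hypothetical space guid
    'health_check_type': 'port',           # default when no_route is not set
    'health_check_timeout': 60,            # default when no_route is not set
    'instances': 2,                        # default when the manifest omits instances
    'memory': 512,                         # default MB when the manifest omits memory
    'disk_quota': 512,                     # default MB when the manifest omits disk_quota
}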
udst/urbansim
urbansim/developer/developer.py
Developer.merge
python
def merge(old_df, new_df, return_index=False):
    maxind = np.max(old_df.index.values)
    new_df = new_df.reset_index(drop=True)
    new_df.index = new_df.index + maxind + 1
    concat_df = pd.concat([old_df, new_df], verify_integrity=True)
    concat_df.index.name = 'building_id'
    if return_index:
        return concat_df, new_df.index
    return concat_df
Merge two dataframes of buildings. The old dataframe is usually the buildings dataset and the new dataframe is a modified (by the user) version of what is returned by the pick method.

Parameters
----------
old_df : dataframe
    Current set of buildings
new_df : dataframe
    New buildings to add, usually comes from this module
return_index : bool
    If return_index is true, this method will return the new index of new_df (which changes in order to create a unique index after the merge)

Returns
-------
df : dataframe
    Combined DataFrame of buildings, makes sure indexes don't overlap
index : pd.Index
    If and only if return_index is True, return the new index for the new_df dataframe (which changes in order to create a unique index after the merge)
https://github.com/udst/urbansim/blob/0db75668ada0005352b7c7e0a405265f78ccadd7/urbansim/developer/developer.py#L234-L269
from __future__ import print_function from __future__ import division import pandas as pd import numpy as np class Developer(object): def __init__(self, feasibility): if isinstance(feasibility, dict): feasibility = pd.concat(feasibility.values(), keys=feasibility.keys(), axis=1) self.feasibility = feasibility @staticmethod def _max_form(f, colname): df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True) return df.idxmax(axis=1) def keep_form_with_max_profit(self, forms=None): f = self.feasibility if forms is not None: f = f[forms] if len(f) > 0: mu = self._max_form(f, "max_profit") indexes = [tuple(x) for x in mu.reset_index().values] else: indexes = [] df = f.stack(level=0).loc[indexes] df.index.names = ["parcel_id", "form"] df = df.reset_index(level=1) return df @staticmethod def compute_units_to_build(num_agents, num_units, target_vacancy): print("Number of agents: {:,}".format(num_agents)) print("Number of agent spaces: {:,}".format(int(num_units))) assert target_vacancy < 1.0 target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0)) print("Current vacancy = {:.2f}" .format(1 - num_agents / float(num_units))) print("Target vacancy = {:.2f}, target of new units = {:,}" .format(target_vacancy, target_units)) return target_units def pick(self, form, target_units, parcel_size, ave_unit_size, current_units, max_parcel_size=200000, min_unit_size=400, drop_after_build=True, residential=True, bldg_sqft_per_job=400.0, profit_to_prob_func=None): if len(self.feasibility) == 0: return if form is None: df = self.feasibility elif isinstance(form, list): df = self.keep_form_with_max_profit(form) else: df = self.feasibility[form] df = df[df.max_profit_far > 0] ave_unit_size[ave_unit_size < min_unit_size] = min_unit_size df["ave_unit_size"] = ave_unit_size df["parcel_size"] = parcel_size df['current_units'] = current_units df = df[df.parcel_size < max_parcel_size] df['residential_units'] = (df.residential_sqft / df.ave_unit_size).round() df['job_spaces'] = (df.non_residential_sqft / bldg_sqft_per_job).round() if residential: df['net_units'] = df.residential_units - df.current_units else: df['net_units'] = df.job_spaces - df.current_units df = df[df.net_units > 0] if len(df) == 0: print("WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM") return print("Sum of net units that are profitable: {:,}" .format(int(df.net_units.sum()))) if profit_to_prob_func: p = profit_to_prob_func(df) else: df['max_profit_per_size'] = df.max_profit / df.parcel_size p = df.max_profit_per_size.values / df.max_profit_per_size.sum() if df.net_units.sum() < target_units: print("WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO", "MATCH DEMAND") build_idx = df.index.values elif target_units <= 0: build_idx = [] else: choices = np.random.choice(df.index.values, size=min(len(df.index), target_units), replace=False, p=p) tot_units = df.net_units.loc[choices].values.cumsum() ind = int(np.searchsorted(tot_units, target_units, side="left")) + 1 build_idx = choices[:ind] if drop_after_build: self.feasibility = self.feasibility.drop(build_idx) new_df = df.loc[build_idx] new_df.index.name = "parcel_id" return new_df.reset_index() @staticmethod
BSD 3-Clause New or Revised License
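A usage sketch for the static Developer.merge method from the udst/urbansim record above, using two toy building DataFrames. The column name is hypothetical; the re-indexing behaviour (new rows receive ids above the old maximum) comes from the function body.

import pandas as pd
from urbansim.developer.developer import Developer

old_buildings = pd.DataFrame({'residential_units': [10, 20]}, index=[1, 2])
new_buildings = pd.DataFrame({'residential_units': [5, 8]})

merged = Developer.merge(old_buildings, new_buildings)
print(merged.index.tolist())   # -> [1, 2, 3, 4], indexed as 'building_id'

merged, new_index = Developer.merge(old_buildings, new_buildings, return_index=True)
print(new_index.tolist())      # -> [3, 4], the ids assigned to the new buildings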
gofrendiasgard/kokoropy
kokoropy/packages/sqlalchemy/interfaces.py
ConnectionProxy.rollback_savepoint
python
def rollback_savepoint(self, conn, rollback_savepoint, name, context):
    return rollback_savepoint(name, context)
Intercept rollback_savepoint() events.
https://github.com/gofrendiasgard/kokoropy/blob/49c8ca4b7dd2a084f2ced33fc5987b8a8b62c995/kokoropy/packages/sqlalchemy/interfaces.py#L284-L287
from . import event, util class PoolListener(object): @classmethod def _adapt_listener(cls, self, listener): listener = util.as_interface(listener, methods=('connect', 'first_connect', 'checkout', 'checkin')) if hasattr(listener, 'connect'): event.listen(self, 'connect', listener.connect) if hasattr(listener, 'first_connect'): event.listen(self, 'first_connect', listener.first_connect) if hasattr(listener, 'checkout'): event.listen(self, 'checkout', listener.checkout) if hasattr(listener, 'checkin'): event.listen(self, 'checkin', listener.checkin) def connect(self, dbapi_con, con_record): def first_connect(self, dbapi_con, con_record): def checkout(self, dbapi_con, con_record, con_proxy): def checkin(self, dbapi_con, con_record): class ConnectionProxy(object): @classmethod def _adapt_listener(cls, self, listener): def adapt_execute(conn, clauseelement, multiparams, params): def execute_wrapper(clauseelement, *multiparams, **params): return clauseelement, multiparams, params return listener.execute(conn, execute_wrapper, clauseelement, *multiparams, **params) event.listen(self, 'before_execute', adapt_execute) def adapt_cursor_execute(conn, cursor, statement, parameters, context, executemany): def execute_wrapper( cursor, statement, parameters, context, ): return statement, parameters return listener.cursor_execute( execute_wrapper, cursor, statement, parameters, context, executemany, ) event.listen(self, 'before_cursor_execute', adapt_cursor_execute) def do_nothing_callback(*arg, **kw): pass def adapt_listener(fn): def go(conn, *arg, **kw): fn(conn, do_nothing_callback, *arg, **kw) return util.update_wrapper(go, fn) event.listen(self, 'begin', adapt_listener(listener.begin)) event.listen(self, 'rollback', adapt_listener(listener.rollback)) event.listen(self, 'commit', adapt_listener(listener.commit)) event.listen(self, 'savepoint', adapt_listener(listener.savepoint)) event.listen(self, 'rollback_savepoint', adapt_listener(listener.rollback_savepoint)) event.listen(self, 'release_savepoint', adapt_listener(listener.release_savepoint)) event.listen(self, 'begin_twophase', adapt_listener(listener.begin_twophase)) event.listen(self, 'prepare_twophase', adapt_listener(listener.prepare_twophase)) event.listen(self, 'rollback_twophase', adapt_listener(listener.rollback_twophase)) event.listen(self, 'commit_twophase', adapt_listener(listener.commit_twophase)) def execute(self, conn, execute, clauseelement, *multiparams, **params): return execute(clauseelement, *multiparams, **params) def cursor_execute(self, execute, cursor, statement, parameters, context, executemany): return execute(cursor, statement, parameters, context) def begin(self, conn, begin): return begin() def rollback(self, conn, rollback): return rollback() def commit(self, conn, commit): return commit() def savepoint(self, conn, savepoint, name=None): return savepoint(name=name)
MIT License
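ConnectionProxy in the gofrendiasgard/kokoropy record above is SQLAlchemy's legacy listener interface vendored into kokoropy. A hedged sketch of overriding rollback_savepoint to log before delegating; wiring the proxy into an engine via create_engine(..., proxy=...) is the deprecated SQLAlchemy-era API and is only an assumption about how this vendored copy would be used.

from kokoropy.packages.sqlalchemy.interfaces import ConnectionProxy

class LoggingProxy(ConnectionProxy):
    def rollback_savepoint(self, conn, rollback_savepoint, name, context):
        # Log the event, then delegate to the real rollback exactly as the base class does.
        print('rolling back savepoint %s' % name)
        return rollback_savepoint(name, context)

# Hypothetical wiring (deprecated proxy API):
# engine = create_engine('sqlite://', proxy=LoggingProxy())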
z-zheng/simplecv
simplecv/data/seg_transforms.py
THRandomCrop.__call__
python
def __call__(self, images, masks=None):
    im_h, im_w, _ = images.shape
    c_h, c_w = self.crop_size
    pad_h = c_h - im_h
    pad_w = c_w - im_w
    if pad_h > 0 or pad_w > 0:
        images = F.pad(images, [0, 0, 0, max(pad_w, 0), 0, max(pad_h, 0)], mode='constant', value=0)
        masks = F.pad(masks, [0, max(pad_w, 0), 0, max(pad_h, 0)], mode='constant', value=0)
        im_h, im_w, _ = images.shape
    y_lim = im_h - c_h + 1
    x_lim = im_w - c_w + 1
    ymin = int(np.random.randint(0, y_lim, 1))
    xmin = int(np.random.randint(0, x_lim, 1))
    xmax = xmin + c_w
    ymax = ymin + c_h
    ret = list()
    images_tensor = images[ymin:ymax, xmin:xmax, :]
    ret.append(images_tensor)
    if masks is not None:
        masks_tensor = masks[ymin:ymax, xmin:xmax]
        ret.append(masks_tensor)
    return ret
Args:
    images: 3-D tensor of shape [height, width, channel]
    masks: 2-D tensor of shape [height, width]

Returns:
    images_tensor
    masks_tensor
https://github.com/z-zheng/simplecv/blob/4fa67581441ad150e82b3aa2c394a921f74e4ecd/simplecv/data/seg_transforms.py#L120-L155
import numpy as np import torch import torch.nn.functional as F from simplecv.util import tensor_util from simplecv.data import preprocess class THToTensor(object): def __call__(self, images, masks): return tensor_util.to_tensor([images, masks]) class THNormalize(object): def __init__(self, mean, std): self.mean = mean self.std = std def __call__(self, images, masks=None): images_tensor = preprocess.mean_std_normalize(images, self.mean, self.std) return images_tensor, masks class THRandomRotate90k(object): def __init__(self, p=0.5, k=None): self.p = p self.k = k def __call__(self, images, masks=None): k = int(np.random.choice([1, 2, 3], 1)[0]) if self.k is None else self.k ret = list() images_tensor = torch.rot90(images, k, [0, 1]) ret.append(images_tensor) if masks is not None: masks_tensor = torch.rot90(masks, k, [0, 1]) ret.append(masks_tensor) return ret if len(ret) > 1 else ret[0] class THRandomHorizontalFlip(object): def __init__(self, p=0.5): self.p = p def __call__(self, images, masks=None): ret = list() if self.p < np.random.uniform(): ret.append(images) if masks is not None: ret.append(masks) return ret if len(ret) > 1 else ret[0] images_tensor = torch.flip(images, [1]) ret.append(images_tensor) if masks is not None: masks_tensor = torch.flip(masks, [1]) ret.append(masks_tensor) return ret if len(ret) > 1 else ret[0] class THRandomVerticalFlip(object): def __init__(self, p=0.5): self.p = p def __call__(self, images, masks=None): ret = list() if self.p < np.random.uniform(): ret.append(images) if masks is not None: ret.append(masks) return ret if len(ret) > 1 else ret[0] images_tensor = torch.flip(images, [0]) ret.append(images_tensor) if masks is not None: masks_tensor = torch.flip(masks, [0]) ret.append(masks_tensor) return ret if len(ret) > 1 else ret[0] class THRandomCrop(object): def __init__(self, crop_size=(512, 512)): self.crop_size = crop_size
MIT License
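A usage sketch for THRandomCrop.__call__ from the z-zheng/simplecv record above, cropping a random image/mask tensor pair. The dtypes are a guess; the [height, width, channel] and [height, width] layouts come from the docstring.

import torch
from simplecv.data.seg_transforms import THRandomCrop

crop = THRandomCrop(crop_size=(512, 512))
images = torch.rand(600, 800, 3)            # [height, width, channel]
masks = torch.randint(0, 5, (600, 800))     # [height, width] integer labels
cropped_images, cropped_masks = crop(images, masks)
print(cropped_images.shape, cropped_masks.shape)   # -> (512, 512, 3) and (512, 512)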
huntermcgushion/hyperparameter_hunter
hyperparameter_hunter/i_o/recorders.py
BaseRecorder.format_result
python
def format_result(self):
Set :attr:`BaseRecorder.result` to the final result object to be saved by :meth:`BaseRecorder.save_result`
https://github.com/huntermcgushion/hyperparameter_hunter/blob/28b1d48e01a993818510811b82a677e0a7a232b2/hyperparameter_hunter/i_o/recorders.py#L99-L101
from hyperparameter_hunter.data import OOFDataset, HoldoutDataset, TestDataset from hyperparameter_hunter.i_o.exceptions import EnvironmentInactiveError, EnvironmentInvalidError from hyperparameter_hunter.i_o.leaderboards import GlobalLeaderboard from hyperparameter_hunter.settings import G from hyperparameter_hunter.utils.file_utils import write_json, add_to_json, make_dirs, read_json from hyperparameter_hunter.utils.file_utils import RetryMakeDirs from hyperparameter_hunter.utils.general_utils import subdict from abc import ABCMeta, abstractmethod from collections import OrderedDict from platform import node import shutil from sys import exc_info class BaseRecorder(metaclass=ABCMeta): def __init__(self): self.result_path = None self.result = None try: self.result_path = G.Env.result_paths[self.result_path_key] except AttributeError as _ex: if G.Env is None: raise EnvironmentInactiveError(str(_ex)).with_traceback(exc_info()[2]) if not hasattr(G.Env, "result_paths"): _err_message = f"{_ex!s}\nG.Env missing 'result_paths' attr" raise EnvironmentInvalidError(_err_message).with_traceback(exc_info()[2]) except KeyError as _ex: _err_message = f"{_ex!s}\nG.Env.result_paths missing the key: '{self.result_path_key}'" raise EnvironmentInvalidError(_err_message).with_traceback(exc_info()[2]) if self.result_path is None: return for required_attribute in self.required_attributes: try: setattr(self, required_attribute, getattr(G.Env.current_task, required_attribute)) except AttributeError as _ex: if G.Env.current_task is None: _err_message = f"{_ex!s}\nNo active experiment found" raise EnvironmentInvalidError(_err_message).with_traceback(exc_info()[2]) raise EnvironmentInvalidError(str(_ex)).with_traceback(exc_info()[2]) @property @abstractmethod def result_path_key(self) -> str: @property @abstractmethod def required_attributes(self) -> list: @abstractmethod
MIT License
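format_result in the huntermcgushion/hyperparameter_hunter record above is abstract, which is why the record's function body is empty. A hedged sketch of a concrete recorder that fills in the abstract members visible here; the key name, attribute list, and the save_result counterpart named in the docstring are assumptions, and the class is only usable inside an active hyperparameter_hunter Environment because BaseRecorder.__init__ reads G.Env.

from hyperparameter_hunter.i_o.recorders import BaseRecorder

class DescriptionRecorder(BaseRecorder):
    result_path_key = 'description'          # hypothetical key into G.Env.result_paths
    required_attributes = ['experiment_id']  # hypothetical attributes copied from the current task

    def format_result(self):
        # Build the object that a save_result() implementation would later persist.
        self.result = {'experiment_id': self.experiment_id}

    def save_result(self):                   # counterpart referenced by the docstring above
        print(self.result)

# Instantiation is deferred: BaseRecorder.__init__ requires an active Environment (G.Env).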
jameskbowler/fxcollect
fx_collect/broker/fxcm/tools.py
FXCMOffersTable.get_market_status
python
def get_market_status(self, offer):
    status = self._session.get_offer_trading_status(offer)
    return status
Return the trading status of the given offer, as reported by the FXCM offers table session.
https://github.com/jameskbowler/fxcollect/blob/02d8a552aad55f5002eda864d674a9f0c21dff8f/fx_collect/broker/fxcm/tools.py#L45-L50
import numpy as np try: from fx_collect.utils.date_utils import fm_ole, to_ole except ImportError: from utils.date_utils import fm_ole, to_ole from datetime import datetime, timedelta from .base import AbstractFXCMBroker class FXCMOffersTable(AbstractFXCMBroker): def __init__(self): self._session = self._offers_table() def get_status(self, offers): offers_dict = {} for o in offers: offers_dict[o] = { 'status': self.get_market_status(o), 'timestamp': self.get_offer_timestamp(o) } return offers_dict def get_current_bid_ask(self, offer): bid, ask = self._session.get_bid_ask(offer) if bid > 0 and ask > 0: return bid, ask return None, None def get_offers(self): return self._session.get_offers()
MIT License
qecsim/qecsim
src/qecsim/app.py
merge
python
def merge(*data_list):
    grp_keys = ('code', 'n_k_d', 'error_model', 'decoder', 'error_probability',
                'time_steps', 'measurement_error_probability')
    scalar_val_keys = ('n_run', 'n_fail', 'n_success', 'error_weight_total', 'wall_time')
    scalar_zero_vals = (0, 0, 0, 0, 0.0)
    array_val_keys = ('n_logical_commutations', 'custom_totals',)
    grps_to_scalar_sums = collections.OrderedDict()
    grps_to_array_sums = {}
    for runs_data in itertools.chain(*data_list):
        defaults_0_16 = {'time_steps': 1, 'measurement_error_probability': 0.0}
        defaults_1_0b6 = {'n_logical_commutations': None, 'custom_totals': None}
        runs_data = dict(itertools.chain(defaults_0_16.items(), defaults_1_0b6.items(), runs_data.items()))
        group_id = tuple(tuple(v) if isinstance(v, list) else v for v in (runs_data[k] for k in grp_keys))
        scalar_vals = tuple(runs_data[k] for k in scalar_val_keys)
        scalar_sums = grps_to_scalar_sums.get(group_id, scalar_zero_vals)
        scalar_sums = tuple(sum(x) for x in zip(scalar_vals, scalar_sums))
        grps_to_scalar_sums[group_id] = scalar_sums
        array_vals = tuple(None if runs_data[k] is None else tuple(runs_data[k]) for k in array_val_keys)
        try:
            array_sums = []
            for array_sum, array_val in zip(grps_to_array_sums[group_id], array_vals):
                if array_sum is None and array_val is None:
                    array_sums.append(None)
                elif (array_sum is None or array_val is None) or (len(array_sum) != len(array_val)):
                    raise ValueError('Mismatch between array values to sum: {}, {}'.format(array_sum, array_val))
                else:
                    array_sums.append(tuple(sum(x) for x in zip(array_sum, array_val)))
            array_sums = tuple(array_sums)
        except KeyError:
            array_sums = array_vals
        grps_to_array_sums[group_id] = array_sums
    merged_data_list = [dict(zip(grp_keys + scalar_val_keys + array_val_keys,
                                 group_id + scalar_sums + grps_to_array_sums[group_id]))
                        for group_id, scalar_sums in grps_to_scalar_sums.items()]
    for runs_data in merged_data_list:
        _add_rate_statistics(runs_data)
    return merged_data_list
Merge any number of lists of aggregated runs data.

Notes:

* The runs data is in the format specified in :func:`run` and :func:`run_ftp`.
* Merged data is grouped by: `(code, n_k_d, error_model, decoder, error_probability, time_steps, measurement_error_probability)`.
* The following scalar values are summed: `n_run`, `n_success`, `n_fail`, `error_weight_total`, `wall_time`.
* The following array values are summed: `n_logical_commutations`, `custom_totals`.
* The following values are recalculated: `logical_failure_rate`, `physical_error_rate`.
* The following values are *not* currently recalculated: `error_weight_pvar`.

:param data_list: List of aggregated runs data.
:type data_list: list of dict
:return: Merged list of aggregated runs data.
:rtype: list of dict
:raises ValueError: if there is a mismatch between array values to be summed.
https://github.com/qecsim/qecsim/blob/24d6b8a320b292461b66b68fe4fba40c9ddc2257/src/qecsim/app.py#L535-L602
import collections import itertools import json import logging import statistics import time import numpy as np from qecsim import paulitools as pt from qecsim.error import QecsimError from qecsim.model import DecodeResult logger = logging.getLogger(__name__) def _run_once(mode, code, time_steps, error_model, decoder, error_probability, measurement_error_probability, rng): assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp' step_errors, step_syndromes, step_measurement_errors = [], [], [] for _ in range(time_steps): step_error = error_model.generate(code, error_probability, rng) step_errors.append(step_error) step_syndrome = pt.bsp(step_error, code.stabilizers.T) step_syndromes.append(step_syndrome) if measurement_error_probability: step_measurement_error = rng.choice( (0, 1), size=step_syndrome.shape, p=(1 - measurement_error_probability, measurement_error_probability) ) else: step_measurement_error = np.zeros(step_syndrome.shape, dtype=int) step_measurement_errors.append(step_measurement_error) if logger.isEnabledFor(logging.DEBUG): logger.debug('run: step_errors={}'.format(step_errors)) logger.debug('run: step_syndromes={}'.format(step_syndromes)) logger.debug('run: step_measurement_errors={}'.format(step_measurement_errors)) error = np.bitwise_xor.reduce(step_errors) if logger.isEnabledFor(logging.DEBUG): logger.debug('run: error={}'.format(error)) syndrome = [] for t in range(time_steps): syndrome.append(step_measurement_errors[t - 1] ^ step_syndromes[t] ^ step_measurement_errors[t]) syndrome = np.array(syndrome) if logger.isEnabledFor(logging.DEBUG): logger.debug('run: syndrome={}'.format(syndrome)) ctx = {'error_model': error_model, 'error_probability': error_probability, 'error': error, 'step_errors': step_errors, 'measurement_error_probability': measurement_error_probability, 'step_measurement_errors': step_measurement_errors} if mode == 'ideal': decoding = decoder.decode(code, syndrome[0], **ctx) if mode == 'ftp': decoding = decoder.decode_ftp(code, time_steps, syndrome, **ctx) if logger.isEnabledFor(logging.DEBUG): logger.debug('run: decoding={}'.format(decoding)) if not isinstance(decoding, DecodeResult): decoding = DecodeResult(recovery=decoding) success = decoding.success logical_commutations = decoding.logical_commutations custom_values = decoding.custom_values if decoding.recovery is not None: recovered = decoding.recovery ^ error commutes_with_stabilizers = np.all(pt.bsp(recovered, code.stabilizers.T) == 0) if not commutes_with_stabilizers: log_data = { 'code': repr(code), 'error_model': repr(error_model), 'decoder': repr(decoder), 'error': pt.pack(error), 'recovery': pt.pack(decoding.recovery), 'step_errors': [pt.pack(v) for v in step_errors], 'step_measurement_errors': [pt.pack(v) for v in step_measurement_errors], } logger.warning('RECOVERY DOES NOT RETURN TO CODESPACE: {}'.format(json.dumps(log_data, sort_keys=True))) resolved_logical_commutations = pt.bsp(recovered, code.logicals.T) commutes_with_logicals = np.all(resolved_logical_commutations == 0) resolved_success = commutes_with_stabilizers and commutes_with_logicals success = resolved_success if success is None else success logical_commutations = resolved_logical_commutations if logical_commutations is None else logical_commutations if logger.isEnabledFor(logging.DEBUG): logger.debug('run: success={}'.format(success)) logger.debug('run: logical_commutations={!r}'.format(logical_commutations)) logger.debug('run: custom_values={!r}'.format(custom_values)) data = { 'error_weight': 
pt.bsf_wt(np.array(step_errors)), 'success': bool(success), 'logical_commutations': logical_commutations, 'custom_values': custom_values, } return data def run_once(code, error_model, decoder, error_probability, rng=None): if not (0 <= error_probability <= 1): raise ValueError('Error probability must be in [0, 1].') rng = np.random.default_rng() if rng is None else rng return _run_once('ideal', code, 1, error_model, decoder, error_probability, 0.0, rng) def run_once_ftp(code, time_steps, error_model, decoder, error_probability, measurement_error_probability=None, rng=None): if not time_steps >= 1: raise ValueError('Time steps must be integer >= 1.') if not (0 <= error_probability <= 1): raise ValueError('Error probability must be in [0, 1].') if not (measurement_error_probability is None or (0 <= measurement_error_probability <= 1)): raise ValueError('Measurement error probability must be None or in [0, 1].') if measurement_error_probability is None: measurement_error_probability = 0.0 if time_steps == 1 else error_probability rng = np.random.default_rng() if rng is None else rng return _run_once('ftp', code, time_steps, error_model, decoder, error_probability, measurement_error_probability, rng) def _run(mode, code, time_steps, error_model, decoder, error_probability, measurement_error_probability, max_runs=None, max_failures=None, random_seed=None): assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp' if max_runs is None and max_failures is None: max_runs = 1 if logger.isEnabledFor(logging.DEBUG): logger.debug('run: code={}, time_steps={}, error_model={}, decoder={}, error_probability={},' 'measurement_error_probability={} max_runs={}, max_failures={}, random_seed={}.' .format(code, time_steps, error_model, decoder, error_probability, measurement_error_probability, max_runs, max_failures, random_seed)) wall_time_start = time.perf_counter() runs_data = { 'code': code.label, 'n_k_d': code.n_k_d, 'time_steps': time_steps, 'error_model': error_model.label, 'decoder': decoder.label, 'error_probability': error_probability, 'measurement_error_probability': measurement_error_probability, 'n_run': 0, 'n_success': 0, 'n_fail': 0, 'n_logical_commutations': None, 'custom_totals': None, 'error_weight_total': 0, 'error_weight_pvar': 0.0, 'logical_failure_rate': 0.0, 'physical_error_rate': 0.0, 'wall_time': 0.0, } seed_sequence = np.random.SeedSequence(random_seed) logger.info('run: np.random.SeedSequence.entropy={}'.format(seed_sequence.entropy)) rng = np.random.default_rng(seed_sequence) array_sum_keys = ('n_logical_commutations', 'custom_totals',) array_val_keys = ('logical_commutations', 'custom_values',) error_weights = [] while ((max_runs is None or runs_data['n_run'] < max_runs) and (max_failures is None or runs_data['n_fail'] < max_failures)): data = _run_once(mode, code, time_steps, error_model, decoder, error_probability, measurement_error_probability, rng) runs_data['n_run'] += 1 if data['success']: runs_data['n_success'] += 1 else: runs_data['n_fail'] += 1 for array_sum_key, array_val_key in zip(array_sum_keys, array_val_keys): array_sum = runs_data[array_sum_key] array_val = data[array_val_key] if runs_data['n_run'] == 1 and array_val is not None: array_sum = np.zeros_like(array_val) if array_sum is None and array_val is None: array_sum = None elif (array_sum is None or array_val is None) or (array_sum.shape != array_val.shape): raise QecsimError( 'Mismatch between {} values to sum: {}, {}'.format(array_val_key, array_sum, array_val)) else: array_sum = array_sum + array_val 
runs_data[array_sum_key] = array_sum error_weights.append(data['error_weight']) runs_data['error_weight_total'] = sum(error_weights) runs_data['error_weight_pvar'] = statistics.pvariance(error_weights) _add_rate_statistics(runs_data) for array_sum_key in array_sum_keys: if runs_data[array_sum_key] is not None: runs_data[array_sum_key] = tuple(runs_data[array_sum_key].tolist()) runs_data['wall_time'] = time.perf_counter() - wall_time_start if logger.isEnabledFor(logging.DEBUG): logger.debug('run: aggregated_data={}'.format(runs_data)) return runs_data def run(code, error_model, decoder, error_probability, max_runs=None, max_failures=None, random_seed=None): if not (0 <= error_probability <= 1): raise ValueError('Error probability must be in [0, 1].') return _run('ideal', code, 1, error_model, decoder, error_probability, 0.0, max_runs, max_failures, random_seed) def run_ftp(code, time_steps, error_model, decoder, error_probability, measurement_error_probability=None, max_runs=None, max_failures=None, random_seed=None): if not (0 <= error_probability <= 1): raise ValueError('Error probability must be in [0, 1].') if not time_steps >= 1: raise ValueError('Time steps must be integer >= 1.') if not (measurement_error_probability is None or (0 <= measurement_error_probability <= 1)): raise ValueError('Measurement error probability must be None or in [0, 1].') if measurement_error_probability is None: measurement_error_probability = 0.0 if time_steps == 1 else error_probability return _run('ftp', code, time_steps, error_model, decoder, error_probability, measurement_error_probability, max_runs, max_failures, random_seed) def _add_rate_statistics(runs_data): time_steps = runs_data['time_steps'] n_run = runs_data['n_run'] n_fail = runs_data['n_fail'] error_weight_total = runs_data['error_weight_total'] code_n_qubits = runs_data['n_k_d'][0] runs_data['logical_failure_rate'] = n_fail / n_run runs_data['physical_error_rate'] = error_weight_total / code_n_qubits / time_steps / n_run
BSD 3-Clause New or Revised License
mfitzp/padua
padua/analysis.py
enrichment_from_evidence
python
def enrichment_from_evidence(dfe, modification="Phospho (STY)"):
    dfe = dfe.reset_index().set_index('Experiment')
    dfe['Modifications'] = np.array([modification in m for m in dfe['Modifications']])
    dfe = dfe.set_index('Modifications', append=True)

    dfes = dfe.sum(axis=0, level=[0,1]).T

    columns = dfes.sum(axis=1, level=0).columns
    total = dfes.sum(axis=1, level=0).values.flatten()
    modified = dfes.iloc[0, dfes.columns.get_level_values('Modifications').values].values

    enrichment = modified / total

    return pd.DataFrame([enrichment], columns=columns, index=['% Enrichment'])
Calculate relative enrichment of peptide modifications from evidence.txt.

Given an evidence ``DataFrame``, returns the relative enrichment of the specified modification in the table. The returned data columns are generated from the input data columns.

:param dfe: Pandas ``DataFrame`` of evidence
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L232-L258
import pandas as pd import numpy as np import requests import warnings import scipy as sp from scipy import stats try: import sklearn except ImportError: sklearn = False else: from sklearn.decomposition import PCA try: from StringIO import StringIO except ImportError: from io import StringIO from . import filters, process from .utils import get_protein_id def correlation(df, rowvar=False): df = df.copy() maskv = np.ma.masked_where(np.isnan(df.values), df.values) cdf = np.ma.corrcoef(maskv, rowvar=False) cdf = pd.DataFrame(np.array(cdf)) cdf.columns = df.columns cdf.index = df.columns cdf = cdf.sort_index(level=0, axis=1) cdf = cdf.sort_index(level=0) return cdf def pca(df, n_components=2, mean_center=False, **kwargs): if not sklearn: assert('This library depends on scikit-learn (sklearn) to perform PCA analysis') from sklearn.decomposition import PCA df = df.copy() df[ np.isnan(df) ] = 0 if mean_center: mean = np.mean(df.values, axis=0) df = df - mean pca = PCA(n_components=n_components, **kwargs) pca.fit(df.values.T) scores = pd.DataFrame(pca.transform(df.values.T)).T scores.index = ['Principal Component %d (%.2f%%)' % ( (n+1), pca.explained_variance_ratio_[n]*100 ) for n in range(0, scores.shape[0])] scores.columns = df.columns weights = pd.DataFrame(pca.components_).T weights.index = df.index weights.columns = ['Weights on Principal Component %d' % (n+1) for n in range(0, weights.shape[1])] return scores, weights def plsda(df, a, b, n_components=2, mean_center=False, scale=True, **kwargs): if not sklearn: assert('This library depends on scikit-learn (sklearn) to perform PLS-DA') from sklearn.cross_decomposition import PLSRegression df = df.copy() df[ np.isnan(df) ] = 0 if mean_center: mean = np.mean(df.values, axis=0) df = df - mean sxa, _ = df.columns.get_loc_level(a) sxb, _ = df.columns.get_loc_level(b) dfa = df.iloc[:, sxa] dfb = df.iloc[:, sxb] dff = pd.concat([dfa, dfb], axis=1) y = np.ones(dff.shape[1]) y[np.arange(dfa.shape[1])] = 0 plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs) plsr.fit(dff.values.T, y) x_scores = plsr.transform(df.values.T) scores = pd.DataFrame(x_scores.T) scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])] scores.columns = df.columns weights = pd.DataFrame(plsr.x_weights_) weights.index = df.index weights.columns = ['Weights on Latent Variable %d' % (n+1) for n in range(0, weights.shape[1])] loadings = pd.DataFrame(plsr.x_loadings_) loadings.index = df.index loadings.columns = ['Loadings on Latent Variable %d' % (n+1) for n in range(0, loadings.shape[1])] return scores, weights, loadings def plsr(df, v, n_components=2, mean_center=False, scale=True, **kwargs): if not sklearn: assert('This library depends on scikit-learn (sklearn) to perform PLS-DA') from sklearn.cross_decomposition import PLSRegression df = df.copy() df[ np.isnan(df) ] = 0 if mean_center: mean = np.mean(df.values, axis=0) df = df - mean plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs) plsr.fit(df.values.T, v) scores = pd.DataFrame(plsr.x_scores_.T) scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])] scores.columns = df.columns weights = pd.DataFrame(plsr.x_weights_) weights.index = df.index weights.columns = ['Weights on Latent Variable %d' % (n+1) for n in range(0, weights.shape[1])] loadings = pd.DataFrame(plsr.x_loadings_) loadings.index = df.index loadings.columns = ['Loadings on Latent Variable %d' % (n+1) for n in range(0, loadings.shape[1])] predicted = plsr.predict(df.values.T) 
return scores, weights, loadings, predicted def _non_zero_sum(df): dfo = df.sum(axis=0, level=0) for c in df.columns.values: dft = df[c] dfo[c] = dft[ dft > 0].sum(axis=0, level=0) return dfo
BSD 2-Clause Simplified License
matttunny/aws-transit-gateway-demo-multiaccount
share-resources/docutils/parsers/rst/__init__.py
Directive.directive_error
python
def directive_error(self, level, message):
    return DirectiveError(level, message)
Return a DirectiveError suitable for being thrown as an exception. Call "raise self.directive_error(level, message)" from within a directive implementation to return one single system message at level `level`, which automatically gets the directive block and the line number added. Preferably use the `debug`, `info`, `warning`, `error`, or `severe` wrapper methods, e.g. ``self.error(message)`` to generate an ERROR-level directive error.
https://github.com/matttunny/aws-transit-gateway-demo-multiaccount/blob/5766f8b96ee5c6af22eea02c79c580493ad5e880/share-resources/docutils/parsers/rst/__init__.py#L337-L350
__docformat__ = 'reStructuredText' import docutils.parsers import docutils.statemachine from docutils.parsers.rst import states from docutils import frontend, nodes, Component from docutils.transforms import universal class Parser(docutils.parsers.Parser): supported = ('restructuredtext', 'rst', 'rest', 'restx', 'rtxt', 'rstx') settings_spec = ( 'reStructuredText Parser Options', None, (('Recognize and link to standalone PEP references (like "PEP 258").', ['--pep-references'], {'action': 'store_true', 'validator': frontend.validate_boolean}), ('Base URL for PEP references ' '(default "http://www.python.org/dev/peps/").', ['--pep-base-url'], {'metavar': '<URL>', 'default': 'http://www.python.org/dev/peps/', 'validator': frontend.validate_url_trailing_slash}), ('Template for PEP file part of URL. (default "pep-%04d")', ['--pep-file-url-template'], {'metavar': '<URL>', 'default': 'pep-%04d'}), ('Recognize and link to standalone RFC references (like "RFC 822").', ['--rfc-references'], {'action': 'store_true', 'validator': frontend.validate_boolean}), ('Base URL for RFC references (default "http://tools.ietf.org/html/").', ['--rfc-base-url'], {'metavar': '<URL>', 'default': 'http://tools.ietf.org/html/', 'validator': frontend.validate_url_trailing_slash}), ('Set number of spaces for tab expansion (default 8).', ['--tab-width'], {'metavar': '<width>', 'type': 'int', 'default': 8, 'validator': frontend.validate_nonnegative_int}), ('Remove spaces before footnote references.', ['--trim-footnote-reference-space'], {'action': 'store_true', 'validator': frontend.validate_boolean}), ('Leave spaces before footnote references.', ['--leave-footnote-reference-space'], {'action': 'store_false', 'dest': 'trim_footnote_reference_space'}), ('Disable directives that insert the contents of external file ' '("include" & "raw"); replaced with a "warning" system message.', ['--no-file-insertion'], {'action': 'store_false', 'default': 1, 'dest': 'file_insertion_enabled', 'validator': frontend.validate_boolean}), ('Enable directives that insert the contents of external file ' '("include" & "raw"). Enabled by default.', ['--file-insertion-enabled'], {'action': 'store_true'}), ('Disable the "raw" directives; replaced with a "warning" ' 'system message.', ['--no-raw'], {'action': 'store_false', 'default': 1, 'dest': 'raw_enabled', 'validator': frontend.validate_boolean}), ('Enable the "raw" directive. Enabled by default.', ['--raw-enabled'], {'action': 'store_true'}), ('Token name set for parsing code with Pygments: one of ' '"long", "short", or "none (no parsing)". Default is "long".', ['--syntax-highlight'], {'choices': ['long', 'short', 'none'], 'default': 'long', 'metavar': '<format>'}), ('Change straight quotation marks to typographic form: ' 'one of "yes", "no", "alt[ernative]" (default "no").', ['--smart-quotes'], {'default': False, 'metavar': '<yes/no/alt>', 'validator': frontend.validate_ternary}), ('Characters to use as "smart quotes" for <language>. ', ['--smartquotes-locales'], {'metavar': '<language:quotes[,language:quotes,...]>', 'action': 'append', 'validator': frontend.validate_smartquotes_locales}), ('Inline markup recognized at word boundaries only ' '(adjacent to punctuation or whitespace). ' 'Force character-level inline markup recognition with ' '"\\ " (backslash + space). Default.', ['--word-level-inline-markup'], {'action': 'store_false', 'dest': 'character_level_inline_markup'}), ('Inline markup recognized anywhere, regardless of surrounding ' 'characters. 
Backslash-escapes must be used to avoid unwanted ' 'markup recognition. Useful for East Asian languages. ' 'Experimental.', ['--character-level-inline-markup'], {'action': 'store_true', 'default': False, 'dest': 'character_level_inline_markup'}), )) config_section = 'restructuredtext parser' config_section_dependencies = ('parsers',) def __init__(self, rfc2822=False, inliner=None): if rfc2822: self.initial_state = 'RFC2822Body' else: self.initial_state = 'Body' self.state_classes = states.state_classes self.inliner = inliner def get_transforms(self): return Component.get_transforms(self) + [ universal.SmartQuotes] def parse(self, inputstring, document): self.setup_parse(inputstring, document) self.statemachine = states.RSTStateMachine( state_classes=self.state_classes, initial_state=self.initial_state, debug=document.reporter.debug_flag) inputlines = docutils.statemachine.string2lines( inputstring, tab_width=document.settings.tab_width, convert_whitespace=True) self.statemachine.run(inputlines, document, inliner=self.inliner) self.finish_parse() class DirectiveError(Exception): def __init__(self, level, message): Exception.__init__(self) self.level = level self.msg = message class Directive(object): required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False option_spec = None has_content = False def __init__(self, name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): self.name = name self.arguments = arguments self.options = options self.content = content self.lineno = lineno self.content_offset = content_offset self.block_text = block_text self.state = state self.state_machine = state_machine def run(self): raise NotImplementedError('Must override run() is subclass.')
MIT License
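A minimal sketch of how a custom directive would use the error helpers described above; the NoteBox directive, its name, and its registration are hypothetical and not part of docutils itself.

from docutils import nodes
from docutils.parsers.rst import Directive, directives

class NoteBox(Directive):
    # hypothetical example directive, not part of docutils
    has_content = True

    def run(self):
        if not self.content:
            # self.error() wraps directive_error(); raising the returned
            # DirectiveError makes docutils emit a single ERROR-level system
            # message with the directive block and line number attached.
            raise self.error("The 'notebox' directive requires content.")
        node = nodes.note()
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]

directives.register_directive("notebox", NoteBox)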
intel/dffml
dffml/service/dev.py
Install.dep_check
python
def dep_check(
    plugin_deps: Dict[Tuple[str, str], Dict[str, Callable[[], bool]]],
    skip: Optional[List[Tuple[str, str]]] = None,
):
    if skip is None:
        skip = []
    missing_deps = {}
    for package, deps in plugin_deps.items():
        plugin_path = "/".join(package)
        if plugin_path in skip:
            continue
        missing_plugin_deps = {
            name: check_if_dep_found()
            for name, check_if_dep_found in deps.items()
        }
        if not all(missing_plugin_deps.values()):
            missing_deps[plugin_path] = [
                name
                for name, found in missing_plugin_deps.items()
                if not found
            ]
    if missing_deps:
        msg = "The following plugins have unmet dependencies and could not be installed\n\n"
        for plugin_path, deps in missing_deps.items():
            msg += f" {plugin_path}\n\n"
            for name in deps:
                msg += f" {name}\n"
            msg += "\n"
        msg += "Install missing dependencies and re-run plugin install, or skip with\n\n"
        msg += " -skip "
        msg += " ".join(missing_deps.keys())
        raise MissingDependenciesError(msg)
Check if all dependencies are installed prior to running setup.py installs of plugins
https://github.com/intel/dffml/blob/e7a356dfe8fd6fdf3cac7f8c218abc7d650fd93c/dffml/service/dev.py#L370-L406
import os import re import sys import ast import json import pydoc import shutil import asyncio import pathlib import getpass import tempfile import platform import functools import importlib import itertools import contextlib import http.server import configparser import socketserver import pkg_resources import unittest.mock import urllib.request import importlib.util from pathlib import Path from typing import Callable, Dict, List, Optional, Tuple from ..util.os import chdir, MODE_BITS_SECURE from ..version import VERSION from ..util.skel import Skel, SkelTemplateConfig from ..util.cli.cmd import CMD from ..util.entrypoint import load from ..base import MissingConfig, config as configdataclass, field from ..util.packaging import is_develop from ..util.net import cached_download from ..util.data import traverse_config_get, export from ..util.subprocess import run_command from ..df.types import Input, DataFlow from ..df.memory import MemoryOrchestrator from ..configloader.configloader import BaseConfigLoader from ..configloader.json import JSONConfigLoader from ..operation.output import GetSingle from ..plugins import ( CORE_PLUGINS, CORE_PLUGIN_DEPS, PACKAGE_NAMES_TO_DIRECTORY, PACKAGE_DIRECTORY_TO_NAME, ) config = configparser.ConfigParser() config.read(Path("~", ".gitconfig").expanduser()) USER = "unknown" with contextlib.suppress(KeyError): USER = getpass.getuser() NAME = config.get("user", "name", fallback="Unknown") EMAIL = config.get("user", "email", fallback="unknown@example.com") REPO_ROOT = pathlib.Path(__file__).parents[2] async def get_cmd_output(cmd: List[str]): print(f"$ {' '.join(cmd)}") proc = await asyncio.create_subprocess_shell( " ".join(cmd), stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=REPO_ROOT, ) await proc.wait() stdout, stderr = await proc.communicate() if proc.returncode != 0: raise RuntimeError(stderr) output = stdout.decode().strip() return output def create_from_skel(plugin_type): @configdataclass class CreateCMDConfig: package: str = field("Name of python package to create") user: str = field(f"Your username (default: {USER})", default=USER) name: str = field( f"Your name (default: {NAME})", default=NAME, ) email: str = field( f"Your email (default: {EMAIL})", default=EMAIL, ) description: str = field( f"Description of python package (default: DFFML {plugin_type} {{package name}})", default=None, ) target: str = field( f"Directory to put code in (default: same as package name)", default=None, ) class CreateCMD(CMD): skel = Skel() CONFIG = CreateCMDConfig async def run(self): if not self.description: self.description = f"DFFML {plugin_type} {self.package}" if not self.target: self.target = self.package self.skel.from_template( plugin_type, self.target, SkelTemplateConfig( org=self.user, package=self.package, description=self.description, name=self.name, email=self.email, dffml_version=VERSION, ), ) for cmd in [ ["git", "init"], ["git", "add", "-A"], ["git", "commit", "-snm", "housekeeping: Initial Commit"], ]: await run_command( cmd, logger=self.logger, cwd=str(self.target) ) return CreateCMD class Create(CMD): model = create_from_skel("model") operations = create_from_skel("operations") service = create_from_skel("service") source = create_from_skel("source") config = create_from_skel("config") blank = create_from_skel("blank") class Link(CMD): skel = Skel() async def run(self): for plugin in self.skel.plugins(): self.skel.create_symlinks(plugin) class Skeleton(CMD): link = Link @configdataclass class RunConfig: operation: str = 
field("Python path to operation") class Run(CMD): CONFIG = RunConfig async def run(self): sys.path.insert(0, os.getcwd()) modname, qualname_separator, qualname = self.operation.partition(":") obj = importlib.import_module(modname) if qualname_separator: for attr in qualname.split("."): obj = getattr(obj, attr) self.logger.debug("Loaded operation: %s(%s)", attr, obj) return await self.run_op(attr, obj) def config_get(self, op, key, definition): try: value = traverse_config_get(self.extra_config, key) except KeyError as error: raise MissingConfig("%s missing %s" % (op.name, key)) if "Dict" in definition.primitive: self.logger.critical( "Dict / spec'd arguments are not yet completely handled" ) value = json.loads(value[0]) else: typecast = pydoc.locate( definition.primitive.replace("List[", "").replace("]", "") ) if definition.primitive.startswith("List["): value = list(map(typecast, value)) else: value = typecast(value[0]) if typecast is str and value in ["True", "False"]: raise MissingConfig("%s missing %s" % (op.name, key)) return value async def run_op(self, name, opimp): inputs = [] for name, definition in opimp.op.inputs.items(): try: inputs.append( Input( value=self.config_get(opimp.op, name, definition), definition=definition, ) ) except MissingConfig as error: error.args = (f"{opimp.op.inputs}: {error.args[0]}",) raise error config = {} extra_config = self.extra_config for i in range(0, 2): if "config" in extra_config and len(extra_config["config"]): extra_config = extra_config["config"] if extra_config: config = extra_config dataflow = DataFlow.auto(GetSingle, opimp) if config: dataflow.configs[opimp.op.name] = config async with MemoryOrchestrator.withconfig({}) as orchestrator: async with orchestrator(dataflow) as octx: async for ctx, results in octx.run( [ Input( value=[ definition.name for definition in opimp.op.outputs.values() ], definition=GetSingle.op.inputs["spec"], ), *inputs, ] ): return results @configdataclass class ListEntrypointsConfig: entrypoint: str = field("Entrypoint to list, example: dffml.model") class ListEntrypoints(CMD): CONFIG = ListEntrypointsConfig async def run(self): for entrypoint in pkg_resources.iter_entry_points(self.entrypoint): print(f"{entrypoint} -> {entrypoint.dist!r}") class Entrypoints(CMD): _list = ListEntrypoints @configdataclass class ExportConfig: export: str = field("Python path to object to export",) configloader: BaseConfigLoader = field( "ConfigLoader to use", default=JSONConfigLoader, ) not_linked: bool = field( "Do not export dataflows as linked", default=False, action="store_true", ) class Export(CMD): CONFIG = ExportConfig async def run(self): async with self.configloader() as configloader: async with configloader() as loader: for obj in load(self.export, relative=os.getcwd()): self.logger.debug("Loaded %s: %s", self.export, obj) if isinstance(obj, DataFlow): sys.stdout.buffer.write( await loader.dumpb( obj.export(linked=not self.not_linked) ) ) elif hasattr(obj, "export"): sys.stdout.buffer.write( await loader.dumpb(obj.export()) ) elif hasattr(obj, "_asdict"): sys.stdout.buffer.write( await loader.dumpb(obj._asdict()) ) else: sys.stdout.buffer.write( await loader.dumpb(export(obj)) ) class MissingDependenciesError(Exception): @configdataclass class InstallConfig: skip: List[str] = field( "List of plugin paths not to install (Example: model/scikit)", default_factory=lambda: [], ) nocheck: bool = field( "Do not perform pre-install dependency checks", default=False ) user: bool = field( "Perform user install", default=False, 
action="store_true" ) class Install(CMD): CONFIG = InstallConfig @staticmethod
MIT License
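A minimal sketch of the input shape dep_check expects; the plugin paths and dependency names below are illustrative, not necessarily entries of dffml's real CORE_PLUGIN_DEPS table.

import importlib.util
from typing import Callable, Dict, Tuple

def have_module(name: str) -> Callable[[], bool]:
    # Zero-argument checker, matching Dict[str, Callable[[], bool]].
    return lambda: importlib.util.find_spec(name) is not None

plugin_deps: Dict[Tuple[str, str], Dict[str, Callable[[], bool]]] = {
    ("model", "scikit"): {"sklearn": have_module("sklearn")},
    ("model", "vowpalWabbit"): {"vowpalwabbit": have_module("vowpalwabbit")},
}

# Install.dep_check(plugin_deps, skip=["model/vowpalWabbit"]) would then raise
# MissingDependenciesError only if a non-skipped plugin's checker returns False.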
ubunatic/ohlc
ohlc/cli.py
ArgumentParser.add_parse_callback
python
def add_parse_callback(p, fn):
    if p.callbacks is None:
        p.callbacks = []
    p.callbacks.append(fn)
add_parse_callback adds the given callback, which is executed once after the parser's arguments have been parsed
https://github.com/ubunatic/ohlc/blob/b6cece5e7745bedf7e30da1df5584a9f338556cb/ohlc/cli.py#L57-L61
import argparse, logging, sys log = logging.getLogger(__name__) def setup_logging(args): if getattr(args,'debug', False): level = logging.DEBUG else: level = logging.INFO log.debug('setup logging') logging.basicConfig(level=level) class ArgumentParser(argparse.ArgumentParser): callbacks = None opti = argparse.ArgumentParser.add_argument def flag(p, flag, *args, **kwargs): if 'action' not in kwargs: kwargs['action'] = 'store_true' return p.add_argument(flag, *args, **kwargs) def with_debug(p): p.add_argument('--debug', help='enable debug log', action='store_true') return p def with_version(p): p.add_argument('--version', help='show version info', action='store_true') return p def with_input(p, default='-', nargs='?', help='input file descriptor', **argparse_args): p.add_argument('input', help=help, default=default, nargs=nargs, **argparse_args) return p def with_logging(p): p.add_parse_callback(p.setup_logging) return p def setup_logging(p): args, _ = p.parse_known_args() setup_logging(args) def show_version(p): args, _ = p.parse_known_args() if args.version: py = sys.version.split('\n')[0] ver = '0.0.0' sys.stdout.write('{} {}\n'.format(ver, py)) sys.exit(0)
MIT License
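A hedged usage sketch for the ohlc.cli.ArgumentParser helpers above; it assumes the parse step (not shown in this excerpt) runs the registered callbacks after parsing, and relies on the with_* helpers returning the parser as they do in the excerpt.

from ohlc.cli import ArgumentParser

p = ArgumentParser(description='demo tool')
p.with_debug()        # adds --debug
p.with_version()      # adds --version
p.with_logging()      # registers p.setup_logging as a parse callback
p.add_parse_callback(p.show_version)   # exit early when --version is given

args = p.parse_args(['--debug'])  # callbacks are assumed to run around this call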
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/sensor/zha.py
Sensor.should_poll
python
def should_poll(self) -> bool:
    return False
State gets pushed from the device.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/sensor/zha.py#L71-L73
import asyncio import logging from homeassistant.components.sensor import DOMAIN from homeassistant.components import zha from homeassistant.const import TEMP_CELSIUS from homeassistant.util.temperature import convert as convert_temperature _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['zha'] @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): discovery_info = zha.get_discovery_info(hass, discovery_info) if discovery_info is None: return sensor = yield from make_sensor(discovery_info) async_add_devices([sensor], update_before_add=True) @asyncio.coroutine def make_sensor(discovery_info): from zigpy.zcl.clusters.measurement import ( RelativeHumidity, TemperatureMeasurement, PressureMeasurement, IlluminanceMeasurement ) from zigpy.zcl.clusters.smartenergy import Metering in_clusters = discovery_info['in_clusters'] if RelativeHumidity.cluster_id in in_clusters: sensor = RelativeHumiditySensor(**discovery_info) elif TemperatureMeasurement.cluster_id in in_clusters: sensor = TemperatureSensor(**discovery_info) elif PressureMeasurement.cluster_id in in_clusters: sensor = PressureSensor(**discovery_info) elif IlluminanceMeasurement.cluster_id in in_clusters: sensor = IlluminanceMeasurementSensor(**discovery_info) elif Metering.cluster_id in in_clusters: sensor = MeteringSensor(**discovery_info) else: sensor = Sensor(**discovery_info) if discovery_info['new_join']: cluster = list(in_clusters.values())[0] yield from cluster.bind() yield from cluster.configure_reporting( sensor.value_attribute, 300, 600, sensor.min_reportable_change, ) return sensor class Sensor(zha.Entity): _domain = DOMAIN value_attribute = 0 min_reportable_change = 1 @property
MIT License
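A generic sketch of the push-style entity pattern that should_poll = False enables in Home Assistant; this is illustrative, not the actual zha sensor implementation, and handle_new_value is a hypothetical hook name.

from homeassistant.helpers.entity import Entity

class PushSensor(Entity):
    """Sensor whose state is pushed by the integration instead of polled."""

    def __init__(self):
        self._state = None

    @property
    def should_poll(self) -> bool:
        # Tell Home Assistant not to schedule periodic update() calls.
        return False

    @property
    def state(self):
        return self._state

    def handle_new_value(self, value):
        # Called by the integration when the device reports a new reading.
        self._state = value
        self.schedule_update_ha_state()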
loglabs/mltrace
mltrace/client.py
backtrace
python
def backtrace(output_pointer: str):
    store = Store(_db_uri)
    trace = store.trace(output_pointer)

    component_runs = []
    for depth, cr in trace:
        inputs = [IOPointer.from_dictionary(iop.__dict__) for iop in cr.inputs]
        outputs = [
            IOPointer.from_dictionary(iop.__dict__) for iop in cr.outputs
        ]
        dependencies = [dep.component_name for dep in cr.dependencies]
        d = copy.deepcopy(cr.__dict__)
        d.update(
            {
                "inputs": inputs,
                "outputs": outputs,
                "dependencies": dependencies,
            }
        )
        component_runs.append((depth, ComponentRun.from_dictionary(d)))

    return component_runs
Returns the trace for an output id as a list of tuples (level, ComponentRun), where level is how many hops away the node is from the node that produced the output_id.
https://github.com/loglabs/mltrace/blob/87d1249eeb3579a57f1dfed56d96a879db318a51/mltrace/client.py#L547-L572
from datetime import datetime from mltrace.db import Store, PointerTypeEnum from mltrace.entities import Component, ComponentRun, IOPointer import copy import functools import git import inspect import logging import os import sys import typing import uuid def _set_address_helper(old_uri: str, address: str): first = old_uri.split("@")[0] last = old_uri.split("@")[1].split(":")[1] return first + "@" + address + ":" + last _db_uri = os.environ.get("DB_URI") if _db_uri is None: _db_uri = "postgresql://admin:admin@localhost:5432/sqlalchemy" if os.environ.get("DB_SERVER"): _db_uri = _set_address_helper(_db_uri, os.environ.get("DB_SERVER")) else: logging.warning( f"Please set DB_URI or DB_SERVER as an environment variable. \ Otherwise, DB_URI is set to {_db_uri}." ) def set_db_uri(uri: str): global _db_uri _db_uri = uri def get_db_uri() -> str: global _db_uri return _db_uri def set_address(address: str): global _db_uri _db_uri = _set_address_helper(_db_uri, address) def clean_db(): store = Store(_db_uri, delete_first=True) def create_component( name: str, description: str, owner: str, tags: typing.List[str] = [] ): store = Store(_db_uri) store.create_component(name, description, owner, tags) def tag_component(component_name: str, tags: typing.List[str]): store = Store(_db_uri) store.add_tags_to_component(component_name, tags) def log_component_run( component_run: ComponentRun, set_dependencies_from_inputs=True, staleness_threshold: int = (60 * 60 * 24 * 30), ): store = Store(_db_uri) component_run_dict = component_run.to_dictionary() component_run_sql = store.initialize_empty_component_run( component_run.component_name ) if component_run_dict["start_timestamp"]: component_run_sql.set_start_timestamp( component_run_dict["start_timestamp"] ) if component_run_dict["end_timestamp"]: component_run_sql.set_end_timestamp( component_run_dict["end_timestamp"] ) if component_run_dict["notes"]: component_run_sql.add_notes(component_run_dict["notes"]) component_run_sql.set_git_hash(component_run_dict["git_hash"]) component_run_sql.set_git_tags(component_run_dict["git_tags"]) component_run_sql.set_code_snapshot(component_run_dict["code_snapshot"]) component_run_sql.add_inputs( [ store.get_io_pointer( inp.name, inp.value, pointer_type=inp.pointer_type ) for inp in component_run_dict["inputs"] ] ) component_run_sql.add_outputs( [ store.get_io_pointer( out.name, out.value, pointer_type=out.pointer_type ) for out in component_run_dict["outputs"] ] ) create_component(component_run.component_name, "", "") if set_dependencies_from_inputs: store.set_dependencies_from_inputs(component_run_sql) for dependency in component_run_dict["dependencies"]: cr = store.get_history(dependency, 1)[0] component_run_sql.set_upstream(cr) store.commit_component_run( component_run_sql, staleness_threshold=staleness_threshold ) def create_random_ids(num_outputs=1) -> typing.List[str]: return [str(uuid.uuid4()) for _ in range(num_outputs)] def register( component_name: str, inputs: typing.List[str] = [], outputs: typing.List[str] = [], input_vars: typing.List[str] = [], output_vars: typing.List[str] = [], input_kwargs: typing.Dict[str, str] = {}, output_kwargs: typing.Dict[str, str] = {}, endpoint: bool = False, staleness_threshold: int = (60 * 60 * 24 * 30), ): def actual_decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): filename = inspect.getfile(func) function_name = func.__name__ store = Store(_db_uri) component_run = store.initialize_empty_component_run( component_name ) component_run.set_start_timestamp() frame 
= None trace = sys.gettrace() def trace_helper(_frame, event, arg): nonlocal frame if frame is None and event == "call": frame = _frame sys.settrace(trace) return trace sys.settrace(trace_helper) try: value = func(*args, **kwargs) finally: sys.settrace(trace) logging.info(f"Inspecting {frame.f_code.co_filename}") input_pointers = [] output_pointers = [] local_vars = frame.f_locals for var in input_vars: if var not in local_vars: raise ValueError( f"Variable {var} not in current stack frame." ) val = local_vars[var] if val is None: logging.debug(f"Variable {var} has value {val}.") continue if isinstance(val, list): input_pointers += store.get_io_pointers(val) else: input_pointers.append(store.get_io_pointer(str(val))) for var in output_vars: if var not in local_vars: raise ValueError( f"Variable {var} not in current stack frame." ) val = local_vars[var] if val is None: logging.debug(f"Variable {var} has value {val}.") continue if isinstance(val, list): output_pointers += ( store.get_io_pointers( val, pointer_type=PointerTypeEnum.ENDPOINT ) if endpoint else store.get_io_pointers(val) ) else: output_pointers += ( [ store.get_io_pointer( str(val), pointer_type=PointerTypeEnum.ENDPOINT ) ] if endpoint else [store.get_io_pointer(str(val))] ) for key, val in input_kwargs.items(): if key not in local_vars or val not in local_vars: raise ValueError( f"Variables ({key}, {val}) not in current stack frame." ) if local_vars[key] is None: logging.debug( f"Variable {key} has value {local_vars[key]}." ) continue if isinstance(local_vars[key], list): if not isinstance(local_vars[val], list) or len( local_vars[key] ) != len(local_vars[val]): raise ValueError( f'Value "{val}" does not have the same length as' + f' the key "{key}."' ) input_pointers += store.get_io_pointers( local_vars[key], values=local_vars[val] ) else: input_pointers.append( store.get_io_pointer( str(local_vars[key]), local_vars[val] ) ) for key, val in output_kwargs.items(): if key not in local_vars or val not in local_vars: raise ValueError( f"Variables ({key}, {val}) not in current stack frame." ) if local_vars[key] is None: logging.debug( f"Variable {key} has value {local_vars[key]}." 
) continue if isinstance(local_vars[key], list): if not isinstance(local_vars[val], list) or len( local_vars[key] ) != len(local_vars[val]): raise ValueError( f'Value "{val}" does not have the same length as' + f' the key "{key}."' ) output_pointers += ( store.get_io_pointers( local_vars[key], local_vars[val], pointer_type=PointerTypeEnum.ENDPOINT, ) if endpoint else store.get_io_pointers( local_vars[key], local_vars[val] ) ) else: output_pointers += ( [ store.get_io_pointer( str(local_vars[key]), local_vars[val], pointer_type=PointerTypeEnum.ENDPOINT, ) ] if endpoint else [ store.get_io_pointer( str(local_vars[key]), local_vars[val] ) ] ) component_run.add_inputs(input_pointers) component_run.add_outputs(output_pointers) component_run.set_end_timestamp() try: repo = git.Repo(search_parent_directories=True) component_run.set_git_hash(str(repo.head.object.hexsha)) except Exception as e: logging.info("No git repo found.") if get_git_tags() is not None: component_run.set_git_tags(get_git_tags()) func_source_code = inspect.getsource(func) if len(func_source_code) < 2 ** 16: component_run.set_code_snapshot( bytes(func_source_code, "ascii") ) input_pointers = [store.get_io_pointer(inp) for inp in inputs] output_pointers = ( [ store.get_io_pointer( out, pointer_type=PointerTypeEnum.ENDPOINT ) for out in outputs ] if endpoint else [store.get_io_pointer(out) for out in outputs] ) component_run.add_inputs(input_pointers) component_run.add_outputs(output_pointers) create_component(component_run.component_name, "", "") store.set_dependencies_from_inputs(component_run) store.commit_component_run( component_run, staleness_threshold=staleness_threshold ) return value return wrapper return actual_decorator def get_git_hash() -> str: try: repo = git.Repo(search_parent_directories=True) return str(repo.head.object.hexsha) except Exception as e: logging.info("No git repo found.") return None def get_git_tags() -> str: try: tagmap = {} repo = git.Repo(search_parent_directories=True) for t in repo.tags: tagmap.setdefault(repo.commit(t), []).append(t) tags = tagmap[repo.commit(repo.head.object.hexsha)] return [tag.name for tag in tags] except Exception as e: logging.info("No git tag found") def add_notes_to_component_run(component_run_id: str, notes: str) -> str: store = Store(_db_uri) return store.add_notes_to_component_run(component_run_id, notes) def flag_output_id(output_id: str) -> bool: store = Store(_db_uri) return store.set_io_pointer_flag(output_id, True) def unflag_output_id(output_id: str) -> bool: store = Store(_db_uri) return store.set_io_pointer_flag(output_id, False) def unflag_all(): store = Store(_db_uri) store.unflag_all() def get_history( component_name: str, limit: int = 10, date_lower: typing.Union[datetime, str] = datetime.min, date_upper: typing.Union[datetime, str] = datetime.max, ) -> typing.List[ComponentRun]: store = Store(_db_uri) if not date_lower: date_lower = datetime.min if not date_upper: date_upper = datetime.max history = store.get_history(component_name, limit, date_lower, date_upper) component_runs = [] for cr in history: inputs = [ IOPointer.from_dictionary(iop.__dict__).to_dictionary() for iop in cr.inputs ] outputs = [ IOPointer.from_dictionary(iop.__dict__).to_dictionary() for iop in cr.outputs ] dependencies = [dep.component_name for dep in cr.dependencies] d = copy.deepcopy(cr.__dict__) d.update( { "inputs": inputs, "outputs": outputs, "dependencies": dependencies, } ) component_runs.append(ComponentRun.from_dictionary(d)) return component_runs def 
get_component_information(component_name: str) -> Component: store = Store(_db_uri) c = store.get_component(component_name) if not c: raise RuntimeError(f"Component with name {component_name} not found.") tags = [tag.name for tag in c.tags] d = copy.deepcopy(c.__dict__) d.update({"tags": tags}) return Component.from_dictionary(d) def get_component_run_information(component_run_id: str) -> ComponentRun: store = Store(_db_uri) cr = store.get_component_run(component_run_id) if not cr: raise RuntimeError(f"Component run with id {id} not found.") inputs = [ IOPointer.from_dictionary(iop.__dict__).to_dictionary() for iop in cr.inputs ] outputs = [ IOPointer.from_dictionary(iop.__dict__).to_dictionary() for iop in cr.outputs ] dependencies = [dep.component_name for dep in cr.dependencies] d = copy.deepcopy(cr.__dict__) if cr.code_snapshot: d.update({"code_snapshot": str(cr.code_snapshot.decode("utf-8"))}) d.update( {"inputs": inputs, "outputs": outputs, "dependencies": dependencies} ) return ComponentRun.from_dictionary(d) def get_components(tag="", owner="") -> typing.List[Component]: store = Store(_db_uri) res = store.get_components(tag=tag, owner=owner) components = [] for c in res: tags = [tag.name for tag in c.tags] d = copy.deepcopy(c.__dict__) d.update({"tags": tags}) components.append(Component.from_dictionary(d)) return components def get_recent_run_ids(limit: int = 5, last_run_id=None): store = Store(_db_uri) return store.get_recent_run_ids(limit, last_run_id) def get_io_pointer( io_pointer_id: str, io_pointer_val: typing.Any = None, create=True ): store = Store(_db_uri) iop = store.get_io_pointer(io_pointer_id, io_pointer_val, create=create) return IOPointer.from_dictionary(iop.__dict__) def get_all_tags() -> typing.List[str]: store = Store(_db_uri) res = store.get_all_tags() tags = [t.name for t in res] return tags
Apache License 2.0
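A hedged usage sketch for the backtrace function above; the output id is a placeholder, the database URI is the default shown in the excerpt, and the import path follows the module path given in the record.

from mltrace.client import set_db_uri, backtrace

set_db_uri("postgresql://admin:admin@localhost:5432/sqlalchemy")  # default URI from the excerpt

for depth, component_run in backtrace("some_output_id"):  # placeholder output id
    # Depth 0 is the component run that produced the output pointer.
    print("  " * depth + component_run.component_name)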
rlenglet/openfaucet
src/openfaucet/ofcontroller.py
IOpenflowController.handle_port_status
python
def handle_port_status(reason, desc):
Handle the reception of an OFPT_PORT_STATUS message.

Args:
  reason: The reason for the port status change, either OFPPR_ADD (the port was added), OFPPR_DELETE (the port was removed), or OFPPR_MODIFY (some attribute of the port has changed).
  desc: A PhyPort object defining the physical port.
https://github.com/rlenglet/openfaucet/blob/4ef1783fc74320e66ee7a71576dc91511f238a81/src/openfaucet/ofcontroller.py#L97-L106
import collections
import logging
import threading
import weakref

import twisted.internet.reactor
from zope import interface

from openfaucet import oferror
from openfaucet import ofproto
from openfaucet import ofprotoops


class IOpenflowController(interface.Interface):

    def connection_made():

    def connection_lost(reason):

    def handle_packet_in(buffer_id, total_len, in_port, reason, data):

    def handle_flow_removed(
        match, cookie, priority, reason, duration_sec, duration_nsec,
        idle_timeout, packet_count, byte_count):
Apache License 2.0
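A minimal controller sketch showing how the IOpenflowController interface above would typically be implemented with zope.interface; the class name and its logging bodies are hypothetical, not part of openfaucet.

import logging

from zope import interface

from openfaucet import ofcontroller

@interface.implementer(ofcontroller.IOpenflowController)
class PortLoggingController(object):
    """Logs port status changes; the other callbacks are no-ops."""

    def connection_made(self):
        logging.info('datapath connected')

    def connection_lost(self, reason):
        logging.info('datapath disconnected: %r', reason)

    def handle_packet_in(self, buffer_id, total_len, in_port, reason, data):
        pass

    def handle_flow_removed(self, match, cookie, priority, reason, duration_sec,
                            duration_nsec, idle_timeout, packet_count, byte_count):
        pass

    def handle_port_status(self, reason, desc):
        logging.info('port status change: reason=%r port=%r', reason, desc)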
zachchristensen28/ta-opnsense
bin/ta_opnsense/aob_py3/future/backports/email/iterators.py
walk
python
def walk(self):
    yield self
    if self.is_multipart():
        for subpart in self.get_payload():
            for subsubpart in subpart.walk():
                yield subsubpart
Walk over the message tree, yielding each subpart. The walk is performed in depth-first order. This method is a generator.
https://github.com/zachchristensen28/ta-opnsense/blob/fc736f4c6f0fa7866b4f6d2dcf9761b6b693d6cf/bin/ta_opnsense/aob_py3/future/backports/email/iterators.py#L23-L33
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import

__all__ = [
    'body_line_iterator',
    'typed_subpart_iterator',
    'walk',
]

import sys
from io import StringIO
MIT License
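A short usage sketch of the walk pattern; it uses the standard-library email package, whose behaviour the future.backports module above mirrors, and the two-part message text is made up.

from email import message_from_string

raw = (
    "Content-Type: multipart/mixed; boundary=BOUND\n"
    "\n"
    "--BOUND\n"
    "Content-Type: text/plain\n"
    "\n"
    "hello\n"
    "--BOUND--\n"
)
msg = message_from_string(raw)

for part in msg.walk():          # depth-first: container first, then subparts
    print(part.get_content_type())
# multipart/mixed
# text/plain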
react-native-skia/react-native-skia
build/android/gyp/util/md5_check.py
_Metadata.AddZipFile
python
def AddZipFile(self, path, entries):
    self._AssertNotQueried()
    tag = _ComputeInlineMd5(itertools.chain((e[0] for e in entries),
                                            (e[1] for e in entries)))
    self._files.append({
        'path': path,
        'tag': tag,
        'entries': [{"path": e[0], "tag": e[1]} for e in entries],
    })
Adds metadata for a zip file.

Args:
  path: Path to the file.
  entries: List of (subpath, tag) tuples for entries within the zip.
https://github.com/react-native-skia/react-native-skia/blob/91ecc74444b163f128541dbc1a42e27a9c0fb40b/build/android/gyp/util/md5_check.py#L376-L390
from __future__ import print_function import difflib import hashlib import itertools import json import os import sys import zipfile from util import build_utils sys.path.insert(1, os.path.join(build_utils.DIR_SOURCE_ROOT, 'build')) import print_python_deps PRINT_EXPLANATIONS = int(os.environ.get('PRINT_BUILD_EXPLANATIONS', 0)) _FORCE_REBUILD = int(os.environ.get('FORCE_REBUILD', 0)) def CallAndWriteDepfileIfStale(on_stale_md5, options, record_path=None, input_paths=None, input_strings=None, output_paths=None, force=False, pass_changes=False, track_subpaths_allowlist=None, depfile_deps=None): if not output_paths: raise Exception('At least one output_path must be specified.') input_paths = list(input_paths or []) input_strings = list(input_strings or []) output_paths = list(output_paths or []) input_paths += print_python_deps.ComputePythonDependencies() CallAndRecordIfStale( on_stale_md5, record_path=record_path, input_paths=input_paths, input_strings=input_strings, output_paths=output_paths, force=force, pass_changes=pass_changes, track_subpaths_allowlist=track_subpaths_allowlist) if hasattr(options, 'depfile') and options.depfile: build_utils.WriteDepfile(options.depfile, output_paths[0], depfile_deps) def CallAndRecordIfStale(function, record_path=None, input_paths=None, input_strings=None, output_paths=None, force=False, pass_changes=False, track_subpaths_allowlist=None): assert record_path or output_paths input_paths = input_paths or [] input_strings = input_strings or [] output_paths = output_paths or [] record_path = record_path or output_paths[0] + '.md5.stamp' assert record_path.endswith('.stamp'), ( 'record paths must end in \'.stamp\' so that they are easy to find ' 'and delete') new_metadata = _Metadata(track_entries=pass_changes or PRINT_EXPLANATIONS) new_metadata.AddStrings(input_strings) zip_allowlist = set(track_subpaths_allowlist or []) for path in input_paths: if path in zip_allowlist: entries = _ExtractZipEntries(path) new_metadata.AddZipFile(path, entries) else: new_metadata.AddFile(path, _ComputeTagForPath(path)) old_metadata = None force = force or _FORCE_REBUILD missing_outputs = [x for x in output_paths if force or not os.path.exists(x)] too_new = [] if not missing_outputs and os.path.exists(record_path): record_mtime = os.path.getmtime(record_path) too_new = [x for x in output_paths if os.path.getmtime(x) > record_mtime] if not too_new: with open(record_path, 'r') as jsonfile: try: old_metadata = _Metadata.FromFile(jsonfile) except: pass changes = Changes(old_metadata, new_metadata, force, missing_outputs, too_new) if not changes.HasChanges(): return if PRINT_EXPLANATIONS: print('=' * 80) print('Target is stale: %s' % record_path) print(changes.DescribeDifference()) print('=' * 80) args = (changes,) if pass_changes else () function(*args) with open(record_path, 'w') as f: new_metadata.ToFile(f) class Changes(object): def __init__(self, old_metadata, new_metadata, force, missing_outputs, too_new): self.old_metadata = old_metadata self.new_metadata = new_metadata self.force = force self.missing_outputs = missing_outputs self.too_new = too_new def _GetOldTag(self, path, subpath=None): return self.old_metadata and self.old_metadata.GetTag(path, subpath) def HasChanges(self): return (self.HasStringChanges() or self.old_metadata.FilesMd5() != self.new_metadata.FilesMd5()) def HasStringChanges(self): return (self.force or not self.old_metadata or self.old_metadata.StringsMd5() != self.new_metadata.StringsMd5()) def AddedOrModifiedOnly(self): if self.HasStringChanges(): 
return False if any(self.IterRemovedPaths()): return False for path in self.IterModifiedPaths(): if any(self.IterRemovedSubpaths(path)): return False return True def IterAllPaths(self): return self.new_metadata.IterPaths(); def IterAllSubpaths(self, path): return self.new_metadata.IterSubpaths(path); def IterAddedPaths(self): for path in self.new_metadata.IterPaths(): if self._GetOldTag(path) is None: yield path def IterAddedSubpaths(self, path): for subpath in self.new_metadata.IterSubpaths(path): if self._GetOldTag(path, subpath) is None: yield subpath def IterRemovedPaths(self): if self.old_metadata: for path in self.old_metadata.IterPaths(): if self.new_metadata.GetTag(path) is None: yield path def IterRemovedSubpaths(self, path): if self.old_metadata: for subpath in self.old_metadata.IterSubpaths(path): if self.new_metadata.GetTag(path, subpath) is None: yield subpath def IterModifiedPaths(self): for path in self.new_metadata.IterPaths(): old_tag = self._GetOldTag(path) new_tag = self.new_metadata.GetTag(path) if old_tag is not None and old_tag != new_tag: yield path def IterModifiedSubpaths(self, path): for subpath in self.new_metadata.IterSubpaths(path): old_tag = self._GetOldTag(path, subpath) new_tag = self.new_metadata.GetTag(path, subpath) if old_tag is not None and old_tag != new_tag: yield subpath def IterChangedPaths(self): return itertools.chain(self.IterRemovedPaths(), self.IterModifiedPaths(), self.IterAddedPaths()) def IterChangedSubpaths(self, path): return itertools.chain(self.IterRemovedSubpaths(path), self.IterModifiedSubpaths(path), self.IterAddedSubpaths(path)) def DescribeDifference(self): if self.force: return 'force=True' elif self.missing_outputs: return 'Outputs do not exist:\n ' + '\n '.join(self.missing_outputs) elif self.too_new: return 'Outputs newer than stamp file:\n ' + '\n '.join(self.too_new) elif self.old_metadata is None: return 'Previous stamp file not found.' if self.old_metadata.StringsMd5() != self.new_metadata.StringsMd5(): ndiff = difflib.ndiff(self.old_metadata.GetStrings(), self.new_metadata.GetStrings()) changed = [s for s in ndiff if not s.startswith(' ')] return 'Input strings changed:\n ' + '\n '.join(changed) if self.old_metadata.FilesMd5() == self.new_metadata.FilesMd5(): return "There's no difference." lines = [] lines.extend('Added: ' + p for p in self.IterAddedPaths()) lines.extend('Removed: ' + p for p in self.IterRemovedPaths()) for path in self.IterModifiedPaths(): lines.append('Modified: ' + path) lines.extend(' -> Subpath added: ' + p for p in self.IterAddedSubpaths(path)) lines.extend(' -> Subpath removed: ' + p for p in self.IterRemovedSubpaths(path)) lines.extend(' -> Subpath modified: ' + p for p in self.IterModifiedSubpaths(path)) if lines: return 'Input files changed:\n ' + '\n '.join(lines) return 'I have no idea what changed (there is a bug).' 
class _Metadata(object): def __init__(self, track_entries=False): self._track_entries = track_entries self._files_md5 = None self._strings_md5 = None self._files = [] self._strings = [] self._file_map = None @classmethod def FromFile(cls, fileobj): ret = cls() obj = json.load(fileobj) ret._files_md5 = obj['files-md5'] ret._strings_md5 = obj['strings-md5'] ret._files = obj.get('input-files', []) ret._strings = obj.get('input-strings', []) return ret def ToFile(self, fileobj): obj = { 'files-md5': self.FilesMd5(), 'strings-md5': self.StringsMd5(), } if self._track_entries: obj['input-files'] = sorted(self._files, key=lambda e: e['path']) obj['input-strings'] = self._strings json.dump(obj, fileobj, indent=2) def _AssertNotQueried(self): assert self._files_md5 is None assert self._strings_md5 is None assert self._file_map is None def AddStrings(self, values): self._AssertNotQueried() self._strings.extend(str(v) for v in values) def AddFile(self, path, tag): self._AssertNotQueried() self._files.append({ 'path': path, 'tag': tag, })
MIT License
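A hedged usage sketch for the staleness helpers defined in this module; the callback body, paths, and flag string are placeholders.

def _on_stale_md5(changes):
    # Runs only when inputs, input strings, or outputs are stale.
    for path in changes.IterChangedPaths():
        print('changed input:', path)
    # ... rebuild out/my_target.zip here ...

CallAndRecordIfStale(
    _on_stale_md5,
    record_path='out/my_target.md5.stamp',   # must end in .stamp
    input_paths=['src/a.py', 'src/b.py'],
    input_strings=['--flag'],
    output_paths=['out/my_target.zip'],
    pass_changes=True,                        # hand the Changes object to the callback
)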
ucam-smt/sgnmt
cam/sgnmt/predictors/bow.py
BagOfWordsPredictor.predict_next
python
def predict_next(self):
    if not self.bag:
        return {utils.EOS_ID : 0.0}
    ret = {w : 0.0 for w in self.bag}
    if self.accept_subsets:
        ret[utils.EOS_ID] = 0.0
    return ret
If the bag is empty, the only allowed symbol is EOS. Otherwise, return the list of keys in the bag.
https://github.com/ucam-smt/sgnmt/blob/c663ec7b251552e36b6b4f992f0ac21aad87cb7b/cam/sgnmt/predictors/bow.py#L131-L140
import logging from cam.sgnmt import utils from cam.sgnmt.decoding.beam import BeamDecoder from cam.sgnmt.decoding.core import CLOSED_VOCAB_SCORE_NORM_NONE from cam.sgnmt.misc.trie import SimpleTrie from cam.sgnmt.misc.unigram import FileUnigramTable, BestStatsUnigramTable, FullStatsUnigramTable, AllStatsUnigramTable from cam.sgnmt.predictors.core import Predictor from cam.sgnmt.utils import INF, NEG_INF, MESSAGE_TYPE_FULL_HYPO, MESSAGE_TYPE_DEFAULT class BagOfWordsPredictor(Predictor): def __init__(self, trg_test_file, accept_subsets=False, accept_duplicates=False, heuristic_scores_file="", collect_stats_strategy='best', heuristic_add_consumed = False, heuristic_add_remaining = True, diversity_heuristic_factor = -1.0, equivalence_vocab=-1): super(BagOfWordsPredictor, self).__init__() with open(trg_test_file) as f: self.lines = f.read().splitlines() if heuristic_scores_file: self.estimates = FileUnigramTable(heuristic_scores_file) elif collect_stats_strategy == 'best': self.estimates = BestStatsUnigramTable() elif collect_stats_strategy == 'full': self.estimates = FullStatsUnigramTable() elif collect_stats_strategy == 'all': self.estimates = AllStatsUnigramTable() else: logging.error("Unknown statistics collection strategy") self.accept_subsets = accept_subsets self.accept_duplicates = accept_duplicates self.heuristic_add_consumed = heuristic_add_consumed self.heuristic_add_remaining = heuristic_add_remaining self.equivalence_vocab = equivalence_vocab if accept_duplicates and not accept_subsets: logging.error("You enabled bow_accept_duplicates but not bow_" "accept_subsets. Therefore, the bow predictor will " "never accept end-of-sentence and could cause " "an infinite loop in the search strategy.") self.diversity_heuristic_factor = diversity_heuristic_factor self.diverse_heuristic = (diversity_heuristic_factor > 0.0) def get_unk_probability(self, posterior): return NEG_INF
Apache License 2.0
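A standalone illustration of the scoring convention behind predict_next: scores are log-probabilities, so 0.0 marks an allowed token, and anything outside the returned dict falls back to get_unk_probability, which is NEG_INF for this predictor. The token ids are made up.

EOS_ID = 2                 # placeholder id; sgnmt takes this from utils
bag = [7, 7, 12]           # remaining target tokens, duplicates allowed
accept_subsets = False

if not bag:
    posterior = {EOS_ID: 0.0}          # only end-of-sentence is allowed
else:
    posterior = {w: 0.0 for w in bag}  # log P = 0.0, i.e. unpenalized
    if accept_subsets:
        posterior[EOS_ID] = 0.0        # allow stopping before the bag is empty

print(posterior)   # {7: 0.0, 12: 0.0}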
blueqat/blueqat
blueqat/circuit_funcs/circuit_to_unitary.py
circuit_to_unitary
python
def circuit_to_unitary(circ: Circuit, *runargs, **runkwargs) -> np.ndarray:
    runkwargs.setdefault('returns', 'statevector')
    runkwargs.setdefault('ignore_global', False)
    n_qubits = circ.n_qubits
    vecs = []
    if n_qubits == 0:
        return np.array([[1]])
    for i in range(1 << n_qubits):
        bitmask = tuple(k for k in range(n_qubits) if (1 << k) & i)
        c = Circuit()
        if bitmask:
            c.x[bitmask]
        c += circ
        vecs.append(c.run(*runargs, **runkwargs))
    return np.array(vecs).T
Convert a circuit to a unitary matrix. This function is an experimental feature and may be changed or deleted in the future.
https://github.com/blueqat/blueqat/blob/098debdce97c8652397ef9a3ab79a2a464ffbb05/blueqat/circuit_funcs/circuit_to_unitary.py#L5-L21
import numpy as np

from blueqat import Circuit
Apache License 2.0
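A hedged usage sketch for circuit_to_unitary; the import path follows the module path given in the record, and the Bell-pair circuit is just an example.

import numpy as np
from blueqat import Circuit
from blueqat.circuit_funcs.circuit_to_unitary import circuit_to_unitary

u = circuit_to_unitary(Circuit().h[0].cx[0, 1])   # 2-qubit Bell-pair circuit

print(u.shape)                                    # (4, 4)
print(np.allclose(u.conj().T @ u, np.eye(4)))     # True: u is unitary (up to numerics)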
aws/aws-encryption-sdk-cli
src/aws_encryption_sdk_cli/internal/arg_parsing.py
CommentIgnoringArgumentParser.__init__
python
def __init__(self, *args, **kwargs):
    self.__dummy_arguments = []
    self.__is_windows = any(platform.win32_ver())
    super(CommentIgnoringArgumentParser, self).__init__(*args, **kwargs)
Sets up the dummy argument registry.
https://github.com/aws/aws-encryption-sdk-cli/blob/ba6365817d7dc226f64bd9506ac28e5c32368c5a/src/aws_encryption_sdk_cli/internal/arg_parsing.py#L53-L60
import argparse import copy import logging import os import platform import shlex from collections import OrderedDict, defaultdict from enum import Enum import aws_encryption_sdk import six from aws_encryption_sdk_cli.exceptions import ParameterParseError from aws_encryption_sdk_cli.internal.identifiers import ALGORITHM_NAMES, DEFAULT_MASTER_KEY_PROVIDER, __version__ from aws_encryption_sdk_cli.internal.logging_utils import LOGGER_NAME from aws_encryption_sdk_cli.internal.metadata import MetadataWriter try: from typing import Any, Dict, List, Optional, Sequence, Tuple, Union from aws_encryption_sdk_cli.internal.mypy_types import ( ARGPARSE_TEXT, CACHING_CONFIG, COLLAPSED_CONFIG, MASTER_KEY_PROVIDER_CONFIG, PARSED_CONFIG, RAW_CONFIG, ) except ImportError: cast = lambda typ, val: val __all__ = ("parse_args",) _LOGGER = logging.getLogger(LOGGER_NAME) class CommentIgnoringArgumentParser(argparse.ArgumentParser):
Apache License 2.0
wxglade/wxglade
wcodegen/__init__.py
BaseWidgetWriter.generate_code_bitmap
python
def generate_code_bitmap(self, bitmap, required=False):
    assert self.tmpl_inline_bitmap

    if not bitmap and not required:
        return self.codegen.cn('wxNullBitmap')

    preview = self.codegen.preview
    if ( preview and ( bitmap.startswith('var:') or bitmap.startswith('code:') ) ) or (not bitmap and required):
        preview_icon = os.path.join(config.icons_path, "icon.png")
        return self.tmpl_inline_bitmap % {'name': self.codegen.cn('wxBitmap'),
                                          'bitmap': self.codegen.quote_path(preview_icon),
                                          'bitmap_type': self.codegen.cn('wxBITMAP_TYPE_ANY')}

    if bitmap.startswith('var:'):
        return self.tmpl_inline_bitmap % {'name': self.codegen.cn('wxBitmap'),
                                          'bitmap': bitmap[4:].strip(),
                                          'bitmap_type': self.codegen.cn('wxBITMAP_TYPE_ANY')}
    if bitmap.startswith('empty:'):
        return self.get_inline_stmt_emptybitmap(bitmap)
    if bitmap.startswith('art:'):
        return self.get_inline_stmt_artprovider(bitmap)
    if bitmap.startswith('code:'):
        return '%s' % self.codegen.cn(bitmap[5:].strip())

    if preview:
        bitmap = misc.get_absolute_path(bitmap, True)
    return self.tmpl_inline_bitmap % {'name': self.codegen.cn('wxBitmap'),
                                      'bitmap': self.codegen.quote_path(bitmap),
                                      'bitmap_type': self.codegen.cn('wxBITMAP_TYPE_ANY')}
Returns a code fragment that generates a wxBitmap object.

bitmap: Bitmap definition string

see: tmpl_inline_bitmap, get_inline_stmt_emptybitmap(), get_inline_stmt_artprovider()
https://github.com/wxglade/wxglade/blob/56a1acd3e44cd3fb777db004ec38f4bb85461f3e/wcodegen/__init__.py#L484-L517
from __future__ import absolute_import import common, config, misc, compat import new_properties as np import copy, logging, os.path from gui_mixins import StylesMixin class BaseCodeWriter(object): def __init__(self): pass def get_code(self, obj): return [], [] def get_properties_code(self, obj): return [] def get_init_code(self, obj): return [] def get_layout_code(self, obj): return [] def get_code_per_child(self, obj, child): return [] class BaseLanguageMixin(StylesMixin): comment_sign = '' default_extensions = [] format_flags = False language = None lang_prefix = None scope_sep = '' tmpl_flag_join = '|' def cn(self, name): return name def cn_class(self, klass): return klass def get_class(self, scope): if self.scope_sep: scope_list = scope.rsplit(self.scope_sep, 1) if len(scope_list) == 2: return scope_list[1] else: return scope return scope def get_scope(self, scope): if self.scope_sep: scope_list = scope.rsplit(self.scope_sep, 1) if len(scope_list) == 2: return scope_list[0] else: return '' return '' def _get_style_list(self): try: groups = self.config['style_list'] except (AttributeError, KeyError): groups = [] return groups style_list = property(_get_style_list) class CppMixin(BaseLanguageMixin): comment_sign = '//' default_extensions = ['cpp', 'cc', 'C', 'cxx', 'c++', 'h', 'hh', 'hpp', 'H', 'hxx', ] language = 'C++' lang_prefix = 'cpp' scope_sep = '::' def cn_class(self, klass): if not klass: return klass klass = klass.replace('::', '_') return klass class LispMixin(BaseLanguageMixin): comment_sign = ';;;' default_extensions = ['lisp'] format_flags = True language = 'lisp' lang_prefix = 'lisp' def cn(self, name): if name[:2] == 'wx': return 'wx' + name[2:] elif name[:4] == 'EVT_': return 'wx' + name return name def cn_f(self, flags): flags = BaseLanguageMixin.cn_f(self, flags) flags = flags.split('|') if len(flags) == 1: flags = flags[0] else: flags = '(logior %s)' % ' '.join(flags) return flags class PerlMixin(BaseLanguageMixin): comment_sign = '#' default_extensions = ['pl', 'pm'] language = 'perl' lang_prefix = 'perl' scope_sep = '::' _perl_constant_list = [ "wxALL", "wxTOP", "wxBOTTOM", "wxLEFT", "wxRIGHT", "wxDOWN", "wxNORTH", "wxSOUTH", "wxWEST", "wxEAST", "wxEXPAND", "wxGROW", "wxSHAPED", "wxFIXED_MINSIZE", "wxCAPTION", "wxMINIMIZE_BOX", "wxMAXIMIZE_BOX", "wxRESIZE_BORDER", "wxYES_NO", "wxYES", "wxNO", 'wxYES_DEFAULT', 'wxNO_DEFAULT', "wxCANCEL", "wxOK", "wxBLACK", "wxWHITE", "wxRED", "wxBLUE", "wxGREEN", "wxCYAN", "wxLIGHT_GREY", 'wxDEFAULT', 'wxDECORATIVE', 'wxROMAN', 'wxSWISS', 'wxSCRIPT', 'wxMODERN', 'wxTELETYPE', 'wxNORMAL', 'wxSLANT', 'wxITALIC', 'wxNORMAL', 'wxLIGHT', 'wxBOLD', 'wxNORMAL_FONT', 'wxSMALL_FONT', 'wxITALIC_FONT', 'wxSWISS_FONT', 'wxHORIZONTAL', 'wxVERTICAL', 'wxALIGN_CENTER', 'wxALIGN_CENTRE', 'wxALIGN_LEFT', 'wxALIGN_RIGHT', 'wxALIGN_TOP', 'wxALIGN_BOTTOM', 'wxALIGN_CENTER_VERTICAL', 'wxALIGN_CENTRE_VERTICAL', 'wxALIGN_CENTER_HORIZONTAL', 'wxALIGN_CENTRE_HORIZONTAL', 'wxSTANDARD_CURSOR', 'wxHOURGLASS_CURSOR', 'wxCROSS_CURSOR', 'wxTheClipboard', 'wxFormatInvalid', 'wxThePrintPaperDatabase', 'wxNullAnimation', 'wxNullBitmap', 'wxNullIcon', 'wxNullColour', 'wxNullCursor', 'wxNullFont', 'wxNullPen', 'wxNullBrush', 'wxNullPalette', 'wxNullAcceleratorTable', 'wxLI_HORIZONTAL', 'wxLI_VERTICAL', 'wxHL_CONTEXTMENU', 'wxHL_ALIGN_LEFT', 'wxHL_ALIGN_RIGHT', 'wxHL_ALIGN_CENTRE', 'wxHL_DEFAULT_STYLE', 'wxMAJOR_VERSION', 'wxMINOR_VERSION', 'wxSPLIT_HORIZONTAL', 'wxSPLIT_VERTICAL', ] def cn(self, name): if name.startswith('wxBITMAP_TYPE_') or 
name.startswith("wxDefault") or name.startswith('wxSYS_COLOUR_'): return name if "_" in name: start = name.split("_",1)[0] if start in {'wxART', 'wxBORDER', 'wxBRUSHSTYLE', 'wxBU', 'wxCB', 'wxCC', 'wxCHB', 'wxCHK', 'wxCURSOR', 'wxDD', 'wxEVT', 'wxFONTENCODING', 'wxFONTFAMILY', 'wxFONTSTYLE', 'wxFONTWEIGHT', 'wxFONTFLAG', 'wxFRAME', 'wxGA', 'wxICON', 'wxID', 'wxK', 'wxLANGUAGE', 'wxLB', 'wxMOD', 'wxNB', 'wxALIGN', 'wxDefault', 'wxPD', 'wxPROPSHEET', 'wxRA', 'wxRB', 'wxSL', 'wxSP', 'wxSPLASH', 'wxST', 'wxSys', 'wxSW', 'wxSASH', 'wxTB', 'wxTE', 'wxWIZARD'}: return name if name in self._perl_constant_list: return name if name.startswith('Wx::'): return name if name[:2] == 'wx': return 'Wx::' + name[2:] if name[:4] == 'EVT_': return 'Wx::Event::' + name return name class PythonMixin(BaseLanguageMixin): comment_sign = '#' default_extensions = ['py', 'pyw'] format_flags = True language = 'python' lang_prefix = 'py' scope_sep = '.' tmpl_flag_join = ' | ' def cn(self, name): if name.startswith('wx.'): return name if name.startswith('wx'): return 'wx.' + name[2:] if name.startswith('EVT_'): return 'wx.' + name return name def cn_class(self, klass): if not klass: return klass if not klass.startswith('wx.'): klass = self.get_class(klass) klass = klass.replace('::', '_') return klass class XRCMixin(BaseLanguageMixin): default_extensions = ['xrc'] language = 'XRC' lang_prefix = 'xrc' class BaseWidgetWriter(StylesMixin, BaseCodeWriter): import_modules = [] __import_modules = [] supported_by = () tmpl_after = [] tmpl_before = [] tmpl_layout = [] tmpl_props = [] tmpl = '' tmpl_concatenate_choices = ', ' tmpl_dict = {} tmpl_flags = '%s' tmpl_inline_artprovider = '' tmpl_inline_bitmap = '' tmpl_inline_emptybitmap = '' tmpl_import_artprovider = '' tmpl_inline_wxSize = '' has_selection = False tmpl_selection = '' has_setdefault = False tmpl_setdefault = '' has_setvalue = False has_setvalue1 = False tmpl_setvalue = '' prefix_style = False set_default_style = False use_names_for_binding_events = True def __init__(self, klass=None): BaseCodeWriter.__init__(self) self.config = {} self.klass = klass if hasattr(self, 'import_modules'): self.__import_modules = self.import_modules[:] else: self.__import_modules = [] if klass in config.widget_config: for item in config.widget_config[self.klass]: if item == 'style_defs': continue self.config[item] = copy.deepcopy(config.widget_config[self.klass][item]) self.codegen = common.code_writers[self.language] self._reset_vars() def format_widget_access(self, obj): return self.codegen.format_generic_access(obj) def stmt2list(self, stmt): temp = ['%s\n' % line for line in stmt.split('\n')] return temp def _reset_vars(self): self.import_modules = self.__import_modules[:] self.has_selection = False self.has_setdefault = False self.has_setvalue = False self.has_setvalue1 = False self.tmpl_before = [] self.tmpl_after = [] self.tmpl_layout = [] self.tmpl_props = [] self.tmpl_dict = {} def _prepare_style(self, style): style_s = style.get_string_value() fmt_style = self.cn_f(style_s) fmt_default_style = self.cn_f(self.default_style) if fmt_style and fmt_style != fmt_default_style: style = self.tmpl_flags % fmt_style elif not style_s and fmt_default_style: style = self.tmpl_flags % '0' else: if self.set_default_style: if style and not fmt_style: logging.debug( _('Unsupported attribute %s use default %s instead'), style, self.default_style) style = self.tmpl_flags % fmt_default_style else: style = '' if style and self.prefix_style: style = ', %s, %s, %s' % ( self.cn('wxDefaultPosition'), 
self.cn('wxDefaultSize'), style ) return style def _prepare_tmpl_content(self, obj): self.tmpl_dict['comment'] = self.codegen.comment_sign self.tmpl_dict['tab'] = self.codegen.tabs(1) self.tmpl_dict['store_as_attr'] = self.codegen.store_as_attr(obj) self.tmpl_dict['id_name'], self.tmpl_dict['id_number'] = self.codegen.generate_code_id(obj) self.tmpl_dict['id'] = self.tmpl_dict['id_number'] self.tmpl_dict['obj_name'] = self.codegen._format_name(obj.name) self.tmpl_dict['klass'] = obj.get_instantiation_class(self.cn, self.cn_class, self.codegen.preview) self.tmpl_dict['store_as_attr'] = self.codegen.store_as_attr(obj) if obj.check_prop('style'): self.tmpl_dict['style'] = self._prepare_style(obj.properties["style"]) if obj.check_prop('label'): self.tmpl_dict['label'] = self.codegen.quote_str( obj.label ) if obj.check_prop('value'): self.tmpl_dict['value'] = self.codegen.quote_str( compat.unicode(obj.value) ) if obj.check_prop('value_unquoted'): self.tmpl_dict['value_unquoted'] = obj.value return def _get_default_style(self): try: name = self.config['default_style'] except (AttributeError, KeyError): name = '' return name default_style = property(_get_default_style) def _prepare_bitmaps(self, obj): need_artprovider = have_constructor_argument = False for p_name in obj.property_names: p = obj.properties[p_name] if not isinstance(p, np.BitmapProperty): continue value = p.get_value() if value.startswith('art:'): need_artprovider = True self.tmpl_dict[p_name] = self.generate_code_bitmap(value) if '%%(%s)s'%p_name in self.tmpl: have_constructor_argument = True elif value and (not p.min_version or self.codegen.for_version>=p.min_version): setname = p_name.replace( "_bitmap", "").capitalize() if compat.IS_CLASSIC and setname=="Pressed": setname = "Selected" if setname=="Bitmap": setname = "" tmpl = self.tmpl2_bitmap_property%(setname, p_name) if p_name=="bitmap" and obj.check_prop_nodefault("bitmap_dir"): direction = self.cn( obj.properties["bitmap_dir"].get_string_value() ) tmpl = self.tmpl2_bitmap_property_with_dir%(setname, p_name, direction) self.tmpl_props.append(tmpl) if need_artprovider and self.tmpl_import_artprovider: self.import_modules.append(self.tmpl_import_artprovider) if have_constructor_argument and not obj.check_prop('size') and self.tmpl_SetBestSize: self.tmpl_props.append(self.tmpl_SetBestSize) self.has_setdefault = "default" in obj.properties and obj.default or False def _prepare_choice(self, obj): choices = [c[0] for c in obj.choices] choices_str = self.tmpl_concatenate_choices.join( [self.codegen.quote_str(c) for c in choices] ) self.tmpl_dict['choices'] = choices_str self.tmpl_dict['choices_len'] = len(choices) if choices: selection_p = obj.properties.get("selection", None) if selection_p and selection_p.is_active(): self.tmpl_dict['selection'] = selection_p.get() self.has_selection = True
MIT License
cpburnz/python-sql-parameters
sqlparams/_converting.py
_NumericToNamedConverter._regex_replace
python
def _regex_replace(self, in_params, param_conversions, match): result = match.groupdict() escape = result.get('escape') if escape is not None: return escape[self._escape_start:] else: in_num_str = result['param'] in_index = int(in_num_str) - self._in_start value = in_params[in_index] if self._expand_tuples and isinstance(value, tuple): out_names = [] out_replacements = [] for i, sub_value in enumerate(value): out_name = "_{}_{}".format(in_num_str, i) out_repl = self._out_format.format(param=out_name) out_names.append(out_name) out_replacements.append(out_repl) param_conversions.append((True, in_index, out_names)) return "({})".format(",".join(out_replacements)) else: out_name = "_" + in_num_str out_repl = self._out_format.format(param=out_name) param_conversions.append((False, in_index, out_name)) return out_repl
Regular expression replace callback. *in_params* (:class:`~collections.abc.Sequence`) contains the in-style parameters to sample. *param_conversions* (:class:`list`) will be outputted with each parameter conversion to perform (:class:`tuple`). *match* (:class:`re.Match`) is the in-parameter match. Returns the out-parameter replacement string (:class:`str`).
https://github.com/cpburnz/python-sql-parameters/blob/8e58cc68c05b857ee928667d5709109a19076462/sqlparams/_converting.py#L897-L942
import itertools from collections.abc import Mapping from functools import partial from . import _styles from ._util import _is_sequence class _Converter(object): def __init__(self, escape_char, expand_tuples, in_regex, in_style, out_style): self._escape_start = len(escape_char) if escape_char is not None else 0 self._expand_tuples = expand_tuples self._in_regex = in_regex self._in_style = in_style self._out_format = out_style.out_format self._out_style = out_style def convert(self, sql, params): raise NotImplementedError("{} must implement convert().".format(self.__class__.__qualname__)) def convert_many(self, sql, many_params): raise NotImplementedError("{} must implement convert_many().".format(self.__class__.__qualname__)) class _NamedToNamedConverter(_Converter): def convert(self, sql, params): if not isinstance(params, Mapping): raise TypeError("params:{!r} is not a mapping.".format(params)) param_conversions = [] out_sql = self._in_regex.sub(partial(self._regex_replace, params, param_conversions), sql) out_params = self._convert_params(params, param_conversions) return out_sql, out_params def convert_many(self, sql, many_params): iter_params = iter(many_params) first_params = next(iter_params) if not isinstance(first_params, Mapping): raise TypeError("many_params[0]:{!r} is not a mapping.".format(first_params)) param_conversions = [] out_sql = self._in_regex.sub(partial(self._regex_replace, first_params, param_conversions), sql) out_params = self._convert_many_params(itertools.chain((first_params,), iter_params), param_conversions) return out_sql, out_params def _convert_many_params(self, many_in_params, param_conversions): many_out_params = [] for i, in_params in enumerate(many_in_params): if i and not isinstance(in_params, Mapping): raise TypeError("many_params[{}]:{!r} is not a mapping.".format(i, in_params)) out_params = {} for expand_tuple, in_name, out_name in param_conversions: if expand_tuple: out_names = out_name values = in_params[in_name] if not isinstance(values, tuple): raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, in_name, values)) elif len(values) != len(out_names): raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, in_name, values, len(out_names))) for sub_name, sub_value in zip(out_names, values): out_params[sub_name] = sub_value else: out_params[out_name] = in_params[in_name] many_out_params.append(out_params) return many_out_params def _convert_params(self, in_params, param_conversions): out_params = {} for expand_tuple, in_name, out_name in param_conversions: if expand_tuple: out_names = out_name for sub_name, sub_value in zip(out_names, in_params[in_name]): out_params[sub_name] = sub_value else: out_params[out_name] = in_params[in_name] return out_params def _regex_replace(self, in_params, param_conversions, match): result = match.groupdict() escape = result.get('escape') if escape is not None: return escape[self._escape_start:] else: in_name = result['param'] value = in_params[in_name] if self._expand_tuples and isinstance(value, tuple): out_names = [] out_replacements = [] for i, sub_value in enumerate(value): out_name = "{}__{}_sqlp".format(in_name, i) out_repl = self._out_format.format(param=out_name) out_names.append(out_name) out_replacements.append(out_repl) param_conversions.append((True, in_name, out_names)) return "({})".format(",".join(out_replacements)) else: out_repl = self._out_format.format(param=in_name) param_conversions.append((False, in_name, in_name)) return out_repl 
class _NamedToNumericConverter(_Converter): def __init__(self, **kw): super().__init__(**kw) self._out_start = self._out_style.start def convert(self, sql, params): if not isinstance(params, Mapping): raise TypeError("params:{!r} is not a mapping.".format(params)) param_conversions = [] out_counter = itertools.count() out_lookup = {} out_sql = self._in_regex.sub(partial(self._regex_replace, params, param_conversions, out_counter, out_lookup), sql) out_params = self._convert_params(params, param_conversions) return out_sql, out_params def convert_many(self, sql, many_params): iter_params = iter(many_params) first_params = next(iter_params) if not isinstance(first_params, Mapping): raise TypeError("many_params[0]:{!r} is not a mapping.".format(first_params)) param_conversions = [] out_counter = itertools.count() out_lookup = {} out_sql = self._in_regex.sub(partial(self._regex_replace, first_params, param_conversions, out_counter, out_lookup), sql) out_params = self._convert_many_params(itertools.chain((first_params,), iter_params), param_conversions) return out_sql, out_params def _convert_many_params(self, many_in_params, param_conversions): last_conv = param_conversions[-1] size = (last_conv[2][-1] if last_conv[0] else last_conv[2]) + 1 many_out_params = [] for i, in_params in enumerate(many_in_params): if i and not isinstance(in_params, Mapping): raise TypeError("many_params[{}]:{!r} is not a mapping.".format(i, in_params)) out_params = [None] * size for expand_tuple, in_name, out_index in param_conversions: if expand_tuple: values = in_params[in_name] out_indices = out_index if not isinstance(values, tuple): raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, in_name, values)) elif len(values) != len(out_indices): raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, in_name, values, len(out_indices))) for sub_index, sub_value in zip(out_indices, values): out_params[sub_index] = sub_value else: out_params[out_index] = in_params[in_name] many_out_params.append(out_params) return many_out_params def _convert_params(self, in_params, param_conversions): last_conv = param_conversions[-1] size = (last_conv[2][-1] if last_conv[0] else last_conv[2]) + 1 out_params = [None] * size for expand_tuple, in_name, out_index in param_conversions: if expand_tuple: out_indices = out_index for sub_index, sub_value in zip(out_indices, in_params[in_name]): out_params[sub_index] = sub_value else: out_params[out_index] = in_params[in_name] return out_params def _regex_replace(self, in_params, param_conversions, out_counter, out_lookup, match): result = match.groupdict() escape = result.get('escape') if escape is not None: return escape[self._escape_start:] else: in_name = result['param'] value = in_params[in_name] if self._expand_tuples and isinstance(value, tuple): is_new = True out_indices = [] out_replacements = [] for i, sub_value in enumerate(value): out_key = (in_name, i) out_result = out_lookup.get(out_key) if out_result is not None: out_index, out_repl = out_result is_new = False else: out_index = next(out_counter) out_num = out_index + self._out_start out_repl = self._out_format.format(param=out_num) out_lookup[out_key] = (out_index, out_repl) out_indices.append(out_index) out_replacements.append(out_repl) if is_new: param_conversions.append((True, in_name, out_indices)) return "({})".format(",".join(out_replacements)) else: out_result = out_lookup.get(in_name) if out_result is not None: out_repl = out_result[1] else: out_index = 
next(out_counter) out_num = out_index + self._out_start out_repl = self._out_format.format(param=out_num) out_lookup[in_name] = (out_index, out_repl) param_conversions.append((False, in_name, out_index)) return out_repl class _NamedToOrdinalConverter(_Converter): def convert(self, sql, params): if not isinstance(params, Mapping): raise TypeError("params:{!r} is not a mapping.".format(params)) param_conversions = [] out_format = self._out_style.out_format out_sql = self._in_regex.sub(partial(self._regex_replace, params, param_conversions, out_format), sql) out_params = self._convert_params(params, param_conversions) return out_sql, out_params def convert_many(self, sql, many_params): iter_params = iter(many_params) first_params = next(iter_params) if not isinstance(first_params, Mapping): raise TypeError("many_params[0]:{!r} is not a mapping.".format(first_params)) param_conversions = [] out_format = self._out_style.out_format out_sql = self._in_regex.sub(partial(self._regex_replace, first_params, param_conversions, out_format), sql) out_params = self._convert_many_params(itertools.chain((first_params,), iter_params), param_conversions) return out_sql, out_params def _convert_many_params(self, many_in_params, param_conversions): many_out_params = [] for i, in_params in enumerate(many_in_params): if i and not isinstance(in_params, Mapping): raise TypeError("many_params[{}]:{!r} is not a mapping.".format(i, in_params)) out_params = [] for expand_tuple, in_name, out_count in param_conversions: if expand_tuple: values = in_params[in_name] if not isinstance(values, tuple): raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, in_name, values)) elif len(values) != out_count: raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, in_name, values, out_count)) for sub_value in values: out_params.append(sub_value) else: out_params.append(in_params[in_name]) many_out_params.append(out_params) return many_out_params def _convert_params(self, in_params, param_conversions): out_params = [] for expand_tuple, in_name, _out_count in param_conversions: if expand_tuple: for sub_value in in_params[in_name]: out_params.append(sub_value) else: out_params.append(in_params[in_name]) return out_params def _regex_replace(self, in_params, param_conversions, out_format, match): result = match.groupdict() escape = result.get('escape') if escape is not None: return escape[self._escape_start:] else: in_name = result['param'] value = in_params[in_name] if self._expand_tuples and isinstance(value, tuple): param_conversions.append((True, in_name, len(value))) return "({})".format(",".join(out_format for _ in value)) else: param_conversions.append((False, in_name, None)) return out_format class _NumericConverter(_Converter): def __init__(self, **kw): super().__init__(**kw) self._in_start = self._in_style.start def _mapping_as_sequence(self, in_params): start = self._in_start return {int(__key) - start: __value for __key, __value in in_params.items() if isinstance(__key, int) or (isinstance(__key, (str, bytes)) and __key.isdigit())} class _NumericToNamedConverter(_NumericConverter): def convert(self, sql, params): if _is_sequence(params): pass elif isinstance(params, Mapping): params = self._mapping_as_sequence(params) else: raise TypeError("params:{!r} is not a sequence or mapping.".format(params)) param_conversions = [] out_sql = self._in_regex.sub(partial(self._regex_replace, params, param_conversions), sql) out_params = self._convert_params(params, 
param_conversions) return out_sql, out_params def convert_many(self, sql, many_params): iter_params = iter(many_params) first_params = next(iter_params) if _is_sequence(first_params): pass elif isinstance(first_params, Mapping): first_params = self._mapping_as_sequence(first_params) else: raise TypeError("many_params[0]:{!r} is not a sequence or mapping.".format(first_params)) param_conversions = [] out_sql = self._in_regex.sub(partial(self._regex_replace, first_params, param_conversions), sql) out_params = self._convert_many_params(itertools.chain((first_params,), iter_params), param_conversions) return out_sql, out_params def _convert_many_params(self, many_in_params, param_conversions): many_out_params = [] for i, in_params in enumerate(many_in_params): if i: if _is_sequence(in_params): pass elif isinstance(in_params, Mapping): in_params = self._mapping_as_sequence(in_params) else: raise TypeError("many_params[{}]:{!r} is not a sequence or mapping.".format(i, in_params)) out_params = {} for expand_tuple, in_index, out_name in param_conversions: if expand_tuple: out_names = out_name values = in_params[in_index] if not isinstance(values, tuple): raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, in_index, values)) elif len(values) != len(out_names): raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, in_index, values, len(out_names))) for sub_name, sub_value in zip(out_names, values): out_params[sub_name] = sub_value else: out_params[out_name] = in_params[in_index] many_out_params.append(out_params) return many_out_params def _convert_params(self, in_params, param_conversions): out_params = {} for expand_tuple, in_index, out_name in param_conversions: if expand_tuple: out_names = out_name for sub_name, sub_value in zip(out_names, in_params[in_index]): out_params[sub_name] = sub_value else: out_params[out_name] = in_params[in_index] return out_params
MIT License
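A minimal standalone sketch of the numeric-to-named substitution this callback performs, assuming a simplified ":1"-style input and ignoring the escape handling and tuple expansion that the real converter above supports; the function name, the ":_<n>" output placeholder syntax, and the sample query are illustrative only, not the library's public API:

import re

def numeric_to_named(sql, in_params):
    # Collect converted parameters while the substitution runs, reusing the
    # "_<number>" naming scheme produced by the callback above.
    out_params = {}

    def repl(match):
        num = match.group(1)                        # e.g. "1" from ":1"
        name = "_" + num
        out_params[name] = in_params[int(num) - 1]  # numeric placeholders are 1-based
        return ":" + name                           # output placeholder syntax is illustrative

    return re.sub(r":(\d+)", repl, sql), out_params

sql, params = numeric_to_named(
    "SELECT * FROM users WHERE id = :1 AND role = :2", [7, "admin"])
print(sql)     # SELECT * FROM users WHERE id = :_1 AND role = :_2
print(params)  # {'_1': 7, '_2': 'admin'}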
thomasgerstenberg/serial_monitor
serial_monitor_commands.py
SerialOptionSelector.show
python
def show(self, callback, starting_index=0): starting_index = max(starting_index, 0) if self.header: starting_index += 1 starting_index = min(starting_index, len(self.items)) def item_selected(selected_index): if selected_index == -1: return if self.header and selected_index == 0: self.show(callback, -1) return item = self.items[selected_index] if self.header: selected_index -= 1 callback(item, selected_index) sublime.active_window().show_quick_panel(self.items, item_selected, flags=sublime.KEEP_OPEN_ON_FOCUS_LOST, selected_index=starting_index)
Shows the list for the user to choose from :param callback: function to call when the user has selected an item. Should take 2 positional arguments: the string selected, the index selected :param starting_index: the index of the initial item to be highlighted when shown :type starting_index: int
https://github.com/thomasgerstenberg/serial_monitor/blob/6e25172aca9ad755b8ec2f7e3efc5664ce35ed7e/serial_monitor_commands.py#L41-L70
import sys import os import time import sublime import sublime_plugin sys.path.append(os.path.dirname(__file__)) import logger import serial_monitor_thread from serial_settings import SerialSettings from filter.serial_filter import FilterFile, FilterException from . import command_history_event_listener from hardware import serial, hardware_factory from stream.serial_text_stream import SerialTextStream import serial_constants BAUD_RATES = ["9600", "19200", "38400", "57600", "115200"] class SerialOptionSelector(object): def __init__(self, items, header=None): self.items = items.copy() self.header = header if header: self.items.insert(0, header)
BSD 3-Clause New or Revised License
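A hedged usage sketch for the selector above: it only runs inside the Sublime Text plugin host (which provides the sublime module), and the item list, callback, and status message are made up for illustration:

import sublime  # available only inside the Sublime Text plugin host

def on_baud_chosen(item, index):
    # index is already adjusted for the header entry by show()
    sublime.status_message("Selected baud rate %s (index %d)" % (item, index))

selector = SerialOptionSelector(["9600", "115200"], header="Choose a baud rate")
selector.show(on_baud_chosen, starting_index=1)  # highlight "115200" initially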
ciscodevnet/webex-teams-archiver
webexteamsarchiver/webexteamsarchiver.py
WebexTeamsArchiver.archive_room
python
def archive_room(self, room_id: str, text_format: bool = True, html_format: bool = True, json_format: bool = True, **options) -> str: compress_folder = options.get("compress_folder", True) delete_folder = options.get("delete_folder", False) reverse_order = options.get("reverse_order", True) download_attachments = options.get("download_attachments", True) download_avatars = options.get("download_avatars", True) download_workers = options.get("download_workers", 15) timestamp_format = options.get("timestamp_format", "%Y-%m-%dT%H:%M:%S") file_format = options.get("file_format", "gztar") if delete_folder and not compress_folder: raise ValueError("delete_folder cannot be True while compress_folder is False") self._gather_room_information(room_id, download_avatars) self._setup_folder(download_attachments, download_avatars, html_format) try: self._archive(reverse_order, download_attachments, download_avatars, download_workers, text_format, html_format, json_format, timestamp_format) if compress_folder: filename = self._compress_folder(file_format) else: filename = self.archive_folder_name except Exception: self._tear_down_folder() raise if delete_folder: self._tear_down_folder() return filename
Archives a Webex Teams room. This creates a file called roomTitle_timestamp_roomId with the appropriate file extension as defined by file_format param with the following contents: - roomTitle_roomId.txt - Text version of the conversations (if `text_format` is True) - roomTitle_roomId.html - HTML version of the conversations (if `html_format` is True) - files/ - Attachments added to the room (if `download_attachments` is True) Args: room_id: ID of the room to archive. text_format: Create a text version of the archive. html_format: Create an HTML version of the archive. json_format: Create a json version of the archive. Options: compress_folder: Compress archive folder. delete_folder: Delete the archive folder when done. reverse_order: Order messages by most recent on the bottom. download_attachments: Download attachments sent to the room. download_avatars: Download avatar images. download_workers: Number of download workers for downloading files. timestamp_format: Timestamp strftime format. file_format: Archive format as supported by shutil.make_archive Returns: Name of archive file. Raises: IOError: Error occurred while creating/writing to files. shutil.Error: Error occurred creating/copying/deleting files/folders. ValueError: Exception message will contain more details. TypeError: Messages contained non JSON serializable data. webexteamssdkException: An error occurred calling the Webex Teams API.
https://github.com/ciscodevnet/webex-teams-archiver/blob/9813e008b527259d9c62e6bb75aae3b16dbc8568/webexteamsarchiver/webexteamsarchiver.py#L115-L183
import concurrent.futures import os import re import requests import shutil import logging import json import datetime from collections import namedtuple from webexteamssdk import WebexTeamsAPI from webexteamssdk.exceptions import MalformedResponse, ApiError from webexteamssdk.models.immutable import Person from webexteamssdk.generator_containers import GeneratorContainer from .jinja_env import env as jinja_env from .jinja_env import sanitize_name __all__ = ['WebexTeamsArchiver', 'File', 'UserNotFound', 'UserApiFailed'] File = namedtuple( "File", "content_disposition content_length content_type filename deleted") UserNotFound = namedtuple( "UserNotFound", "id emails displayName avatar" ) UserApiFailed = namedtuple( "UserApiFailed", "id emails displayName avatar" ) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class WebexTeamsArchiver: def __init__(self, access_token: str, single_request_timeout: int = 60, special_token: bool = False) -> None: self.access_token = access_token self.special_token = special_token self.sdk = WebexTeamsAPI( self.access_token, single_request_timeout=single_request_timeout) def file_details(self, url: str) -> File: headers = { "Authorization": f"Bearer {self.access_token}", "Accept-Encoding": "", } r = requests.head(url, headers=headers) if r.status_code == 404: return File("", 0, "", "", True) if r.ok: filename_re = re.search(r"filename=\"(.+?)\"", r.headers.get("Content-Disposition", ""), re.I) if not filename_re: new_filename = re.sub(r'^.+/([^/]+)$', r'\1', url) message = ( f"Set filename to '{new_filename}' in {r.headers.get('Content-Disposition', '')} for url {url}" ) logger.debug(message) filename_re = re.search(r"filename=\"(.+?)\"", f"filename=\"{new_filename}\"", re.I) return File(r.headers.get("Content-Disposition", ""), r.headers.get("Content-Length", 0), r.headers.get("Content-Type", ""), sanitize_name(filename_re.group(1)), False) else: return File("", 0, "", "UNKNOWN", True)
MIT License
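A hypothetical call sequence for the archiver above, assuming the class is importable; the access token and room ID are placeholders, and the keyword values simply exercise the documented options:

archiver = WebexTeamsArchiver("YOUR_ACCESS_TOKEN")     # placeholder token
archive_file = archiver.archive_room(
    "Y2lzY29zcGFyazovL_EXAMPLE_ROOM_ID",               # placeholder room ID
    compress_folder=True,                              # produce a single archive file
    delete_folder=True,                                # allowed because compress_folder is True
    download_attachments=True,
)
print("Archive written to", archive_file)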
paulalbert31/labelnoisecorrection
utils.py
mixup_data_Boot
python
def mixup_data_Boot(x, y, alpha=1.0, device='cuda'): if alpha > 0: lam = np.random.beta(alpha, alpha) else: lam = 1 batch_size = x.size()[0] if device=='cuda': index = torch.randperm(batch_size).to(device) else: index = torch.randperm(batch_size) mixed_x = lam * x + (1 - lam) * x[index, :] y_a, y_b = y, y[index] return mixed_x, y_a, y_b, lam, index
Returns mixed inputs, pairs of targets, and lambda
https://github.com/paulalbert31/labelnoisecorrection/blob/5e9a73ee52b7685d93a4fd1d0e66fb0ab5db955a/utils.py#L238-L253
from __future__ import print_function import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms import scipy.stats as stats import math import numpy as np from matplotlib import pyplot as plt from sklearn.mixture import GaussianMixture as GMM from sklearn import preprocessing as preprocessing import sys from tqdm import tqdm def get_data_cifar(loader): data = loader.sampler.data_source.train_data.copy() labels = loader.sampler.data_source.train_labels labels = torch.Tensor(labels[:]).long() return (data, labels) def get_data_cifar_2(loader): labels = loader.sampler.data_source.train_labels labels = torch.Tensor(labels[:]).long() return labels def add_noise_cifar_wo(loader, noise_percentage = 20): torch.manual_seed(2) np.random.seed(42) noisy_labels = [sample_i for sample_i in loader.sampler.data_source.train_labels] images = [sample_i for sample_i in loader.sampler.data_source.train_data] probs_to_change = torch.randint(100, (len(noisy_labels),)) idx_to_change = probs_to_change >= (100.0 - noise_percentage) percentage_of_bad_labels = 100 * (torch.sum(idx_to_change).item() / float(len(noisy_labels))) for n, label_i in enumerate(noisy_labels): if idx_to_change[n] == 1: set_labels = list( set(range(10)) - set([label_i])) set_index = np.random.randint(len(set_labels)) noisy_labels[n] = set_labels[set_index] loader.sampler.data_source.train_data = images loader.sampler.data_source.train_labels = noisy_labels return noisy_labels def add_noise_cifar_w(loader, noise_percentage = 20): torch.manual_seed(2) np.random.seed(42) noisy_labels = [sample_i for sample_i in loader.sampler.data_source.train_labels] images = [sample_i for sample_i in loader.sampler.data_source.train_data] probs_to_change = torch.randint(100, (len(noisy_labels),)) idx_to_change = probs_to_change >= (100.0 - noise_percentage) percentage_of_bad_labels = 100 * (torch.sum(idx_to_change).item() / float(len(noisy_labels))) for n, label_i in enumerate(noisy_labels): if idx_to_change[n] == 1: set_labels = list(set(range(10))) set_index = np.random.randint(len(set_labels)) noisy_labels[n] = set_labels[set_index] loader.sampler.data_source.train_data = images loader.sampler.data_source.train_labels = noisy_labels return noisy_labels def track_training_loss(args, model, device, train_loader, epoch, bmm_model1, bmm_model_maxLoss1, bmm_model_minLoss1): model.eval() all_losses = torch.Tensor() all_predictions = torch.Tensor() all_probs = torch.Tensor() all_argmaxXentropy = torch.Tensor() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) prediction = model(data) prediction = F.log_softmax(prediction, dim=1) idx_loss = F.nll_loss(prediction, target, reduction = 'none') idx_loss.detach_() all_losses = torch.cat((all_losses, idx_loss.cpu())) probs = prediction.clone() probs.detach_() all_probs = torch.cat((all_probs, probs.cpu())) arg_entr = torch.max(prediction, dim=1)[1] arg_entr = F.nll_loss(prediction.float(), arg_entr.to(device), reduction='none') arg_entr.detach_() all_argmaxXentropy = torch.cat((all_argmaxXentropy, arg_entr.cpu())) loss_tr = all_losses.data.numpy() max_perc = np.percentile(loss_tr, 95) min_perc = np.percentile(loss_tr, 5) loss_tr = loss_tr[(loss_tr<=max_perc) & (loss_tr>=min_perc)] bmm_model_maxLoss = torch.FloatTensor([max_perc]).to(device) bmm_model_minLoss = torch.FloatTensor([min_perc]).to(device) + 10e-6 loss_tr = (loss_tr - bmm_model_minLoss.data.cpu().numpy()) / 
(bmm_model_maxLoss.data.cpu().numpy() - bmm_model_minLoss.data.cpu().numpy() + 1e-6) loss_tr[loss_tr>=1] = 1-10e-4 loss_tr[loss_tr <= 0] = 10e-4 bmm_model = BetaMixture1D(max_iters=10) bmm_model.fit(loss_tr) bmm_model.create_lookup(1) return all_losses.data.numpy(), all_probs.data.numpy(), all_argmaxXentropy.numpy(), bmm_model, bmm_model_maxLoss, bmm_model_minLoss def train_CrossEntropy(args, model, device, train_loader, optimizer, epoch): model.train() loss_per_batch = [] acc_train_per_batch = [] correct = 0 for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) output = F.log_softmax(output, dim=1) loss = F.nll_loss(output, target) loss.backward() optimizer.step() loss_per_batch.append(loss.item()) pred = output.max(1, keepdim=True)[1] correct += pred.eq(target.view_as(pred)).sum().item() acc_train_per_batch.append(100. * correct / ((batch_idx+1)*args.batch_size)) if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accuracy: {:.0f}%, Learning rate: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item(), 100. * correct / ((batch_idx + 1) * args.batch_size), optimizer.param_groups[0]['lr'])) loss_per_epoch = [np.average(loss_per_batch)] acc_train_per_epoch = [np.average(acc_train_per_batch)] return (loss_per_epoch, acc_train_per_epoch) def mixup_data(x, y, alpha=1.0, device='cuda'): if alpha > 0: lam = np.random.beta(alpha, alpha) else: lam = 1 batch_size = x.size()[0] if device=='cuda': index = torch.randperm(batch_size).cuda() else: index = torch.randperm(batch_size) mixed_x = lam * x + (1 - lam) * x[index, :] y_a, y_b = y, y[index] return mixed_x, y_a, y_b, lam def mixup_criterion(pred, y_a, y_b, lam): return lam * F.nll_loss(pred, y_a) + (1 - lam) * F.nll_loss(pred, y_b) def train_mixUp(args, model, device, train_loader, optimizer, epoch, alpha): model.train() loss_per_batch = [] acc_train_per_batch = [] correct = 0 for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() inputs, targets_a, targets_b, lam = mixup_data(data, target, alpha, device) output = model(inputs) output = F.log_softmax(output, dim=1) loss = mixup_criterion(output, targets_a, targets_b, lam) loss.backward() optimizer.step() loss_per_batch.append(loss.item()) pred = output.max(1, keepdim=True)[1] correct += pred.eq(target.view_as(pred)).sum().item() acc_train_per_batch.append(100. * correct / ((batch_idx+1)*args.batch_size)) if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accuracy: {:.0f}%, Learning rate: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item(), 100. * correct / ((batch_idx + 1) * args.batch_size), optimizer.param_groups[0]['lr'])) loss_per_epoch = [np.average(loss_per_batch)] acc_train_per_epoch = [np.average(acc_train_per_batch)] return (loss_per_epoch, acc_train_per_epoch) def reg_loss_class(mean_tab,num_classes=10): loss = 0 for items in mean_tab: loss += (1./num_classes)*torch.log((1./num_classes)/items) return loss
MIT License
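A small illustrative call on CPU with random tensors (requires PyTorch and assumes mixup_data_Boot above is in scope); the batch shape and label range are arbitrary:

import torch

x = torch.randn(8, 3, 32, 32)    # small random image batch
y = torch.randint(0, 10, (8,))   # integer class labels

mixed_x, y_a, y_b, lam, index = mixup_data_Boot(x, y, alpha=1.0, device='cpu')
print(mixed_x.shape)                # torch.Size([8, 3, 32, 32])
print(lam)                          # Beta(1, 1) sample, i.e. uniform in (0, 1)
print(y_a[:3], y_b[:3], index[:3])  # original labels, mixed-partner labels, permutation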
benmezger/dotfiles
dot_weechat/python/anotify.py
notify_dcc_chat_request
python
def notify_dcc_chat_request(match): if weechat.config_get_plugin("show_dcc") == "on": nick = match.group(1) a_notify( 'DCC', 'Direct Chat Request', '{0} wants to chat directly.'.format(nick))
Notify on DCC chat request.
https://github.com/benmezger/dotfiles/blob/42a329fb4ffb842716cb53a04c6c5c6ac4fec419/dot_weechat/python/anotify.py#L297-L304
SCRIPT_NAME = 'anotify' SCRIPT_AUTHOR = 'magnific0' SCRIPT_VERSION = '1.0.2' SCRIPT_LICENSE = 'MIT' SCRIPT_DESC = 'Sends libnotify notifications upon events.' SETTINGS = { 'show_public_message': 'off', 'show_private_message': 'on', 'show_public_action_message': 'off', 'show_private_action_message': 'on', 'show_notice_message': 'off', 'show_invite_message': 'on', 'show_highlighted_message': 'on', 'show_server': 'on', 'show_channel_topic': 'on', 'show_dcc': 'on', 'show_upgrade_ended': 'on', 'sticky': 'off', 'sticky_away': 'on', 'icon': '/usr/share/pixmaps/weechat.xpm', } try: import re import os import weechat import notify2 IMPORT_OK = True except ImportError as error: IMPORT_OK = False if str(error).find('weechat') != -1: print('This script must be run under WeeChat.') print('Get WeeChat at http://www.weechat.org.') else: weechat.prnt('', 'anotify: {0}'.format(error)) TAGGED_MESSAGES = { 'public message or action': set(['irc_privmsg', 'notify_message']), 'private message or action': set(['irc_privmsg', 'notify_private']), 'notice message': set(['irc_notice', 'notify_private']), 'invite message': set(['irc_invite', 'notify_highlight']), 'channel topic': set(['irc_topic', ]), } UNTAGGED_MESSAGES = { 'away status': re.compile(r'^You ((\w+).){2,3}marked as being away', re.UNICODE), 'dcc chat request': re.compile(r'^xfer: incoming chat request from (\w+)', re.UNICODE), 'dcc chat closed': re.compile(r'^xfer: chat closed with (\w+)', re.UNICODE), 'dcc get request': re.compile( r'^xfer: incoming file from (\w+) [^:]+: ((?:,\w|[^,])+),', re.UNICODE), 'dcc get completed': re.compile(r'^xfer: file ([^\s]+) received from \w+: OK', re.UNICODE), 'dcc get failed': re.compile( r'^xfer: file ([^\s]+) received from \w+: FAILED', re.UNICODE), 'dcc send completed': re.compile(r'^xfer: file ([^\s]+) sent to \w+: OK', re.UNICODE), 'dcc send failed': re.compile(r'^xfer: file ([^\s]+) sent to \w+: FAILED', re.UNICODE), } DISPATCH_TABLE = { 'away status': 'set_away_status', 'public message or action': 'notify_public_message_or_action', 'private message or action': 'notify_private_message_or_action', 'notice message': 'notify_notice_message', 'invite message': 'notify_invite_message', 'channel topic': 'notify_channel_topic', 'dcc chat request': 'notify_dcc_chat_request', 'dcc chat closed': 'notify_dcc_chat_closed', 'dcc get request': 'notify_dcc_get_request', 'dcc get completed': 'notify_dcc_get_completed', 'dcc get failed': 'notify_dcc_get_failed', 'dcc send completed': 'notify_dcc_send_completed', 'dcc send failed': 'notify_dcc_send_failed', } STATE = { 'icon': None, 'is_away': False } def cb_irc_server_connected(data, signal, signal_data): if weechat.config_get_plugin('show_server') == 'on': a_notify( 'Server', 'Server Connected', 'Connected to network {0}.'.format(signal_data)) return weechat.WEECHAT_RC_OK def cb_irc_server_disconnected(data, signal, signal_data): if weechat.config_get_plugin('show_server') == 'on': a_notify( 'Server', 'Server Disconnected', 'Disconnected from network {0}.'.format(signal_data)) return weechat.WEECHAT_RC_OK def cb_notify_upgrade_ended(data, signal, signal_data): if weechat.config_get_plugin('show_upgrade_ended') == 'on': a_notify( 'WeeChat', 'WeeChat Upgraded', 'WeeChat has been upgraded.') return weechat.WEECHAT_RC_OK def notify_highlighted_message(prefix, message): if weechat.config_get_plugin("show_highlighted_message") == "on": a_notify( 'Highlight', 'Highlighted Message', "{0}: {1}".format(prefix, message), priority=notify2.URGENCY_CRITICAL) def 
notify_public_message_or_action(prefix, message, highlighted): if prefix == ' *': regex = re.compile(r'^(\w+) (.+)$', re.UNICODE) match = regex.match(message) if match: prefix = match.group(1) message = match.group(2) notify_public_action_message(prefix, message, highlighted) else: if highlighted: notify_highlighted_message(prefix, message) elif weechat.config_get_plugin("show_public_message") == "on": a_notify( 'Public', 'Public Message', '{0}: {1}'.format(prefix, message)) def notify_private_message_or_action(prefix, message, highlighted): regex = re.compile(r'^CTCP_MESSAGE.+?ACTION (.+)$', re.UNICODE) match = regex.match(message) if match: notify_private_action_message(prefix, match.group(1), highlighted) else: if prefix == ' *': regex = re.compile(r'^(\w+) (.+)$', re.UNICODE) match = regex.match(message) if match: prefix = match.group(1) message = match.group(2) notify_private_action_message(prefix, message, highlighted) else: if highlighted: notify_highlighted_message(prefix, message) elif weechat.config_get_plugin("show_private_message") == "on": a_notify( 'Private', 'Private Message', '{0}: {1}'.format(prefix, message)) def notify_public_action_message(prefix, message, highlighted): if highlighted: notify_highlighted_message(prefix, message) elif weechat.config_get_plugin("show_public_action_message") == "on": a_notify( 'Action', 'Public Action Message', '{0}: {1}'.format(prefix, message), priority=notify2.URGENCY_NORMAL) def notify_private_action_message(prefix, message, highlighted): if highlighted: notify_highlighted_message(prefix, message) elif weechat.config_get_plugin("show_private_action_message") == "on": a_notify( 'Action', 'Private Action Message', '{0}: {1}'.format(prefix, message), priority=notify2.URGENCY_NORMAL) def notify_notice_message(prefix, message, highlighted): regex = re.compile(r'^([^\s]*) [^:]*: (.+)$', re.UNICODE) match = regex.match(message) if match: prefix = match.group(1) message = match.group(2) if highlighted: notify_highlighted_message(prefix, message) elif weechat.config_get_plugin("show_notice_message") == "on": a_notify( 'Notice', 'Notice Message', '{0}: {1}'.format(prefix, message)) def notify_invite_message(prefix, message, highlighted): if weechat.config_get_plugin("show_invite_message") == "on": regex = re.compile( r'^You have been invited to ([^\s]+) by ([^\s]+)$', re.UNICODE) match = regex.match(message) if match: channel = match.group(1) nick = match.group(2) a_notify( 'Invite', 'Channel Invitation', '{0} has invited you to join {1}.'.format(nick, channel)) def notify_channel_topic(prefix, message, highlighted): if weechat.config_get_plugin("show_channel_topic") == "on": regex = re.compile( r'^\w+ has (?:changed|unset) topic for ([^\s]+)' + '(?:(?: from "(?:(?:"\w|[^"])+)")? to "((?:"\w|[^"])+)")?', re.UNICODE) match = regex.match(message) if match: channel = match.group(1) topic = match.group(2) or '' a_notify( 'Channel', 'Channel Topic', "{0}: {1}".format(channel, topic))
MIT License
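The callback above expects an re match produced by the 'dcc chat request' pattern registered in UNTAGGED_MESSAGES; that pattern can be checked on its own without WeeChat (the nick "alice" is made up):

import re

pattern = re.compile(r'^xfer: incoming chat request from (\w+)', re.UNICODE)
match = pattern.match('xfer: incoming chat request from alice')
print(match.group(1))  # 'alice' -- the nick used in the notification text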
spoclab-ca/covfefe
utils/lexicosyntactic/feature.py
FeatureSet.__init__
python
def __init__(self, features=[]): self.features = features
Parameters: features : optional, list. A list of Feature objects.
https://github.com/spoclab-ca/covfefe/blob/905e8e5eb0905791de869db18c6f755838657203/utils/lexicosyntactic/feature.py#L39-L43
import collections import csv import glob import os import re import nltk.probability import nltk.tree from nltk.corpus import wordnet_ic as wnic from nltk.corpus import cmudict from utils.lexicosyntactic import lexical_features from utils.lexicosyntactic import pragmatic_features from utils.lexicosyntactic import semantic_features from utils.lexicosyntactic import syntactic_features from utils.lexicosyntactic import functions from utils.lexicosyntactic import transcript from utils.lexicosyntactic import yngve from utils import file_utils import config class Feature(object): def __init__(self, feature_type, name, value): self.feature_type = feature_type self.name = name self.value = value class FeatureSet(object):
Apache License 2.0
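A hypothetical construction example using the Feature class defined in the same module (the type/name/value entries are made up); passing an explicit list also sidesteps the shared mutable default argument:

f1 = Feature('lexical', 'word_count', 42)
f2 = Feature('syntactic', 'mean_yngve_depth', 1.7)
fs = FeatureSet(features=[f1, f2])
print([(f.feature_type, f.name, f.value) for f in fs.features])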
googleapis/gapic-generator-python
tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_config_service_v2_get_cmek_settings_sync.py
sample_get_cmek_settings
python
def sample_get_cmek_settings(): client = logging_v2.ConfigServiceV2Client() request = logging_v2.GetCmekSettingsRequest( name="projects/{project}/cmekSettings", ) response = client.get_cmek_settings(request=request) print(response)
Snippet for get_cmek_settings
https://github.com/googleapis/gapic-generator-python/blob/582fed9c43bd8c1a3c5a9a7705fa2e39b729b910/tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_config_service_v2_get_cmek_settings_sync.py#L30-L45
from google.cloud import logging_v2
Apache License 2.0
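The generated snippet above is already a complete usage example; as a rough guide, running it would typically require the google-cloud-logging package and Application Default Credentials with access to the target project:

if __name__ == "__main__":
    sample_get_cmek_settings()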
thecesrom/ignition
src/system/db.py
refresh
python
def refresh(component, propertyName): print(component, propertyName) return True
This function will programmatically cause a SQL Query or DB Browse property binding to execute immediately. It is most often used for bindings that are set to Polling - Off. In this way, you cause a binding to execute on demand, when you know that its query will return a new result. To use it, simply specify the component and the name of the property whose binding you'd like to refresh. Args: component (JComponent): The component whose property you want to refresh. propertyName (str): The name of the property that has a SQL Query binding that needs to be refreshed. Returns: bool: True (1) if the property was found and refreshed successfully.
https://github.com/thecesrom/ignition/blob/c784e573530a217f4c430bd110889ce569152747/src/system/db.py#L376-L397
from __future__ import print_function __all__ = [ "addDatasource", "beginNamedQueryTransaction", "beginTransaction", "clearAllNamedQueryCaches", "clearNamedQueryCache", "closeTransaction", "commitTransaction", "createSProcCall", "dateFormat", "execSProcCall", "getConnectionInfo", "getConnections", "refresh", "removeDatasource", "rollbackTransaction", "runNamedQuery", "runPrepQuery", "runPrepUpdate", "runQuery", "runSFNamedQuery", "runSFPrepUpdate", "runSFUpdateQuery", "runScalarPrepQuery", "runScalarQuery", "runUpdateQuery", "setDatasourceConnectURL", "setDatasourceEnabled", "setDatasourceMaxConnections", ] from com.inductiveautomation.ignition.common import BasicDataset from com.inductiveautomation.ignition.common.script.builtin import ( DatasetUtilities, SProcCall, ) from javax.swing import JComponent BIT = -7 REAL = 7 LONGVARCHAR = -1 LONGVARBINARY = -4 TINYINT = -6 DOUBLE = 8 DATE = 91 NULL = 0 SMALLINT = 5 NUMERIC = 2 TIME = 92 ROWID = -8 INTEGER = 4 DECIMAL = 3 TIMESTAMP = 93 CLOB = 2005 BIGINT = -5 CHAR = 1 BINARY = -2 NCLOB = 2011 FLOAT = 6 VARCHAR = 12 VARBINARY = -3 BLOB = 2004 NCHAR = -15 NVARCHAR = -9 LONGNVARCHAR = -16 BOOLEAN = 16 ORACLE_CURSOR = -10 DISTINCT = 2001 STRUCT = 2002 REF = 2006 JAVA_OBJECT = 2000 SQLXML = 2009 ARRAY = 2003 DATALINK = 70 OTHER = 1111 READ_COMMITTED = 2 READ_UNCOMMITTED = 1 REPEATABLE_READ = 4 SERIALIZABLE = 8 def addDatasource( jdbcDriver, name, description=None, connectUrl=None, username=None, password=None, props=None, validationQuery=None, maxConnections=8, ): print( jdbcDriver, name, description, connectUrl, username, password, props, validationQuery, maxConnections, ) def beginNamedQueryTransaction(*args): print(args) return "transaction_id" def beginTransaction(database=None, isolationLevel=None, timeout=None): print(database, isolationLevel, timeout) return "transaction_id" def clearAllNamedQueryCaches(*args): print(args) def clearNamedQueryCache(*args): print(args) def closeTransaction(tx): print(tx) def commitTransaction(tx): print(tx) def createSProcCall(procedureName, database=None, tx=None, skipAudit=None): print(procedureName, database, tx, skipAudit) return SProcCall() def dateFormat(date, formatPattern): print(date, formatPattern) return "" def execSProcCall(callContext): print(callContext) def getConnectionInfo(name=""): print(name) return BasicDataset() def getConnections(): return BasicDataset()
MIT License
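Against this repository's mock implementation the call simply prints its arguments and returns True; in a real Vision client script you would pass the actual JComponent reference and bound property name instead of the placeholders below:

refreshed = refresh(object(), 'data')  # stand-in component; real code passes e.g. a table component
print(refreshed)                       # True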
unbit/pysftpserver
examples/mongodb_gridfs/mongostorage.py
SFTPServerMongoStorage.close
python
def close(self, handle): handle.close()
Close the file handle.
https://github.com/unbit/pysftpserver/blob/ed2762cc9d39eb40e5e288e8fc9da7b2ac31ed59/examples/mongodb_gridfs/mongostorage.py#L35-L37
from pysftpserver.abstractstorage import SFTPAbstractServerStorage from pysftpserver.pysftpexceptions import SFTPNotFound import pymongo import gridfs class SFTPServerMongoStorage(SFTPAbstractServerStorage): def __init__(self, home, remote, port, db_name): self.home = "/" client = pymongo.MongoClient(remote, port) db = client[db_name] self.gridfs = gridfs.GridFS(db) def open(self, filename, flags, mode): filename = filename.decode() if self.gridfs.exists(filename=filename): return self.gridfs.find({'filename': filename})[0] raise SFTPNotFound def read(self, handle, off, size): return handle.read(size)
MIT License
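A hypothetical end-to-end use of the storage class above; it needs a reachable MongoDB whose GridFS holds a file named "report.txt" in database "sftp" -- host, port, database, and filename are all placeholders:

storage = SFTPServerMongoStorage('/', 'localhost', 27017, 'sftp')
handle = storage.open(b'report.txt', flags=0, mode=0o644)  # raises SFTPNotFound if missing
print(storage.read(handle, 0, 1024))                       # first kilobyte of the file
storage.close(handle)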
dbis-ilm/grizzly
grizzly/dataframes/frame.py
DataFrame.to_numpy
python
def to_numpy(self): raise NotImplementedError("This method has not been implemented yet")
Return a Numpy representation of the DataFrame.
https://github.com/dbis-ilm/grizzly/blob/113e00b3cb980986f1f33e54fd7d84bf30763506/grizzly/dataframes/frame.py#L484-L488
from grizzly.aggregates import AggregateType import queue from typing import List, Tuple, Callable from grizzly.expression import ArithmExpr, ArithmeticOperation, BinaryExpression, BoolExpr, Constant, Expr, ColRef, FuncCall, ComputedCol, ExpressionException, ExprTraverser, LogicExpr, BooleanOperation, SetExpr, SetOperation from grizzly.generator import GrizzlyGenerator from grizzly.expression import ModelUDF,UDF, Param, ModelType import inspect from collections import namedtuple import logging logger = logging.getLogger(__name__) class DataFrame(object): def __init__(self, columns, parents, alias: str = "", index=None): super(DataFrame, self).__init__() self.index = index if not columns: self.columns = [] elif not isinstance(columns, list): self.columns = [columns] else: self.columns = columns self.computedCols = [] if parents is None or type(parents) is list: self.parents = parents else: self.parents = [parents] self.alias = alias def updateRef(self, x): if isinstance(x,ColRef): x.df = self return x elif isinstance(x, FuncCall): for ic in x.inputCols: self.updateRef(ic) return x elif isinstance(x, str): ref = ColRef(x, self) return ref elif isinstance(x, BinaryExpression): if x.left: x.left = self.updateRef(x.left) if isinstance(x.left, Expr) else x.left if x.right: x.right = self.updateRef(x.right) if isinstance(x.right, Expr) else x.right return x else: return x def hasColumn(self, colName): if not self.columns: return True for ref in self.columns: if ref.column == colName: return True return False def filter(self, expr): return Filter(expr, self) def project(self, cols, distinct = False): return Projection(cols, self, doDistinct=distinct) def distinct(self): if isinstance(self, Projection): self.doDistinct = True return self else: return Projection(None, self, doDistinct = True) def join(self, other, on, how="inner", comp = "="): if isinstance(on, list): lOn = None rOn = None from grizzly.expression import ExpressionException if not self.hasColumn(on[0]): raise ExpressionException(f"No such column {on[0]} for join in left hand side") else: lOn = ColRef(on[0], self) if not other.hasColumn(on[1]): raise ExpressionException(f"No such column {on[1]} for join in right hand side") else: rOn = ColRef(on[1], other) on = [lOn, rOn] return Join(self, other, on, how, comp) def groupby(self, groupCols): if not isinstance(groupCols, list): groupCols = [groupCols] return Grouping(groupCols, self) def limit(self, n: int, offset = None): if n < 0: raise ValueError(f"LIMIT must not be negative (got {n})") if isinstance(offset, int): offset = Constant(offset) return Limit(Constant(n), offset, self) def sort_values(self, by, ascending:bool=True): if not isinstance(by, list): by = [by] return Ordering(by,ascending, self) def _map(self, func, lines=[]): if inspect.isfunction(func): if not isinstance(self, Projection): ValueError("functions can only be applied to projections currently") funcName = func.__name__ sig = inspect.signature(func) fparams = sig.parameters params = [] for fp in fparams: fptype = sig.parameters[fp].annotation.__name__ p = Param(fp,fptype) params.append(p) if lines == []: (lines,_) = inspect.getsourcelines(func) returns = sig.return_annotation.__name__ udf = UDF(funcName, params, lines, returns) call = FuncCall(funcName, self.columns, udf) return call elif isinstance(func, DataFrame): return self.join(func, on = None, how = "natural") else: print(f"error: {func} is not a function or other DataFrame") exit(1) def apply_torch_model(self, path: str, toTensorFunc, clazz, outputDict, 
clazzParameters: List, n_predictions: int = 1, *helperFuncs): if not isinstance(self, Projection): ValueError("classification can only be applied to a projection") if len(outputDict) <= 0: raise ValueError("output dict must not be empty") sqlGenerator = GrizzlyGenerator._backend.queryGenerator modelPathHash = abs(hash(path)) funcName = f"grizzly_predict_{modelPathHash}" attrsString = "_".join([r.column for r in self.columns]) sig = inspect.signature(toTensorFunc) fparams = sig.parameters if len(fparams) != 1: raise ValueError("toTensor converter must have exactly one parameter") toTensorInputType = sig.parameters[list(sig.parameters)[0]].annotation.__name__ params = [Param("invalue", toTensorInputType), Param("n_predictions", "int")] paramsStr = ",".join([f"{p.name} {sqlGenerator._mapTypes(p.type)}" for p in params]) predictedType = "str" helpers = list(helperFuncs) helperCode = "\n" for helperFunc in helpers: (funcLines, _) = inspect.getsourcelines(helperFunc) funcLines = sqlGenerator._unindent(funcLines) helperCode += "".join(funcLines) (encoderCode, _) = inspect.getsourcelines(toTensorFunc) encoderCode = sqlGenerator._unindent(encoderCode) encoderCode = "".join(encoderCode) converter = lambda x: f"\"{x}\"" if type(x) == str else f"{x}" outDictCode = "[" + ",".join(map(converter, outputDict)) + "]" modelParameters = ",".join(map(converter, clazzParameters)) if clazzParameters else "" (clazzCodeLst, _) = inspect.getsourcelines(clazz) clazzCode = "".join(clazzCodeLst) template_replacement_dict = {} template_replacement_dict["$$modelpathhash$$"] = modelPathHash template_replacement_dict["$$modelpath$$"] = path template_replacement_dict["$$encoderfuncname$$"] = toTensorFunc.__name__ template_replacement_dict["$$helpers$$"] = helperCode template_replacement_dict["$$encoder$$"] = encoderCode template_replacement_dict["$$inputcols$$"] = paramsStr template_replacement_dict["$$outputdict$$"] = outDictCode template_replacement_dict["$$modelclassparameters$$"] = modelParameters template_replacement_dict["$$modelclassname$$"] = clazz.__name__ template_replacement_dict["$$modelclassdef$$"] = clazzCode udf = ModelUDF(funcName, params, predictedType, ModelType.TORCH, template_replacement_dict) call = FuncCall(funcName, self.columns + [n_predictions] , udf, f"predicted_{attrsString}") return call def apply_onnx_model(self, onnx_path, input_to_tensor, tensor_to_output): funcName = "apply" attrsString = "_".join([r.column for r in self.columns]) in_sig = inspect.signature(input_to_tensor) input_names = list(in_sig.parameters.keys()) input_names_str = ','.join(input_names) (lines1, _) = inspect.getsourcelines(input_to_tensor) params = [] for param in in_sig.parameters: type = in_sig.parameters[param].annotation.__name__ if (type == "_empty"): raise ValueError("Input converter function must specify parameter types") params.append(Param(param, type)) out_sig = inspect.signature(tensor_to_output) (lines2, _) = inspect.getsourcelines(tensor_to_output) returntype = out_sig.return_annotation.__name__ if (returntype == "_empty"): raise ValueError("Output converter function must specify the return type") template_replacement_dict = {} template_replacement_dict["$$inputs$$"] = str(in_sig) template_replacement_dict["$$returntype$$"] = returntype template_replacement_dict["$$input_to_tensor_func$$"] = "".join(lines1) template_replacement_dict["$$tensor_to_output_func$$"] = "".join(lines2) template_replacement_dict["$$input_names$$"] = input_names_str template_replacement_dict["$$onnx_file_path$$"] = onnx_path 
template_replacement_dict["$$input_to_tensor_func_name$$"] = input_to_tensor.__name__ template_replacement_dict["$$tensor_to_output_func_name$$"] = tensor_to_output.__name__ udf = ModelUDF(funcName, params, returntype, ModelType.ONNX, template_replacement_dict) call = FuncCall(funcName, self.columns, udf, f"predicted_{attrsString}") return call def apply_tensorflow_model(self, tf_checkpoint_file: str, network_input_names, constants=[], vocab_file: str = ""): funcName = "apply" attrsString = "_".join([r.column for r in self.columns]) params = [Param("a", "str")] returntype = "int" template_replacement_dict = {} template_replacement_dict["$$tf_checkpoint_file$$"] = tf_checkpoint_file template_replacement_dict["$$vocab_file$$"] = vocab_file template_replacement_dict["$$network_input_names$$"] = f"""[{', '.join('"%s"' % n for n in network_input_names)}]""" template_replacement_dict["$$constants$$"] = f"[{','.join(str(item) for item in constants)}]" udf = ModelUDF(funcName, params, returntype, ModelType.TF, template_replacement_dict) call = FuncCall(funcName, self.columns, udf, f"predicted_{attrsString}") return call def map(self, func): return self._map(func) def __iter__(self): return GrizzlyGenerator.iterator(self) def iterrows(self): num = 0 for row in self: yield (num, list(row)) num += 1 def itertuples(self, name="Grizzly",index=None): theIter = GrizzlyGenerator.iterator(self, includeHeader=True) headerRow = next(theIter) RowType = namedtuple(name, headerRow) for row in theIter: yield RowType._make(row) def items(self): arr = self.collect(includeHeader=True) header = arr[0] data = arr[1:] col = 0 for colname in header: columndata = [row[col] for row in data] yield (colname, columndata) col += 1 def __getattr__(self, name): return ColRef(name, self) def __setitem__(self, key, value): if isinstance(value, Grouping): f = value.aggFunc[-1] f.alias = key elif isinstance(value, Expr) or isinstance(value, DataFrame): if isinstance(value, FuncCall): value.alias = key newCol = value self.updateRef(value) else: newCol = ComputedCol(value, key) self.computedCols.append(newCol) else: newCol = ComputedCol(Constant(value), key) self.computedCols.append(newCol) def __getitem__(self, key): theType = type(key) if isinstance(key, slice): if key.step is not None: logger.warn("Step is not supported for slice access on DataFrames") n = key.stop offset = key.start if key.start is not None else None return self.limit(n, offset) elif theType is ColRef : return self.project(key) elif isinstance(key, BoolExpr) or isinstance(key, LogicExpr): return self.filter(key) elif theType is str: return ColRef(key,self) elif theType is list: projList = [] for e in key: t = type(e) if t is str: projList.append(ColRef(e, self)) elif t is ColRef: c = ColRef(e.colName(), self) projList.append(c) else: raise ExpressionException(f"expected a column name string or column reference, but got {e}") return self.project(projList) else: print(f"{key} has type {theType} -- ignoring") return self def info(self, verbose = None, buf=None, max_cols=None, memory_usage=None, show_counts=None, null_counts=None): raise NotImplementedError("This method has not been implemented yet") def select_types(include=None, exclude=None): raise NotImplementedError("This method has not been implemented yet") def values(self): raise NotImplementedError("This method has not been implemented yet")
MIT License
geier/pycarddav
pycarddav/carddav.py
PyCardDAV.upload_new_card
python
def upload_new_card(self, card): self._check_write_support() card = card.encode('utf-8') for _ in range(0, 5): rand_string = get_random_href() remotepath = str(self.url.resource + rand_string + ".vcf") headers = self.headers headers['content-type'] = 'text/vcard' headers['If-None-Match'] = '*' response = requests.put(remotepath, data=card, headers=headers, **self._settings) if response.ok: parsed_url = urlparse.urlparse(remotepath) if 'etag' not in response.headers.keys() or response.headers['etag'] is None: etag = '' else: etag = response.headers['etag'] return (parsed_url.path, etag) response.raise_for_status()
upload a new card to the server :param card: vcard to be uploaded :type card: unicode :rtype: tuple of the path of the vcard on the server (string) and the etag of the new card (string or None)
https://github.com/geier/pycarddav/blob/edc9150f4b21cf027f42e77c42a6030d091e0624/pycarddav/carddav.py#L196-L224
from collections import namedtuple import requests import urlparse import logging import lxml.etree as ET def get_random_href(): import random tmp_list = list() for _ in xrange(3): rand_number = random.randint(0, 0x100000000) tmp_list.append("{0:x}".format(rand_number)) return "-".join(tmp_list).upper() class UploadFailed(Exception): pass class NoWriteSupport(Exception): pass class PyCardDAV(object): def __init__(self, resource, debug='', user='', passwd='', verify=True, write_support=False, auth='basic'): urllog = logging.getLogger('requests.packages.urllib3.connectionpool') urllog.setLevel(logging.CRITICAL) urllog = logging.getLogger('urllib3.connectionpool') urllog.setLevel(logging.CRITICAL) try: import urllib3.contrib.pyopenssl except ImportError: pass else: urllib3.contrib.pyopenssl.inject_into_urllib3() split_url = urlparse.urlparse(resource) url_tuple = namedtuple('url', 'resource base path') self.url = url_tuple(resource, split_url.scheme + '://' + split_url.netloc, split_url.path) self.debug = debug self.session = requests.session() self.write_support = write_support self._settings = {'verify': verify} if auth == 'basic': self._settings['auth'] = (user, passwd,) if auth == 'digest': from requests.auth import HTTPDigestAuth self._settings['auth'] = HTTPDigestAuth(user, passwd) self._default_headers = {"User-Agent": "pyCardDAV"} headers = self.headers headers['Depth'] = '1' response = self.session.request('OPTIONS', self.url.resource, headers=headers, **self._settings) response.raise_for_status() if 'addressbook' not in response.headers.get('DAV', ''): raise Exception("URL is not a CardDAV resource") @property def verify(self): return self._settings['verify'] @verify.setter def verify(self, verify): self._settings['verify'] = verify @property def headers(self): return dict(self._default_headers) def _check_write_support(self): if not self.write_support: raise NoWriteSupport def get_abook(self): xml = self._get_xml_props() abook = self._process_xml_props(xml) return abook def get_vcard(self, href): response = self.session.get(self.url.base + href, headers=self.headers, **self._settings) response.raise_for_status() return response.content def update_vcard(self, card, href, etag): self._check_write_support() remotepath = str(self.url.base + href) headers = self.headers headers['content-type'] = 'text/vcard' if etag is not None: headers['If-Match'] = etag self.session.put(remotepath, data=card, headers=headers, **self._settings) def delete_vcard(self, href, etag): self._check_write_support() remotepath = str(self.url.base + href) headers = self.headers headers['content-type'] = 'text/vcard' if etag is not None: headers['If-Match'] = etag response = self.session.delete(remotepath, headers=headers, **self._settings) response.raise_for_status()
MIT License
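A minimal usage sketch for upload_new_card, assuming a reachable CardDAV server; the URL, credentials and vcard below are placeholders, and the constructor arguments follow the PyCardDAV.__init__ shown in the context (Python 2 era code).

from pycarddav.carddav import PyCardDAV

VCARD = u"BEGIN:VCARD\nVERSION:3.0\nFN:Jane Doe\nEND:VCARD\n"

# hypothetical server and credentials; write_support must be enabled explicitly,
# otherwise upload_new_card raises NoWriteSupport
dav = PyCardDAV('https://dav.example.com/addressbook/',
                user='jane', passwd='secret', write_support=True)
href, etag = dav.upload_new_card(VCARD)
print(href, etag)   # path of the new vcard on the server and its etag ('' if the server sent none)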
gandersen101/spaczz
tests/test_matcher/test_regexmatcher.py
add_gpe_ent
python
def add_gpe_ent( matcher: RegexMatcher, doc: Doc, i: int, matches: List[Tuple[str, int, int, Tuple[int, int, int]]], ) -> None: _match_id, start, end, _fuzzy_counts = matches[i] entity = Span(doc, start, end, label="GPE") doc.ents += (entity,)
Callback on match function for later testing. Adds "GPE" entities to doc.
https://github.com/gandersen101/spaczz/blob/0139082a33353c7ba5cbfa689fa0da7692347ecb/tests/test_matcher/test_regexmatcher.py#L14-L23
import pickle from typing import List, Tuple import warnings import pytest from spacy.language import Language from spacy.tokens import Doc, Span from spaczz.exceptions import KwargsWarning from spaczz.matcher.regexmatcher import RegexMatcher
MIT License
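A hedged sketch that wires the add_gpe_ent callback above into a RegexMatcher; the add() signature (label, pattern list, on_match keyword) is assumed from spaczz's documented usage and may differ between versions.

import spacy
from spaczz.matcher.regexmatcher import RegexMatcher

# add_gpe_ent is the callback defined in the test module above
nlp = spacy.blank("en")
doc = nlp("I live in Boston.")

matcher = RegexMatcher(nlp.vocab)
matcher.add("GPE", ["Boston"], on_match=add_gpe_ent)   # signature assumed
matcher(doc)
print([(ent.text, ent.label_) for ent in doc.ents])    # expected: [('Boston', 'GPE')]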
rootpy/rootpy
rootpy/stl.py
make_string
python
def make_string(obj): if inspect.isclass(obj): if issubclass(obj, Object): return obj._ROOT.__name__ if issubclass(obj, string_types): return 'string' return obj.__name__ if not isinstance(obj, string_types): raise TypeError("expected string or class") return obj
If ``obj`` is a string, return that, otherwise attempt to figure out the name of a type.
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/stl.py#L302-L315
from __future__ import absolute_import import sys import inspect import hashlib import os import re from os.path import join as pjoin, exists import ROOT from .extern.pyparsing import ParseException from .extern.six import string_types from .base import Object from .defaults import extra_initialization from .utils.cpp import CPPGrammar from .utils.path import mkdir_p from .utils.lock import lock from . import compiled from . import userdata from . import lookup_by_name, register, QROOT from . import log; log = log[__name__] __all__ = [] STL = QROOT.std.stlclasses HAS_ITERATORS = [ 'map', 'vector', 'list' ] KNOWN_TYPES = { "pair": "<utility>", "string": "<string>", } compiled.register_code(""" #include <string> // PyROOT builtin namespace PyROOT { namespace Utility { const std::string ResolveTypedef( const std::string& name ); } } // cint magic int G__defined_tagname(const char*, int); // Returns true if the given type does not require a dictionary bool _rootpy_dictionary_already_exists(const char* type) { const std::string full_typedef = PyROOT::Utility::ResolveTypedef(type); return G__defined_tagname(full_typedef.c_str(), 4) != -1; } """, ["_rootpy_dictionary_already_exists"]) LINKDEF = '''\ %(includes)s #ifdef __CINT__ #pragma link off all globals; #pragma link off all classes; #pragma link off all functions; #pragma link C++ nestedclasses; #pragma link C++ nestedtypedefs; #pragma link C++ class %(declaration)s; #pragma link C++ class %(declaration)s::*; #ifdef HAS_ITERATOR #pragma link C++ operators %(declaration)s::iterator; #pragma link C++ operators %(declaration)s::const_iterator; #pragma link C++ operators %(declaration)s::reverse_iterator; #pragma link C++ operators %(declaration)s::const_reverse_iterator; #endif #endif ''' NEW_DICTS = False LOOKUP_TABLE_NAME = 'lookup' LOADED_DICTS = {} DICTS_PATH = os.path.join(userdata.BINARY_PATH, 'dicts') if not os.path.exists(DICTS_PATH): mkdir_p(DICTS_PATH) include_list = os.path.join(userdata.BINARY_PATH, 'include_paths.list') log.debug('Using {0} to get additional include paths'.format(include_list)) if os.path.exists(include_list): with open(include_list) as inc_list: for line in inc_list: line = line.strip() log.debug('adding {0} to the include paths'.format(line)) ROOT.gInterpreter.AddIncludePath(line) @extra_initialization def initialize(): global DICTS_PATH path = ":".join([DICTS_PATH, ROOT.gSystem.GetDynamicPath()]) ROOT.gSystem.SetDynamicPath(path) ROOT.gSystem.AddLinkedLibs("-Wl,-rpath,{0}".format(DICTS_PATH)) class CPPType(CPPGrammar): def __init__(self, parse_result): self.parse_result = parse_result self.prefix = parse_result.type_prefix self.name = ' '.join(parse_result.type_name) self.params = parse_result.template_params self.member = parse_result.template_member self.suffix = parse_result.type_suffix def __repr__(self): return self.parse_result.dump() @classmethod def make(cls, string, location, tokens): return cls(tokens) @property def is_template(self): return bool(self.params) def ensure_built(self, headers=None): if not self.params: return else: for child in self.params: child.ensure_built(headers=headers) if headers is None: headers = self.guess_headers generate(str(self), headers, has_iterators=self.name in HAS_ITERATORS) @property def guess_headers(self): name = self.name.replace("*", "") headers = [] if name in KNOWN_TYPES: headers.append(KNOWN_TYPES[name]) elif name in STL: headers.append('<{0}>'.format(name)) elif hasattr(ROOT, name) and name.startswith("T"): headers.append('<{0}.h>'.format(name)) elif '::' in 
name: headers.append('<{0}.h>'.format(name.replace('::', '/'))) elif name == 'allocator': headers.append('<memory>') else: try: CPPGrammar.BASIC_TYPE.parseString(name, parseAll=True) except ParseException as e: log.warning( "unable to guess headers required for {0}".format(name)) if self.params: for child in self.params: headers.extend(child.guess_headers) return list(set(headers)) @property def cls(self): return SmartTemplate(self.name)(", ".join(map(str, self.params))) @classmethod def try_parse(cls, string): try: with log.ignore("^Failed to parse.*$"): return cls.from_string(string) except ParseException: return None @classmethod def from_string(cls, string): cls.TYPE.setParseAction(cls.make) try: return cls.TYPE.parseString(string, parseAll=True)[0] except ParseException: log.error("Failed to parse '{0}'".format(string)) raise def __str__(self): prefix = ' '.join(self.prefix) if prefix: prefix += ' ' name = self.name args = [str(p) for p in self.params] if self.params else [] templatize = '<{0} >' if args and args[-1].endswith('>') else '<{0}>' args = '' if not self.params else templatize.format(', '.join(args)) member = ('::' + self.member[0]) if self.member else '' suffix = ' '.join(self.suffix) return "{0}{1}{2}{3}{4}".format(prefix, name, args, member, suffix)
BSD 3-Clause New or Revised License
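A small illustration of make_string on the two accepted input kinds; importing rootpy.stl needs a working ROOT installation, so treat this purely as a sketch.

from rootpy.stl import make_string

print(make_string("vector<int>"))   # strings pass through unchanged -> 'vector<int>'
print(make_string(str))             # builtin string classes map to C++ 'string'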
narimiran/tably
tably.py
Tably.create_row
python
def create_row(self, line, indent): return r'{indent}{indent}{content} \\'.format( indent=indent, content=' & '.join(self.tex_str(line)))
Creates a row based on `line` content
https://github.com/narimiran/tably/blob/57e4b21b709e558a693de5ae6cdf8b4ba8a0bc38/tably.py#L169-L173
import argparse import csv import os PREAMBLE = r"""\documentclass[11pt, a4paper]{article} \usepackage{booktabs} \begin{document}""" HEADER = r"""\begin{{table}}[htb] {indent}\centering{caption}{label} {indent}\begin{{tabular}}{{@{{}}{align}@{{}}}} {indent}{indent}\toprule""" FOOTER = r"""{indent}{indent}\bottomrule {indent}\end{{tabular}} \end{{table}}""" LABEL = '\n{indent}\\label{{{label}}}' CAPTION = '\n{indent}\\caption{{{caption}}}' class Tably: def __init__(self, args): self.files = args.files self.no_header = args.no_header self.caption = args.caption self.label = args.label self.align = args.align self.no_indent = args.no_indent self.outfile = args.outfile self.separate_outfiles = args.separate_outfiles self.skip = args.skip self.preamble = args.preamble self.sep = get_sep(args.sep) self.units = args.units self.fragment = args.fragment self.fragment_skip_header = args.fragment_skip_header self.replace = args.replace self.tex_str = escape if not args.no_escape else lambda x: x def run(self): if self.fragment_skip_header: self.skip = 1 self.no_header = True self.fragment = True if self.fragment: self.no_indent = True self.label = None self.preamble = False if self.outfile or self.separate_outfiles is None: final_content = self.combine_tables() if not final_content: return if self.outfile: try: save_content(final_content, self.outfile, self.replace) except FileNotFoundError: print('{} is not a valid/known path. Could not save there.'.format(self.outfile)) else: print(final_content) if self.separate_outfiles is not None: outs = self.separate_outfiles if len(outs) == 0: outs = [ os.path.splitext(file)[0]+'.tex' for file in self.files ] elif os.path.isdir(outs[0]): outs = [ os.path.join(outs[0], os.path.splitext(os.path.basename(file))[0])+'.tex' for file in self.files ] elif len(outs) != len(self.files): print('WARNING: Number of .csv files and number of output files do not match!') for file, out in zip(self.files, outs): self.save_single_table(file, out) def create_table(self, file): rows = [] indent = 4*' ' if not self.no_indent else '' try: with open(file) as infile: for i, columns in enumerate(csv.reader(infile, delimiter=self.sep)): if i < self.skip: continue rows.append(self.create_row(columns, indent)) except FileNotFoundError: print("File {} doesn't exist!!\n".format(file)) return '' if not rows: print("No table created from the {} file. Check if the file is empty " "or you used too high skip value.\n".format(file)) return '' if not self.no_header: rows.insert(1, r'{0}{0}\midrule'.format(indent)) if self.units: rows[0] = rows[0] + r'\relax' units = self.get_units() rows.insert(1, r'{0}{0}{1} \\'.format(indent, units)) content = '\n'.join(rows) if not self.fragment: header = HEADER.format( label=add_label(self.label, indent), caption=add_caption(self.caption, indent), align=format_alignment(self.align, len(columns)), indent=indent, ) footer = FOOTER.format(indent=indent) return '\n'.join((header, content, footer)) else: return content
MIT License
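Constructing a Tably instance needs parsed argparse options, so this sketch only reproduces the formatting expression from create_row (the real method additionally runs each cell through tex_str for escaping) to show the LaTeX row it emits.

indent = 4 * ' '
columns = ['alpha', 'beta', '3.14']
row = r'{indent}{indent}{content} \\'.format(indent=indent,
                                             content=' & '.join(columns))
print(row)   # '        alpha & beta & 3.14 \\'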
colcon/colcon-core
colcon_core/command.py
verb_main
python
def verb_main(context, logger): try: rc = context.args.main(context=context) except RuntimeError as e: logger.error( '{context.command_name} {context.args.verb_name}: {e}' .format_map(locals())) return 1 except Exception as e: exc = traceback.format_exc() logger.error( '{context.command_name} {context.args.verb_name}: {e}\n{exc}' .format_map(locals())) return 1 return rc
Invoke the logic of the selected verb. If the invocation is interrupted the returned error code is `signal.SIGINT`. If the verb raises a `RuntimeError` an error message is logged which contains the message of the exception. For any other exception a traceback is included in the logged error message. :param context: The :class:`CommandContext` :param logger: The logger :returns: The return code
https://github.com/colcon/colcon-core/blob/7d930eaa58342a0c1579b3614a945ea484640f7d/colcon_core/command.py#L510-L542
import argparse import datetime import logging import os from pathlib import Path import shutil import signal import sys import traceback from colcon_core.environment_variable import EnvironmentVariable WARNINGS_ENVIRONMENT_VARIABLE = EnvironmentVariable( 'COLCON_WARNINGS', 'Set the warnings filter similar to PYTHONWARNINGS except that the module ' "entry is implicitly set to 'colcon.*'") warnings_filters = os.environ.get(WARNINGS_ENVIRONMENT_VARIABLE.name) if warnings_filters: import warnings for f in warnings_filters.split(','): fields = f.split(':', 4) if len(fields) < 5: fields += [''] * (5 - len(fields)) action, message, category, module, line = fields try: category = warnings._getcategory(category) except Exception: print( "The category field '{category}' must be a valid warnings " 'class name'.format_map(locals()), file=sys.stderr) sys.exit(1) if module: print( 'The module field of the {WARNINGS_ENVIRONMENT_VARIABLE.name} ' 'filter should be empty, otherwise use PYTHONWARNINGS instead' .format_map(locals()), file=sys.stderr) sys.exit(1) warnings.filterwarnings( action, message=message, category=category or Warning, module='colcon.*', lineno=line if line else 0) from colcon_core.argument_parser import decorate_argument_parser from colcon_core.argument_parser import SuppressUsageOutput from colcon_core.entry_point import load_entry_points from colcon_core.location import create_log_path from colcon_core.location import get_log_path from colcon_core.location import set_default_config_path from colcon_core.location import set_default_log_path from colcon_core.logging import add_file_handler from colcon_core.logging import colcon_logger from colcon_core.logging import get_numeric_log_level from colcon_core.logging import set_logger_level_from_env from colcon_core.plugin_system import get_first_line_doc from colcon_core.verb import get_verb_extensions LOG_LEVEL_ENVIRONMENT_VARIABLE = EnvironmentVariable( 'COLCON_LOG_LEVEL', 'Set the log level (debug|10, info|20, warn|30, error|40, critical|50, or ' 'any other positive numeric value)') HOME_ENVIRONMENT_VARIABLE = EnvironmentVariable( 'COLCON_HOME', 'Set the configuration directory (default: ~/.colcon)') _command_exit_handlers = [] def register_command_exit_handler(handler): global _command_exit_handlers if handler not in _command_exit_handlers: _command_exit_handlers.append(handler) def main(*, command_name='colcon', argv=None): global _command_exit_handlers try: return _main(command_name=command_name, argv=argv) except KeyboardInterrupt: return signal.SIGINT finally: while _command_exit_handlers: handler = _command_exit_handlers.pop() handler() def _main(*, command_name, argv): global colcon_logger colcon_logger.setLevel(logging.WARNING) set_logger_level_from_env( colcon_logger, '{command_name}_LOG_LEVEL'.format_map(locals()).upper()) colcon_logger.debug( 'Command line arguments: {argv}' .format(argv=argv if argv is not None else sys.argv)) set_default_config_path( path=( Path('~') / '.{command_name}'.format_map(locals())).expanduser(), env_var='{command_name}_HOME'.format_map(locals()).upper()) parser = create_parser('colcon_core.environment_variable') verb_extensions = get_verb_extensions() subparser = create_subparser( parser, command_name, verb_extensions, attribute='verb_name') verb_parsers = add_parsers_without_arguments( parser, subparser, verb_extensions, attribute='verb_name') with SuppressUsageOutput([parser] + list(verb_parsers.values())): known_args, _ = parser.parse_known_args(args=argv) if known_args.verb_name: 
add_parser_arguments(known_args.verb_parser, known_args.verb_extension) args = parser.parse_args(args=argv) context = CommandContext(command_name=command_name, args=args) if args.log_level: colcon_logger.setLevel(get_numeric_log_level(args.log_level)) colcon_logger.debug( 'Parsed command line arguments: {args}'.format_map(locals())) if args.verb_name is None: print(parser.format_usage()) return 'Error: No verb provided' now = datetime.datetime.now() now_str = str(now)[:-7].replace(' ', '_').replace(':', '-') set_default_log_path( base_path=args.log_base, env_var='{command_name}_LOG_PATH'.format_map(locals()).upper(), subdirectory='{args.verb_name}_{now_str}'.format_map(locals())) log_path = get_log_path() if log_path is not None: create_log_path(args.verb_name) handler = add_file_handler( colcon_logger, log_path / 'logger_all.log') log_record = colcon_logger.makeRecord( colcon_logger.name, logging.DEBUG, __file__, 0, 'Command line arguments: {argv}' .format(argv=argv if argv is not None else sys.argv), None, None) handler.handle(log_record) log_record = colcon_logger.makeRecord( colcon_logger.name, logging.DEBUG, __file__, 0, 'Parsed command line arguments: {args}'.format_map(locals()), None, None) handler.handle(log_record) if command_name.upper() not in os.environ: os.environ[command_name.upper()] = '1' return verb_main(context, colcon_logger) def create_parser(environment_variables_group_name): class CustomArgumentParser(argparse.ArgumentParser): def _parse_optional(self, arg_string): result = super()._parse_optional(arg_string) if result == (None, arg_string, None): return None return result parser = CustomArgumentParser( prog=get_prog_name(), formatter_class=CustomFormatter, epilog=( get_environment_variables_epilog( environment_variables_group_name ) + '\n\n' + READTHEDOCS_MESSAGE)) parser = decorate_argument_parser(parser) add_log_level_argument(parser) return parser def get_prog_name(): prog = sys.argv[0] basename = os.path.basename(prog) if basename == '__main__.py': prog = os.path.basename(os.path.dirname(prog)) elif shutil.which(basename) == prog: prog = basename return prog class CustomFormatter(argparse.RawDescriptionHelpFormatter): def _split_lines(self, text, width): lines = [] for line in text.splitlines(): if len(line) <= width: lines.append(line) else: lines += super()._split_lines(line, width) return lines def get_environment_variables_epilog(group_name): entry_points = load_entry_points(group_name) env_vars = { env_var.name: env_var.description for env_var in entry_points.values()} epilog_lines = [] for name in sorted(env_vars.keys()): epilog_lines += _format_pair(name, env_vars[name], indent=2, align=24) return 'Environment variables:\n' + '\n'.join(epilog_lines) READTHEDOCS_MESSAGE = 'For more help and usage tips, see ' 'https://colcon.readthedocs.io' def add_log_level_argument(parser): parser.add_argument( '--log-base', help='The base path for all log directories (default: ./log, to ' 'disable: {os.devnull})'.format_map(globals())) parser.add_argument( '--log-level', action=LogLevelAction, help='Set log level for the console output, either by numeric or ' 'string value (default: warning)') class LogLevelAction(argparse.Action): def __init__(self, option_strings, dest, *, nargs=None, **kwargs): if nargs is not None: raise ValueError('nargs not allowed') super().__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): try: value = get_numeric_log_level(values) except ValueError as e: parser.error( '{option_string} has 
unsupported value, {e}' .format_map(locals())) setattr(namespace, self.dest, value) def add_subparsers(parser, cmd_name, verb_extensions, *, attribute): subparser = create_subparser( parser, cmd_name, verb_extensions, attribute=attribute) verb_parsers = add_parsers_without_arguments( parser, subparser, verb_extensions, attribute=attribute) for name, verb_parser in verb_parsers.items(): add_parser_arguments( verb_parser, verb_extensions[name]) def create_subparser(parser, cmd_name, verb_extensions, *, attribute): global colcon_logger assert verb_extensions, 'No verb extensions' verbs = [] for name, extension in verb_extensions.items(): verbs += _format_pair( name, get_first_line_doc(extension), indent=0, align=22) subparser = parser.add_subparsers( title='{cmd_name} verbs'.format_map(locals()), description='\n'.join(verbs), dest=attribute, help='call `{cmd_name} VERB -h` for specific help' .format_map(locals()) ) return subparser def add_parsers_without_arguments( parser, subparser, verb_extensions, *, attribute ): verb_parsers = {} for name, extension in verb_extensions.items(): verb_parser = subparser.add_parser( getattr(extension, attribute.upper()), description=get_first_line_doc(extension) + '.', formatter_class=parser.formatter_class, ) verb_parser.set_defaults( verb_parser=verb_parser, verb_extension=extension, main=extension.main) verb_parsers[name] = verb_parser return verb_parsers def add_parser_arguments(verb_parser, extension): if hasattr(extension, 'add_arguments'): retval = extension.add_arguments(parser=verb_parser) if retval is not None: colcon_logger.error( "Exception in verb extension '{extension.VERB_NAME}': " 'add_arguments() should return None'.format_map(locals())) def _format_pair(key, value, *, indent, align): lines = [] prefix = ' ' * indent + key minimum_gap = 2 if len(prefix) + minimum_gap <= align: lines.append(prefix + ' ' * (align - len(prefix))) else: lines.append(prefix) lines.append(' ' * align) maximum_line_length = 80 maximum_value_length = maximum_line_length - align while value: if len(value) > maximum_value_length: try: i = value.rindex(' ', 0, maximum_value_length) except ValueError: pass else: lines[-1] += value[0:i] value = value[i + 1:].lstrip() lines.append(' ' * align) continue lines[-1] += value break return lines class CommandContext: __slots__ = ('command_name', 'args') def __init__(self, *, command_name: str, args: object): self.command_name = command_name self.args = args
Apache License 2.0
neurodiffgym/neurodiffeq
neurodiffeq/pde.py
solve2D_system
python
def solve2D_system( pde_system, conditions, xy_min=None, xy_max=None, single_net=None, nets=None, train_generator=None, valid_generator=None, optimizer=None, criterion=None, n_batches_train=1, n_batches_valid=4, additional_loss_term=None, metrics=None, max_epochs=1000, monitor=None, return_internal=False, return_best=False, batch_size=None, shuffle=None, ): warnings.warn( "The `solve2D_system` function is deprecated, use a `neurodiffeq.solvers.Solver2D` instance instead", FutureWarning, ) if single_net and nets: raise ValueError('Only one of net and nets should be specified') if (not single_net) and (not nets): single_net = FCNN( n_input_units=2, n_output_units=len(conditions), hidden_units=(32, 32), actv=nn.Tanh, ) if single_net: for ith, con in enumerate(conditions): con.set_impose_on(ith) nets = [single_net] * len(conditions) if additional_loss_term: class CustomSolver2D(Solver2D): def additional_loss(self, residual, funcs, coords): return additional_loss_term(*funcs, *coords) else: class CustomSolver2D(Solver2D): pass solver = CustomSolver2D( pde_system=pde_system, conditions=conditions, xy_min=xy_min, xy_max=xy_max, nets=nets, train_generator=train_generator, valid_generator=valid_generator, optimizer=optimizer, criterion=criterion, n_batches_train=n_batches_train, n_batches_valid=n_batches_valid, metrics=metrics, batch_size=batch_size, shuffle=shuffle, ) solver.fit(max_epochs=max_epochs, monitor=monitor) solution = solver.get_solution(copy=True, best=return_best) ret = (solution, solver.metrics_history) if return_internal: params = ['nets', 'conditions', 'train_generator', 'valid_generator', 'optimizer', 'criterion'] internals = solver.get_internals(params, return_type="dict") ret = ret + (internals,) return ret
r"""Train a neural network to solve a PDE with 2 independent variables. :param pde_system: The PDE system to solve. If the PDE is :math:`F_i(u_1, u_2, ..., u_n, x, y) = 0` where :math:`u_i` is the i-th dependent variable and :math:`x` and :math:`y` are the independent variables, then `pde_system` should be a function that maps :math:`(u_1, u_2, ..., u_n, x, y)` to a list where the i-th entry is :math:`F_i(u_1, u_2, ..., u_n, x, y)`. :type pde_system: callable :param conditions: The initial/boundary conditions. The ith entry of the conditions is the condition that :math:`x_i` should satisfy. :type conditions: list[`neurodiffeq.conditions.BaseCondition`] :param xy_min: The lower bound of 2 dimensions. If we only care about :math:`x \geq x_0` and :math:`y \geq y_0`, then `xy_min` is `(x_0, y_0)`. Only needed when train_generator or valid_generator are not specified. Defaults to None :type xy_min: tuple[float, float], optional :param xy_max: The upper bound of 2 dimensions. If we only care about :math:`x \leq x_1` and :math:`y \leq y_1`, then `xy_min` is `(x_1, y_1)`. Only needed when train_generator or valid_generator are not specified. Defaults to None :type xy_max: tuple[float, float], optional :param single_net: The single neural network used to approximate the solution. Only one of `single_net` and `nets` should be specified. Defaults to None :param single_net: `torch.nn.Module`, optional :param nets: The neural networks used to approximate the solution. Defaults to None. :type nets: list[`torch.nn.Module`], optional :param train_generator: The example generator to generate 1-D training points. Default to None. :type train_generator: `neurodiffeq.generators.Generator2D`, optional :param valid_generator: The example generator to generate 1-D validation points. Default to None. :type valid_generator: `neurodiffeq.generators.Generator2D`, optional :param optimizer: The optimization method to use for training. Defaults to None. :type optimizer: `torch.optim.Optimizer`, optional :param criterion: The loss function to use for training. Defaults to None. :type criterion: `torch.nn.modules.loss._Loss`, optional :param n_batches_train: Number of batches to train in every epoch, where batch-size equals ``train_generator.size``. Defaults to 1. :type n_batches_train: int, optional :param n_batches_valid: Number of batches to validate in every epoch, where batch-size equals ``valid_generator.size``. Defaults to 4. :type n_batches_valid: int, optional :param additional_loss_term: Extra terms to add to the loss function besides the part specified by `criterion`. The input of `additional_loss_term` should be the same as `pde_system`. :type additional_loss_term: callable :param metrics: Metrics to keep track of during training. The metrics should be passed as a dictionary where the keys are the names of the metrics, and the values are the corresponding function. The input functions should be the same as `pde_system` and the output should be a numeric value. The metrics are evaluated on both the training set and validation set. :type metrics: dict[string, callable] :param max_epochs: The maximum number of epochs to train. Defaults to 1000. :type max_epochs: int, optional :param monitor: The monitor to check the status of nerual network during training. Defaults to None. :type monitor: `neurodiffeq.pde.Monitor2D`, optional :param return_internal: Whether to return the nets, conditions, training generator, validation generator, optimizer and loss function. Defaults to False. 
:type return_internal: bool, optional :param return_best: Whether to return the nets that achieved the lowest validation loss. Defaults to False. :type return_best: bool, optional :param batch_size: **[DEPRECATED and IGNORED]** Each batch will use all samples generated. Please specify ``n_batches_train`` and ``n_batches_valid`` instead. :type batch_size: int :param shuffle: **[DEPRECATED and IGNORED]** Shuffling should be performed by generators. :type shuffle: bool :return: The solution of the PDE. The history of training loss and validation loss. Optionally, the nets, conditions, training generator, validation generator, optimizer and loss function. The solution is a function that has the signature `solution(xs, ys, as_type)`. :rtype: tuple[`neurodiffeq.pde.Solution`, dict] or tuple[`neurodiffeq.pde.Solution`, dict, dict] .. note:: This function is deprecated, use a ``neurodiffeq.solvers.Solver2D`` instead.
https://github.com/neurodiffgym/neurodiffeq/blob/ab670a1af2e58766849f3bc683f7e6b0a6444124/neurodiffeq/pde.py#L170-L338
import torch import warnings import torch.optim as optim import torch.nn as nn import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.animation as animation from .networks import FCNN from .neurodiffeq import safe_diff as diff from .generators import Generator2D, PredefinedGenerator from ._version_utils import warn_deprecate_class from .conditions import IrregularBoundaryCondition from .conditions import NoCondition, DirichletBVP2D, IBVP1D from .monitors import Monitor2D from .solvers import Solution2D from .solvers import Solver2D from copy import deepcopy ExampleGenerator2D = warn_deprecate_class(Generator2D) PredefinedExampleGenerator2D = warn_deprecate_class(PredefinedGenerator) Solution = warn_deprecate_class(Solution2D) def _network_output_2input(net, xs, ys, ith_unit): xys = torch.cat((xs, ys), 1) nn_output = net(xys) if ith_unit is not None: return nn_output[:, ith_unit].reshape(-1, 1) else: return nn_output def _trial_solution_2input(single_net, nets, xs, ys, conditions): if single_net: us = [ con.enforce(single_net, xs, ys) for con in conditions ] else: us = [ con.enforce(net, xs, ys) for con, net in zip(conditions, nets) ] return us def solve2D( pde, condition, xy_min=None, xy_max=None, net=None, train_generator=None, valid_generator=None, optimizer=None, criterion=None, n_batches_train=1, n_batches_valid=4, additional_loss_term=None, metrics=None, max_epochs=1000, monitor=None, return_internal=False, return_best=False, batch_size=None, shuffle=None, ): nets = None if not net else [net] return solve2D_system( pde_system=lambda u, x, y: [pde(u, x, y)], conditions=[condition], xy_min=xy_min, xy_max=xy_max, nets=nets, train_generator=train_generator, shuffle=shuffle, valid_generator=valid_generator, optimizer=optimizer, criterion=criterion, n_batches_train=n_batches_train, n_batches_valid=n_batches_valid, additional_loss_term=additional_loss_term, metrics=metrics, batch_size=batch_size, max_epochs=max_epochs, monitor=monitor, return_internal=return_internal, return_best=return_best )
MIT License
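A minimal sketch of the deprecated 2-D solver for the Laplace equation on the unit square; the boundary values and epoch count are arbitrary choices, and current releases emit a FutureWarning pointing to neurodiffeq.solvers.Solver2D.

import numpy as np
import torch
from neurodiffeq import diff
from neurodiffeq.conditions import DirichletBVP2D
from neurodiffeq.pde import solve2D

# Laplace equation: u_xx + u_yy = 0
laplace = lambda u, x, y: diff(u, x, order=2) + diff(u, y, order=2)
bc = DirichletBVP2D(
    x_min=0, x_min_val=lambda y: torch.sin(np.pi * y),
    x_max=1, x_max_val=lambda y: 0,
    y_min=0, y_min_val=lambda x: 0,
    y_max=1, y_max_val=lambda x: 0,
)
solution, history = solve2D(pde=laplace, condition=bc,
                            xy_min=(0, 0), xy_max=(1, 1), max_epochs=200)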
pantsbuild/pex
pex/executor.py
Executor.execute
python
def execute(cls, cmd, stdin_payload=None, **kwargs): process = cls.open_process( cmd=cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs ) stdout_raw, stderr_raw = process.communicate(input=stdin_payload) stdout = stdout_raw.decode("utf-8") if stdout_raw is not None else stdout_raw stderr = stderr_raw.decode("utf-8") if stderr_raw is not None else stderr_raw if process.returncode != 0: raise cls.NonZeroExit(cmd, process.returncode, stdout, stderr) return stdout, stderr
Execute a command via subprocess.Popen and return the stdio. :param string|list cmd: A list or string representing the command to run. :param string stdin_payload: A string representing the stdin payload, if any, to send. :param **kwargs: Additional kwargs to pass through to subprocess.Popen. :return: A tuple of strings representing (stdout, stderr), pre-decoded for utf-8. :raises: `Executor.ExecutableNotFound` when the executable requested to run does not exist. `Executor.NonZeroExit` when the execution fails with a non-zero exit code.
https://github.com/pantsbuild/pex/blob/a2eb72e4f627ce0630cd120b5411b2fda7974ce9/pex/executor.py#L80-L101
from __future__ import absolute_import import errno import os from pex.compatibility import PY2, string from pex.tracer import TRACER if os.name == "posix" and PY2: try: import subprocess32 as subprocess except ImportError: TRACER.log( "Please build pex with the subprocess32 module for more reliable requirement " "installation and interpreter execution." ) import subprocess else: import subprocess class Executor(object): class ExecutionError(Exception): def __init__(self, msg, cmd, exc=None): super(Executor.ExecutionError, self).__init__( "%s while trying to execute `%s`" % (msg, cmd) ) self.executable = cmd.split()[0] if isinstance(cmd, string) else cmd[0] self.cmd = cmd self.exc = exc class NonZeroExit(ExecutionError): def __init__(self, cmd, exit_code, stdout, stderr): super(Executor.NonZeroExit, self).__init__( "received exit code %s during execution of `%s`" % (exit_code, cmd), cmd ) self.exit_code = exit_code self.stdout = stdout self.stderr = stderr class ExecutableNotFound(ExecutionError): def __init__(self, cmd, exc): super(Executor.ExecutableNotFound, self).__init__( "caught %r while trying to execute `%s`" % (exc, cmd), cmd ) self.exc = exc @classmethod def open_process(cls, cmd, **kwargs): assert len(cmd) > 0, "cannot execute an empty command!" try: return subprocess.Popen(cmd, **kwargs) except (IOError, OSError) as e: if e.errno == errno.ENOENT: raise cls.ExecutableNotFound(cmd, e) else: raise cls.ExecutionError(repr(e), cmd, e) @classmethod
Apache License 2.0
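Usage sketch of the class-level execute helper documented above; the echo/false commands assume a POSIX environment.

from pex.executor import Executor

stdout, stderr = Executor.execute(['echo', 'hello'])
assert stdout.strip() == 'hello'

try:
    Executor.execute(['false'])            # exits with a non-zero code
except Executor.NonZeroExit as exc:
    print(exc.exit_code, exc.stderr)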
commvault/cvpysdk
cvpysdk/security/role.py
Roles.has_role
python
def has_role(self, role_name): if not isinstance(role_name, basestring): raise SDKException('Role', '101') return self._roles and role_name.lower() in self._roles
Checks if any role with the specified name exists on this commcell Args: role_name (str) -- name of the role which has to be checked for existence Returns: Bool -- True if the specified role is present on the commcell, else False Raises: SDKException: if data type of input is invalid
https://github.com/commvault/cvpysdk/blob/66df30e6e31d619812b7756cb4f7e130b220a08f/cvpysdk/security/role.py#L140-L157
from past.builtins import basestring from ..exception import SDKException class Roles(object): def __init__(self, commcell_object): self._commcell_object = commcell_object self._roles = self._get_roles() def __str__(self): representation_string = '{:^5}\t{:^20}\n\n'.format('S. No.', 'Roles') for index, role in enumerate(self._roles): sub_str = '{:^5}\t{:20}\n'.format(index + 1, role) representation_string += sub_str return representation_string.strip() def __repr__(self): return "Roles class instance for Commcell: '{0}'".format( self._commcell_object.commserv_name ) def _get_roles(self): get_all_roles_service = self._commcell_object._services['ROLES'] flag, response = self._commcell_object._cvpysdk_object.make_request( 'GET', get_all_roles_service ) if flag: if response.json() and 'roleProperties' in response.json(): roles_dict = {} for role in response.json()['roleProperties']: temp_id = role['role']['roleId'] temp_name = role['role']['roleName'].lower() roles_dict[temp_name] = temp_id return roles_dict else: return {} else: response_string = self._commcell_object._update_response_(response.text) raise SDKException('Response', '101', response_string)
Apache License 2.0
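Hedged sketch; it assumes an authenticated Commcell session (hostname and credentials below are placeholders), and in practice the collection is normally reached through the commcell object rather than built directly.

from cvpysdk.commcell import Commcell
from cvpysdk.security.role import Roles

commcell = Commcell('webconsole.example.com', 'admin', 'password')   # hypothetical
roles = Roles(commcell)
print(roles.has_role('Master'))   # True if the role exists on this commcell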
ncullen93/pybn
pyBN/io/read.py
read_bn
python
def read_bn(path): if '.bif' in path: return read_bif(path) elif '.bn' in path: return read_json(path) elif '.mat' in path: return read_mat(path) else: print("Path Extension not recognized")
Wrapper function for reading BayesNet objects from various file types. Arguments --------- *path* : a string The path (relative or absolute) - MUST include extension -> that's how we know which file reader to call. Returns ------- *bn* : a BayesNet object Effects ------- None Notes -----
https://github.com/ncullen93/pybn/blob/58bf684b4ac0bbfa7e2aa394ba3dd302d3dd22db/pyBN/io/read.py#L24-L55
__author__ = """Nicholas Cullen <ncullen.th@dartouth.edu>""" import json import numpy as np import copy from pyBN.classes.bayesnet import BayesNet from pyBN.classes.factor import Factor from pyBN.utils.graph import topsort
MIT License
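Usage sketch; 'data/asia.bif' is a placeholder and must point to an existing .bif, .bn or .mat file for the dispatch to succeed.

from pyBN.io.read import read_bn

bn = read_bn('data/asia.bif')   # the extension decides the reader, here read_bif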
cheind/pytorch-blender
pkg_pytorch/blendtorch/btt/dataset.py
RemoteIterableDataset.enable_recording
python
def enable_recording(self, fname): self.record_path_prefix = fname
Enable recording to given prefix path `fname`. Needs to be set before receiving items from the dataset.
https://github.com/cheind/pytorch-blender/blob/eb5effb033094d037e7bdc2238c00806be7012ae/pkg_pytorch/blendtorch/btt/dataset.py#L54-L59
import zmq import pickle from contextlib import ExitStack from glob import glob import torch.utils as utils from .file import FileRecorder, FileReader from .constants import DEFAULT_TIMEOUTMS def _identity_item_transform(x): return x class RemoteIterableDataset(utils.data.IterableDataset): def __init__(self, addresses, queue_size=10, timeoutms=DEFAULT_TIMEOUTMS, max_items=100000, item_transform=None, record_path_prefix=None): self.addresses = addresses self.queue_size = queue_size self.timeoutms = timeoutms self.max_items = max_items self.record_path_prefix = record_path_prefix self.item_transform = item_transform or _identity_item_transform
MIT License
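Sketch assuming a Blender publisher is already streaming items on tcp://localhost:11000; as the docstring notes, enable_recording must be called before the first item is pulled.

from torch.utils.data import DataLoader
from blendtorch import btt

ds = btt.RemoteIterableDataset(['tcp://localhost:11000'], max_items=16)
ds.enable_recording('mytrainrun')          # record received items under this prefix
dl = DataLoader(ds, batch_size=4, num_workers=0)
for item in dl:
    pass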
milkpku/betaelephant
data/datatransfer.py
state2fen
python
def state2fen(cstate): fen = '' [m,n] = cstate.state.shape for i in range(m): zcnt = 0 for j in range(n): if cstate.state[i][j] != ' ': if zcnt != 0: fen += str(zcnt) zcnt = 0 fen += cstate.state[i][j] else: zcnt += 1 if zcnt != 0: fen += str(zcnt) zcnt = 0 fen += '/' fen = fen[:-1] fen += ' ' + cstate.turn fen += ' - - 0 ' fen += str(cstate.roundcnt) return fen
transfer the chessboard state to a FEN string state: state of the current chessboard turn: which player is to move roundcnt: count of rounds return: FEN string
https://github.com/milkpku/betaelephant/blob/0db6140d328355ac0a3c7f9f667ca760f5096711/data/datatransfer.py#L21-L49
import numpy as np class chessboradstate: def __init__(self): self.state = np.zeros([10,9], dtype=np.string_) self.turn = 'w' self.roundcnt = 1
MIT License
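A tiny, hedged sketch (the module is written in Python 2 style); the two-king position is deliberately minimal and only meant to show the FEN layout produced by state2fen.

import numpy as np
from data.datatransfer import chessboradstate, state2fen   # module path as in the repo

cs = chessboradstate()
cs.state = np.full([10, 9], ' ')   # blank board
cs.state[0][4] = 'k'
cs.state[9][4] = 'K'
cs.turn = 'w'
cs.roundcnt = 1
print(state2fen(cs))   # e.g. '4k4/9/9/9/9/9/9/9/9/4K4 w - - 0 1'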
flyteorg/flytekit
flytekit/interfaces/data/gcs/gcs_proxy.py
GCSProxy.__init__
python
def __init__(self, raw_output_data_prefix_override: str = None): self._raw_output_data_prefix_override = raw_output_data_prefix_override super(GCSProxy, self).__init__(name="gcs-gsutil")
:param raw_output_data_prefix_override: Instead of relying on the AWS or GCS configuration (see S3_SHARD_FORMATTER for AWS and GCS_PREFIX for GCP) setting when computing the shard path (_get_shard_path), use this prefix instead as a base. This code assumes that the path passed in is correct. That is, an S3 path won't be passed in when running on GCP.
https://github.com/flyteorg/flytekit/blob/6c032035563ae645b0b93558b3fe3362080057ea/flytekit/interfaces/data/gcs/gcs_proxy.py#L29-L37
import os as _os import sys as _sys import uuid as _uuid from flytekit.common.exceptions.user import FlyteUserException as _FlyteUserException from flytekit.configuration import gcp as _gcp_config from flytekit.interfaces import random as _flyte_random from flytekit.interfaces.data import common as _common_data from flytekit.tools import subprocess as _subprocess if _sys.version_info >= (3,): from shutil import which as _which else: from distutils.spawn import find_executable as _which def _update_cmd_config_and_execute(cmd): env = _os.environ.copy() return _subprocess.check_call(cmd, env=env) def _amend_path(path): return _os.path.join(path, "*") if not path.endswith("*") else path class GCSProxy(_common_data.DataProxy): _GS_UTIL_CLI = "gsutil"
Apache License 2.0
mapnik/cascadenik
cascadenik/compile.py
fs2url
python
def fs2url(url): return safe64.decode(url)
decode a filename to the URL it is derived from
https://github.com/mapnik/cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/compile.py#L109-L111
import os, sys import math import urllib import urllib2 import tempfile import StringIO import operator import base64 import posixpath import os.path as systempath import zipfile import shutil from hashlib import md5 from datetime import datetime from time import strftime, localtime from re import sub, compile, MULTILINE from urlparse import urlparse, urljoin from operator import lt, le, eq, ge, gt def _relpath(path, start=posixpath.curdir): if not path: raise ValueError("no path specified") start_list = posixpath.abspath(start).split(posixpath.sep) path_list = posixpath.abspath(path).split(posixpath.sep) i = len(posixpath.commonprefix([start_list, path_list])) rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return posixpath.curdir return posixpath.join(*rel_list) if sys.hexversion >= 0x020600F0: from httplib import HTTPConnection, HTTPSConnection else: posixpath.relpath = _relpath from httplib import HTTPConnection as _HTTPConnection from httplib import HTTPSConnection as _HTTPSConnection import socket def HTTPConnection(host, port=None, strict=None, timeout=None): if timeout: socket.setdefaulttimeout(timeout) return _HTTPConnection(host, port=port, strict=strict) def HTTPSConnection(host, port=None, strict=None, timeout=None): if timeout: socket.setdefaulttimeout(timeout) return _HTTPSConnection(host, port=port, strict=strict) from . import safe64, style, output, sources from . import MAPNIK_VERSION, MAPNIK_VERSION_STR from .nonposix import un_posix, to_posix from .parse import stylesheet_declarations from .style import uri try: from PIL import Image except ImportError: try: import Image except ImportError: Image = False if not Image: warn = 'Warning: PIL (Python Imaging Library) is required for proper handling of image symbolizers when using JPEG format images or not running Mapnik >=0.7.0\n' sys.stderr.write(warn) DEFAULT_ENCODING = 'utf-8' try: import xml.etree.ElementTree as ElementTree from xml.etree.ElementTree import Element except ImportError: try: import lxml.etree as ElementTree from lxml.etree import Element except ImportError: import elementtree.ElementTree as ElementTree from elementtree.ElementTree import Element opsort = {lt: 1, le: 2, eq: 3, ge: 4, gt: 5} opstr = {lt: '<', le: '<=', eq: '==', ge: '>=', gt: '>'} VERBOSE = False def msg(msg): if VERBOSE: sys.stderr.write('Cascadenik debug: %s\n' % msg) counter = 0 def next_counter(): global counter counter += 1 return counter def url2fs(url): uri, extension = posixpath.splitext(url) return safe64.dir(uri) + extension
BSD 3-Clause New or Revised License
beta-team/beta-recsys
beta_rec/datasets/instacart.py
Instacart_25.preprocess
python
def preprocess(self): print("Start loading data from raw data") order_products_prior_file = os.path.join( self.raw_path, "order_products__prior.csv" ) order_products_train_file = os.path.join( self.raw_path, "order_products__train.csv" ) if not os.path.exists(order_products_prior_file) or not os.path.exists( order_products_train_file ): print("Raw file doesn't exist, try to download it.") self.download() file_name = os.path.join(self.raw_path + ".gz") un_zip(file_name) orders_file = os.path.join(self.raw_path, "orders.csv") prior_products = pd.read_csv( order_products_prior_file, usecols=["order_id", "product_id", "add_to_cart_order"], ) train_products = pd.read_csv( order_products_train_file, usecols=["order_id", "product_id", "add_to_cart_order"], ) order_products = pd.concat([prior_products, train_products]) orders = pd.read_csv( orders_file, usecols=["user_id", "order_id", "order_number", "eval_set"] ) user_products = order_products.merge(orders, how="left", on="order_id") order_addtocart_user = ( user_products.groupby( ["order_id", "add_to_cart_order", "user_id", "product_id", "eval_set"] ) .size() .rename("ratings") .reset_index() ) order_addtocart_user.rename( columns={ "order_id": DEFAULT_ORDER_COL, "user_id": DEFAULT_USER_COL, "product_id": DEFAULT_ITEM_COL, "ratings": DEFAULT_RATING_COL, "eval_set": DEFAULT_FLAG_COL, }, inplace=True, ) timestamp_col = {DEFAULT_TIMESTAMP_COL: order_addtocart_user.index} order_addtocart_user = order_addtocart_user.assign(**timestamp_col) print("Start sampling 25% users from the raw data") users = list(order_addtocart_user[DEFAULT_USER_COL].unique()) sampled_users = random.sample(users, int(len(users) * 0.25)) order_addtocart_user = order_addtocart_user[ order_addtocart_user[DEFAULT_USER_COL].isin(sampled_users) ] print("Loading raw data completed") self.save_dataframe_as_npz( order_addtocart_user, os.path.join(self.processed_path, f"{self.dataset_name}_interaction.npz"), )
Preprocess the raw file. Preprocess the file downloaded via the url, convert it to a dataframe consisting of the user-item interactions and save it in the processed directory Download and load datasets 1. Download the instacart dataset if it does not already exist. 2. Load the <order> table and <order_products> table from "orders.csv" and "order_products__train.csv". 3. Merge the two tables above. 4. Add additional columns [rating, timestamp]. 5. Rename columns and save the data model.
https://github.com/beta-team/beta-recsys/blob/be6a5c9307e00cc7bb06cbfd407a637e9d5afbb0/beta_rec/datasets/instacart.py#L181-L264
import os import random import pandas as pd from ..datasets.dataset_base import DatasetBase from ..utils.common_util import un_zip from ..utils.constants import ( DEFAULT_FLAG_COL, DEFAULT_ITEM_COL, DEFAULT_ORDER_COL, DEFAULT_RATING_COL, DEFAULT_TIMESTAMP_COL, DEFAULT_USER_COL, ) INSTACART_URL = "https://www.kaggle.com/c/instacart-market-basket-analysis/data" INSTACART_RANDOM_SPLIT_URL = ( r"https://1drv.ms/u/s!AjMahLyQeZqugX4W4zLO6Jkx8P-W?e=oKymnV" ) INSTACART_TEMPORAL_SPLIT_URL = ( r"https://1drv.ms/u/s!AjMahLyQeZquggAblxVFSYeu3nzh?e=pzBaAa" ) INSTACART_LEAVE_ONE_OUT_URL = ( r"https://1drv.ms/u/s!AjMahLyQeZquggLQynzcCWfNUdIg?e=HDhUjL" ) INSTACART_TIPS = """ Instacart dataset can not be downloaded by this url automatically, and you need to do: 1. Download this dataset via 'https://www.kaggle.com/c/instacart-market-basket-analysis/data', 2. Put 'instacart-market-basket-analysis.zip' into the directory `instacart/raw/`, 3. Unzip 'instacart-market-basket-analysis.zip', put all the *.csv files into 'instacart/raw/'. 4. Rerun this program. """ class Instacart(DatasetBase): def __init__( self, dataset_name="instacart", min_u_c=0, min_i_c=3, min_o_c=0, root_dir=None ): super().__init__( dataset_name=dataset_name, min_u_c=min_u_c, min_i_c=min_i_c, min_o_c=min_o_c, root_dir=root_dir, manual_download_url=INSTACART_URL, processed_leave_one_out_url=INSTACART_LEAVE_ONE_OUT_URL, processed_random_split_url=INSTACART_RANDOM_SPLIT_URL, processed_temporal_split_url=INSTACART_TEMPORAL_SPLIT_URL, tips=INSTACART_TIPS, ) def preprocess(self): print("Start loading data from raw data") order_products_prior_file = os.path.join( self.raw_path, "order_products__prior.csv" ) order_products_train_file = os.path.join( self.raw_path, "order_products__train.csv" ) if not os.path.exists(order_products_prior_file) or not os.path.exists( order_products_train_file ): print("Raw file doesn't exist, try to download it.") self.download() orders_file = os.path.join(self.raw_path, "orders.csv") prior_products = pd.read_csv( order_products_prior_file, usecols=["order_id", "product_id", "add_to_cart_order"], ) train_products = pd.read_csv( order_products_train_file, usecols=["order_id", "product_id", "add_to_cart_order"], ) order_products = pd.concat([prior_products, train_products]) orders = pd.read_csv( orders_file, usecols=["user_id", "order_id", "order_number", "eval_set"] ) user_products = order_products.merge(orders, how="left", on="order_id") order_addtocart_user = ( user_products.groupby( ["order_id", "add_to_cart_order", "user_id", "product_id", "eval_set"] ) .size() .rename("ratings") .reset_index() ) order_addtocart_user.rename( columns={ "order_id": DEFAULT_ORDER_COL, "user_id": DEFAULT_USER_COL, "product_id": DEFAULT_ITEM_COL, "ratings": DEFAULT_RATING_COL, "eval_set": DEFAULT_FLAG_COL, }, inplace=True, ) timestamp_col = {DEFAULT_TIMESTAMP_COL: order_addtocart_user.index} order_addtocart_user = order_addtocart_user.assign(**timestamp_col) print("Loading raw data completed") self.save_dataframe_as_npz( order_addtocart_user, os.path.join(self.processed_path, f"{self.dataset_name}_interaction.npz"), ) class Instacart_25(DatasetBase): def __init__( self, dataset_name="instacart_25", min_u_c=0, min_i_c=3, min_o_c=0, ): super().__init__( dataset_name=dataset_name, min_u_c=min_u_c, min_i_c=min_i_c, min_o_c=min_o_c, manual_download_url="https://www.kaggle.com/c/6644/download-all", processed_random_split_url=INSTACART_RANDOM_SPLIT_URL, processed_temporal_split_url=INSTACART_TEMPORAL_SPLIT_URL, )
MIT License
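Sketch of driving the 25%-sample preprocessor; as INSTACART_TIPS explains, the Kaggle CSVs must already sit in the dataset's raw/ directory, so this fails until that manual download is done.

from beta_rec.datasets.instacart import Instacart_25

dataset = Instacart_25(min_i_c=3)
dataset.preprocess()   # samples 25% of users and writes instacart_25_interaction.npz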
napari/napari
napari/_vispy/utils/visual.py
get_view_direction_in_scene_coordinates
python
def get_view_direction_in_scene_coordinates( view: ViewBox, ndim: int, dims_displayed: Tuple[int], ) -> np.ndarray: if len(dims_displayed) == 2: return None tform = view.scene.transform w, h = view.canvas.size screen_center = np.array([w / 2, h / 2, 0, 1]) d1 = np.array([0, 0, 1, 0]) point_in_front_of_screen_center = screen_center + d1 p1 = tform.imap(point_in_front_of_screen_center) p0 = tform.imap(screen_center) d2 = p1 - p0 d3 = d2[0:3] d4 = d3 / np.linalg.norm(d3) d4 = d4[[2, 1, 0]] view_dir_world = np.zeros((ndim,)) for i, d in enumerate(dims_displayed): view_dir_world[d] = d4[i] return view_dir_world
Calculate the unit vector pointing in the direction of the view. This is only for 3D viewing, so it returns None when len(dims_displayed) == 2. Adapted From: https://stackoverflow.com/questions/37877592/ get-view-direction-relative-to-scene-in-vispy/37882984 Parameters ---------- view : vispy.scene.widgets.viewbox.ViewBox The vispy view box object to get the view direction from. ndim : int The number of dimensions in the full nD dims model. This is typically from viewer.dims.ndim dims_displayed : Tuple[int] The indices of the dims displayed in the viewer. This is typically from viewer.dims.displayed. Returns ------- view_vector : np.ndarray Unit vector in the direction of the view in scene coordinates. Axes are ordered zyx. If the viewer is in 2D (i.e., len(dims_displayed) == 2), view_vector is None.
https://github.com/napari/napari/blob/c4c987c880fe125da608edf427767eafe7f2b3f4/napari/_vispy/utils/visual.py#L73-L133
from typing import Tuple import numpy as np from vispy.scene.widgets.viewbox import ViewBox from ...layers import ( Image, Labels, Layer, Points, Shapes, Surface, Tracks, Vectors, ) from ...utils.config import async_octree from ...utils.translations import trans from ..layers.base import VispyBaseLayer from ..layers.image import VispyImageLayer from ..layers.points import VispyPointsLayer from ..layers.shapes import VispyShapesLayer from ..layers.surface import VispySurfaceLayer from ..layers.tracks import VispyTracksLayer from ..layers.vectors import VispyVectorsLayer layer_to_visual = { Image: VispyImageLayer, Labels: VispyImageLayer, Points: VispyPointsLayer, Shapes: VispyShapesLayer, Surface: VispySurfaceLayer, Vectors: VispyVectorsLayer, Tracks: VispyTracksLayer, } if async_octree: from ..layers.image.experimental.octree_image import _OctreeImageBase from .experimental.vispy_tiled_image_layer import VispyTiledImageLayer new_mapping = {_OctreeImageBase: VispyTiledImageLayer} new_mapping.update(layer_to_visual) layer_to_visual = new_mapping def create_vispy_visual(layer: Layer) -> VispyBaseLayer: for layer_type, visual_class in layer_to_visual.items(): if isinstance(layer, layer_type): return visual_class(layer) raise TypeError( trans._( 'Could not find VispyLayer for layer of type {dtype}', deferred=True, dtype=type(layer), ) )
BSD 3-Clause New or Revised License
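Hedged sketch from inside a running napari session; reaching the vispy ViewBox goes through private attributes (assumed here as viewer.window.qt_viewer.view), which can change between napari versions.

import napari
from napari._vispy.utils.visual import get_view_direction_in_scene_coordinates

viewer = napari.Viewer(ndisplay=3)
view = viewer.window.qt_viewer.view    # private API, assumed
direction = get_view_direction_in_scene_coordinates(
    view,
    ndim=viewer.dims.ndim,
    dims_displayed=viewer.dims.displayed,
)
print(direction)   # zyx unit vector, or None when only 2 dims are displayed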
fisher60/friendo_bot
bot/cogs/fun.py
Fun.eight_ball
python
async def eight_ball(self, ctx: Context, *, question: str) -> None: responses = [ "It is certain", "Yes, definitely", "Without a doubt", "That's for sure", "Most likely", "Umm, try again", "Didnt quite get that", "Concentrate and try again", "Not likely at all", "My reply is no", "Obviously not", "No...", "My sources say no", ] await ctx.send(f"Question: {question}\nAnswer: {choice(responses)}")
Returns an 8ball response to a user's question.
https://github.com/fisher60/friendo_bot/blob/715fd1b08ac9f0b4f29ad38a4f3a9e59ea3af5d9/bot/cogs/fun.py#L180-L198
import functools from itertools import product from random import choice, randint, shuffle import re import string from typing import List from discord.ext.commands import Cog, Context, command from bot.bot import Friendo UWU_WORDS = { "fi": "fwi", "l": "w", "r": "w", "some": "sum", "th": "d", "thing": "fing", "tho": "fo", "you're": "yuw'we", "your": "yur", "you": "yuw", } def get_factorial(num: int) -> int: answer = 1 for i in range(num, 0, -1): answer *= i return answer def _replace_many( sentence: str, replacements: dict, *, ignore_case: bool = False, match_case: bool = False, ) -> str: if ignore_case: replacements = dict( (word.lower(), replacement) for word, replacement in replacements.items() ) words_to_replace = sorted(replacements, key=lambda s: (-len(s), s)) pattern = "|".join(re.escape(word) for word in words_to_replace) regex = re.compile(pattern, re.I if ignore_case else 0) def _repl(match: re.Match) -> str: word = match.group(0) replacement = replacements[word.lower() if ignore_case else word] if not match_case: return replacement cleaned_word = word.translate(str.maketrans("", "", string.punctuation)) if cleaned_word.isupper(): return replacement.upper() elif cleaned_word[0].isupper(): return replacement.capitalize() else: return replacement.lower() return regex.sub(_repl, sentence) class Fun(Cog): def __init__(self, bot: Friendo) -> None: self.bot = bot @command(brief="Send a number and get the factorial of it") async def factorial(self, ctx: Context, number: int) -> None: if number > 69: await ctx.send("Hey woah don't break me. Give me a number upto 69") return result = await self.bot.loop.run_in_executor(None, get_factorial, number) await ctx.send( f"The factorial of **{number}** is **{result}** ({number}! = {result})" ) @command( brief="Alternate case of inputted text", description="converts a phrase to alternating case", ) async def spongify(self, ctx: Context, *, phrase: str) -> None: count = 0 new = "" for i in phrase.lower(): if i in string.punctuation: new += i else: if count % 2 == 0: new += i else: new += i.upper() count += 1 await ctx.send(new) @command( brief="simulates a coin toss", description="accepts 'heads' or 'tails' and tells if the output matches the user input.", name="flip", ) async def coin_toss(self, ctx: Context, toss: str) -> None: outcomes = ["heads", "tails"] if toss == choice(outcomes): msg = f"{ctx.author.mention} wins!" else: msg = f"{ctx.author.mention} loses!" await ctx.send(msg) @command( brief="simulates a dice roll", description=".dice [quantity] [sides]\n" "`quantity` - how many to roll\n" "`sides` - how many sides each die will have", ) async def dice(self, ctx: Context, n: int, sides: int) -> None: if n <= 0: await ctx.send("you must roll at least one die") elif sides < 2: await ctx.send(f"you can't roll a {sides}-sided die") else: result = sum(randint(1, sides) for _ in range(n)) await ctx.send(f"you rolled {result}") @command( brief="Ask any question to the 8ball", description="accepts a question and gives you an 8ball answer", name="8ball", )
MIT License
maier/cadvisor-collectd
src/cadvisor/python/cadvisor.py
CAdvisor.output_metrics
python
def output_metrics(self, container_name, container_id, metrics, fs_metrics=False): if metrics['has_cpu'] and 'cpu' in self.active_metrics: self.emit_cpu_metrics(container_name, container_id, metrics['cpu']) if metrics['has_memory'] and 'memory' in self.active_metrics: self.emit_memory_metrics(container_name, container_id, metrics['memory']) if metrics['has_network'] and 'network' in self.active_metrics: self.emit_network_metrics(container_name, container_id, metrics['network']) if metrics['has_diskio'] and 'diskio' in self.active_metrics: self.emit_diskio_metrics(container_name, container_id, metrics['diskio']) if metrics['has_load'] and 'load_stats' in self.active_metrics: self.emit_load_metrics(container_name, container_id, metrics['load_stats']) if metrics['has_filesystem'] and fs_metrics: self.emit_filesystem_metrics(container_name, container_id, metrics['filesystem'])
Parcel out the various metric sections to dedicated (isolated) handlers, one for each of the distinct structures.
https://github.com/maier/cadvisor-collectd/blob/7cc8a77161d8d1e413ee548c3e19b6c737c282c9/src/cadvisor/python/cadvisor.py#L511-L530
from __future__ import print_function import sys from abc import ABCMeta, abstractmethod import json import yaml import urllib2 import socket import docker import re class CAdvisor(object): __metaclass__ = ABCMeta def __init__(self, config): super(CAdvisor, self).__init__() self.name = self.__class__.__name__ self.doc_url = 'https://github.com/maier/cadvisor-collectd/wiki/Configuring-CAdvisor' self.config_host = config.get('host', 'cadvisor/docker') self.config_port = config.get('port', 8080) self.config_file = config.get('config_file', '/etc/collectd/cadvisor.yaml') self.host = None self.port = None self.config = {} try: f = open(self.config_file, 'r') self.config = yaml.load(f) except Exception, e: self.log_error('Unable to load configuration "{}": {}'.format(self.config_file, e)) sys.exit(1) self.docker_socket = self.config.get('docker_socket', '/var/run/docker.sock') self.active_metrics = self.get_active_metrics() self.system_enabled = self.config.get('system_enabled', False) self.system_fs_metrics = self.config.get('system_fs_metrics', False) self.system_services = self.config.get('system_services', { 'options': { 'include_mounts': False, 'include_sockets': False, 'include_docker_scopes': False, 'include_system_slice': False, 'include_user_slice': False, 'include_other_slices': False }, 'include': [], 'exclude': ['*'] }) self.service_filter = None if type(self.system_services['include']).__name__ != 'list': self.system_services['include'] = [] if type(self.system_services['exclude']).__name__ != 'list': self.system_services['exclude'] = [] if not self.system_services['include'] and not self.system_services['exclude']: self.service_filter = 'all' elif '*' in self.system_services['exclude'] and '*' not in self.system_services['include']: self.service_filter = 'include' elif '*' in self.system_services['include'] and '*' not in self.system_services['exclude']: self.service_filter = 'exclude' elif'*' in self.system_services['include'] and '*' in self.system_services['exclude']: self.log_error('Conflicting service filter configuration, cannot be include and exclude simultaneously. See documentation: {}'.format(self.doc_url)) sys.exit(1) else: self.log_error('No service filter configuration identified. 
See documentation: {}'.format(self.doc_url)) sys.exit(1) self.docker_enabled = self.config.get('docker_enabled', True) self.docker_container_config = self.config.get('docker_containers', []) if type(self.docker_container_config).__name__ != 'list': self.docker_container_config = [] self.host_namespec = self.config.get('ns_host', '{hn}') self.plugin_namespec = self.config.get('ns_plugin', '{cn}.') def log(self, message, level='INFO'): msg = '{level} -- {msg}'.format(level=level, msg=message) if level == 'ERR': print(msg, file=sys.stderr) else: print(msg) @abstractmethod def log_error(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message), 'ERR') @abstractmethod def log_warning(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message), 'ERR') @abstractmethod def log_notice(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message)) @abstractmethod def log_info(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message)) @abstractmethod def log_debug(self, message): self.log('{name}: {msg}'.format(name=self.name, msg=message)) @abstractmethod def dispatch_metric(self, container_name, container_id, plugin, plugin_instance, metric_type, type_instance, metric_value): pass def gen_host_name(self, hostname, container_name, container_id): return(self.host_namespec.format(hn=hostname, cn=container_name, cid=container_id)) def gen_plugin_name(self, hostname, container_name, container_id, plugin): return('{}{}'.format(self.plugin_namespec.format(hn=hostname, cn=container_name, cid=container_id), plugin)) def is_container_id(self, id): try: if int(id, 16): return True except ValueError: return False def fix_container_name(self, name): if name[0:1] != '/': return('/' + name) else: return(name) def container_match(self, target, container_id, container_names): if self.is_container_id(target): if container_id[0:len(target)] == target: return True else: if self.fix_container_name(target) in container_names: return True return False def set_container_slice_ids(self): docker_container_config = self.docker_container_config all_containers = '*' in docker_container_config for container_idx, container in enumerate(self.docker_container_list): self.docker_container_list[container_idx]['SliceId'] = None slice_id = "/system.slice/docker-{cid}.scope".format(cid=container['Id']) if all_containers: self.docker_container_list[container_idx]['SliceId'] = slice_id else: for include_container in docker_container_config: if self.container_match(include_container, container['Id'], container['Names']): self.docker_container_list[container_idx]['SliceId'] = slice_id break def set_cadvisor_connect_info(self): host_spec = self.config_host port_spec = self.config_port docker_prefix = 'docker/' if self.host and self.port and not host_spec.lower().startswith(docker_prefix): return True ip = None port = port_spec if re.match('^\d{1,3}(\.\d{1,3}){3}$', host_spec): ip = host_spec elif host_spec.lower().startswith(docker_prefix): container_identifier = host_spec[len(docker_prefix):] cadvisor_container = None try: cli = docker.Client(base_url='unix:/{}'.format(self.docker_socket)) cadvisor_container = cli.inspect_container(container_identifier) if not cadvisor_container['State']['Running']: self.log_error('Error specified CAdvisor container "{}" is not running.'.format(host_spec)) sys.exit(1) ip = cadvisor_container['NetworkSettings']['IPAddress'] for exposed_port in cadvisor_container['Config']['ExposedPorts']: if '/tcp' in exposed_port: port = 
exposed_port.split('/')[0] break except docker.errors.APIError, e: self.log_error('Error retrieving container from docker: {}'.format(e)) sys.exit(1) except IOError, e: self.log_error('Error connecting to docker socket "{}": {}'.format(self.docker_socket, e)) sys.exit(1) else: self.log_error('Invalid cadvisor connection method specified "{}".'.format(host_spec)) sys.exit(2) connection_specifier = '{}:{}'.format(ip, port) if not re.match('^\d{1,3}(\.\d{1,3}){3}:\d+$', connection_specifier): self.log_error('No valid connection specifier found for cadvisor "{}" = "{}".'.format(host_spec, connection_specifier)) sys.exit(2) self.host = ip self.port = port return(True) def set_docker_container_list(self): try: cli = docker.Client(base_url='unix:/{}'.format(self.docker_socket)) self.docker_container_list = cli.containers(all=False) except docker.errors.APIError, e: self.log_error('Error retrieving from docker: {}'.format(e)) sys.exit(1) except IOError, e: self.log_error('Error connecting to docker socket "{}": {}'.format(self.docker_socket, e)) sys.exit(1) return(True) def get_active_metrics(self): key_prefix = 'metrics_' active_metrics = {} for k, v in self.config.iteritems(): if k.startswith(key_prefix): if 'none' not in map(str.lower, self.config[k]): active_metrics[k[len(key_prefix):]] = v return(active_metrics) def fetch_metrics(self): self.set_cadvisor_connect_info() url = "http://{}:{}/api/v2.0/stats?recursive=true&count=1".format(self.host, self.port) stats = {} try: response = urllib2.urlopen(url, None, 5) stats = json.loads(response.read()) except urllib2.URLError, e: if hasattr(e, 'reason'): self.log_error("Failed to reach server, reason {}".format(e.reason)) elif hasattr(e, 'code'): self.log_error("Server unable to fulfill request {}".format(e.code)) sys.exit(1) except socket.timeout: self.log_error("Timeout connecting to {}".format(url)) sys.exit(1) return(stats) def emit_cpu_metrics(self, container_name, container_id, metrics): plugin = 'cpu' plugin_instance = None metric_type = 'gauge' type_instance = 'avg' self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [metrics['load_average']]) plugin_instance = None metric_type = 'time_ns' for key in ('system', 'total', 'user'): type_instance = key self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [metrics['usage'][key]]) metric_type = 'time_ns' type_instance = None for i, v in enumerate(metrics['usage']['per_cpu_usage']): plugin_instance = str(i) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [v]) def emit_memory_metrics(self, container_name, container_id, metrics): plugin = 'memory' plugin_instance = None metric_type = 'memory' type_instance = 'usage' self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [metrics['usage']]) plugin_instance = None metric_type = 'memory' type_instance = 'working_set' self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [metrics['working_set']]) plugin_instance = None metric_type = 'gauge' type_instance = None for item in ('hierarchical', 'container'): item_key = '{}_data'.format(item) plugin_instance = item_key for key in metrics[item_key]: type_instance = key self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [metrics[item_key][key]]) def emit_network_metrics(self, container_name, container_id, 
metrics): plugin = 'net' plugin_instance = None metric_type = None type_instance = None if 'interfaces' in metrics.keys(): metrics = metrics['interfaces'] for i, v in enumerate(metrics): plugin_instance = 'if{}'.format(i) for item in ('dropped', 'packets', 'bytes', 'errors'): rx_key = 'rx_{}'.format(item) tx_key = 'tx_{}'.format(item) metric_type = 'if_{}'.format('octets' if item == 'bytes' else item) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [v[rx_key], v[tx_key]]) def emit_diskio_metrics(self, container_name, container_id, metrics): plugin = 'blkio' plugin_instance = None metric_type = None type_instance = None metric = 'io_time' if metric in metrics: metric_type = 'time_ms' type_instance = metric for device in metrics[metric]: plugin_instance = '{}_{}'.format(device['major'], device['minor']) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device['stats']['Count']]) metric_type = 'time_ns' for metric in ('io_wait_time', 'io_service_time'): if metric in metrics: for device in metrics[metric]: plugin_instance = '{}_{}'.format(device['major'], device['minor']) for stat in device['stats']: type_instance = '{}_{}'.format(metric, stat) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device['stats'][stat]]) metric = 'io_service_bytes' metric_type = 'bytes' if metric in metrics: for device in metrics[metric]: plugin_instance = '{}_{}'.format(device['major'], device['minor']) for stat in device['stats']: type_instance = '{}_{}'.format(metric, stat) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device['stats'][stat]]) metric = 'sectors' metric_type = 'gauge' if metric in metrics: type_instance = '{}'.format(metric) for device in metrics[metric]: plugin_instance = '{}_{}'.format(device['major'], device['minor']) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device['stats']['Count']]) metric_type = 'gauge' for metric in ('io_serviced', 'io_merged'): if metric in metrics: for device in metrics[metric]: plugin_instance = '{}_{}'.format(device['major'], device['minor']) for stat in device['stats']: type_instance = '{}_{}'.format(metric, stat) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device['stats'][stat]]) metric = 'io_queued' metric_type = 'counter' if metric in metrics: for device in metrics[metric]: plugin_instance = '{}_{}'.format(device['major'], device['minor']) for stat in device['stats']: type_instance = '{}_{}'.format(metric, stat) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device['stats'][stat]]) def emit_load_metrics(self, container_name, container_id, metrics): plugin = 'load_stats' plugin_instance = None metric_type = 'gauge' type_instance = None for metric in metrics: type_instance = '-{}'.format(metric) self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [metrics[metric]]) def emit_filesystem_metrics(self, container_name, container_id, metrics): plugin = 'fs' plugin_instance = None metric_type = None type_instance = None for device in metrics: device_name = device['device'] if device_name[0:19].lower() == '/dev/mapper/docker-': device_name = device_name.replace('/dev/mapper/', '') device_name_parts = device_name.split('-') 
device_name_parts[-1] = device_name_parts[-1][0:12] device_name = '_'.join(device_name_parts) plugin_instance = device_name metric_type = 'bytes' for stat in ('capacity', 'usage'): type_instance = stat self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device[stat]]) metric_type = 'time_ms' for stat in ('read_time', 'io_time', 'weighted_io_time', 'write_time'): type_instance = stat self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device[stat]]) metric_type = 'gauge' for stat in ('writes_completed', 'reads_completed', 'writes_merged', 'sectors_written', 'reads_merged', 'sectors_read'): type_instance = stat self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device[stat]]) metric_type = 'counter' type_instance = 'io_in_progress' self.dispatch_metric(container_name, container_id, plugin, plugin_instance, metric_type, type_instance, [device['io_in_progress']])
Apache License 2.0
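output_metrics above repeats the same flag/key/handler check for every metric section. A hypothetical table-driven variant of that dispatch, sketched under the assumption that the emit_* handlers keep their signatures, could look like this (names unchanged, behaviour equivalent for the sections shown):

SECTIONS = [
    ("has_cpu", "cpu", "emit_cpu_metrics"),
    ("has_memory", "memory", "emit_memory_metrics"),
    ("has_network", "network", "emit_network_metrics"),
    ("has_diskio", "diskio", "emit_diskio_metrics"),
    ("has_load", "load_stats", "emit_load_metrics"),
]

def output_metrics(self, container_name, container_id, metrics, fs_metrics=False):
    # Dispatch each present-and-enabled section to its dedicated handler.
    for flag, key, handler in SECTIONS:
        if metrics.get(flag) and key in self.active_metrics:
            getattr(self, handler)(container_name, container_id, metrics[key])
    # Filesystem metrics are gated by the caller's fs_metrics switch instead.
    if metrics.get("has_filesystem") and fs_metrics:
        self.emit_filesystem_metrics(container_name, container_id, metrics["filesystem"])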
paddlepaddle/paddlesleeve
Robustness/perceptron/utils/adversarial/detection.py
DetAdversarial.predictions_and_gradient
python
def predictions_and_gradient( self, image=None, annotation=None, strict=True, return_details=False): assert self.has_gradient() if image is None: image = self._original_image assert not strict or self.in_bounds(image) in_bounds = self.in_bounds(image) assert not strict or in_bounds self._total_prediction_calls += 1 self._total_gradient_calls += 1 predictions, loss, gradient = self._model.predictions_and_gradient(image, self._criterion) is_adversarial, is_best, distance = self._is_adversarial( image, predictions, in_bounds) assert gradient.shape == image.shape if return_details: return predictions, loss, gradient, is_adversarial, is_best, distance else: return predictions, loss, gradient, is_adversarial
Interface to model.predictions_and_gradient for attacks. Parameters ---------- image : `numpy.ndarray` Image with shape (height, width, channels). Defaults to the original image. annotation : int Annotation (label) used to calculate the loss that is differentiated. Defaults to the original annotation. strict : bool Controls if the bounds for the pixel values should be checked.
https://github.com/paddlepaddle/paddlesleeve/blob/18cc4b83ae311365b8d132ea4619d60abf3945bf/Robustness/perceptron/utils/adversarial/detection.py#L70-L105
import numpy as np import numbers from .base import Adversarial from .base import StopAttack from perceptron.utils.distances import MSE from perceptron.utils.distances import Distance class DetAdversarial(Adversarial): def __init__( self, model, criterion, original_image, original_pred, threshold=None, distance=MSE, verbose=False): super(DetAdversarial, self).__init__( model, criterion, original_image, original_pred, threshold, distance, verbose) self._task = 'det' def model_task(self): return self._task def gradient(self, image=None, label=None, strict=True): pass
Apache License 2.0
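A sketch of how an attack might consume predictions_and_gradient above, as a single gradient-sign step. Only the method shown is relied on; the starting image, epsilon and clipping range are illustrative assumptions, not part of the perceptron API:

import numpy as np

def fgsm_step(adv, image, epsilon=0.01, clip_min=0.0, clip_max=255.0):
    # predictions_and_gradient returns (predictions, loss, gradient, is_adversarial)
    # when return_details is False; only the gradient and the flag are needed here.
    _, _, gradient, is_adv = adv.predictions_and_gradient(image)
    step = epsilon * (clip_max - clip_min) * np.sign(gradient)
    return np.clip(image + step, clip_min, clip_max), is_adv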
theislab/sfaira
sfaira/data/store/single_store.py
DistributedStoreSingleFeatureSpace.write_config
python
def write_config(self, fn: Union[str, os.PathLike]): with open(fn + '.pickle', 'wb') as f: pickle.dump(self.indices, f)
Writes a config file that describes the current data sub-setting. The file can be loaded later to recreate the sub-setting and contains observation-wise subsetting information. :param fn: Output file without file type extension.
https://github.com/theislab/sfaira/blob/9590015acbc4f84454d3d75ff03b191c2472a219/sfaira/data/store/single_store.py#L330-L340
import abc import anndata import dask.array import dask.dataframe import numpy as np import os import pandas as pd import pickle import scipy.sparse from typing import Dict, List, Tuple, Union from sfaira.consts import AdataIdsSfaira, OCS from sfaira.data.dataloaders.base.utils import is_child, UNS_STRING_META_IN_OBS from sfaira.data.store.base import DistributedStoreBase from sfaira.data.store.generators import GeneratorAnndata, GeneratorDask, GeneratorSingle from sfaira.versions.genomes.genomes import GenomeContainer def _process_batch_size(batch_size: int, retrival_batch_size: int) -> Tuple[int, int]: if batch_size != 1: raise ValueError("batch size is only supported as 1") return batch_size, retrival_batch_size class DistributedStoreSingleFeatureSpace(DistributedStoreBase): _adata_by_key: Dict[str, anndata.AnnData] _indices: Dict[str, np.ndarray] _obs_by_key: Union[None, Dict[str, dask.dataframe.DataFrame]] data_source: str def __init__(self, adata_by_key: Dict[str, anndata.AnnData], indices: Dict[str, np.ndarray], obs_by_key: Union[None, Dict[str, dask.dataframe.DataFrame]] = None, data_source: str = "X"): self.adata_by_key = adata_by_key self.indices = indices self.obs_by_key = obs_by_key self.ontology_container = OCS self._genome_container = None self._adata_ids_sfaira = AdataIdsSfaira() self.data_source = data_source self._celltype_universe = None @property def idx(self) -> np.ndarray: idx_global = np.arange(0, np.sum([len(v) for v in self.indices.values()])) return idx_global @property def organisms_by_key(self) -> Dict[str, str]: ks = self.indices.keys() organisms = [self._adata_by_key[k].uns[self._adata_ids_sfaira.organism] for k in ks] organisms = [x[0] if (isinstance(x, list) or isinstance(x, tuple)) else x for x in organisms] return dict(list(zip(ks, organisms))) @property def organism(self): organisms = np.unique(list(self.organisms_by_key.values())) assert len(organisms) == 1, organisms return organisms[0] def _validate_feature_space_homogeneity(self) -> List[str]: reference_k = list(self._adata_by_key.keys())[0] var_names = self._adata_by_key[reference_k].var_names.tolist() for k in list(self._adata_by_key.keys()): assert len(var_names) == len(self._adata_by_key[k].var_names), f"number of features in store differed in object {k} compared to {reference_k}" assert np.all(var_names == self._adata_by_key[k].var_names), f"var_names in store were not matched in object {k} compared to {reference_k}" return var_names @property def adata_by_key(self) -> Dict[str, anndata.AnnData]: return self._adata_by_key @adata_by_key.setter def adata_by_key(self, x: Dict[str, anndata.AnnData]): self._adata_by_key = x @property def data_by_key(self): return dict([(k, v.X) for k, v in self.adata_by_key.items()]) @property def indices(self) -> Dict[str, np.ndarray]: return self._indices @indices.setter def indices(self, x: Dict[str, np.ndarray]): for k, v in x.items(): assert k in self._adata_by_key.keys(), f"did not find key {k}" assert np.max(v) < self._adata_by_key[k].n_obs, f"found index for key {k} that exceeded data set size" assert len(v) == len(np.unique(v)), f"found duplicated indices for key {k}" assert np.all(np.diff(v) >= 0), f"indices not sorted for key {k}" self._indices = x @property def obs_by_key(self) -> Dict[str, Union[pd.DataFrame, dask.dataframe.DataFrame]]: if self._obs_by_key is not None: assert np.all(list(self._adata_by_key.keys()) == list(self._obs_by_key.keys())) assert np.all([self._obs_by_key[k].shape[0] == self._adata_by_key[k].shape[0] for k in 
self._obs_by_key.keys()]) return self._obs_by_key else: return dict([(k, v.obs) for k, v in self.adata_by_key.items()]) @obs_by_key.setter def obs_by_key(self, x: Union[None, Dict[str, dask.dataframe.DataFrame]]): if x is not None: for k, v in x.items(): if not (isinstance(v, dask.dataframe.DataFrame) or isinstance(v, pd.DataFrame)): raise ValueError(f"value of entry {k} was not a dask.dataframe.DataFrame but {type(v)}") self._obs_by_key = x @property def genome_container(self) -> Union[GenomeContainer, None]: return self._genome_container @genome_container.setter def genome_container(self, x: Union[GenomeContainer]): var_names = self._validate_feature_space_homogeneity() assert np.all([y in var_names for y in x.ensembl]), "did not find variable names from genome container in store" self._genome_container = x @property def dataset_weights(self): return self._dataset_weights @dataset_weights.setter def dataset_weights(self, x: Dict[str, float]): assert np.all([k in self.adata_by_key.keys() for k in x.keys()]), "did not recognize some keys" assert np.all([k in x.keys() for k in self.indices.keys()]), "some data sets in index were omitted" self._dataset_weights = x def get_subset_idx(self, attr_key, values: Union[str, List[str], None], excluded_values: Union[str, List[str], None]) -> dict: if not isinstance(values, list): values = [values] assert (values is None or excluded_values is not None) or (values is not None or excluded_values is None), "supply either values or excluded_values" def get_idx(adata, obs, k, v, xv, dataset): read_from_uns = (getattr(self._adata_ids_sfaira, k) in adata.uns.keys() and adata.uns[getattr(self._adata_ids_sfaira, k)] != UNS_STRING_META_IN_OBS and getattr(self._adata_ids_sfaira, k) not in obs.columns) read_from_obs = not read_from_uns and getattr(self._adata_ids_sfaira, k) in obs.columns if read_from_uns: values_found = adata.uns[getattr(self._adata_ids_sfaira, k)] if isinstance(values_found, np.ndarray): values_found = values_found.tolist() elif not isinstance(values_found, list): values_found = [values_found] if len(values_found) > 1: values_found = None else: values_found = [values_found[0] for _ in range(adata.n_obs)] elif read_from_obs: values_found = obs[getattr(self._adata_ids_sfaira, k)].values else: values_found = [] print(f"WARNING: did not find attribute {k} in data set {dataset}") values_found_unique = np.unique(values_found) try: ontology = getattr(self.ontology_container, k) except AttributeError: raise ValueError(f"{k} not a valid property of ontology_container object") if v is not None: values_found_unique_matched = [ x for x in values_found_unique if np.any([ is_child(query=x, ontology=ontology, ontology_parent=y) for y in v ]) ] else: values_found_unique_matched = [ x for x in values_found_unique if np.all([ not is_child(query=x, ontology=ontology, ontology_parent=y) for y in xv ]) ] idx = np.where([x in values_found_unique_matched for x in values_found])[0] return idx indices = {} for key in self.indices.keys(): if key not in self.adata_by_key.keys(): raise ValueError(f"data set {key} queried by indices does not exist in store (.adata_by_key)") adata_k = self.adata_by_key[key] obs_k = self.obs_by_key[key] idx_old = self.indices[key] idx_subset = get_idx(adata=adata_k, obs=obs_k, k=attr_key, v=values, xv=excluded_values, dataset=key) idx_new = np.sort(list(set(np.asarray(idx_old).tolist()).intersection( set(np.asarray(idx_subset).tolist())))) if len(idx_new) > 0: indices[key] = np.asarray(idx_new, dtype="int32") return indices def subset(self, 
attr_key, values: Union[str, List[str], None] = None, excluded_values: Union[str, List[str], None] = None, verbose: int = 1): self.indices = self.get_subset_idx(attr_key=attr_key, values=values, excluded_values=excluded_values) if self.n_obs == 0 and verbose > 0: print(f"WARNING: store is now empty after subsetting {attr_key} for {values}, excluding {excluded_values}.")
BSD 3-Clause New or Revised License
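A possible round trip for the config written by write_config above; since the method pickles store.indices, restoring it is a plain pickle.load. The helper name and path below are illustrative assumptions:

import pickle

def save_and_restore_subset(store, path="/tmp/my_subset"):
    store.write_config(path)                  # writes "<path>.pickle"
    with open(path + ".pickle", "rb") as f:
        indices = pickle.load(f)              # Dict[str, np.ndarray], as documented
    store.indices = indices                   # re-applies the same observation subset
    return indices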
tensorflow/recommenders-addons
tensorflow_recommenders_addons/dynamic_embedding/python/ops/cuckoo_hashtable_ops.py
CuckooHashTable._gather_saveables_for_checkpoint
python
def _gather_saveables_for_checkpoint(self): full_name = self._table_name return { "table": functools.partial( CuckooHashTable._Saveable, table=self, name=self._name, full_name=full_name, ) }
For object-based checkpointing.
https://github.com/tensorflow/recommenders-addons/blob/009d0956772522267717df771fa1b9a1df3c07d9/tensorflow_recommenders_addons/dynamic_embedding/python/ops/cuckoo_hashtable_ops.py#L341-L353
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops.lookup_ops import LookupInterface from tensorflow.python.training.saver import BaseSaverBuilder from tensorflow_recommenders_addons.utils.resource_loader import LazySO from tensorflow_recommenders_addons.utils.resource_loader import prefix_op_name cuckoo_ops = LazySO("dynamic_embedding/core/_cuckoo_hashtable_ops.so").ops class CuckooHashTable(LookupInterface): def __init__( self, key_dtype, value_dtype, default_value, name="CuckooHashTable", checkpoint=True, init_size=0, config=None, ): self._default_value = ops.convert_to_tensor(default_value, dtype=value_dtype) self._value_shape = self._default_value.get_shape() self._checkpoint = checkpoint self._key_dtype = key_dtype self._value_dtype = value_dtype self._init_size = init_size self._name = name self._shared_name = None if context.executing_eagerly(): self._shared_name = "table_%d" % (ops.uid(),) super(CuckooHashTable, self).__init__(key_dtype, value_dtype) self._resource_handle = self._create_resource() if checkpoint: _ = CuckooHashTable._Saveable(self, name) if not context.executing_eagerly(): self.saveable = CuckooHashTable._Saveable( self, name=self._resource_handle.op.name, full_name=self._resource_handle.op.name, ) ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self.saveable) else: self.saveable = CuckooHashTable._Saveable(self, name=name, full_name=name) def _create_resource(self): use_node_name_sharing = self._checkpoint and self._shared_name is None table_ref = cuckoo_ops.tfra_cuckoo_hash_table_of_tensors( shared_name=self._shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=self._key_dtype, value_dtype=self._value_dtype, value_shape=self._default_value.get_shape(), init_size=self._init_size, name=self._name, ) if context.executing_eagerly(): self._table_name = None else: self._table_name = table_ref.op.name.split("/")[-1] return table_ref @property def name(self): return self._table_name def size(self, name=None): with ops.name_scope(name, "%s_Size" % self.name, [self.resource_handle]): with ops.colocate_with(self.resource_handle): return cuckoo_ops.tfra_cuckoo_hash_table_size(self.resource_handle) def remove(self, keys, name=None): if keys.dtype != self._key_dtype: raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." 
% (self._key_dtype, keys.dtype)) with ops.name_scope( name, "%s_lookup_table_remove" % self.name, (self.resource_handle, keys, self._default_value), ): op = cuckoo_ops.tfra_cuckoo_hash_table_remove(self.resource_handle, keys) return op def clear(self, name=None): with ops.name_scope(name, "%s_lookup_table_clear" % self.name, (self.resource_handle, self._default_value)): op = cuckoo_ops.tfra_cuckoo_hash_table_clear( self.resource_handle, key_dtype=self._key_dtype, value_dtype=self._value_dtype) return op def lookup(self, keys, dynamic_default_values=None, return_exists=False, name=None): with ops.name_scope( name, "%s_lookup_table_find" % self.name, (self.resource_handle, keys, self._default_value), ): keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys") with ops.colocate_with(self.resource_handle, ignore_existing=True): if return_exists: values, exists = cuckoo_ops.tfra_cuckoo_hash_table_find_with_exists( self.resource_handle, keys, dynamic_default_values if dynamic_default_values is not None else self._default_value, ) else: values = cuckoo_ops.tfra_cuckoo_hash_table_find( self.resource_handle, keys, dynamic_default_values if dynamic_default_values is not None else self._default_value, ) return (values, exists) if return_exists else values def insert(self, keys, values, name=None): with ops.name_scope( name, "%s_lookup_table_insert" % self.name, [self.resource_handle, keys, values], ): keys = ops.convert_to_tensor(keys, self._key_dtype, name="keys") values = ops.convert_to_tensor(values, self._value_dtype, name="values") with ops.colocate_with(self.resource_handle, ignore_existing=True): op = cuckoo_ops.tfra_cuckoo_hash_table_insert(self.resource_handle, keys, values) return op def accum(self, keys, values_or_deltas, exists, name=None): with ops.name_scope( name, "%s_lookup_table_accum" % self.name, [self.resource_handle, keys, values_or_deltas], ): keys = ops.convert_to_tensor(keys, self._key_dtype, name="keys") values_or_deltas = ops.convert_to_tensor(values_or_deltas, self._value_dtype, name="values_or_deltas") exists = ops.convert_to_tensor(exists, dtypes.bool, name="exists") with ops.colocate_with(self.resource_handle, ignore_existing=True): op = cuckoo_ops.tfra_cuckoo_hash_table_accum(self.resource_handle, keys, values_or_deltas, exists) return op def export(self, name=None): with ops.name_scope(name, "%s_lookup_table_export_values" % self.name, [self.resource_handle]): with ops.colocate_with(self.resource_handle): keys, values = cuckoo_ops.tfra_cuckoo_hash_table_export( self.resource_handle, self._key_dtype, self._value_dtype) return keys, values
Apache License 2.0
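A toy illustration of the pattern used above: object-based checkpointing expects a mapping from attribute name to a factory that builds the saveable lazily, which is why functools.partial is returned instead of an already-constructed _Saveable. The classes below are hypothetical stand-ins, not TensorFlow types:

import functools

class ToySaveable:
    def __init__(self, table, name, full_name):
        self.table, self.name, self.full_name = table, name, full_name

class ToyTable:
    def __init__(self, name):
        self._name = name
    def _gather_saveables_for_checkpoint(self):
        # Return a factory, not an instance: construction is deferred until
        # the checkpointing machinery actually asks for the saveable.
        return {"table": functools.partial(ToySaveable, table=self,
                                           name=self._name, full_name=self._name)}

factory = ToyTable("emb")._gather_saveables_for_checkpoint()["table"]
saveable = factory()  # built on demand, later, by the caller
print(saveable.name)  # -> "emb"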
clericpy/torequests
torequests/main.py
tPool.post
python
def post(self, url, data=None, json=None, callback=None, retry=0, response_validator=None, **kwargs): return self.request("post", url=url, data=data, json=json, callback=callback, retry=retry, response_validator=response_validator, **kwargs)
Similar to `requests.post`, but returns a NewFuture.
https://github.com/clericpy/torequests/blob/e57ce331aa850db45c198dc90b9d01e437384b61/torequests/main.py#L634-L650
import atexit from concurrent.futures import (ProcessPoolExecutor, ThreadPoolExecutor, as_completed) from concurrent.futures._base import (CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, PENDING, RUNNING, CancelledError, Error, Executor, Future, TimeoutError) from concurrent.futures.thread import _threads_queues, _WorkItem from functools import wraps from logging import getLogger from threading import Thread, Timer from time import sleep from time import time as time_time from weakref import WeakSet from requests import PreparedRequest, RequestException, Session from requests.adapters import HTTPAdapter from urllib3 import disable_warnings from .configs import Config from .exceptions import FailureException, ValidationError from .frequency_controller.sync_tools import Frequency from .versions import PY2, PY3 try: from queue import Empty, Queue except ImportError: from Queue import Empty, Queue if PY3: from concurrent.futures.process import BrokenProcessPool __all__ = [ "Pool", "ProcessPool", "NewFuture", "Async", "threads", "get_results_generator", "run_after_async", "tPool", "get", "post", "options", "delete", "put", "head", "patch", "request", "disable_warnings", "Workshop" ] logger = getLogger("torequests") def _abandon_all_tasks(): _threads_queues.clear() def ensure_waiting_for_threads(): if Config.wait_futures_before_exiting: _abandon_all_tasks() atexit.register(ensure_waiting_for_threads) class NewExecutorPoolMixin(Executor): def async_func(self, function): @wraps(function) def wrapped(*args, **kwargs): return self.submit(function, *args, **kwargs) return wrapped def close(self, wait=True): return self.shutdown(wait=wait) def _get_cpu_count(self): try: from multiprocessing import cpu_count return cpu_count() except Exception as e: logger.error("_get_cpu_count failed for %s" % e) @property def x(self): return self.wait_futures_done(list(self._all_futures)) def wait_futures_done(self, tasks=None): tasks = tasks or self._all_futures fs = [] try: for f in as_completed(tasks, timeout=self._timeout): fs.append(f.x) except TimeoutError: pass return fs class Pool(ThreadPoolExecutor, NewExecutorPoolMixin): def __init__(self, n=None, timeout=None, default_callback=None, catch_exception=True, *args, **kwargs): n = n or kwargs.pop("max_workers", None) if PY2 and n is None: n = (self._get_cpu_count() or 1) * 5 super(Pool, self).__init__(n, *args, **kwargs) self._timeout = timeout self.default_callback = default_callback self._all_futures = WeakSet() self.catch_exception = catch_exception @property def all_tasks(self): return self._all_futures def submit(self, func, *args, **kwargs): with self._shutdown_lock: if self._shutdown: raise RuntimeError("cannot schedule new futures after shutdown") callback = kwargs.pop("callback", self.default_callback) future = NewFuture( self._timeout, args, kwargs, callback=callback, catch_exception=self.catch_exception, ) w = _WorkItem(future, func, args, kwargs) self._work_queue.put(w) self._adjust_thread_count() self._all_futures.add(future) return future class ProcessPool(ProcessPoolExecutor, NewExecutorPoolMixin): def __init__(self, n=None, timeout=None, default_callback=None, catch_exception=True, *args, **kwargs): n = n or kwargs.pop("max_workers", None) if PY2 and n is None: n = self._get_cpu_count() or 1 super(ProcessPool, self).__init__(n, *args, **kwargs) self._timeout = timeout self.default_callback = default_callback self._all_futures = WeakSet() self.catch_exception = catch_exception def submit(self, func, *args, **kwargs): with self._shutdown_lock: if PY3 
and self._broken: raise BrokenProcessPool( "A child process terminated " "abruptly, the process pool is not usable anymore") if self._shutdown_thread: raise RuntimeError("cannot schedule new futures after shutdown") callback = kwargs.pop("callback", self.default_callback) future = NewFuture( self._timeout, args, kwargs, callback=callback, catch_exception=self.catch_exception, ) w = _WorkItem(future, func, args, kwargs) self._pending_work_items[self._queue_count] = w self._work_ids.put(self._queue_count) self._queue_count += 1 self._result_queue.put(None) self._start_queue_management_thread() if PY2: self._adjust_process_count() self._all_futures.add(future) return future def async_func(self, *args): raise NotImplementedError class NewFuture(Future): if PY3: from ._py3_patch import _new_future_await __await__ = _new_future_await def __init__(self, timeout=None, args=None, kwargs=None, callback=None, catch_exception=True): super(NewFuture, self).__init__() self._timeout = timeout self._args = args or () self._kwargs = kwargs or {} self._callback_result = None self.catch_exception = catch_exception self.task_start_time = time_time() self.task_end_time = 0 self.task_cost_time = 0 self._user_callbacks = set() if callback: if not isinstance(callback, (list, tuple)): callback = [callback] for fn in callback: self.add_done_callback(fn) self._user_callbacks.add(fn) def __getattr__(self, name): return getattr(self.x, name) def _invoke_callbacks(self): self.task_end_time = time_time() self.task_cost_time = self.task_end_time - self.task_start_time with self._condition: for callback in self._done_callbacks: try: result = callback(self) if callback in self._user_callbacks: self._callback_result = result except Exception as e: logger.error("exception calling callback for %s" % e) self._condition.notify_all() @property def _callbacks(self): return self._done_callbacks @property def cx(self): return self.callback_result @property def callback_result(self): if self._state in [PENDING, RUNNING]: self.x if self._user_callbacks: return self._callback_result else: return self.x @property def x(self): with self._condition: result = None if not self.done(): self._condition.wait(self._timeout) if not self.done(): self.set_exception(TimeoutError()) if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: result = CancelledError() elif self._state == FINISHED: if self._exception: result = self._exception else: result = self._result if isinstance(result, Exception): if self.catch_exception: result = FailureException(result) return result else: raise result return result def Async(f, n=None, timeout=None): return threads(n=n, timeout=timeout)(f) def threads(n=None, timeout=None): return Pool(n, timeout).async_func def get_results_generator(future_list, timeout=None, sort_by_completed=False): try: if sort_by_completed: for future in as_completed(future_list, timeout=timeout): yield future.x else: for future in future_list: yield future.x except TimeoutError: return def run_after_async(seconds, func, *args, **kwargs): t = Timer(seconds, func, args, kwargs) t.daemon = True t.start() return t class FailedRequest(PreparedRequest): allow_keys = { "method", "url", "headers", "files", "data", "params", "auth", "cookies", "hooks", "json", } def __init__(self, **kwargs): self.kwargs = kwargs filted_kwargs = { key: value for key, value in kwargs.items() if key in self.allow_keys } super(FailedRequest, self).__init__() self.prepare(**filted_kwargs) class tPool(object): def __init__( self, n=None, interval=0, timeout=None, 
session=None, catch_exception=True, default_callback=None, retry_exceptions=(RequestException, Error), ): self.pool = Pool(n, timeout) self.session = session if session else Session() self.n = n or 10 custom_adapter = HTTPAdapter(pool_connections=self.n, pool_maxsize=self.n) self.session.mount("http://", custom_adapter) self.session.mount("https://", custom_adapter) self.interval = interval self.catch_exception = catch_exception self.default_callback = default_callback self.frequency = Frequency(self.n, self.interval) self.retry_exceptions = retry_exceptions @property def all_tasks(self): return self.pool._all_futures @property def x(self): return self.pool.x def close(self, wait=False): self.session.close() self.pool.shutdown(wait=wait) def __enter__(self): return self def __exit__(self, *args): self.close() def __del__(self): self.close() def _request(self, method, url, retry=0, response_validator=None, **kwargs): if not url: raise ValueError("url should not be null, but given: %s" % url) kwargs["url"] = url kwargs["method"] = method referer_info = kwargs.pop("referer_info", None) encoding = kwargs.pop("encoding", None) error = Exception() for _ in range(retry + 1): with self.frequency: try: resp = self.session.request(**kwargs) if encoding: resp.encoding = encoding logger.debug("%s done, %s" % (url, kwargs)) resp.referer_info = referer_info if response_validator and not response_validator(resp): raise ValidationError(response_validator.__name__) return resp except self.retry_exceptions as e: error = e logger.debug( "Retry %s for the %s time, Exception: %r . kwargs= %s" % (url, _ + 1, e, kwargs)) continue kwargs["retry"] = retry if referer_info: kwargs["referer_info"] = referer_info if encoding: kwargs["encoding"] = encoding logger.debug("Retry %s times failed again: %s." % (retry, error)) failure = FailureException(error) failure.request = FailedRequest(**kwargs) if self.catch_exception: return failure else: raise failure def request(self, method, url, callback=None, retry=0, response_validator=None, **kwargs): return self.pool.submit(self._request, method=method, url=url, retry=retry, response_validator=response_validator, callback=callback or self.default_callback, **kwargs) def get(self, url, params=None, callback=None, retry=0, response_validator=None, **kwargs): kwargs.setdefault("allow_redirects", True) return self.request("get", url=url, params=params, callback=callback, retry=retry, response_validator=response_validator, **kwargs)
MIT License
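Hypothetical usage of tPool.post as defined above: each call returns a NewFuture immediately and .x blocks until the underlying requests call finishes. The URL and payload are placeholders, and the import path simply mirrors the module shown:

from torequests.main import tPool

pool = tPool(n=5)
futures = [pool.post("https://httpbin.org/post", json={"i": i}, retry=1)
           for i in range(3)]
responses = [f.x for f in futures]              # blocks until each request finishes
print([getattr(r, "status_code", r) for r in responses])   # FailureException if a call failed
pool.close()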
rob-blackbourn/bareasgi
examples/chunking.py
without_chunking
python
async def without_chunking(request: HttpRequest) -> HttpResponse: return HttpResponse( 200, [(b'content-type', b'text/plain')], non_chunking_writer(request.info['text']) )
A response handler which sends its body as a single chunk, without a content length.
https://github.com/rob-blackbourn/bareasgi/blob/5966ab6b54bc99420b06fb90cff05d15447e98b7/examples/chunking.py#L61-L67
import logging from typing import AsyncIterable from bareasgi import ( Application, HttpRequest, HttpResponse ) logging.basicConfig(level=logging.DEBUG) async def non_chunking_writer( text: str, encoding: str = 'utf-8' ) -> AsyncIterable[bytes]: yield text.encode(encoding=encoding) async def chunking_writer( text: str, encoding: str = 'utf-8', bufsiz: int = 512 ) -> AsyncIterable[bytes]: start, end = 0, bufsiz while start < len(text): yield text[start:end].encode(encoding=encoding) start, end = end, end + bufsiz async def with_chunking(request: HttpRequest) -> HttpResponse: return HttpResponse( 200, [(b'content-type', b'text/plain')], chunking_writer(request.info['text'], bufsiz=64) )
Apache License 2.0
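A quick way to see the difference between the two writers above without starting a server is to drain each async generator and count the chunks; this assumes non_chunking_writer and chunking_writer from that example module are in scope:

import asyncio

async def count_chunks(agen) -> int:
    return len([chunk async for chunk in agen])

text = "x" * 200
print(asyncio.run(count_chunks(non_chunking_writer(text))))          # 1 chunk
print(asyncio.run(count_chunks(chunking_writer(text, bufsiz=64))))   # 4 chunks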
gesturegeneration/speech_driven_gesture_generation_with_autoencoder
motion_repr_learning/ae/DAE.py
DAE.run_less_layers
python
def run_less_layers(self, input_pl, n, is_target=False): assert n > 0 assert n <= self.num_hidden_layers last_output = input_pl for i in range(n - 1): w = self._w(i + 1, "_pretrained") b = self._b(i + 1, "_pretrained") last_output = self._feedforward(last_output, w, b) if is_target: return last_output last_output = self._feedforward(last_output, self._w(n), self._b(n)) out = self._feedforward(last_output, self._w(n), self["bias" + str(n) + "_out"]) return out
Return the result of the net after n layers, or after n-1 layers (if is_target is true). This function is used for the layer-wise pretraining of the AE. Args: input_pl: TensorFlow placeholder of AE inputs n: int specifying the pretraining step is_target: bool specifying whether the required tensor should be the target tensor, i.e. whether we should run n layers or n-1 Returns: Tensor giving the pretraining net result or the pretraining target
https://github.com/gesturegeneration/speech_driven_gesture_generation_with_autoencoder/blob/59e8ebdb0f6e87b0e81268046c99a4d6c9bf62a8/motion_repr_learning/ae/DAE.py#L310-L340
from __future__ import division from __future__ import print_function import tensorflow as tf import numpy as np from utils.utils import add_noise, loss_reconstruction from utils.flags import FLAGS class DAE: def __init__(self, shape, sess, variance_coef, data_info): self.__shape = shape self.__variables = {} self.__sess = sess self.num_hidden_layers = np.size(shape) - 2 self.batch_size = FLAGS.batch_size self.sequence_length = FLAGS.chunk_length self.scaling_factor = 1 self.max_val = data_info.max_val self.mean_pose = data_info.mean_pose self._train_data_initializer = tf.placeholder(dtype=tf.float32, shape=data_info.train_shape) self._train_data = tf.Variable(self._train_data_initializer, trainable=False, collections=[], name='Train_data') train_epochs = FLAGS.training_epochs + FLAGS.pretraining_epochs * FLAGS.num_hidden_layers train_frames = tf.train.slice_input_producer([self._train_data], num_epochs=train_epochs) self._train_batch = tf.train.shuffle_batch(train_frames, batch_size=FLAGS.batch_size, capacity=5000, min_after_dequeue=1000, name='Train_batch') self._valid_data_initializer = tf.placeholder(dtype=tf.float32, shape=data_info.eval_shape) self._valid_data = tf.Variable(self._valid_data_initializer, trainable=False, collections=[], name='Valid_data') valid_frames = tf.train.slice_input_producer([self._valid_data], num_epochs=FLAGS.training_epochs) self._valid_batch = tf.train.shuffle_batch(valid_frames, batch_size=FLAGS.batch_size, capacity=5000, min_after_dequeue=1000, name='Valid_batch') if FLAGS.weight_decay is not None: print('\nWe apply weight decay') with sess.graph.as_default(): with tf.variable_scope("AE_Variables"): for i in range(self.num_hidden_layers + 1): self._create_variables(i, FLAGS.weight_decay) ''' 1 - Setup network for TRAINing ''' self._input_ = add_noise(self._train_batch, variance_coef, data_info.data_sigma) self._target_ = self._train_batch self._output, _, _ = self.construct_graph(self._input_, FLAGS.dropout) self._reconstruction_loss = loss_reconstruction(self._output, self._target_, self.max_val) tf.add_to_collection('losses', self._reconstruction_loss) self._loss = tf.add_n(tf.get_collection('losses'), name='total_loss') ''' 2 - Setup network for TESTing ''' self._valid_input_ = self._valid_batch self._valid_target_ = self._valid_batch self._valid_output, self._encode, self._decode = self.construct_graph(self._valid_input_, 1) self._valid_loss = loss_reconstruction(self._valid_output, self._valid_target_, self.max_val) @property def session(self): return self.__sess @property def shape(self): return self.__shape def _w(self, n, suffix=""): return self["matrix"+str(n)+suffix] def _b(self, n, suffix=""): return self["bias"+str(n)+suffix] @staticmethod def _feedforward(x, w, b): y = tf.tanh(tf.nn.bias_add(tf.matmul(x, w), b)) return y def construct_graph(self, input_seq_pl, dropout): network_input = input_seq_pl curr_layer = tf.reshape(network_input, [self.batch_size, FLAGS.chunk_length * FLAGS.frame_size]) numb_layers = self.num_hidden_layers + 1 with tf.name_scope("Joint_run"): for i in range(numb_layers): if i == FLAGS.middle_layer: with tf.name_scope('middle_layer'): middle_layer = tf.identity(curr_layer) with tf.name_scope('hidden'+str(i)): curr_layer = tf.nn.dropout(curr_layer, dropout) w = self._w(i + 1) b = self._b(i + 1) curr_layer = self._feedforward(curr_layer, w, b) output = curr_layer with tf.name_scope("Decoding"): layer = self._representation = tf.placeholder (dtype=tf.float32, shape=middle_layer.get_shape().as_list(), name="Respres.") for 
i in range(FLAGS.middle_layer, numb_layers): with tf.name_scope('hidden' + str(i)): layer = tf.nn.dropout(layer, dropout) w = self._w(i + 1) b = self._b(i + 1) layer = self._feedforward(layer, w, b) decoding = layer return output, middle_layer, decoding def __getitem__(self, item): return self.__variables[item] def __setitem__(self, key, value): self.__variables[key] = value def _create_variables(self, i, wd): w_shape = (self.__shape[i], self.__shape[i + 1]) a = tf.multiply(2.0, tf.sqrt(6.0 / (w_shape[0] + w_shape[1]))) name_w = "matrix"+str(i + 1) self[name_w] = tf.get_variable("Variables/"+name_w, initializer=tf.random_uniform(w_shape, -1 * a, a)) if wd is not None: weight_decay = tf.multiply(tf.nn.l2_loss(self[name_w]), wd, name='wgt_'+str(i)+'_loss') tf.add_to_collection('losses', weight_decay) tf.summary.histogram(name_w, self[name_w]) name_b = "bias"+str(i + 1) b_shape = (self.__shape[i + 1],) self[name_b] = tf.get_variable("Variables/"+name_b, initializer=tf.zeros(b_shape)) if i < self.num_hidden_layers: self[name_w + "_pretr"] = tf.get_variable(name="Var/" + name_w + "_pretr", initializer= tf.random_uniform(w_shape, -1 * a, a), trainable=False) self[name_b + "_pretr"] = tf.get_variable("Var/"+name_b+"_pretr", trainable=False, initializer=tf.zeros(b_shape)) name_b_out = "bias" + str(i+1) + "_out" b_shape = (self.__shape[i],) b_init = tf.zeros(b_shape) self[name_b_out] = tf.get_variable(name="Var/"+name_b_out, initializer=b_init, trainable=True)
Apache License 2.0
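A sketch of how run_less_layers above supports layer-wise pretraining: for step n, the net truncated after n layers is trained to reconstruct the output of the first n-1 (frozen, already pretrained) layers. The loss and optimizer below are illustrative TF1-style assumptions, not the repository's actual training code:

import tensorflow as tf

def pretrain_layer_step(ae, input_pl, n, learning_rate=1e-3):
    output = ae.run_less_layers(input_pl, n)                   # through trainable layer n
    target = ae.run_less_layers(input_pl, n, is_target=True)   # frozen first n-1 layers
    loss = tf.reduce_mean(tf.square(output - target))          # reconstruct the n-1 output
    return tf.train.AdamOptimizer(learning_rate).minimize(loss)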
jaymon/captain
captain/reflection.py
ReflectCommand.parseargs
python
def parseargs(self): for pa in self.method().parseargs(): yield pa
Yield all the ParseArg instances of the arguments defined for this command.
https://github.com/jaymon/captain/blob/12fe62189c53d810ce8ff308c164967048a3d309/captain/reflection.py#L52-L55
from __future__ import unicode_literals, division, print_function, absolute_import import re import inspect import types import argparse from .compat import * class ReflectCommand(object): @property def desc(self): def get_desc(o): desc = "" comment_regex = re.compile(r"^\s*#\s*", flags=re.M) desc = inspect.getdoc(o) if not desc: desc = inspect.getcomments(o) if desc: desc = comment_regex.sub("", desc).strip() desc = re.sub(r"^(?:\s*#\s*)?\-\*\-.*", "", desc, flags=re.M).strip() return desc desc = get_desc(self.command_class.handle) if not desc: desc = get_desc(self.command_class) if not desc: desc = get_desc(self.command_class.module) if not desc: desc = "" return desc def __init__(self, command): self.command_class = command if inspect.isclass(command) else command.__class__ def method(self, method_name="handle"): return ReflectMethod(self.command_class.handle)
MIT License
qiskit/qiskit-aqua
test/aqua/operators/test_cvar.py
TestCVaRMeasurement.expected_cvar
python
def expected_cvar(self, statevector, operator, alpha): probabilities = statevector * np.conj(statevector) num_bits = int(np.log2(len(statevector))) energies = [] for i, _ in enumerate(probabilities): basis_state = np.binary_repr(i, num_bits) energies += [operator.eval(basis_state).eval(basis_state)] i_sorted = np.argsort(energies) energies = [energies[i] for i in i_sorted] probabilities = [probabilities[i] for i in i_sorted] result = 0 accumulated_probabilities = 0 for energy, probability in zip(energies, probabilities): accumulated_probabilities += probability if accumulated_probabilities <= alpha: result += probability * energy else: result += (alpha - accumulated_probabilities + probability) * energy break return result / alpha
Compute the expected CVaR value for the given statevector, operator and confidence level alpha.
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/test/aqua/operators/test_cvar.py#L31-L59
from test.aqua import QiskitAquaTestCase import numpy as np from ddt import ddt, data from qiskit import QuantumCircuit from qiskit.aqua import AquaError from qiskit.aqua.operators import ( CVaRMeasurement, StateFn, Z, I, X, Y, Plus, PauliExpectation, MatrixExpectation, CVaRExpectation, ListOp, CircuitOp, AerPauliExpectation, MatrixOp ) class TestCVaRMeasurement(QiskitAquaTestCase):
Apache License 2.0
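The aggregation done by expected_cvar above can be illustrated without the operator/statevector machinery: sort the energies, then average the lowest-energy tail holding probability mass alpha. A standalone NumPy version of that loop:

import numpy as np

def cvar(energies, probabilities, alpha):
    order = np.argsort(energies)
    energies = np.asarray(energies, dtype=float)[order]
    probabilities = np.asarray(probabilities, dtype=float)[order]
    result, accumulated = 0.0, 0.0
    for energy, p in zip(energies, probabilities):
        accumulated += p
        if accumulated <= alpha:
            result += p * energy
        else:
            result += (alpha - accumulated + p) * energy
            break
    return result / alpha

print(cvar([1.0, -1.0], [0.5, 0.5], alpha=0.5))  # -1.0: only the best half of the mass counts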
castagnait/plugin.video.netflix
resources/lib/common/misc_utils.py
is_numeric
python
def is_numeric(string): try: int(string) except ValueError: return False return True
Return true if string represents an integer, else false
https://github.com/castagnait/plugin.video.netflix/blob/1c68c7d4c399603a5dcbeef1e7637de7a9036a72/resources/lib/common/misc_utils.py#L72-L78
import operator from urllib.parse import quote, urlencode from resources.lib.globals import G def find(value_to_find, attribute, search_space): for video in search_space: if video[attribute] == value_to_find: return video raise KeyError(f'Metadata for {value_to_find} does not exist') def find_episode_metadata(episode_videoid, metadata): season = find(int(episode_videoid.seasonid), 'id', metadata['seasons']) episode = find(int(episode_videoid.episodeid), 'id', season.get('episodes', {})) return episode, season def get_class_methods(class_item=None): from types import FunctionType _type = FunctionType return [x for x, y in class_item.__dict__.items() if isinstance(y, _type)] def build_url(pathitems=None, videoid=None, params=None, mode=None): if not (pathitems or videoid): raise ValueError('Either pathitems or videoid must be set.') path = f'{G.BASE_URL}/{_encode_path(mode, pathitems, videoid)}/{_encode_params(params)}' return path def _expand_mode(mode): return [mode] if mode else [] def _expand_videoid(videoid): return videoid.to_path() if videoid else [] def _encode_path(mode, pathitems, videoid): return quote( '/'.join(_expand_mode(mode) + (pathitems or []) + _expand_videoid(videoid)).encode('utf-8')) def _encode_params(params): return f'?{urlencode(params)}' if params else ''
MIT License
rapidsai/cuml
python/cuml/metrics/_ranking.py
_binary_roc_auc_score
python
def _binary_roc_auc_score(y_true, y_score): if cp.unique(y_true).shape[0] == 1: raise ValueError("roc_auc_score cannot be used when " "only one class present in y_true. ROC AUC score " "is not defined in that case.") if cp.unique(y_score).shape[0] == 1: return 0.5 fps, tps, thresholds = _binary_clf_curve(y_true, y_score) tpr = tps/tps[-1] fpr = fps/fps[-1] return _calculate_area_under_curve(fpr, tpr).item()
Compute binary roc_auc_score using cupy
https://github.com/rapidsai/cuml/blob/91abe6747ea61a5b59526f76568ea14d52814454/python/cuml/metrics/_ranking.py#L192-L207
import typing import cupy as cp import numpy as np import cuml.internals from cuml.common.array import CumlArray from cuml.common.input_utils import input_to_cupy_array import math @cuml.internals.api_return_generic(get_output_type=True) def precision_recall_curve( y_true, probs_pred) -> typing.Tuple[CumlArray, CumlArray, CumlArray]: y_true, n_rows, n_cols, ytype = input_to_cupy_array(y_true, check_dtype=[np.int32, np.int64, np.float32, np.float64]) y_score, _, _, _ = input_to_cupy_array(probs_pred, check_dtype=[np.int32, np.int64, np.float32, np.float64], check_rows=n_rows, check_cols=n_cols) if cp.any(y_true) == 0: raise ValueError("precision_recall_curve cannot be used when " "y_true is all zero.") fps, tps, thresholds = _binary_clf_curve(y_true, y_score) precision = cp.flip(tps/(tps+fps), axis=0) recall = cp.flip(tps/tps[-1], axis=0) n = (recall == 1).sum() if n > 1: precision = precision[n-1:] recall = recall[n-1:] thresholds = thresholds[n-1:] precision = cp.concatenate([precision, cp.ones(1)]) recall = cp.concatenate([recall, cp.zeros(1)]) return precision, recall, thresholds @cuml.internals.api_return_any() def roc_auc_score(y_true, y_score): y_true, n_rows, n_cols, ytype = input_to_cupy_array(y_true, check_dtype=[np.int32, np.int64, np.float32, np.float64]) y_score, _, _, _ = input_to_cupy_array(y_score, check_dtype=[np.int32, np.int64, np.float32, np.float64], check_rows=n_rows, check_cols=n_cols) return _binary_roc_auc_score(y_true, y_score) def _binary_clf_curve(y_true, y_score): if y_true.dtype.kind == 'f' and np.any(y_true != y_true.astype(int)): raise ValueError("Continuous format of y_true " "is not supported.") ids = cp.argsort(-y_score) sorted_score = y_score[ids] ones = y_true[ids].astype('float32') zeros = 1 - ones group = _group_same_scores(sorted_score) num = int(group[-1]) tps = cp.zeros(num, dtype='float32') fps = cp.zeros(num, dtype='float32') tps = _addup_x_in_group(group, ones, tps) fps = _addup_x_in_group(group, zeros, fps) tps = cp.cumsum(tps) fps = cp.cumsum(fps) thresholds = cp.unique(y_score) return fps, tps, thresholds
Apache License 2.0
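A hypothetical CPU cross-check for _binary_roc_auc_score above: build the ROC curve from cumulative true/false positives over descending scores and take the trapezoidal area. This simplified version ignores tied scores, unlike the _binary_clf_curve grouping in the context:

import numpy as np

def roc_auc_numpy(y_true, y_score):
    order = np.argsort(-np.asarray(y_score))
    y_true = np.asarray(y_true, dtype=float)[order]
    tps = np.cumsum(y_true)            # cumulative true positives
    fps = np.cumsum(1.0 - y_true)      # cumulative false positives
    tpr, fpr = tps / tps[-1], fps / fps[-1]
    return np.trapz(tpr, fpr)          # area under the ROC curve

print(roc_auc_numpy([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8]))  # 0.75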
adamcharnock/seed
seed/vcs/git.py
GitVcs.parse_log_messages
python
def parse_log_messages(self, text): regex = r"commit ([0-9a-f]+)\nAuthor: (.*?)\n\n(.*?)(?:\n\n|$)" messages = re.findall(regex, text, re.DOTALL) parsed = [] for commit, author, message in messages: parsed.append(( commit[:10], re.sub(r"\s*<.*?>", "", author), message.strip() )) return parsed
Will parse git log messages in the 'short' format
https://github.com/adamcharnock/seed/blob/48232a2497bd94c5e1466a5b33a929f253ab5112/seed/vcs/git.py#L44-L56
import re from pipes import quote from seed.vcs import BaseVcs from seed.utilities import run_command from seed.exceptions import ShellCommandError class GitVcs(BaseVcs): name = "git" def get_suitability(self): try: run_command("git status") except ShellCommandError: return 0 return 1 def get_changes(self, since_version): log_range = "%s..HEAD" % self.make_tag_name(since_version) commits = run_command("git log --pretty=short %s" % quote(log_range)) return self.parse_log_messages(commits) def commit(self, message, files): quoted_files = " ".join(map(quote, files)) run_command("git add %s" % quoted_files) run_command("git commit -m %s %s" % (quote(message), quoted_files)) def tag(self, version): name = self.make_tag_name(version) run_command("git tag %s" % quote(name)) def push(self): run_command("git push") run_command("git push --tags") def add(self, file_path): run_command("git add %s" % quote(file_path)) def get_download_url(self, version): return None
MIT License
hopeit-git/hopeit.engine
engine/src/hopeit/server/web.py
_handle_multipart_invocation
python
async def _handle_multipart_invocation(
        app_engine: AppEngine, impl: AppEngine,
        event_name: str,
        datatype: Optional[Type[DataObject]],
        auth_types: List[AuthType],
        request: web.Request) -> ResponseType:
    context = None
    try:
        event_settings = get_event_settings(app_engine.settings, event_name)
        context = _request_start(app_engine, impl, event_name, event_settings, request)
        query_args = dict(request.query)
        _validate_authorization(app_engine.app_config, context, auth_types, request)
        hook = PreprocessHook(
            headers=request.headers, multipart_reader=await request.multipart()
        )
        return await _request_execute(
            impl, event_name, context, query_args,
            payload=None,
            preprocess_hook=hook
        )
    except Unauthorized as e:
        return _ignored_response(context, 401, e)
    except BadRequest as e:
        return _ignored_response(context, 400, e)
    except Exception as e:
        return _failed_response(context, e)
Handler to execute multipart POST calls
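For illustration, a hedged client-side sketch of how an endpoint served by this handler might be invoked. The URL, field name and payload are invented and assume a hopeit server exposing such a route is running locally:

import asyncio
import aiohttp

async def upload():
    # Build a multipart body; the handler above hands the multipart reader
    # to the event's preprocess step.
    form = aiohttp.FormData()
    form.add_field("attachment", b"example-bytes",
                   filename="example.bin",
                   content_type="application/octet-stream")
    async with aiohttp.ClientSession() as session:
        async with session.post(
                "http://localhost:8020/api/example-app/1x0/save-file",
                data=form) as resp:
            print(resp.status, await resp.text())

asyncio.run(upload())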
https://github.com/hopeit-git/hopeit.engine/blob/79137e483a9577f336524f7e3b22a26a8d5a7ea3/engine/src/hopeit/server/web.py#L621-L650
import aiohttp setattr(aiohttp.http, 'SERVER_SOFTWARE', '') import argparse import asyncio import gc import logging import re import sys import uuid from datetime import datetime, timezone from functools import partial from typing import ( Any, Callable, Coroutine, Dict, List, Optional, Tuple, Type, Union ) import aiohttp_cors import aiojobs import aiojobs.aiohttp as aiojobs_http from aiohttp import web from aiohttp.web_response import Response from aiohttp_cors import CorsConfig from aiojobs import Scheduler from stringcase import snakecase, titlecase from hopeit.app.config import ( AppConfig, EventDescriptor, EventPlugMode, EventSettings, EventType, parse_app_config_json ) from hopeit.app.context import ( EventContext, NoopMultiparReader, PostprocessHook, PreprocessHook ) from hopeit.app.errors import BadRequest, Unauthorized from hopeit.dataobjects import DataObject, EventPayload, EventPayloadType from hopeit.dataobjects.payload import Payload from hopeit.server import api, runtime from hopeit.server.api import app_route_name from hopeit.server.config import ( AuthType, ServerConfig, parse_server_config_json ) from hopeit.server.engine import AppEngine, Server from hopeit.server.errors import ErrorInfo from hopeit.server.events import get_event_settings from hopeit.server.logger import ( EngineLoggerWrapper, combined, engine_logger, extra_logger ) from hopeit.server.metrics import metrics from hopeit.server.names import route_name from hopeit.server.steps import find_datatype_handler from hopeit.toolkit import auth __all__ = ['parse_args', 'main', 'start_server', 'start_app', 'stop_server'] logger: EngineLoggerWrapper = logging.getLogger(__name__) extra = extra_logger() ResponseType = Union[web.Response, web.FileResponse] web_server = web.Application() aiojobs_http.setup(web_server) auth_info_default = {} def main(host: Optional[str], port: Optional[int], path: Optional[str], start_streams: bool, config_files: List[str], api_file: Optional[str]): loop = asyncio.get_event_loop() scheduler = loop.run_until_complete(aiojobs.create_scheduler()) logger.info("Loading engine config file=%s...", config_files[0]) server_config = _load_engine_config(config_files[0]) loop.run_until_complete(start_server(server_config)) if server_config.auth.domain: auth_info_default['domain'] = server_config.auth.domain if api_file is not None: api.load_api_file(api_file) api.register_server_config(server_config) apps_config = [] for config_file in config_files[1:]: logger.info(__name__, f"Loading app config file={config_file}...") config = _load_app_config(config_file) config.server = server_config apps_config.append(config) api.register_apps(apps_config) api.enable_swagger(server_config, web_server) for config in apps_config: loop.run_until_complete(start_app(config, scheduler, start_streams)) logger.debug(__name__, "Performing forced garbage collection...") gc.collect() web.run_app(web_server, path=path, port=port, host=host) def init_logger(): global logger logger = engine_logger() async def start_server(config: ServerConfig): await runtime.server.start(config=config) init_logger() async def stop_server(): global web_server await runtime.server.stop() await web_server.shutdown() runtime.server = Server() web_server = web.Application() async def start_app(config: AppConfig, scheduler: Scheduler, start_streams: bool = False): app_engine = await runtime.server.start_app(app_config=config) cors_origin = aiohttp_cors.setup(web_server, defaults={ config.engine.cors_origin: aiohttp_cors.ResourceOptions( 
allow_credentials=True, expose_headers="*", allow_headers="*", ) }) if config.engine.cors_origin else None _setup_app_event_routes(app_engine) for plugin in config.plugins: plugin_engine = runtime.server.app_engine(app_key=plugin.app_key()) _setup_app_event_routes(app_engine, plugin_engine) if cors_origin: app = app_engine.app_config.app _enable_cors(route_name('api', app.name, app.version), cors_origin) if start_streams: await _start_streams(app_engine, scheduler) def _effective_events(app_engine: AppEngine, plugin: Optional[AppEngine] = None): if plugin is None: return { k: v for k, v in app_engine.effective_events.items() if v.plug_mode == EventPlugMode.STANDALONE } return { k: v for k, v in plugin.effective_events.items() if v.plug_mode == EventPlugMode.ON_APP } def _load_engine_config(path: str): with open(path, encoding="utf-8") as f: return parse_server_config_json(f.read()) def _load_app_config(path: str) -> AppConfig: with open(path, encoding="utf-8") as f: return parse_app_config_json(f.read()) def _enable_cors(prefix: str, cors: CorsConfig): for route in web_server.router.routes(): if route.resource and route.resource.canonical.startswith(prefix): cors.add(route) def _setup_app_event_routes(app_engine: AppEngine, plugin: Optional[AppEngine] = None): for event_name, event_info in _effective_events(app_engine, plugin).items(): if event_info.type == EventType.POST: web_server.add_routes([ _create_post_event_route( app_engine, plugin=plugin, event_name=event_name, override_route_name=event_info.route ) ]) elif event_info.type == EventType.GET: web_server.add_routes([ _create_get_event_route( app_engine, plugin=plugin, event_name=event_name, override_route_name=event_info.route ) ]) elif event_info.type == EventType.MULTIPART: web_server.add_routes([ _create_multipart_event_route( app_engine, plugin=plugin, event_name=event_name, override_route_name=event_info.route ) ]) elif event_info.type == EventType.STREAM and plugin is None: web_server.add_routes( _create_event_management_routes( app_engine, event_name=event_name, event_info=event_info ) ) elif event_info.type == EventType.SERVICE and plugin is None: web_server.add_routes( _create_event_management_routes( app_engine, event_name=event_name, event_info=event_info ) ) else: raise ValueError(f"Invalid event_type:{event_info.type} for event:{event_name}") def _auth_types(app_engine: AppEngine, event_name: str): assert app_engine.app_config.server event_info = app_engine.app_config.events[event_name] if event_info.auth: return event_info.auth return app_engine.app_config.server.auth.default_auth_methods def _create_post_event_route( app_engine: AppEngine, *, plugin: Optional[AppEngine] = None, event_name: str, override_route_name: Optional[str]) -> web.RouteDef: datatype = find_datatype_handler(app_config=app_engine.app_config, event_name=event_name) route = app_route_name(app_engine.app_config.app, event_name=event_name, plugin=None if plugin is None else plugin.app_config.app, override_route_name=override_route_name) logger.info(__name__, f"POST path={route} input={str(datatype)}") impl = plugin if plugin else app_engine handler = partial(_handle_post_invocation, app_engine, impl, event_name, datatype, _auth_types(impl, event_name)) setattr(handler, '__closure__', None) setattr(handler, '__code__', _handle_post_invocation.__code__) api_handler = api.add_route('post', route, handler) return web.post(route, api_handler) def _create_get_event_route( app_engine: AppEngine, *, plugin: Optional[AppEngine] = None, event_name: str, 
override_route_name: Optional[str]) -> web.RouteDef: route = app_route_name(app_engine.app_config.app, event_name=event_name, plugin=None if plugin is None else plugin.app_config.app, override_route_name=override_route_name) logger.info(__name__, f"GET path={route}") impl = plugin if plugin else app_engine handler = partial(_handle_get_invocation, app_engine, impl, event_name, _auth_types(impl, event_name)) setattr(handler, '__closure__', None) setattr(handler, '__code__', _handle_post_invocation.__code__) api_handler = api.add_route('get', route, handler) return web.get(route, api_handler) def _create_multipart_event_route( app_engine: AppEngine, *, plugin: Optional[AppEngine] = None, event_name: str, override_route_name: Optional[str]) -> web.RouteDef: datatype = find_datatype_handler(app_config=app_engine.app_config, event_name=event_name) route = app_route_name(app_engine.app_config.app, event_name=event_name, plugin=None if plugin is None else plugin.app_config.app, override_route_name=override_route_name) logger.info(__name__, f"MULTIPART path={route} input={str(datatype)}") impl = plugin if plugin else app_engine handler = partial(_handle_multipart_invocation, app_engine, impl, event_name, datatype, _auth_types(impl, event_name)) setattr(handler, '__closure__', None) setattr(handler, '__code__', _handle_multipart_invocation.__code__) api_handler = api.add_route('post', route, handler) return web.post(route, api_handler) def _create_event_management_routes( app_engine: AppEngine, *, event_name: str, event_info: EventDescriptor) -> List[web.RouteDef]: evt = event_name.replace('.', '/').replace('$', '/') base_route = app_route_name(app_engine.app_config.app, event_name=evt, prefix='mgmt', override_route_name=event_info.route) logger.info(__name__, f"{event_info.type.value.upper()} path={base_route}/[start|stop]") handler: Optional[partial[Coroutine[Any, Any, Response]]] = None if event_info.type == EventType.STREAM: handler = partial(_handle_stream_start_invocation, app_engine, event_name) elif event_info.type == EventType.SERVICE: handler = partial(_handle_service_start_invocation, app_engine, event_name) assert handler is not None, f"No handler for event={event_name} type={event_info.type}" return [ web.get(base_route + '/start', handler), web.get( base_route + '/stop', partial(_handle_event_stop_invocation, app_engine, event_name) ) ] def _response(*, track_ids: Dict[str, str], key: str, payload: EventPayload, hook: PostprocessHook) -> ResponseType: response: ResponseType headers = { **hook.headers, **{f"X-{re.sub(' ', '-', titlecase(k))}": v for k, v in track_ids.items()} } if hook.file_response is not None: response = web.FileResponse( path=hook.file_response, headers={'Content-Type': hook.content_type, **headers} ) else: serializer: Callable[..., str] = CONTENT_TYPE_BODY_SER.get( hook.content_type, _text_response ) body = serializer(payload, key=key) response = web.Response( body=body, headers=headers, content_type=hook.content_type ) for name, cookie in hook.cookies.items(): value, args, kwargs = cookie response.set_cookie(name, value, *args, **kwargs) for name, args, kwargs in hook.del_cookies: response.del_cookie(name, *args, **kwargs) if hook.status: response.set_status(hook.status) return response def _response_info(response: ResponseType): return extra(prefix='response.', status=str(response.status)) def _track_ids(request: web.Request) -> Dict[str, str]: return { 'track.operation_id': str(uuid.uuid4()), 'track.request_id': str(uuid.uuid4()), 'track.request_ts': 
datetime.now().astimezone(timezone.utc).isoformat(), **{ "track." + snakecase(k[8:].lower()): v for k, v in request.headers.items() if k.lower().startswith('x-track-') } } def _failed_response(context: Optional[EventContext], e: Exception) -> web.Response: if context: logger.error(context, e) logger.failed(context) else: logger.error(__name__, e) info = ErrorInfo.from_exception(e) return web.Response( status=500, body=Payload.to_json(info) ) def _ignored_response(context: Optional[EventContext], status: int, e: BaseException) -> web.Response: if context: logger.error(context, e) logger.ignored(context) else: logger.error(__name__, e) info = ErrorInfo.from_exception(e) return web.Response( status=status, body=Payload.to_json(info) ) def _request_start(app_engine: AppEngine, plugin: AppEngine, event_name: str, event_settings: EventSettings, request: web.Request) -> EventContext: context = EventContext( app_config=app_engine.app_config, plugin_config=plugin.app_config, event_name=event_name, settings=event_settings, track_ids=_track_ids(request), auth_info=auth_info_default ) logger.start(context) return context def _extract_auth_header(request: web.Request, context: EventContext) -> Optional[str]: return request.headers.get("Authorization") def _extract_refresh_cookie(request: web.Request, context: EventContext) -> Optional[str]: return request.cookies.get(f"{context.app_key}.refresh") def _ignore_auth(request: web.Request, context: EventContext) -> str: return 'Unsecured -' AUTH_HEADER_EXTRACTORS = { AuthType.BASIC: _extract_auth_header, AuthType.BEARER: _extract_auth_header, AuthType.REFRESH: _extract_refresh_cookie, AuthType.UNSECURED: _ignore_auth } def _extract_authorization(auth_methods: List[AuthType], request: web.Request, context: EventContext): for auth_type in auth_methods: auth_header = AUTH_HEADER_EXTRACTORS[auth_type](request, context) if auth_header is not None: return auth_header return 'Unsecured -' def _validate_authorization(app_config: AppConfig, context: EventContext, auth_types: List[AuthType], request: web.Request): auth_methods = context.event_info.auth if (len(auth_methods) == 0) and (app_config.server is not None): auth_methods = app_config.server.auth.default_auth_methods auth_header = _extract_authorization(auth_methods, request, context) try: method, data = auth_header.split(" ") except ValueError as e: raise BadRequest("Malformed Authorization") from e context.auth_info['allowed'] = False for auth_type in auth_types: if method.upper() == auth_type.name.upper(): auth.validate_auth_method(auth_type, data, context) if context.auth_info.get('allowed'): return None raise Unauthorized(method) def _application_json_response(result: DataObject, key: str, *args, **kwargs) -> str: return Payload.to_json(result, key=key) def _text_response(result: str, *args, **kwargs) -> str: return str(result) CONTENT_TYPE_BODY_SER: Dict[str, Callable[..., str]] = { 'application/json': _application_json_response, 'text/html': _text_response, 'text/plain': _text_response } async def _request_execute( app_engine: AppEngine, event_name: str, context: EventContext, query_args: Dict[str, Any], payload: Optional[EventPayloadType], preprocess_hook: PreprocessHook) -> ResponseType: response_hook = PostprocessHook() result = await app_engine.preprocess( context=context, query_args=query_args, payload=payload, request=preprocess_hook) if (preprocess_hook.status is None) or (preprocess_hook.status == 200): result = await app_engine.execute(context=context, query_args=query_args, payload=result) 
result = await app_engine.postprocess(context=context, payload=result, response=response_hook) else: response_hook.set_status(preprocess_hook.status) response = _response( track_ids=context.track_ids, key=event_name, payload=result, hook=response_hook ) logger.done(context, extra=combined( _response_info(response), metrics(context) )) return response async def _request_process_payload( context: EventContext, datatype: Optional[Type[EventPayloadType]], request: web.Request) -> Optional[EventPayloadType]: try: payload_raw = await request.read() if (payload_raw is None) or (payload_raw == b''): return None payload = Payload.from_json(payload_raw, datatype) if datatype else payload_raw.decode() return payload except ValueError as e: logger.error(context, e) raise BadRequest(e) from e async def _handle_post_invocation( app_engine: AppEngine, impl: AppEngine, event_name: str, datatype: Optional[Type[DataObject]], auth_types: List[AuthType], request: web.Request) -> ResponseType: context = None try: event_settings = get_event_settings(app_engine.settings, event_name) context = _request_start(app_engine, impl, event_name, event_settings, request) query_args = dict(request.query) _validate_authorization(app_engine.app_config, context, auth_types, request) payload = await _request_process_payload(context, datatype, request) hook: PreprocessHook[NoopMultiparReader] = PreprocessHook(headers=request.headers) return await _request_execute( impl, event_name, context, query_args, payload, preprocess_hook=hook ) except Unauthorized as e: return _ignored_response(context, 401, e) except BadRequest as e: return _ignored_response(context, 400, e) except Exception as e: return _failed_response(context, e) async def _handle_get_invocation( app_engine: AppEngine, impl: AppEngine, event_name: str, auth_types: List[AuthType], request: web.Request) -> ResponseType: context = None try: event_settings = get_event_settings(app_engine.settings, event_name) context = _request_start(app_engine, impl, event_name, event_settings, request) _validate_authorization(app_engine.app_config, context, auth_types, request) query_args = dict(request.query) payload = query_args.get('payload') if payload is not None: del query_args['payload'] hook: PreprocessHook[NoopMultiparReader] = PreprocessHook(headers=request.headers) return await _request_execute( impl, event_name, context, query_args, payload=payload, preprocess_hook=hook ) except Unauthorized as e: return _ignored_response(context, 401, e) except BadRequest as e: return _ignored_response(context, 400, e) except Exception as e: return _failed_response(context, e)
Apache License 2.0
navneet-nmk/hierarchical-meta-reinforcement-learning
rlkit/rlkit/data_management/obs_dict_replay_buffer.py
postprocess_obs_dict
python
def postprocess_obs_dict(obs_dict):
    for obs_key, obs in obs_dict.items():
        if 'image' in obs_key and obs is not None:
            obs_dict[obs_key] = normalize_image(obs)
    return obs_dict
Undo the internal replay buffer representation changes: images stored as raw bytes are converted back to normalized float arrays
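A small usage sketch, assuming normalize_image (imported elsewhere in this module) rescales uint8 pixel values into [0, 1] floats; the dictionary keys and values are made up:

import numpy as np
from rlkit.data_management.obs_dict_replay_buffer import postprocess_obs_dict

batch = {
    'observation': np.zeros((2, 4)),
    'image_observation': np.array([[0, 127, 255]], dtype=np.uint8),
}
out = postprocess_obs_dict(batch)
# Only keys containing 'image' are touched: 'image_observation' now holds
# floats rescaled into [0, 1], while 'observation' is returned unchanged.
print(out['image_observation'], out['observation'].dtype)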
https://github.com/navneet-nmk/hierarchical-meta-reinforcement-learning/blob/4ef92cebfa11760d9de55d54fc8e827271982a63/rlkit/rlkit/data_management/obs_dict_replay_buffer.py#L294-L301
import numpy as np from gym.spaces import Dict, Discrete from rlkit.data_management.replay_buffer import ReplayBuffer class ObsDictRelabelingBuffer(ReplayBuffer): def __init__( self, max_size, env, fraction_goals_rollout_goals=1.0, fraction_goals_env_goals=0.0, internal_keys=None, goal_keys=None, observation_key='observation', desired_goal_key='desired_goal', achieved_goal_key='achieved_goal', ): if internal_keys is None: internal_keys = [] self.internal_keys = internal_keys if goal_keys is None: goal_keys = [] if desired_goal_key not in goal_keys: goal_keys.append(desired_goal_key) self.goal_keys = goal_keys assert isinstance(env.observation_space, Dict) assert 0 <= fraction_goals_rollout_goals assert 0 <= fraction_goals_env_goals assert 0 <= fraction_goals_rollout_goals + fraction_goals_env_goals assert fraction_goals_rollout_goals + fraction_goals_env_goals <= 1 self.max_size = max_size self.env = env self.fraction_goals_rollout_goals = fraction_goals_rollout_goals self.fraction_goals_env_goals = fraction_goals_env_goals self.ob_keys_to_save = [ observation_key, desired_goal_key, achieved_goal_key, ] self.observation_key = observation_key self.desired_goal_key = desired_goal_key self.achieved_goal_key = achieved_goal_key if isinstance(self.env.action_space, Discrete): self._action_dim = env.action_space.n else: self._action_dim = env.action_space.low.size self._actions = np.zeros((max_size, self._action_dim)) self._terminals = np.zeros((max_size, 1), dtype='uint8') self._obs = {} self._next_obs = {} self.ob_spaces = self.env.observation_space.spaces for key in self.ob_keys_to_save + internal_keys: assert key in self.ob_spaces, "Key not found in the observation space: %s" % key type = np.float64 if key.startswith('image'): type = np.uint8 self._obs[key] = np.zeros( (max_size, self.ob_spaces[key].low.size), dtype=type) self._next_obs[key] = np.zeros( (max_size, self.ob_spaces[key].low.size), dtype=type) self._top = 0 self._size = 0 self._idx_to_future_obs_idx = [None] * max_size def add_sample(self, observation, action, reward, terminal, next_observation, **kwargs): raise NotImplementedError("Only use add_path") def terminate_episode(self): pass def num_steps_can_sample(self): return self._size def add_path(self, path): obs = path["observations"] actions = path["actions"] rewards = path["rewards"] next_obs = path["next_observations"] terminals = path["terminals"] path_len = len(rewards) actions = flatten_n(actions) if isinstance(self.env.action_space, Discrete): actions = np.eye(self._action_dim)[actions].reshape((-1, self._action_dim)) obs = flatten_dict(obs, self.ob_keys_to_save + self.internal_keys) next_obs = flatten_dict(next_obs, self.ob_keys_to_save + self.internal_keys) obs = preprocess_obs_dict(obs) next_obs = preprocess_obs_dict(next_obs) if self._top + path_len >= self.max_size: num_pre_wrap_steps = self.max_size - self._top pre_wrap_buffer_slice = np.s_[ self._top:self._top + num_pre_wrap_steps, : ] pre_wrap_path_slice = np.s_[0:num_pre_wrap_steps, :] num_post_wrap_steps = path_len - num_pre_wrap_steps post_wrap_buffer_slice = slice(0, num_post_wrap_steps) post_wrap_path_slice = slice(num_pre_wrap_steps, path_len) for buffer_slice, path_slice in [ (pre_wrap_buffer_slice, pre_wrap_path_slice), (post_wrap_buffer_slice, post_wrap_path_slice), ]: self._actions[buffer_slice] = actions[path_slice] self._terminals[buffer_slice] = terminals[path_slice] for key in self.ob_keys_to_save + self.internal_keys: self._obs[key][buffer_slice] = obs[key][path_slice] 
self._next_obs[key][buffer_slice] = next_obs[key][path_slice] for i in range(self._top, self.max_size): self._idx_to_future_obs_idx[i] = np.hstack(( np.arange(i, self.max_size), np.arange(0, num_post_wrap_steps) )) for i in range(0, num_post_wrap_steps): self._idx_to_future_obs_idx[i] = np.arange( i, num_post_wrap_steps, ) else: slc = np.s_[self._top:self._top + path_len, :] self._actions[slc] = actions self._terminals[slc] = terminals for key in self.ob_keys_to_save + self.internal_keys: self._obs[key][slc] = obs[key] self._next_obs[key][slc] = next_obs[key] for i in range(self._top, self._top + path_len): self._idx_to_future_obs_idx[i] = np.arange( i, self._top + path_len ) self._top = (self._top + path_len) % self.max_size self._size = min(self._size + path_len, self.max_size) def _sample_indices(self, batch_size): return np.random.randint(0, self._size, batch_size) def random_batch(self, batch_size): indices = self._sample_indices(batch_size) resampled_goals = self._next_obs[self.desired_goal_key][indices] num_env_goals = int(batch_size * self.fraction_goals_env_goals) num_rollout_goals = int(batch_size * self.fraction_goals_rollout_goals) num_future_goals = batch_size - (num_env_goals + num_rollout_goals) new_obs_dict = self._batch_obs_dict(indices) new_next_obs_dict = self._batch_next_obs_dict(indices) if num_env_goals > 0: env_goals = self.env.sample_goals(num_env_goals) env_goals = preprocess_obs_dict(env_goals) last_env_goal_idx = num_rollout_goals + num_env_goals resampled_goals[num_rollout_goals:last_env_goal_idx] = ( env_goals[self.desired_goal_key] ) for goal_key in self.goal_keys: new_obs_dict[goal_key][num_rollout_goals:last_env_goal_idx] = env_goals[goal_key] new_next_obs_dict[goal_key][ num_rollout_goals:last_env_goal_idx] = env_goals[goal_key] if num_future_goals > 0: future_obs_idxs = [] for i in indices[-num_future_goals:]: possible_future_obs_idxs = self._idx_to_future_obs_idx[i] num_options = len(possible_future_obs_idxs) next_obs_i = int(np.random.randint(0, num_options)) future_obs_idxs.append(possible_future_obs_idxs[next_obs_i]) future_obs_idxs = np.array(future_obs_idxs) resampled_goals[-num_future_goals:] = self._next_obs[ self.achieved_goal_key ][future_obs_idxs] for goal_key in self.goal_keys: new_obs_dict[goal_key][-num_future_goals:] = self._next_obs[goal_key][future_obs_idxs] new_next_obs_dict[goal_key][-num_future_goals:] = self._next_obs[goal_key][future_obs_idxs] new_obs_dict[self.desired_goal_key] = resampled_goals new_next_obs_dict[self.desired_goal_key] = resampled_goals new_obs_dict = postprocess_obs_dict(new_obs_dict) new_next_obs_dict = postprocess_obs_dict(new_next_obs_dict) resampled_goals = new_next_obs_dict[self.desired_goal_key] new_actions = self._actions[indices] if hasattr(self.env, 'compute_rewards'): new_rewards = self.env.compute_rewards( new_actions, new_next_obs_dict, ) else: new_rewards = np.ones((batch_size, 1)) for i in range(batch_size): new_rewards[i] = self.env.compute_reward( new_next_obs_dict[self.achieved_goal_key][i], new_next_obs_dict[self.desired_goal_key][i], None ) new_rewards = new_rewards.reshape(-1, 1) new_obs = new_obs_dict[self.observation_key] new_next_obs = new_next_obs_dict[self.observation_key] batch = { 'observations': new_obs, 'actions': new_actions, 'rewards': new_rewards, 'terminals': self._terminals[indices], 'next_observations': new_next_obs, 'resampled_goals': resampled_goals, 'indices': np.array(indices).reshape(-1, 1), } return batch def _batch_obs_dict(self, indices): return { key: 
self._obs[key][indices] for key in self.ob_keys_to_save } def _batch_next_obs_dict(self, indices): return { key: self._next_obs[key][indices] for key in self.ob_keys_to_save } def flatten_n(xs): xs = np.asarray(xs) return xs.reshape((xs.shape[0], -1)) def flatten_dict(dicts, keys): return { key: flatten_n([d[key] for d in dicts]) for key in keys } def preprocess_obs_dict(obs_dict): for obs_key, obs in obs_dict.items(): if 'image' in obs_key and obs is not None: obs_dict[obs_key] = unnormalize_image(obs) return obs_dict
MIT License
swaglyrics/swspotify
SwSpotify/spotify.py
get_info_windows
python
def get_info_windows():
    import win32gui

    windows = []

    old_window = win32gui.FindWindow("SpotifyMainWindow", None)
    old = win32gui.GetWindowText(old_window)

    def find_spotify_uwp(hwnd, windows):
        text = win32gui.GetWindowText(hwnd)
        classname = win32gui.GetClassName(hwnd)
        if classname == "Chrome_WidgetWin_0" and len(text) > 0:
            windows.append(text)

    if old:
        windows.append(old)
    else:
        win32gui.EnumWindows(find_spotify_uwp, windows)

    if len(windows) == 0:
        raise SpotifyClosed

    try:
        artist, track = windows[0].split(" - ", 1)
    except ValueError:
        artist = ""
        track = windows[0]

    if windows[0].startswith("Spotify"):
        raise SpotifyPaused

    return track, artist
Reads the window titles to get the data. Older Spotify versions simply use FindWindow for "SpotifyMainWindow"; the newer ones require an EnumWindows handler, which floods the list with Chrome_WidgetWin_0 windows.
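The title convention the function relies on can be shown in isolation. This standalone sketch reproduces only the splitting step, with made-up titles:

def split_title(title):
    # A playing Spotify window is titled "<artist> - <track>"; splitting on
    # the first " - " lets hyphens inside the track name survive.
    try:
        artist, track = title.split(" - ", 1)
    except ValueError:
        artist, track = "", title
    return track, artist

print(split_title("Daft Punk - Harder, Better, Faster, Stronger"))
# -> ('Harder, Better, Faster, Stronger', 'Daft Punk')

# A title starting with "Spotify" (e.g. "Spotify Premium") means playback is
# paused, which is why get_info_windows raises SpotifyPaused in that case.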
https://github.com/swaglyrics/swspotify/blob/2e7f389294754f0164de7bbc6306dccee505cf63/SwSpotify/spotify.py#L6-L48
import sys import subprocess from SwSpotify import SpotifyClosed, SpotifyPaused, SpotifyNotRunning
MIT License
pallets/werkzeug
src/werkzeug/sansio/request.py
Request.cache_control
python
def cache_control(self) -> RequestCacheControl:
    cache_control = self.headers.get("Cache-Control")
    return parse_cache_control_header(cache_control, None, RequestCacheControl)
A :class:`~werkzeug.datastructures.RequestCacheControl` object for the incoming cache control headers.
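A quick way to exercise the property is through the WSGI wrapper, which reuses this sansio implementation; the header value here is just an example:

from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request

environ = EnvironBuilder(headers={"Cache-Control": "no-cache, max-age=0"}).get_environ()
request = Request(environ)
print(request.cache_control.max_age)         # 0
print(bool(request.cache_control.no_cache))  # True: the directive is present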
https://github.com/pallets/werkzeug/blob/d4987ee711c3d69ca665bf3b7e5e84705b2f91b5/src/werkzeug/sansio/request.py#L418-L423
import typing as t from datetime import datetime from .._internal import _to_str from ..datastructures import Accept from ..datastructures import Authorization from ..datastructures import CharsetAccept from ..datastructures import ETags from ..datastructures import Headers from ..datastructures import HeaderSet from ..datastructures import IfRange from ..datastructures import ImmutableList from ..datastructures import ImmutableMultiDict from ..datastructures import LanguageAccept from ..datastructures import MIMEAccept from ..datastructures import MultiDict from ..datastructures import Range from ..datastructures import RequestCacheControl from ..http import parse_accept_header from ..http import parse_authorization_header from ..http import parse_cache_control_header from ..http import parse_cookie from ..http import parse_date from ..http import parse_etags from ..http import parse_if_range_header from ..http import parse_list_header from ..http import parse_options_header from ..http import parse_range_header from ..http import parse_set_header from ..urls import url_decode from ..user_agent import UserAgent from ..useragents import _UserAgent as _DeprecatedUserAgent from ..utils import cached_property from ..utils import header_property from .utils import get_current_url from .utils import get_host class Request: charset = "utf-8" encoding_errors = "replace" parameter_storage_class: t.Type[MultiDict] = ImmutableMultiDict dict_storage_class: t.Type[MultiDict] = ImmutableMultiDict list_storage_class: t.Type[t.List] = ImmutableList user_agent_class = _DeprecatedUserAgent trusted_hosts: t.Optional[t.List[str]] = None def __init__( self, method: str, scheme: str, server: t.Optional[t.Tuple[str, t.Optional[int]]], root_path: str, path: str, query_string: bytes, headers: Headers, remote_addr: t.Optional[str], ) -> None: self.method = method.upper() self.scheme = scheme self.server = server self.root_path = root_path.rstrip("/") self.path = "/" + path.lstrip("/") self.query_string = query_string self.headers = headers self.remote_addr = remote_addr def __repr__(self) -> str: try: url = self.url except Exception as e: url = f"(invalid URL: {e})" return f"<{type(self).__name__} {url!r} [{self.method}]>" @property def url_charset(self) -> str: return self.charset @cached_property def args(self) -> "MultiDict[str, str]": return url_decode( self.query_string, self.url_charset, errors=self.encoding_errors, cls=self.parameter_storage_class, ) @cached_property def access_route(self) -> t.List[str]: if "X-Forwarded-For" in self.headers: return self.list_storage_class( parse_list_header(self.headers["X-Forwarded-For"]) ) elif self.remote_addr is not None: return self.list_storage_class([self.remote_addr]) return self.list_storage_class() @cached_property def full_path(self) -> str: return f"{self.path}?{_to_str(self.query_string, self.url_charset)}" @property def is_secure(self) -> bool: return self.scheme in {"https", "wss"} @cached_property def url(self) -> str: return get_current_url( self.scheme, self.host, self.root_path, self.path, self.query_string ) @cached_property def base_url(self) -> str: return get_current_url(self.scheme, self.host, self.root_path, self.path) @cached_property def root_url(self) -> str: return get_current_url(self.scheme, self.host, self.root_path) @cached_property def host_url(self) -> str: return get_current_url(self.scheme, self.host) @cached_property def host(self) -> str: return get_host( self.scheme, self.headers.get("host"), self.server, self.trusted_hosts ) 
@cached_property def cookies(self) -> "ImmutableMultiDict[str, str]": wsgi_combined_cookie = ";".join(self.headers.getlist("Cookie")) return parse_cookie( wsgi_combined_cookie, self.charset, self.encoding_errors, cls=self.dict_storage_class, ) content_type = header_property[str]( "Content-Type", doc="""The Content-Type entity-header field indicates the media type of the entity-body sent to the recipient or, in the case of the HEAD method, the media type that would have been sent had the request been a GET.""", read_only=True, ) @cached_property def content_length(self) -> t.Optional[int]: if self.headers.get("Transfer-Encoding", "") == "chunked": return None content_length = self.headers.get("Content-Length") if content_length is not None: try: return max(0, int(content_length)) except (ValueError, TypeError): pass return None content_encoding = header_property[str]( "Content-Encoding", doc="""The Content-Encoding entity-header field is used as a modifier to the media-type. When present, its value indicates what additional content codings have been applied to the entity-body, and thus what decoding mechanisms must be applied in order to obtain the media-type referenced by the Content-Type header field. .. versionadded:: 0.9""", read_only=True, ) content_md5 = header_property[str]( "Content-MD5", doc="""The Content-MD5 entity-header field, as defined in RFC 1864, is an MD5 digest of the entity-body for the purpose of providing an end-to-end message integrity check (MIC) of the entity-body. (Note: a MIC is good for detecting accidental modification of the entity-body in transit, but is not proof against malicious attacks.) .. versionadded:: 0.9""", read_only=True, ) referrer = header_property[str]( "Referer", doc="""The Referer[sic] request-header field allows the client to specify, for the server's benefit, the address (URI) of the resource from which the Request-URI was obtained (the "referrer", although the header field is misspelled).""", read_only=True, ) date = header_property( "Date", None, parse_date, doc="""The Date general-header field represents the date and time at which the message was originated, having the same semantics as orig-date in RFC 822. .. versionchanged:: 2.0 The datetime object is timezone-aware. 
""", read_only=True, ) max_forwards = header_property( "Max-Forwards", None, int, doc="""The Max-Forwards request-header field provides a mechanism with the TRACE and OPTIONS methods to limit the number of proxies or gateways that can forward the request to the next inbound server.""", read_only=True, ) def _parse_content_type(self) -> None: if not hasattr(self, "_parsed_content_type"): self._parsed_content_type = parse_options_header( self.headers.get("Content-Type", "") ) @property def mimetype(self) -> str: self._parse_content_type() return self._parsed_content_type[0].lower() @property def mimetype_params(self) -> t.Dict[str, str]: self._parse_content_type() return self._parsed_content_type[1] @cached_property def pragma(self) -> HeaderSet: return parse_set_header(self.headers.get("Pragma", "")) @cached_property def accept_mimetypes(self) -> MIMEAccept: return parse_accept_header(self.headers.get("Accept"), MIMEAccept) @cached_property def accept_charsets(self) -> CharsetAccept: return parse_accept_header(self.headers.get("Accept-Charset"), CharsetAccept) @cached_property def accept_encodings(self) -> Accept: return parse_accept_header(self.headers.get("Accept-Encoding")) @cached_property def accept_languages(self) -> LanguageAccept: return parse_accept_header(self.headers.get("Accept-Language"), LanguageAccept) @cached_property
BSD 3-Clause New or Revised License
deepmind/jax_verify
jax_verify/src/nonconvex/nonconvex.py
_nonconvex_dotproductattention
python
def _nonconvex_dotproductattention( bound_cls: Type[NnCvx], index: Index, inp: NnCvx, weights_query: Tuple[Tensor, Tensor], weights_key: Tuple[Tensor, Tensor], ) -> NnCvx: inp_lb = inp.lower inp_ub = inp.upper wq, bq = weights_query wk, bk = weights_key assert wq.shape[-1] == inp_lb.shape[-1] assert wk.shape[-1] == inp_lb.shape[-1] assert len(wq.shape) == 3 assert len(wk.shape) == 3 inp_dim = wq.shape[2] def word_pair_attention(attention_matrix, lb_a, ub_a, x_a, lb_b, ub_b, x_b): def accumulate_attention_by_inputfeat(carry, cross_inp_indx): feat_x, feat_y = jnp.divmod(cross_inp_indx, inp_dim) att_mat_elt = attention_matrix[feat_x, feat_y] pos_att_mat_elt = jnp.maximum(att_mat_elt, 0.) neg_att_mat_elt = jnp.minimum(att_mat_elt, 0.) x_a_feat = x_a[feat_x] lb_a_feat = lb_a[feat_x] ub_a_feat = ub_a[feat_x] x_b_feat = x_b[feat_y] lb_b_feat = lb_b[feat_y] ub_b_feat = ub_b[feat_y] (mc_l, mc_u) = mccormick.mccormick_outer_product( x_a_feat, x_b_feat, lb_a_feat, ub_a_feat, lb_b_feat, ub_b_feat) mc_l = mc_l.squeeze() mc_u = mc_u.squeeze() (carry_l, carry_u) = carry new_carry = (carry_l + pos_att_mat_elt * mc_l + neg_att_mat_elt * mc_u, carry_u + neg_att_mat_elt * mc_l + pos_att_mat_elt * mc_u) return new_carry, None inp_cross_indxs = jnp.arange(inp_dim**2) ini_attention = (jnp.array(0.), jnp.array(0.)) attention, _ = jax.lax.scan(accumulate_attention_by_inputfeat, ini_attention, inp_cross_indxs) return attention all_words_attention = jax.vmap(jax.vmap(word_pair_attention, in_axes=(None,) + (0,)*3 + (None,)*3, out_axes=0), in_axes=(None,) + (None,)*3 + (0,)*3, out_axes=0) def per_head_attention(wq, bq, wk, bk, x_lb, x_ub, x): attention_matrix = jnp.dot(wq.T, wk) wq_bk = jnp.dot(wq.T, bk) wk_bq = jnp.dot(wk.T, bq) bk_bq = jnp.dot(bq.T, bk) quad_term_lower, quad_term_upper = all_words_attention(attention_matrix, x_lb, x_ub, x, x_lb, x_ub, x) lin_term = (jnp.expand_dims(jnp.dot(x, wq_bk), 0) + jnp.expand_dims(jnp.dot(x, wk_bq), 1)) constant_term = bk_bq return (quad_term_lower + lin_term + constant_term, quad_term_upper + lin_term + constant_term) all_words_all_heads_attention = jax.vmap(per_head_attention, in_axes=(0,)*4 + (None, None, None), out_axes=0) batched_awah_attention = jax.vmap(all_words_all_heads_attention, in_axes=(None,)*4 + (0,)*3, out_axes=0) batched_per_target_awah_attention = jax.vmap(batched_awah_attention, in_axes=(None,)*6 + (0,), out_axes=0) folded_arguments = functools.partial(batched_per_target_awah_attention, wq, bq, wk, bk, inp_lb, inp_ub) lb_fun = lambda x: jnp.transpose(folded_arguments(x)[0], (0, 1, 2, 4, 3)) ub_fun = lambda x: jnp.transpose(folded_arguments(x)[1], (0, 1, 2, 4, 3)) return _activation_convex_relaxation( bound_cls, index, [inp], 'DotProductSA', lb_fun, ub_fun, None)
Builds the NonConvexBound object corresponding to the dot product. The weights have shape [num_heads, emb_dim, inp_dim] and the biases have shape [num_heads, emb_dim]. Bounds on the inputs have shape [batch_size, num_words, inp_dim]. When evaluating the bounds there is an additional target dimension, so inputs evaluate to [nb_targets, batch_size, num_words, inp_dim] and the output has shape [nb_targets, batch_size, num_heads, num_words, num_words]. Computes bounds on (inp @ wq + bq)' @ (inp @ wk + bk). Args: bound_cls: Bound class to use. index: Index of the input. inp: Input of the non-linearity. weights_query: Query weights (wq, bq). weights_key: Key weights (wk, bk). Returns: out_bounds: NonConvexBound for the output of the non-linearity.
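A shape-only illustration of the quantity being bounded, computed exactly for random data with plain numpy; all sizes are made up, and the real function operates on NonConvexBound objects rather than arrays:

import numpy as np

num_heads, emb_dim, inp_dim = 2, 3, 4
batch_size, num_words = 5, 6
wq = np.random.randn(num_heads, emb_dim, inp_dim)
bq = np.random.randn(num_heads, emb_dim)
wk = np.random.randn(num_heads, emb_dim, inp_dim)
bk = np.random.randn(num_heads, emb_dim)
x = np.random.randn(batch_size, num_words, inp_dim)

# queries/keys per head: [batch, heads, words, emb_dim]
q = np.einsum('bwi,hei->bhwe', x, wq) + bq[None, :, None, :]
k = np.einsum('bwi,hei->bhwe', x, wk) + bk[None, :, None, :]
# attention scores (wq x_a + bq) . (wk x_b + bk): [batch, heads, words, words]
scores = np.einsum('bhqe,bhke->bhqk', q, k)
print(scores.shape)  # (5, 2, 6, 6); the bound adds a leading targets axis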
https://github.com/deepmind/jax_verify/blob/96e4abb160f5022af4bf1aa8bb854822eb45a59b/jax_verify/src/nonconvex/nonconvex.py#L713-L892
import abc import functools from typing import Callable, Dict, Generic, List, Optional, Tuple, Type, TypeVar, Union from absl import logging import jax from jax import lax import jax.numpy as jnp from jax_verify.src import activation_relaxation from jax_verify.src import bound_propagation from jax_verify.src import graph_traversal from jax_verify.src import ibp from jax_verify.src import intersection from jax_verify.src import mccormick from jax_verify.src import synthetic_primitives from jax_verify.src import utils NnCvx = TypeVar('NnCvx', bound='NonConvexBound') Tensor = jnp.ndarray Index = bound_propagation.Index TransformContext = bound_propagation.TransformContext ParamSet = Dict[Index, Tensor] PrimitiveInput = Union[Tensor, 'NonConvexBound'] Nest = bound_propagation.Nest def _sum_fn(fn, *args, **kwargs): out = fn(*args, **kwargs) summand = out[0] if isinstance(out, tuple) else out return summand.sum(), out def _sum_over_acts(var: Tensor) -> Tensor: return var.sum(axis=tuple(range(1, var.ndim))) class NonConvexBound(bound_propagation.Bound, metaclass=abc.ABCMeta): def __init__(self, index: Index, shape: Tuple[int, ...], previous_bounds: Dict[Index, 'NonConvexBound'], eval_fn: Callable[[ParamSet, ParamSet, ParamSet], Tensor], variables: Dict[Index, Tuple[int, ...]], concretized_bounds: Optional[bound_propagation.Bound] = None ): self.index = index self._shape = shape self.previous_bounds = previous_bounds self.previous_bounds[index] = self self._eval_fn = eval_fn self.variables = variables self._concretized_bounds = concretized_bounds def primal(var_set: ParamSet, objectives: ParamSet, dummy_inps: Optional[ParamSet] = None ) -> Tuple[Tensor, ParamSet]: acts = {} self.evaluate(var_set, dummy_inps, acts) primals = self._objective_fn(acts, objectives) return primals, acts self.primal_fn = primal self.primal_sumfn = functools.partial(_sum_fn, primal) def _objective_fn(self, acts, objectives): primal_objs = sum(_sum_over_acts(acts[index] * act_objectives) for index, act_objectives in objectives.items()) return primal_objs @property def shape(self): return self._shape @property def lower(self) -> Tensor: if self._concretized_bounds is None: logging.warning('.lower called on a non-concretized bound.' 'Returning spurious bounds.') return -float('inf') * jnp.ones(self.shape) return self._concretized_bounds.lower @property def upper(self) -> Tensor: if self._concretized_bounds is None: logging.warning('.upper called on a non-concretized bound.' 
'Returning spurious bounds.') return float('inf') * jnp.ones(self.shape) return self._concretized_bounds.upper def evaluate(self, var_set: ParamSet, dummy_inps: Optional[ParamSet] = None, acts: Optional[ParamSet] = None) -> Tensor: if acts is None: acts = {} if dummy_inps is None: dummy_inps = {} if self.index in acts: return acts[self.index] else: val = self._eval_fn(var_set, dummy_inps, acts) if self.index in dummy_inps: val = val + dummy_inps[self.index] acts[self.index] = val return val @abc.abstractmethod def dual(self, var_set: ParamSet, objectives: ParamSet) -> Tensor: @classmethod @abc.abstractmethod def get_initial_bound_constructor( cls: Type[NnCvx], index: Index, lb: Tensor, ub: Tensor) -> Callable[..., NnCvx]: raise NotImplementedError('Initial bound constructor not implemented') @classmethod @abc.abstractmethod def get_linear_activation_constructor( cls: Type[NnCvx], index: Index, vlin_fun: Callable[..., Tensor], in_vals: Tuple[Tensor, ...]) -> Callable[..., NnCvx]: raise NotImplementedError('Linear activation constructor not implemented') @classmethod @abc.abstractmethod def get_nonlinearity_activation_constructor( cls: Type[NnCvx], index: Index, inp: NnCvx, act_type: str, lb_fun: Callable[[Tensor], Tensor], ub_fun: Callable[[Tensor], Tensor]) -> Callable[..., NnCvx]: raise NotImplementedError('Nonlinearity activation constructor not' 'implemented') @abc.abstractmethod def requires_concretizing(self, primitive) -> bool: raise NotImplementedError('Specification of when concretization is required' 'is not implemented.') def _compute_dualvars_nonconvexgrad(self, var_set: ParamSet, objectives: ParamSet ) -> Tuple[ParamSet, ParamSet]: grad_fun = jax.grad(self.primal_sumfn, argnums=2, has_aux=True) dummy_acts = {key: 0*val for key, val in var_set.items()} dual_vars, (_, acts) = grad_fun(var_set, objectives, dummy_acts) return dual_vars, acts def _compute_dualvars_convexgrad(self, var_set: ParamSet, objectives: ParamSet ) -> Tuple[ParamSet, ParamSet]: acts = {} self.evaluate(var_set, {}, acts) primal_gradfun_wrt_act = utils.batch_value_and_grad( self._objective_fn, (0,)) _, dual_vars = primal_gradfun_wrt_act(acts, objectives) return dual_vars, acts def concretize(self, concretizer: 'Concretizer'): self._concretized_bounds = concretizer.get_bounds(self) @classmethod def initial_nonconvex_bound( cls: Type[NnCvx], index: Index, lower_bound: Tensor, upper_bound: Tensor) -> NnCvx: shape = lower_bound.shape variables = {index: lower_bound.shape} lb = jnp.expand_dims(lower_bound, axis=0) ub = jnp.expand_dims(upper_bound, axis=0) previous_bounds = {} def eval_fn(var_set, *_): val = lb + (ub - lb) * var_set[index] return val bound_ctor = cls.get_initial_bound_constructor(index, lb, ub) return bound_ctor(index, shape, previous_bounds, eval_fn, variables, ibp.IntervalBound(lower_bound, upper_bound)) class ConstrainedNonConvexBound(NonConvexBound, metaclass=abc.ABCMeta): def __init__(self, index: Index, shape: Tuple[int, ...], previous_bounds: Dict[Index, 'ConstrainedNonConvexBound'], eval_fn: Callable[[ParamSet, ParamSet, ParamSet], Tensor], variables: Dict[Index, Tuple[int, ...]], concretized_bounds: Optional[bound_propagation.Bound] = None): super().__init__(index, shape, previous_bounds, eval_fn, variables, concretized_bounds) self._imposed_bounds = None def is_constrained(self) -> bool: return self._imposed_bounds is not None def set_imposed_bounds(self, imposed_bounds: bound_propagation.Bound): self._imposed_bounds = imposed_bounds if self._concretized_bounds is not None: 
self._concretized_bounds = intersection.IntersectionBound( self._concretized_bounds, self._imposed_bounds) def evaluate(self, var_set: ParamSet, dummy_inps: Optional[ParamSet] = None, acts: Optional[ParamSet] = None) -> Tensor: unconstrained_eval = super().evaluate(var_set, dummy_inps, acts) if not self.is_constrained(): return unconstrained_eval brd_lower = jnp.expand_dims(self.lower, 0) brd_upper = jnp.expand_dims(self.upper, 0) if dummy_inps and (self.index in dummy_inps): dummy_inp = dummy_inps[self.index] constrained_eval = jnp.clip(unconstrained_eval, brd_lower + dummy_inp, brd_upper + dummy_inp) else: constrained_eval = jnp.clip(unconstrained_eval, brd_lower, brd_upper) if acts: acts[self.index] = constrained_eval return constrained_eval @property def lower(self) -> Tensor: if self._imposed_bounds is not None and self._concretized_bounds is None: return self._imposed_bounds.lower return super().lower @property def upper(self) -> Tensor: if self._imposed_bounds is not None and self._concretized_bounds is None: return self._imposed_bounds.upper return super().upper def concretize(self, concretizer: 'Concretizer'): super().concretize(concretizer) if self._imposed_bounds is not None: self._concretized_bound = intersection.IntersectionBound( self._concretized_bounds, self._imposed_bounds) class Concretizer(abc.ABC): @abc.abstractmethod def accept_input( self, context: TransformContext, lower_bound: Tensor, upper_bound: Tensor): @abc.abstractmethod def accept_primitive( self, context: TransformContext, primitive: bound_propagation.Primitive, *in_vals: PrimitiveInput, **params): @abc.abstractmethod def get_bounds( self, nonconvex_bound: NonConvexBound) -> bound_propagation.Bound: class BaseBoundConcretizer(Concretizer): def __init__(self, bound_transform: bound_propagation.BoundTransform): self._bound_transform = bound_transform self._base_bounds = {} def accept_input( self, context: TransformContext, lower_bound: Tensor, upper_bound: Tensor): self._base_bounds[context.index] = self._bound_transform.input_transform( context, lower_bound, upper_bound) def accept_primitive( self, context: TransformContext, primitive: bound_propagation.Primitive, *in_vals: PrimitiveInput, **params): base_in_vals = [ self._base_bounds[inp.index] if isinstance(inp, NonConvexBound) else inp for inp in in_vals] self._base_bounds[context.index] = ( self._bound_transform.equation_transform( context, primitive, *base_in_vals, **params)) def get_bounds(self, nonconvex_bound: NonConvexBound ) -> bound_propagation.Bound: return self._base_bounds[nonconvex_bound.index] def eval_if_nonconvexbound(inp: Union[NonConvexBound, Tensor], var_set: ParamSet, dummy_inps: Optional[ParamSet], activations: Optional[ParamSet]) -> Tensor: if isinstance(inp, NonConvexBound): return inp.evaluate(var_set, dummy_inps, activations) else: return inp def _nonconvex_linear_op(primitive: bound_propagation.Primitive, bound_cls: Type[NnCvx], index: Index, *in_vals: PrimitiveInput, **kwargs) -> NnCvx: in_axes_to_vmap = [0 if isinstance(inp, NonConvexBound) else None for inp in in_vals] kwarged_lin_fun = lambda args: primitive.bind(*args, **kwargs) vlin_fun = jax.vmap(kwarged_lin_fun, [in_axes_to_vmap], 0) bound_parents = [inp for inp in in_vals if isinstance(inp, NonConvexBound)] variables = {} previous_bounds = {} for parent in bound_parents: variables.update(parent.variables) previous_bounds.update(parent.previous_bounds) placeholder_invals = [] for inp in in_vals: if isinstance(inp, NonConvexBound): 
placeholder_invals.append(jax.core.ShapedArray(inp.shape, jnp.float32)) else: placeholder_invals.append(inp) output_shape = jax.eval_shape(kwarged_lin_fun, placeholder_invals).shape def eval_fn(var_set: ParamSet, dummy_inps: Optional[ParamSet], activations: Optional[ParamSet]) -> Tensor: inps = [eval_if_nonconvexbound(inp, var_set, dummy_inps, activations) for inp in in_vals] out = vlin_fun(inps) return out variables[index] = output_shape new_bound_ctor = bound_cls.get_linear_activation_constructor( index, vlin_fun, in_vals) return new_bound_ctor(index, output_shape, previous_bounds, eval_fn, variables) def _nonconvex_div(bound_cls: Type[NnCvx], index: Index, lhs: PrimitiveInput, rhs: PrimitiveInput) -> NnCvx: if isinstance(rhs, bound_propagation.Bound): raise ValueError('Bound propagation through the denominator unsupported.') return _nonconvex_linear_op(lax.mul_p, bound_cls, index, lhs, 1. / rhs) def _nonconvex_softplus(bound_cls: Type[NnCvx], index: Index, inp: NnCvx) -> NnCvx: inp_lb = inp.lower inp_ub = inp.upper out_lb = jax.nn.softplus(inp_lb) out_ub = jax.nn.softplus(inp_ub) slope = (out_ub - out_lb) / jnp.maximum(inp_ub - inp_lb, 1e-12) offset = out_lb - slope * inp_lb broad_slope = jnp.expand_dims(slope, 0) broad_offset = jnp.expand_dims(offset, 0) lb_fun = jax.nn.softplus ub_fun = lambda x: broad_slope * x + broad_offset return _activation_convex_relaxation( bound_cls, index, [inp], 'Softplus', lb_fun, ub_fun, ibp.IntervalBound(jax.nn.softplus(inp_lb), jax.nn.softplus(inp_ub))) def _nonconvex_relu(bound_cls: Type[NnCvx], index: Index, inp: NnCvx) -> NnCvx: inp_lb = inp.lower inp_ub = inp.upper relu_on = (inp_lb >= 0.) relu_amb = jnp.logical_and(inp_lb < 0., inp_ub >= 0.) slope = relu_on.astype(jnp.float32) slope += jnp.where(relu_amb, inp_ub / jnp.maximum(inp_ub - inp_lb, 1e-12), jnp.zeros_like(inp_lb)) offset = jnp.where(relu_amb, -slope * inp_lb, jnp.zeros_like(inp_lb)) broad_slope = jnp.expand_dims(slope, 0) broad_offset = jnp.expand_dims(offset, 0) lb_fun = lambda x: lax.max(x, 0.) ub_fun = lambda x: broad_slope * x + broad_offset return _activation_convex_relaxation( bound_cls, index, [inp], 'ReLU', lb_fun, ub_fun, ibp.IntervalBound(lax.max(inp_lb, 0.), lax.max(inp_ub, 0.))) def _activation_convex_relaxation( bound_cls: Type[NnCvx], index: Index, inputs: List[NnCvx], act_type: str, lb_fun: Callable[..., Tensor], ub_fun: Callable[..., Tensor], precomputed_bound: Optional[bound_propagation.Bound]) -> NnCvx: bound_parents = [inp for inp in inputs if isinstance(inp, NonConvexBound)] variables = {} previous_bounds = {} for parent in bound_parents: variables.update(parent.variables) previous_bounds.update(parent.previous_bounds) inputs_lb = [jnp.expand_dims(inp.lower, 0) for inp in inputs] output_shape_with_target = jax.eval_shape(lb_fun, *inputs_lb).shape output_shape = output_shape_with_target[1:] variables[index] = output_shape def eval_fn(var_set, dummy_inps, activations): inp_eval = [inp.evaluate(var_set, dummy_inps, activations) for inp in inputs] lb_val = lb_fun(*inp_eval) ub_val = ub_fun(*inp_eval) theta = var_set[index] out_val = lb_val + theta * (ub_val - lb_val) return out_val shape = output_shape new_bound_ctor = bound_cls.get_nonlinearity_activation_constructor( index, act_type, lb_fun, ub_fun, *inputs) return new_bound_ctor(index, shape, previous_bounds, eval_fn, variables, precomputed_bound)
Apache License 2.0
kuri65536/python-for-android
python-modules/twisted/twisted/plugins/cred_memory.py
InMemoryCheckerFactory.generateChecker
python
def generateChecker(self, argstring):
    checker = InMemoryUsernamePasswordDatabaseDontUse()
    if argstring:
        pieces = argstring.split(':')
        if len(pieces) % 2:
            from twisted.cred.strcred import InvalidAuthArgumentString
            raise InvalidAuthArgumentString(
                "argstring must be in format U:P:...")
        for i in range(0, len(pieces), 2):
            username, password = pieces[i], pieces[i+1]
            checker.addUser(username, password)
    return checker
This checker factory expects a colon-separated list of username:password pairs, with the pairs themselves also joined by colons. For example, the string 'alice:f:bob:g' generates two users: 'alice' with password 'f' and 'bob' with password 'g'.
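A usage sketch reusing the example string from the docstring. The module path follows the file location above, and the printed dictionary assumes InMemoryUsernamePasswordDatabaseDontUse stores its users in a plain 'users' dict, as it does in this codebase:

from twisted.plugins.cred_memory import InMemoryCheckerFactory

factory = InMemoryCheckerFactory()
checker = factory.generateChecker("alice:f:bob:g")
# Two users were registered on the in-memory checker.
print(checker.users)  # expected: {'alice': 'f', 'bob': 'g'}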
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/plugins/cred_memory.py#L47-L64
from zope.interface import implements from twisted import plugin from twisted.cred.strcred import ICheckerFactory from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse from twisted.cred.credentials import IUsernamePassword, IUsernameHashedPassword inMemoryCheckerFactoryHelp = """ A checker that uses an in-memory user database. This is only of use in one-off test programs or examples which don't want to focus too much on how credentials are verified. You really don't want to use this for anything else. It is a toy. """ class InMemoryCheckerFactory(object): implements(ICheckerFactory, plugin.IPlugin) authType = 'memory' authHelp = inMemoryCheckerFactoryHelp argStringFormat = 'A colon-separated list (name:password:...)' credentialInterfaces = (IUsernamePassword, IUsernameHashedPassword)
Apache License 2.0
parashardhapola/scarf
scarf/readers.py
LoomReader.feature_ids
python
def feature_ids(self) -> List[str]:
    if self.featureAttrsKey not in self.h5:
        pass
    elif self.featureIdsKey is None:
        pass
    elif self.featureIdsKey not in self.h5[self.featureAttrsKey]:
        logger.warning(
            f"Feature names key ({self.featureIdsKey}) is missing in attributes"
        )
    else:
        return self.h5[self.featureAttrsKey][self.featureIdsKey][:]
    return [f"feature_{x}" for x in range(self.nFeatures)]
Returns a list of feature IDs, generating placeholder names of the form 'feature_<n>' when the IDs are not stored in the file.
https://github.com/parashardhapola/scarf/blob/95d27b05a07973214cf71a2e5af3c3a089791093/scarf/readers.py#L889-L903
from abc import ABC, abstractmethod from typing import Generator, Dict, List, Optional, Tuple import numpy as np import pandas as pd import os import sparse from typing import IO import h5py from .utils import logger, tqdmbar __all__ = [ "CrH5Reader", "CrDirReader", "CrReader", "H5adReader", "NaboH5Reader", "LoomReader", ] def get_file_handle(fn: str) -> IO: import gzip try: if fn.rsplit(".", 1)[-1] == "gz": return gzip.open(fn, mode="rt") else: return open(fn, "r") except (OSError, IOError, FileNotFoundError): raise FileNotFoundError("ERROR: FILE NOT FOUND: %s" % fn) def read_file(fn: str): fh = get_file_handle(fn) for line in fh: yield line.rstrip() class CrReader(ABC): def __init__(self, grp_names, file_type: str = None): self.autoNames = { "Gene Expression": "RNA", "Peaks": "ATAC", "Antibody Capture": "ADT", } self.grpNames: Dict = grp_names self.nFeatures: int = len(self.feature_names()) self.nCells: int = len(self.cell_names()) self.assayFeats = self._make_feat_table() self._auto_rename_assay_names() @abstractmethod def _handle_version(self): pass @abstractmethod def _read_dataset(self, key: Optional[str] = None) -> List: pass @abstractmethod def consume(self, batch_size: int, lines_in_mem: int): pass def _subset_by_assay(self, v, assay) -> List: if assay is None: return v elif assay not in self.assayFeats: raise ValueError(f"ERROR: Assay ID {assay} is not valid") if len(self.assayFeats[assay].shape) == 2: ret_val = [] for i in self.assayFeats[assay].values[1:3].T: ret_val.extend(list(v[i[0] : i[1]])) return ret_val elif len(self.assayFeats[assay].shape) == 1: idx = self.assayFeats[assay] return v[idx.start : idx.end] else: raise ValueError( "ERROR: assay feats is 3D. Something went really wrong. Create a github issue" ) def _make_feat_table(self) -> pd.DataFrame: s = self.feature_types() span: List[Tuple] = [] last = s[0] last_n: int = 0 for n, i in enumerate(s[1:], 1): if i != last: span.append((last, last_n, n)) last_n = n elif n == len(s) - 1: span.append((last, last_n, n + 1)) last = i df = pd.DataFrame(span, columns=["type", "start", "end"]) df.index = ["ASSAY%s" % str(x + 1) for x in df.index] df["nFeatures"] = df.end - df.start return df.T def _auto_rename_assay_names(self): new_names = [] for k, v in self.assayFeats.T["type"].to_dict().items(): if v in self.autoNames: new_names.append(self.autoNames[v]) else: new_names.append(k) self.assayFeats.columns = new_names def rename_assays(self, name_map: Dict[str, str]) -> None: self.assayFeats.rename(columns=name_map, inplace=True) def feature_ids(self, assay: str = None) -> List[str]: return self._subset_by_assay(self._read_dataset("feature_ids"), assay) def feature_names(self, assay: str = None) -> List[str]: vals = self._read_dataset("feature_names") if vals is None: logger.warning("Feature names extraction failed using feature IDs") vals = self._read_dataset("feature_ids") return self._subset_by_assay(vals, assay) def feature_types(self) -> List[str]: if self.grpNames["feature_types"] is not None: ret_val = self._read_dataset("feature_types") if ret_val is not None: return ret_val default_name = list(self.autoNames.keys())[0] return [default_name for _ in range(self.nFeatures)] def cell_names(self) -> List[str]: return self._read_dataset("cell_names") class CrH5Reader(CrReader): def __init__(self, h5_fn, file_type: str = None): self.h5obj = h5py.File(h5_fn, mode="r") self.grp = None super().__init__(self._handle_version(), file_type) def _handle_version(self): root_key = list(self.h5obj.keys())[0] self.grp = 
self.h5obj[root_key] if root_key == "matrix": grps = { "feature_ids": "features/id", "feature_names": "features/name", "feature_types": "features/feature_type", "cell_names": "barcodes", } else: grps = { "feature_ids": "genes", "feature_names": "gene_names", "feature_types": None, "cell_names": "barcodes", } return grps def _read_dataset(self, key: Optional[str] = None): return [x.decode("UTF-8") for x in self.grp[self.grpNames[key]][:]] def consume(self, batch_size: int, lines_in_mem: int): s = 0 for ind_n in range(0, self.nCells, batch_size): i = self.grp["indptr"][ind_n : ind_n + batch_size] e = i[-1] if s != 0: idx = np.array([s] + list(i)) idx = idx - idx[0] else: idx = np.array(i) n = idx.shape[0] - 1 nidx = np.repeat(range(n), np.diff(idx).astype("int32")) yield sparse.COO( [nidx, self.grp["indices"][s:e]], self.grp["data"][s:e], shape=(n, self.nFeatures), ) s = e def close(self) -> None: self.h5obj.close() class CrDirReader(CrReader): def __init__( self, loc, file_type: str = None, mtx_separator: str = " ", index_offset: int = -1, ): self.loc: str = loc.rstrip("/") + "/" self.matFn = None self.sep = mtx_separator self.indexOffset = index_offset super().__init__(self._handle_version()) def _handle_version(self): show_error = False if os.path.isfile(self.loc + "matrix.mtx.gz"): self.matFn = self.loc + "matrix.mtx.gz" elif os.path.isfile(self.loc + "matrix.mtx"): self.matFn = self.loc + "matrix.mtx" else: show_error = True if os.path.isfile(self.loc + "features.tsv.gz"): feat_fn = "features.tsv.gz" elif os.path.isfile(self.loc + "features.tsv"): feat_fn = "features.tsv" elif os.path.isfile(self.loc + "genes.tsv.gz"): feat_fn = "genes.tsv.gz" elif os.path.isfile(self.loc + "genes.tsv"): feat_fn = "genes.tsv" elif os.path.isfile(self.loc + "peaks.bed"): feat_fn = "peaks.bed" elif os.path.isfile(self.loc + "peaks.bed.gz"): feat_fn = "peaks.bed.gz" else: feat_fn = None show_error = True if os.path.isfile(self.loc + "barcodes.tsv.gz"): cell_fn = "barcodes.tsv.gz" elif os.path.isfile(self.loc + "barcodes.tsv"): cell_fn = "barcodes.tsv" else: cell_fn = None show_error = True if show_error: raise IOError( "ERROR: Couldn't find either of these expected combinations of files:\n" "\t- matrix.mtx, barcodes.tsv and genes.tsv\n" "\t- matrix.mtx.gz, barcodes.tsv.gz and features.tsv.gz\n" "Please make sure that you have not compressed or uncompressed the Cellranger output files " "manually" ) return { "feature_ids": (feat_fn, 0), "feature_names": (feat_fn, 1), "feature_types": (feat_fn, 2), "cell_names": (cell_fn, 0), } def _read_dataset(self, key: Optional[str] = None): try: vals = [ x.split("\t")[self.grpNames[key][1]] for x in read_file(self.loc + self.grpNames[key][0]) ] except IndexError: logger.warning( f"{key} extraction failed from {self.grpNames[key][0]} " f"in column {self.grpNames[key][1]}", flush=True, ) vals = None return vals def to_sparse(self, a: np.ndarray) -> sparse.COO: idx = np.where(np.diff(a[:, 1]) > 0)[0] + 1 return sparse.COO( [(a[:, 1] - a[0, 1]).astype(int), (a[:, 0] + self.indexOffset).astype(int)], a[:, 2], shape=(len(idx) + 1, self.nFeatures), ) def consume( self, batch_size: int, lines_in_mem: int = int(1e5) ) -> Generator[List[np.ndarray], None, None]: stream = pd.read_csv( self.matFn, skiprows=3, sep=self.sep, header=None, chunksize=lines_in_mem ) start = 1 dfs = [] for df in stream: if df.iloc[-1, 1] - start >= batch_size: idx = df[1] < batch_size + start dfs.append(df[idx]) yield self.to_sparse(np.vstack(dfs)) dfs = [df[~idx]] start += batch_size else: dfs.append(df) 
yield self.to_sparse(np.vstack(dfs)) class H5adReader: def __init__( self, h5ad_fn: str, cell_attrs_key: str = "obs", cell_ids_key: str = "_index", feature_attrs_key: str = "var", feature_ids_key: str = "_index", feature_name_key: str = "gene_short_name", matrix_key: str = "X", category_names_key: str = "__categories", dtype: str = None, ): self.h5 = h5py.File(h5ad_fn, mode="r") self.matrixKey = matrix_key self.cellAttrsKey, self.featureAttrsKey = cell_attrs_key, feature_attrs_key self.groupCodes = { self.cellAttrsKey: self._validate_group(self.cellAttrsKey), self.featureAttrsKey: self._validate_group(self.featureAttrsKey), self.matrixKey: self._validate_group(self.matrixKey), } self.nCells, self.nFeatures = self._get_n(self.cellAttrsKey), self._get_n( self.featureAttrsKey ) self.cellIdsKey = self._fix_name_key(self.cellAttrsKey, cell_ids_key) self.featIdsKey = self._fix_name_key(self.featureAttrsKey, feature_ids_key) self.featNamesKey = feature_name_key self.catNamesKey = category_names_key self.matrixDtype = self._get_matrix_dtype() if dtype is None else dtype def _validate_group(self, group: str) -> int: if group not in self.h5: logger.warning(f"`{group}` group not found in the H5ad file") ret_val = 0 elif type(self.h5[group]) == h5py.Dataset: ret_val = 1 elif type(self.h5[group]) == h5py.Group: ret_val = 2 else: logger.warning( f"`{group}` slot in H5ad file is not of Dataset or Group type. " f"Due to this, no information in `{group}` can be used" ) ret_val = 0 if ret_val == 2: if len(self.h5[group].keys()) == 0: logger.warning(f"`{group}` slot in H5ad file is empty.") ret_val = 0 elif ( len( set( [ self.h5[group][x].shape[0] for x in self.h5[group].keys() if type(self.h5[group][x]) == h5py.Dataset ] ) ) > 1 ): if sorted(self.h5[group].keys()) != ["data", "indices", "indptr"]: logger.info( f"`{group}` slot in H5ad file has unequal sized child groups" ) return ret_val def _get_matrix_dtype(self): if self.groupCodes[self.matrixKey] == 1: return self.h5[self.matrixKey].dtype elif self.groupCodes[self.matrixKey] == 2: return self.h5[self.matrixKey]["data"].dtype else: raise ValueError( f"ERROR: {self.matrixKey} is neither Dataset or Group type. Will not consume data" ) def _check_exists(self, group: str, key: str) -> bool: if group in self.groupCodes: group_code = self.groupCodes[group] else: group_code = self._validate_group(group) self.groupCodes[group] = group_code if group_code == 1: if key in list(self.h5[group].dtype.names): return True if group_code == 2: if key in self.h5[group].keys(): return True return False def _fix_name_key(self, group: str, key: str) -> str: if self._check_exists(group, key) is False: if key.startswith("_"): temp_key = key[1:] if self._check_exists(group, temp_key): return temp_key return key def _get_n(self, group: str) -> int: if self.groupCodes[group] == 0: if self._check_exists(self.matrixKey, "shape"): return self.h5[self.matrixKey]["shape"][0] else: raise KeyError( f"ERROR: `{group}` not found and `shape` key is missing in the {self.matrixKey} group. " f"Aborting read process." ) elif self.groupCodes[group] == 1: return self.h5[group].shape[0] else: for i in self.h5[group].keys(): if type(self.h5[group][i]) == h5py.Dataset: return self.h5[group][i].shape[0] raise KeyError( f"ERROR: `{group}` key doesn't contain any child node of Dataset type." f"Aborting because unexpected H5ad format." 
) def cell_ids(self) -> np.ndarray: if self._check_exists(self.cellAttrsKey, self.cellIdsKey): if self.groupCodes[self.cellAttrsKey] == 1: return self.h5[self.cellAttrsKey][self.cellIdsKey] else: return self.h5[self.cellAttrsKey][self.cellIdsKey][:] logger.warning(f"Could not find cells ids key: {self.cellIdsKey} in `obs`.") return np.array([f"cell_{x}" for x in range(self.nCells)]) def feat_ids(self) -> np.ndarray: if self._check_exists(self.featureAttrsKey, self.featIdsKey): if self.groupCodes[self.featureAttrsKey] == 1: return self.h5[self.featureAttrsKey][self.featIdsKey] else: return self.h5[self.featureAttrsKey][self.featIdsKey][:] logger.warning( f"Could not find feature ids key: {self.featIdsKey} in {self.featureAttrsKey}." ) return np.array([f"feature_{x}" for x in range(self.nFeatures)]) def feat_names(self) -> np.ndarray: if self._check_exists(self.featureAttrsKey, self.featNamesKey): if self.groupCodes[self.featureAttrsKey] == 1: values = self.h5[self.featureAttrsKey][self.featNamesKey] else: values = self.h5[self.featureAttrsKey][self.featNamesKey][:] return self._replace_category_values( values, self.featNamesKey, self.featureAttrsKey ).astype(object) logger.warning( f"Could not find feature names key: {self.featNamesKey} in self.featureAttrsKey." ) return self.feat_ids() def _replace_category_values(self, v: np.ndarray, key: str, group: str): if self.catNamesKey is not None: if self._check_exists(group, self.catNamesKey): cat_g = self.h5[group][self.catNamesKey] if type(cat_g) == h5py.Group: if key in cat_g: c = cat_g[key][:] try: return np.array([c[x] for x in v]) except (IndexError, TypeError): return v if "uns" in self.h5: if key + "_categories" in self.h5["uns"]: c = self.h5["uns"][key + "_categories"][:] try: return np.array([c[x] for x in v]) except (IndexError, TypeError): return v return v def _get_col_data( self, group: str, ignore_keys: List[str] ) -> Generator[Tuple[str, np.ndarray], None, None]: if self.groupCodes[group] == 1: for i in tqdmbar( self.h5[group].dtype.names, desc=f"Reading attributes from group {group}", ): if i in ignore_keys: continue yield i, self._replace_category_values(self.h5[group][i][:], i, group) if self.groupCodes[group] == 2: for i in tqdmbar( self.h5[group].keys(), desc=f"Reading attributes from group {group}" ): if i in ignore_keys: continue if type(self.h5[group][i]) == h5py.Dataset: yield i, self._replace_category_values( self.h5[group][i][:], i, group ) def get_cell_columns(self) -> Generator[Tuple[str, np.ndarray], None, None]: for i, j in self._get_col_data(self.cellAttrsKey, [self.cellIdsKey]): yield i, j def get_feat_columns(self) -> Generator[Tuple[str, np.ndarray], None, None]: for i, j in self._get_col_data( self.featureAttrsKey, [self.featIdsKey, self.featNamesKey] ): yield i, j def consume_dataset( self, batch_size: int = 1000 ) -> Generator[sparse.COO, None, None]: dset = self.h5[self.matrixKey] s = 0 for e in range(batch_size, dset.shape[0] + batch_size, batch_size): if e > dset.shape[0]: e = dset.shape[0] yield dset[s:e] s = e def consume_group(self, batch_size: int) -> Generator[sparse.COO, None, None]: grp = self.h5[self.matrixKey] s = 0 for ind_n in range(0, self.nCells, batch_size): i = grp["indptr"][ind_n : ind_n + batch_size] e = i[-1] if s != 0: idx = np.array([s] + list(i)) idx = idx - idx[0] else: idx = np.array(i) n = idx.shape[0] - 1 nidx = np.repeat(range(n), np.diff(idx).astype("int32")) yield sparse.COO( [nidx, grp["indices"][s:e]], grp["data"][s:e], shape=(n, self.nFeatures) ).todense() s = e def 
consume(self, batch_size: int = 1000): if self.groupCodes[self.matrixKey] == 1: return self.consume_dataset(batch_size) elif self.groupCodes[self.matrixKey] == 2: return self.consume_group(batch_size) class NaboH5Reader: def __init__(self, h5_fn: str): self.h5 = h5py.File(h5_fn, mode="r") self._check_integrity() self.nCells = self.h5["names"]["cells"].shape[0] self.nFeatures = self.h5["names"]["genes"].shape[0] def _check_integrity(self) -> bool: for i in ["cell_data", "gene_data", "names"]: if i not in self.h5: raise KeyError(f"ERROR: Expected group: {i} is missing in the H5 file") return True def cell_ids(self) -> List[str]: return [x.decode("UTF-8") for x in self.h5["names"]["cells"][:]] def feat_ids(self) -> np.ndarray: return np.array([f"feature_{x}" for x in range(self.nFeatures)]) def feat_names(self) -> List[str]: return [ x.decode("UTF-8").rsplit("_", 1)[0] for x in self.h5["names"]["genes"][:] ] def consume(self, batch_size: int = 100) -> Generator[np.ndarray, None, None]: batch = [] for i in self.h5["cell_data"]: a = np.zeros(self.nFeatures).astype(int) v = self.h5["cell_data"][i][:][::-1] a[v["idx"]] = v["val"] batch.append(a) if len(batch) >= batch_size: batch = np.array(batch) yield batch batch = [] if len(batch) > 0: yield np.array(batch) class LoomReader: def __init__( self, loom_fn: str, matrix_key: str = "matrix", cell_attrs_key="col_attrs", cell_names_key: str = "obs_names", feature_attrs_key: str = "row_attrs", feature_names_key: str = "var_names", feature_ids_key: str = None, dtype: str = None, ) -> None: self.h5 = h5py.File(loom_fn, mode="r") self.matrixKey = matrix_key self.cellAttrsKey, self.featureAttrsKey = cell_attrs_key, feature_attrs_key self.cellNamesKey, self.featureNamesKey = cell_names_key, feature_names_key self.featureIdsKey = feature_ids_key self.matrixDtype = self.h5[self.matrixKey].dtype if dtype is None else dtype self._check_integrity() self.nFeatures, self.nCells = self.h5[self.matrixKey].shape def _check_integrity(self) -> bool: if self.matrixKey not in self.h5: raise KeyError( f"ERROR: Matrix key (location): {self.matrixKey} is missing in the H5 file" ) if self.cellAttrsKey not in self.h5: logger.warning( f"Cell attributes are missing. Key {self.cellAttrsKey} was not found" ) if self.featureAttrsKey not in self.h5: logger.warning( f"Feature attributes are missing. 
Key {self.featureAttrsKey} was not found" ) return True def cell_names(self) -> List[str]: if self.cellAttrsKey not in self.h5: pass elif self.cellNamesKey not in self.h5[self.cellAttrsKey]: logger.warning( f"Cell names/ids key ({self.cellNamesKey}) is missing in attributes" ) else: return self.h5[self.cellAttrsKey][self.cellNamesKey][:] return [f"cell_{x}" for x in range(self.nCells)] def cell_ids(self) -> List[str]: return self.cell_names() def _stream_attrs( self, key, ignore ) -> Generator[Tuple[str, np.ndarray], None, None]: if key in self.h5: for i in tqdmbar(self.h5[key].keys(), desc=f"Reading {key} attributes"): if i in [ignore]: continue vals = self.h5[key][i][:] if vals.dtype.names is None: yield i, vals else: for j in vals.dtype.names: yield i + "_" + str(j), vals[j] def get_cell_attrs(self) -> Generator[Tuple[str, np.ndarray], None, None]: return self._stream_attrs(self.cellAttrsKey, [self.cellNamesKey]) def feature_names(self) -> List[str]: if self.featureAttrsKey not in self.h5: pass elif self.featureNamesKey not in self.h5[self.featureAttrsKey]: logger.warning( f"Feature names key ({self.featureNamesKey}) is missing in attributes" ) else: return self.h5[self.featureAttrsKey][self.featureNamesKey][:] return [f"feature_{x}" for x in range(self.nFeatures)]
BSD 3-Clause New or Revised License
gdikov/adversarial-variational-bayes
third_party/ite/cost/meta_a.py
MASpearmanUT.estimation
python
def estimation(self, y, ds=None):
    if ds is None:
        ds = ones(y.shape[1], dtype='int')
    self.verification_compatible_subspace_dimensions(y, ds)
    self.verification_one_dimensional_subspaces(ds)

    num_of_samples = y.shape[0]
    k = int(floor(sqrt(num_of_samples)))
    self.spearman_cond_ut_co.p = k / num_of_samples
    a = self.spearman_cond_ut_co.estimation(y, ds)

    return a
Estimate upper tail dependence.

Parameters
----------
y : (number of samples, dimension)-ndarray
    One row of y corresponds to one sample.
ds : int vector, vector of ones
    ds[i] = 1 (for all i): the i^th subspace is one-dimensional.
    If ds is not given (ds=None), the vector of ones
    [ds = ones(y.shape[1],dtype='int')] is emulated inside the function.

Returns
-------
a : float
    Estimated upper tail dependence.

References
----------
Friedrich Schmid and Rafael Schmidt. Multivariate conditional versions
of Spearman's rho and related measures of tail dependence. Journal of
Multivariate Analysis, 98:1123-1140, 2007.

C. Spearman. The proof and measurement of association between two
things. The American Journal of Psychology, 15:72-101, 1904.

Examples
--------
a = co.estimation(y,ds)
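A minimal usage sketch of the Examples line above, with synthetic data; the module path is inferred from the function_path field and the direct instantiation of MASpearmanUT is an assumption, not taken from the record itself.

import numpy as np
from ite.cost.meta_a import MASpearmanUT   # module path assumed from function_path above

co = MASpearmanUT()
y = np.random.rand(1000, 3)   # 1000 samples, three one-dimensional subspaces
a = co.estimation(y)          # ds defaults to ones(y.shape[1], dtype='int')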
https://github.com/gdikov/adversarial-variational-bayes/blob/ebd692c70349f34bcb3a2086269bd814cafce96f/third_party/ite/cost/meta_a.py#L156-L202
from numpy import sqrt, floor, ones

from ite.cost.x_initialization import InitX
from ite.cost.x_verification import VerOneDSubspaces, VerCompSubspaceDims
from ite.cost.x_factory import co_factory


class MASpearmanLT(InitX, VerOneDSubspaces, VerCompSubspaceDims):

    def __init__(self, mult=True, spearman_cond_lt_co_name='BASpearmanCondLT',
                 spearman_cond_lt_co_pars=None):
        spearman_cond_lt_co_pars = spearman_cond_lt_co_pars or {}

        super().__init__(mult=mult)

        spearman_cond_lt_co_pars['mult'] = mult
        self.spearman_cond_lt_co = co_factory(spearman_cond_lt_co_name,
                                              **spearman_cond_lt_co_pars)

    def estimation(self, y, ds=None):
        if ds is None:
            ds = ones(y.shape[1], dtype='int')
        self.verification_compatible_subspace_dimensions(y, ds)
        self.verification_one_dimensional_subspaces(ds)

        num_of_samples = y.shape[0]
        k = int(floor(sqrt(num_of_samples)))
        self.spearman_cond_lt_co.p = k / num_of_samples
        a = self.spearman_cond_lt_co.estimation(y, ds)

        return a


class MASpearmanUT(InitX, VerOneDSubspaces, VerCompSubspaceDims):

    def __init__(self, mult=True, spearman_cond_ut_co_name='BASpearmanCondUT',
                 spearman_cond_ut_co_pars=None):
        super().__init__(mult=mult)

        spearman_cond_ut_co_pars = spearman_cond_ut_co_pars or {}
        spearman_cond_ut_co_pars['mult'] = mult
        self.spearman_cond_ut_co = co_factory(spearman_cond_ut_co_name,
                                              **spearman_cond_ut_co_pars)
MIT License
mozilla/make.mozilla.org
vendor-local/lib/python/south/orm.py
_FakeORM.eval_in_context
python
def eval_in_context(self, code, app, extra_imports={}):
    fake_locals = dict(inspect.getmodule(self.cls).__dict__)
    for key, value in fake_locals.items():
        if isinstance(value, type) and issubclass(value, models.Model) and hasattr(value, "_meta"):
            del fake_locals[key]
    fake_locals.update(dict([
        (name.split(".")[-1], model)
        for name, model in self.models.items()
    ]))
    fake_locals.update(dict([
        (name.split(".")[-1], model)
        for name, model in self.models.items()
        if name.split(".")[0] == app
    ]))
    fake_locals['orm'] = self
    fake_locals['_'] = lambda x: x
    fake_locals['datetime'] = datetime_utils
    for name, value in extra_imports.items():
        parts = value.split(".")
        try:
            obj = fake_locals[parts[0]]
            for part in parts[1:]:
                obj = getattr(obj, part)
        except (KeyError, AttributeError):
            pass
        else:
            fake_locals[name] = obj
            continue
        try:
            fake_locals[name] = ask_for_it_by_name(value)
        except ImportError:
            if name == "SouthFieldClass":
                raise ValueError("Cannot import the required field '%s'" % value)
            else:
                print "WARNING: Cannot import '%s'" % value
    fake_locals = ModelsLocals(fake_locals)
    return eval(code, globals(), fake_locals)
Evaluates the given code in the context of the migration file.
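A hedged sketch of how a frozen migration might call this method; `orm` stands for a _FakeORM instance, and the field string and dotted path below are illustrative values, not taken from the source.

field = orm.eval_in_context(
    "SouthFieldClass(max_length=255)",            # code string to evaluate
    "myapp",                                      # app whose models are bound by short name
    {"SouthFieldClass": "django.db.models.CharField"},  # extra_imports: name -> dotted path
)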
https://github.com/mozilla/make.mozilla.org/blob/98b87c517b463a5bae09f29284b1dabca97bb376/vendor-local/lib/python/south/orm.py#L177-L235
import inspect from django.db import models from django.db.models.loading import cache from django.core.exceptions import ImproperlyConfigured from south.db import db from south.utils import ask_for_it_by_name, datetime_utils from south.hacks import hacks from south.exceptions import UnfreezeMeLater, ORMBaseNotIncluded, ImpossibleORMUnfreeze class ModelsLocals(object): def __init__(self, data): self.data = data def __getitem__(self, key): try: return self.data[key] except KeyError: return self.data[key.lower()] _orm_cache = {} def FakeORM(*args): if not args in _orm_cache: _orm_cache[args] = _FakeORM(*args) return _orm_cache[args] class LazyFakeORM(object): def __init__(self, *args): self._args = args self.orm = None def __get__(self, obj, type=None): if not self.orm: self.orm = FakeORM(*self._args) return self.orm class _FakeORM(object): def __init__(self, cls, app): self.default_app = app self.cls = cls self.models = {} try: self.models_source = cls.models except AttributeError: return hacks.clear_app_cache() model_names = [] for name, data in self.models_source.items(): if "Meta" not in data: data['Meta'] = {} try: app_label, model_name = name.split(".", 1) except ValueError: app_label = self.default_app model_name = name if "object_name" in data['Meta']: model_name = data['Meta']['object_name'] del data['Meta']['object_name'] name = "%s.%s" % (app_label, model_name) self.models[name.lower()] = name model_names.append((name.lower(), app_label, model_name, data)) last_size = None while model_names: if len(model_names) == last_size: raise ImpossibleORMUnfreeze() last_size = len(model_names) postponed_model_names = [] for name, app_label, model_name, data in model_names: try: self.models[name] = self.make_model(app_label, model_name, data) except UnfreezeMeLater: postponed_model_names.append((name, app_label, model_name, data)) model_names = postponed_model_names self.retry_failed_fields() for model in self.models.values(): model._meta.get_all_field_names() hacks.unclear_app_cache() def __iter__(self): return iter(self.models.values()) def __getattr__(self, key): fullname = (self.default_app+"."+key).lower() try: return self.models[fullname] except KeyError: raise AttributeError("The model '%s' from the app '%s' is not available in this migration. (Did you use orm.ModelName, not orm['app.ModelName']?)" % (key, self.default_app)) def __getitem__(self, key): if ":" in key: key, fname = key.split(":") else: fname = None key = key.lower() try: model = self.models[key] except KeyError: try: app, model = key.split(".", 1) except ValueError: raise KeyError("The model '%s' is not in appname.modelname format." % key) else: raise KeyError("The model '%s' from the app '%s' is not available in this migration." % (model, app)) if fname: return model._meta.get_field_by_name(fname)[0] else: return model
BSD 3-Clause New or Revised License
andrewsayre/pysmartthings
pysmartthings/device.py
Device.device_type_id
python
def device_type_id(self) -> str:
    return self._device_type_id
Get the SmartThings device type handler id.
https://github.com/andrewsayre/pysmartthings/blob/cc9538f2bfef09055891e7bb432a20a8d918d524/pysmartthings/device.py#L173-L175
from collections import defaultdict, namedtuple import colorsys import re from typing import Any, Dict, Mapping, Optional, Sequence, Tuple from .api import Api from .capability import ATTRIBUTE_OFF_VALUES, ATTRIBUTE_ON_VALUES, Attribute, Capability from .entity import Entity DEVICE_TYPE_OCF = "OCF" DEVICE_TYPE_DTH = "DTH" DEVICE_TYPE_UNKNOWN = "UNKNOWN" DEVICE_TYPE_ENDPOINT_APP = "ENDPOINT_APP" DEVICE_TYPE_VIPER = "VIPER" COLOR_HEX_MATCHER = re.compile("^#[A-Fa-f0-9]{6}$") Status = namedtuple("status", "value unit data") STATUS_NONE = Status(None, None, None) def hs_to_hex(hue: float, saturation: float) -> str: rgb = colorsys.hsv_to_rgb(hue / 100, saturation / 100, 100) return "#{:02x}{:02x}{:02x}".format( round(rgb[0]), round(rgb[1]), round(rgb[2]) ).upper() def hex_to_hs(color_hex: str) -> (int, int): color_hex = color_hex.lstrip("#") rgb = [ int(color_hex[i : i + len(color_hex) // 3], 16) / 255.0 for i in range(0, len(color_hex), len(color_hex) // 3) ] hsv = colorsys.rgb_to_hsv(rgb[0], rgb[1], rgb[2]) return round(hsv[0] * 100, 3), round(hsv[1] * 100, 3) def bool_to_value(attribute: str, value: bool) -> str: return ATTRIBUTE_ON_VALUES[attribute] if value else ATTRIBUTE_OFF_VALUES[attribute] class Command: close = "close" execute = "execute" lock = "lock" off = "off" open = "open" on = "on" override_drlc_action = "overrideDrlcAction" preset_position = "presetPosition" request_drlc_action = "requestDrlcAction" set_air_flow_direction = "setAirFlowDirection" set_air_conditioner_mode = "setAirConditionerMode" set_color = "setColor" set_color_temperature = "setColorTemperature" set_cooling_setpoint = "setCoolingSetpoint" set_fan_mode = "setFanMode" set_fan_speed = "setFanSpeed" set_heating_setpoint = "setHeatingSetpoint" set_hue = "setHue" set_level = "setLevel" set_saturation = "setSaturation" set_thermostat_fan_mode = "setThermostatFanMode" set_thermostat_mode = "setThermostatMode" unlock = "unlock" mute = "mute" unmute = "unmute" set_volume = "setVolume" volume_up = "volumeUp" volume_down = "volumeDown" play = "play" pause = "pause" stop = "stop" fast_forward = "fastForward" rewind = "rewind" set_input_source = "setInputSource" set_playback_shuffle = "setPlaybackShuffle" set_playback_repeat_mode = "setPlaybackRepeatMode" set_tv_channel = "setTvChannel" channel_up = "channelUp" channel_down = "channelDown" class Device: def __init__(self): self._device_id = None self._name = None self._label = None self._location_id = None self._room_id = None self._type = DEVICE_TYPE_UNKNOWN self._device_type_id = None self._device_type_name = None self._device_type_network = None self._components = dict() self._capabilities = [] def apply_data(self, data: dict): self._device_id = data.get("deviceId") self._name = data.get("name") self._label = data.get("label") self._location_id = data.get("locationId") self._room_id = data.get("roomId") self._type = data.get("type") self._components.clear() self._capabilities.clear() components = data.get("components") if components: for component in components: capabilities = [c["id"] for c in component["capabilities"]] component_id = component["id"] if component_id == "main": self._capabilities.extend(capabilities) else: self._components[component_id] = capabilities if self._type == DEVICE_TYPE_DTH: dth = data.get("dth") if dth: self._device_type_id = dth.get("deviceTypeId") self._device_type_name = dth.get("deviceTypeName") self._device_type_network = dth.get("deviceNetworkType") def get_capability(self, *capabilities) -> Optional[str]: for capability in capabilities: 
if capability in self._capabilities: return capability return None @property def device_id(self) -> str: return self._device_id @property def name(self) -> str: return self._name @property def label(self) -> str: return self._label @property def location_id(self) -> str: return self._location_id @property def room_id(self): return self._room_id @property def type(self) -> str: return self._type @property
Apache License 2.0
reliaqualassociates/ramstk
src/ramstk/exim/imports.py
Import._do_insert_hardware
python
def _do_insert_hardware(self, row: pd.Series) -> RAMSTKHardwareRecord:
    _hardware = RAMSTKHardwareRecord()

    _map = self._dic_field_map["Hardware"]
    _hardware.revision_id = _get_input_value(_map, row, "Revision ID", 1)
    _hardware.hardware_id = _get_input_value(_map, row, "Hardware ID", 1)
    _hardware.alt_part_number = _get_input_value(
        _map, row, "Alternate Part Number", ""
    )
    _hardware.cage_code = _get_input_value(_map, row, "CAGE Code", "")
    _hardware.category_id = _get_input_value(_map, row, "Category ID", 0)
    _hardware.comp_ref_des = _get_input_value(_map, row, "Composite Ref. Des.", "")
    _hardware.cost = _get_input_value(_map, row, "Cost", 0.0)
    _hardware.cost_type_id = _get_input_value(_map, row, "Cost Type", 0)
    _hardware.description = _get_input_value(_map, row, "Description", "")
    _hardware.duty_cycle = _get_input_value(_map, row, "Duty Cycle", 100.0)
    _hardware.figure_number = _get_input_value(_map, row, "Figure Number", "")
    _hardware.lcn = _get_input_value(_map, row, "LCN", "")
    _hardware.level = _get_input_value(_map, row, "Level", 0)
    _hardware.manufacturer_id = _get_input_value(_map, row, "Manufacturer", 0)
    _hardware.mission_time = _get_input_value(_map, row, "Mission Time", 24.0)
    _hardware.name = _get_input_value(_map, row, "Name", "")
    _hardware.nsn = _get_input_value(_map, row, "NSN", "")
    _hardware.page_number = _get_input_value(_map, row, "Page Number", "")
    _hardware.parent_id = _get_input_value(_map, row, "Parent Assembly", 1)
    _hardware.part = _get_input_value(_map, row, "Part", 0)
    _hardware.part_number = _get_input_value(_map, row, "Part Number", "")
    _hardware.quantity = _get_input_value(_map, row, "Quantity", 1)
    _hardware.ref_des = _get_input_value(_map, row, "Reference Designator", "")
    _hardware.remarks = _get_input_value(_map, row, "Remarks", "")
    _hardware.repairable = _get_input_value(_map, row, "Repairable", 1)
    _hardware.specification_number = _get_input_value(
        _map, row, "Specification", ""
    )
    _hardware.subcategory_id = _get_input_value(_map, row, "Subcategory ID", 0)
    _hardware.tagged_part = _get_input_value(_map, row, "Tagged Part", 0)
    _hardware.year_of_manufacture = _get_input_value(
        _map, row, "Year of Manufacture", 1900
    )

    return _hardware
Insert a new Hardware entity to the RAMSTK db.

:param row: the row from the pandas DataFrame containing the input data.
:return: _entity
:rtype: :class:`ramstk.models.programdb.ramstkhardware.RAMSTKHardware`
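An illustrative call, assuming a hypothetical Import instance named importer whose field map has already been filled in (for example via the request_map_to_field message seen in the context below); the column names in the Series are invented.

row = pd.Series({"Ref Des": "C1", "P/N": "CDR31BX104AKWS"})   # hypothetical spreadsheet columns
_hardware = importer._do_insert_hardware(row)                 # unmapped fields keep their defaults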
https://github.com/reliaqualassociates/ramstk/blob/ffec5a107424914cf0026c6dfe26369c221f79f9/src/ramstk/exim/imports.py#L667-L714
import inspect import math from collections import OrderedDict from datetime import date from typing import Any, Dict import numpy as np import pandas as pd from dateutil import parser from pubsub import pub from ramstk.db.base import BaseDatabase from ramstk.exceptions import DataAccessError from ramstk.models import ( RAMSTKAllocationRecord, RAMSTKDesignElectricRecord, RAMSTKDesignMechanicRecord, RAMSTKFunctionRecord, RAMSTKHardwareRecord, RAMSTKMilHdbk217FRecord, RAMSTKNSWCRecord, RAMSTKReliabilityRecord, RAMSTKRequirementRecord, RAMSTKSimilarItemRecord, RAMSTKValidationRecord, ) def _do_replace_nan(value: Any, default: Any) -> Any: _value = value try: if math.isnan(value): _value = default except TypeError: pass if value is np.nan: _value = default return _value def _get_input_value( mapper: Dict[str, Any], df_row: pd.Series, field: str, default: Any ) -> Any: try: _value = df_row.at[mapper[field]] except KeyError: _value = default _value = _do_replace_nan(_value, default) if default == date.today(): _value = parser.parse(_value) return _value class Import: _dic_field_map = { "Function": OrderedDict( [ ("Revision ID", ""), ("Function ID", ""), ("Level", ""), ("Function Code", ""), ("Function Name", ""), ("Parent", ""), ("Remarks", ""), ("Safety Critical", ""), ("Type", ""), ] ), "Requirement": OrderedDict( [ ("Revision ID", ""), ("Requirement ID", ""), ("Derived?", ""), ("Requirement", ""), ("Figure Number", ""), ("Owner", ""), ("Page Number", ""), ("Parent ID", ""), ("Priority", ""), ("Requirement Code", ""), ("Specification", ""), ("Requirement Type", ""), ("Validated?", ""), ("Validated Date", ""), ] ), "Hardware": OrderedDict( [ ("Revision ID", ""), ("Hardware ID", ""), ("Alternate Part Number", ""), ("CAGE Code", ""), ("Category ID", ""), ("Composite Ref. 
Des.", ""), ("Cost", ""), ("Cost Type", ""), ("Description", ""), ("Duty Cycle", ""), ("Figure Number", ""), ("LCN", ""), ("Level", ""), ("Manufacturer", ""), ("Mission Time", ""), ("Name", ""), ("NSN", ""), ("Page Number", ""), ("Parent Assembly", ""), ("Part", ""), ("Part Number", ""), ("Quantity", ""), ("Reference Designator", ""), ("Remarks", ""), ("Repairable", ""), ("Specification", ""), ("Subcategory ID", ""), ("Tagged Part", ""), ("Year of Manufacture", ""), ] ), "Design Electric": OrderedDict( [ ("Hardware ID", ""), ("Application ID", ""), ("Area", ""), ("Capacitance", ""), ("Configuration ID", ""), ("Construction ID", ""), ("Contact Form ID", ""), ("Contact Gauge", ""), ("Contact Rating ID", ""), ("Current Operating", ""), ("Current Rated", ""), ("Current Ratio", ""), ("Environment Active ID", ""), ("Environment Dormant ID", ""), ("Family ID", ""), ("Feature Size", ""), ("Frequency Operating", ""), ("Insert ID", ""), ("Insulation ID", ""), ("Manufacturing ID", ""), ("Matching ID", ""), ("N Active Pins", ""), ("N Circuit Planes", ""), ("N Cycles", ""), ("N Elements", ""), ("N Hand Soldered", ""), ("N Wave Soldered", ""), ("Operating Life", ""), ("Overstress", ""), ("Package ID", ""), ("Power Operating", ""), ("Power Rated", ""), ("Power Ratio", ""), ("Reason", ""), ("Resistance", ""), ("Specification ID", ""), ("Technology ID", ""), ("Temperature, Active", ""), ("Temperature, Case", ""), ("Temperature, Dormant", ""), ("Temperature, Hot Spot", ""), ("Temperature, Junction", ""), ("Temperature, Knee", ""), ("Temperature, Rated Max", ""), ("Temperature, Rated Min", ""), ("Temperature Rise", ""), ("Theta JC", ""), ("Type ID", ""), ("Voltage, AC Operating", ""), ("Voltage, DC Operating", ""), ("Voltage ESD", ""), ("Voltage, Rated", ""), ("Voltage Ratio", ""), ("Weight", ""), ("Years in Production", ""), ] ), "Design Mechanic": OrderedDict( [ ("Hardware ID", ""), ("Altitude, Operating", ""), ("Application ID", ""), ("Balance ID", ""), ("Clearance", ""), ("Casing ID", ""), ("Contact Pressure", ""), ("Deflection", ""), ("Diameter, Coil", ""), ("Diameter, Inner", ""), ("Diameter, Outer", ""), ("Diameter, Wire", ""), ("Filter Size", ""), ("Flow, Design", ""), ("Flow, Operating", ""), ("Frequency, Operating", ""), ("Friction", ""), ("Impact ID", ""), ("Allowable Leakage", ""), ("Length", ""), ("Length, Compressed", ""), ("Length, Relaxed", ""), ("Design Load", ""), ("Load ID", ""), ("Operating Load", ""), ("Lubrication ID", ""), ("Manufacturing ID", ""), ("Material ID", ""), ("Meyer Hardness", ""), ("Misalignment Angle", ""), ("N Ten", ""), ("N Cycles", ""), ("N Elements", ""), ("Offset", ""), ("Particle Size", ""), ("Contact Pressure", ""), ("Differential Pressure", ""), ("Downstream Pressure", ""), ("Rated Pressure", ""), ("Upstream Pressure", ""), ("Design RPM", ""), ("Operating RPM", ""), ("Service ID", ""), ("Spring Index", ""), ("Surface Finish", ""), ("Technology ID", ""), ("Thickness", ""), ("Torque ID", ""), ("Type ID", ""), ("Design Viscosity", ""), ("Dynamic Viscosity", ""), ("% Water", ""), ("Minimum Width", ""), ] ), "Reliability": OrderedDict( [ ("Hardware ID", ""), ("Additive Adjustment Factor", ""), ("Failure Distribution ID", ""), ("Failure Rate Method ID", ""), ("Failure Rate Model", ""), ("Specified Failure Rate", ""), ("Failure Rate Type ID", ""), ("Location Parameter", ""), ("Specified MTBF", ""), ("Multiplicative Adjustment Factor", ""), ("Quality ID", ""), ("Reliability Goal", ""), ("Reliability Goal Measure ID", ""), ("Scale Parameter", ""), ("Shape Parameter", ""), 
("Survival Analysis ID", ""), ] ), "Validation": OrderedDict( [ ("Revision ID", ""), ("Validation ID", ""), ("Acceptable Maximum", ""), ("Acceptable Mean", ""), ("Acceptable Minimum", ""), ("Acceptable Variance", ""), ("s-Confidence", ""), ("Average Task Cost", ""), ("Maximum Task Cost", ""), ("Minimum Task Cost", ""), ("Start Date", ""), ("End Date", ""), ("Task Description", ""), ("Unit of Measure", ""), ("Name", ""), ("Task Status", ""), ("Task Type", ""), ("Task Specification", ""), ("Average Task Time", ""), ("Maximum Task Time", ""), ("Minimum Task Time", ""), ] ), } def __init__(self) -> None: self._dao: BaseDatabase = BaseDatabase() self._df_input_data: pd.DataFrame = pd.DataFrame({}) pub.subscribe(self._do_connect, "succeed_connect_program_database") pub.subscribe(self._do_map_to_field, "request_map_to_field") pub.subscribe(self._do_read_db_fields, "request_db_fields") pub.subscribe(self._do_read_file, "request_read_import_file") pub.subscribe(self._do_import, "request_import") def _do_connect(self, dao: BaseDatabase) -> None: self._dao = dao def _do_import(self, module: str) -> None: _entities = [] for __, _row in self._df_input_data.iterrows(): if module == "Function": _entity = self._do_insert_function(_row) _entities.append(_entity) elif module == "Requirement": _entity = self._do_insert_requirement(_row) _entities.append(_entity) elif module == "Hardware": _entity = self._do_insert_hardware(_row) _entities.append(_entity) _entity = self._do_insert_allocation(_row) _entities.append(_entity) _entity = self._do_insert_similar_item(_row) _entities.append(_entity) _entity = self._do_insert_design_electric(_row) _entities.append(_entity) _entity = self._do_insert_mil_hdbk_f(_row) _entities.append(_entity) _entity = self._do_insert_design_mechanic(_row) _entities.append(_entity) _entity = self._do_insert_nswc(_row) _entities.append(_entity) _entity = self._do_insert_reliability(_row) _entities.append(_entity) elif module == "Validation": _entity = self._do_insert_validation(_row) _entities.append(_entity) try: self._dao.do_insert_many(_entities) pub.sendMessage( "succeed_import_module", module=module, ) except (AttributeError, DataAccessError): _method_name: str = inspect.currentframe().f_code.co_name _error_msg: str = ( "{1}: There was a problem importing {0} records. " "This is usually caused by key violations; check the " "ID and/or parent ID fields in the import file." 
).format(module, _method_name) pub.sendMessage( "fail_import_module", error_message=_error_msg, ) def _do_insert_allocation(self, row: pd.Series) -> RAMSTKAllocationRecord: _allocation = RAMSTKAllocationRecord() _map = self._dic_field_map["Hardware"] _allocation.revision_id = _get_input_value(_map, row, "Revision ID", 1) _allocation.hardware_id = _get_input_value(_map, row, "Hardware ID", 1) _allocation.parent_id = _get_input_value(_map, row, "Parent Assembly", 1) return _allocation def _do_insert_design_electric(self, row: pd.Series) -> RAMSTKDesignElectricRecord: _design_electric = RAMSTKDesignElectricRecord() _map = self._dic_field_map["Hardware"] _design_electric.hardware_id = _get_input_value(_map, row, "Hardware ID", 1) _map = self._dic_field_map["Design Electric"] _design_electric.set_attributes( { "application_id": _get_input_value(_map, row, "Application ID", 0), "area": _get_input_value(_map, row, "Area", 0.0), "capacitance": _get_input_value(_map, row, "Capacitance", 0.000001), "configuration_id": _get_input_value(_map, row, "Configuration ID", 0), "construction_id": _get_input_value(_map, row, "Construction ID", 0), "contact_form_id": _get_input_value(_map, row, "Contact Form ID", 0), "contact_gauge": _get_input_value(_map, row, "Contact Gauge", 20), "contact_rating_id": _get_input_value( _map, row, "Contact Rating ID", 0 ), "current_operating": _get_input_value( _map, row, "Current Operating", 0.0 ), "current_rated": _get_input_value(_map, row, "Current Rated", 0.0), "current_ratio": _get_input_value(_map, row, "Current Ratio", 0.0), "environment_active_id": _get_input_value( _map, row, "Environment Active ID", 0 ), "environment_dormant_id": _get_input_value( _map, row, "Environment Dormant ID", 0 ), "family_id": _get_input_value(_map, row, "Family ID", 0), "feature_size": _get_input_value(_map, row, "Feature Size", 1.0), "frequency_operating": _get_input_value( _map, row, "Frequency Operating", 0.0 ), "insert_id": _get_input_value(_map, row, "Insert ID", 0), "insulation_id": _get_input_value(_map, row, "Insulation ID", 0), "manufacturing_id": _get_input_value(_map, row, "Manufacturing ID", 0), "matching_id": _get_input_value(_map, row, "Matching ID", 0), "n_active_pins": _get_input_value(_map, row, "N Active Pins", 0), "n_circuit_planes": _get_input_value(_map, row, "N Circuit Planes", 1), "n_cycles": _get_input_value(_map, row, "N Cycles", 0), "n_elements": _get_input_value(_map, row, "N Elements", 0), "n_hand_soldered": _get_input_value(_map, row, "N Hand Soldered", 0), "n_wave_soldered": _get_input_value(_map, row, "N Wave Soldered", 0), "operating_life": _get_input_value(_map, row, "Operating Life", 0.0), "overstress": _get_input_value(_map, row, "Overstress", 0), "package_id": _get_input_value(_map, row, "Package ID", 0), "power_operating": _get_input_value(_map, row, "Power Operating", 0.0), "power_rated": _get_input_value(_map, row, "Power Rated", 0.0), "power_ratio": _get_input_value(_map, row, "Power Ratio", 0.0), "reason": _get_input_value(_map, row, "Reason", ""), "resistance": _get_input_value(_map, row, "Resistance", 0.0), "specification_id": _get_input_value(_map, row, "Specification ID", 0), "technology_id": _get_input_value(_map, row, "Technology ID", 0), "temperature_active": _get_input_value( _map, row, "Temperature, Active", 30.0 ), "temperature_case": _get_input_value( _map, row, "Temperature, Case", 0.0 ), "temperature_dormant": _get_input_value( _map, row, "Temperature, Dormant", 25.0 ), "temperature_hot_spot": _get_input_value( _map, row, "Temperature, 
Hot Spot", 0.0 ), "temperature_junction": _get_input_value( _map, row, "Temperature, Junction", 0.0 ), "temperature_knee": _get_input_value( _map, row, "Temperature, Knee", 25.0 ), "temperature_rated_max": _get_input_value( _map, row, "Temperature, Rated Max", 0.0 ), "temperature_rated_min": _get_input_value( _map, row, "Temperature, Rated Min", 0.0 ), "temperature_rise": _get_input_value( _map, row, "Temperature Rise", 0.0 ), "theta_jc": _get_input_value(_map, row, "Theta JC", 0.0), "type_id": _get_input_value(_map, row, "Type ID", 0), "voltage_ac_operating": _get_input_value( _map, row, "Voltage, AC Operating", 0.0 ), "voltage_dc_operating": _get_input_value( _map, row, "Voltage, DC Operating", 0.0 ), "voltage_esd": _get_input_value(_map, row, "Voltage ESD", 0.0), "voltage_rated": _get_input_value(_map, row, "Voltage, Rated", 0.0), "voltage_ratio": _get_input_value(_map, row, "Voltage Ratio", 0.0), "weight": _get_input_value(_map, row, "Weight", 1.0), "years_in_production": _get_input_value( _map, row, "Years in Production", 2 ), } ) return _design_electric def _do_insert_design_mechanic(self, row: pd.Series) -> RAMSTKDesignMechanicRecord: _design_mechanic = RAMSTKDesignMechanicRecord() _map = self._dic_field_map["Hardware"] _design_mechanic.hardware_id = _get_input_value(_map, row, "Hardware ID", 1) _map = self._dic_field_map["Design Mechanic"] _design_mechanic.set_attributes( { "altitude_operating": _get_input_value( _map, row, "Altitude, Operating", 0.0 ), "application_id": _get_input_value(_map, row, "Application ID", 0), "balance_id": _get_input_value(_map, row, "Balance ID", 0), "clearance": _get_input_value(_map, row, "Clearance", 0.0), "casing_id": _get_input_value(_map, row, "Casing ID", 0), "contact_pressure": _get_input_value( _map, row, "Contact Pressure", 0.0 ), "deflection": _get_input_value(_map, row, "Deflection", 0.0), "diameter_coil": _get_input_value(_map, row, "Diameter, Coil", 0.0), "diameter_inner": _get_input_value(_map, row, "Diameter, Inner", 0.0), "diameter_outer": _get_input_value(_map, row, "Diameter, Outer", 0.0), "diameter_wire": _get_input_value(_map, row, "Diameter, Wire", 0.0), "filter_size": _get_input_value(_map, row, "Filter Size", 0.0), "flow_design": _get_input_value(_map, row, "Flow, Design", 0.0), "flow_operating": _get_input_value(_map, row, "Flow, Operating", 0.0), "frequency_operating": _get_input_value( _map, row, "Frequency, Operating", 0.0 ), "friction": _get_input_value(_map, row, "Friction", 0.0), "impact_id": _get_input_value(_map, row, "Impact ID", 0), "leakage_allowable": _get_input_value( _map, row, "Allowable Leakage", 0.0 ), "length": _get_input_value(_map, row, "Length", 0.0), "length_compressed": _get_input_value( _map, row, "Length, Compressed", 0.0 ), "length_relaxed": _get_input_value(_map, row, "Length, Relaxed", 0.0), "load_design": _get_input_value(_map, row, "Design Load", 0.0), "load_id": _get_input_value(_map, row, "Load ID", 0), "load_operating": _get_input_value(_map, row, "Operating Load", 0.0), "lubrication_id": _get_input_value(_map, row, "Lubrication ID", 0), "manufacturing_id": _get_input_value(_map, row, "Manufacturing ID", 0), "material_id": _get_input_value(_map, row, "Material ID", 0), "meyer_hardness": _get_input_value(_map, row, "Meyer Hardness", 0.0), "misalignment_angle": _get_input_value( _map, row, "Misalignment Angle", 0.0 ), "n_ten": _get_input_value(_map, row, "N Ten", 0), "n_cycles": _get_input_value(_map, row, "N Cycles", 0.0), "n_elements": _get_input_value(_map, row, "N Elements", 0), "offset": 
_get_input_value(_map, row, "Offset", 0.0), "particle_size": _get_input_value(_map, row, "Particle Size", 0.0), "pressure_contact": _get_input_value( _map, row, "Contact Pressure", 0.0 ), "pressure_delta": _get_input_value( _map, row, "Differential Pressure", 0.0 ), "pressure_downstream": _get_input_value( _map, row, "Downstream Pressure", 0.0 ), "pressure_rated": _get_input_value(_map, row, "Rated Pressure", 0.0), "pressure_upstream": _get_input_value( _map, row, "Upstream Pressure", 0.0 ), "rpm_design": _get_input_value(_map, row, "Design RPM", 0.0), "rpm_operating": _get_input_value(_map, row, "Operating RPM", 0.0), "service_id": _get_input_value(_map, row, "Service ID", 0), "spring_index": _get_input_value(_map, row, "Spring Index", 0), "surface_finish": _get_input_value(_map, row, "Surface Finish", 0.0), "technology_id": _get_input_value(_map, row, "Technology ID", 0), "thickness": _get_input_value(_map, row, "Thickness", 0.0), "torque_id": _get_input_value(_map, row, "Torque ID", 0), "type_id": _get_input_value(_map, row, "Type ID", 0), "viscosity_design": _get_input_value( _map, row, "Design Viscosity", 0.0 ), "viscosity_dynamic": _get_input_value( _map, row, "Dynamic Viscosity", 0.0 ), "water_per_cent": _get_input_value(_map, row, "% Water", 0.0), "width_minimum": _get_input_value(_map, row, "Minimum Width", 0.0), } ) return _design_mechanic def _do_insert_function(self, row: pd.Series) -> RAMSTKFunctionRecord: _function = RAMSTKFunctionRecord() _map = self._dic_field_map["Function"] _function.revision_id = _get_input_value(_map, row, "Revision ID", 1) _function.function_id = _get_input_value(_map, row, "Function ID", 1) _function.function_code = _get_input_value(_map, row, "Function Code", "") _function.level = _get_input_value(_map, row, "Level", 0) _function.name = _get_input_value(_map, row, "Function Name", "") _function.parent_id = _get_input_value(_map, row, "Parent", 1) _function.remarks = _get_input_value(_map, row, "Remarks", "") _function.safety_critical = _get_input_value(_map, row, "Safety Critical", 0) _function.type_id = _get_input_value(_map, row, "Type", "") return _function
BSD 3-Clause New or Revised License
kchen92/graphnav
src/semnav/learning/evaluator.py
Evaluator.get_seq_path
python
def get_seq_path(frame_path):
    return os.path.dirname(frame_path)
Return the sequence name as a String for a given (full) frame path.
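For concreteness, a tiny example with a made-up frame path; it assumes the method carries the @staticmethod decorator dropped by extraction (it takes no self).

frame_path = "/data/area1/seq_00042/000123.png"     # hypothetical dataset layout
Evaluator.get_seq_path(frame_path)                  # -> "/data/area1/seq_00042"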
https://github.com/kchen92/graphnav/blob/7c2159b876ac377e6ff075ae6c7201036144f4f2/src/semnav/learning/evaluator.py#L127-L130
from __future__ import print_function from __future__ import division import os import torch from semnav.config import get_config from semnav.dataset import get_split_datasets from semnav.dataset.dataset_utils import get_frame_idx from semnav.dataset.dataset_visualizer import DatasetVisualizer from semnav.dataset.graph_net_frame_dataset import GraphNetFrameDataset from semnav.lib.categories import BehaviorCategory from semnav.learning.behavior_net.behavior_cnn import BehaviorCNN from semnav.learning.behavior_net.behavior_rnn import BehaviorRNN from semnav.learning.graph_net.graph_net import GraphNet from semnav.learning.phase_net.phase_rnn import PhaseRNN from semnav.learning import decode_batch, get_net from torch.utils.data import DataLoader class Evaluator(object): def __init__(self, cfg=None): if cfg is None: self.cfg = get_config() else: self.cfg = cfg self.hidden = None self.depth_stack = None if self.cfg.visualize_results is True: self.dataset_visualizer = DatasetVisualizer() @staticmethod def load_eval_datasets(cfg): cur_dataset_type = cfg.dataset_type if cfg.dataset_type == 'graph_net': cfg.dataset_type = 'single_frame_graph_net' else: cfg.dataset_type = 'frame_by_frame' train_set, val_set, test_set = get_split_datasets(cfg.dataset) cfg.dataset_type = cur_dataset_type return train_set, val_set, test_set @staticmethod def get_evaluation_batch_size(): return 1 def predict(self, net, depth, is_new_episode): net.eval() if is_new_episode is True: print('New episode!') if isinstance(net, GraphNet) and isinstance(depth, dict): depth, graph_net_input = (depth['depth'], depth['graph_net_input']) if ((isinstance(net, BehaviorCNN) and (self.cfg.dataset_type == 'temporal')) or (isinstance(net, GraphNet) and (self.cfg.dataset_type == 'graph_net'))): if self.cfg.use_semantic_class is not None: n_channels_per_frame = 2 else: n_channels_per_frame = 1 if is_new_episode: self.depth_stack = depth.repeat(1, self.cfg.n_frames_per_sample, 1, 1) else: self.depth_stack = torch.cat([self.depth_stack[:, n_channels_per_frame:, :, :], depth], dim=1) depth = self.depth_stack if (isinstance(net, BehaviorRNN) or isinstance(net, PhaseRNN)) and is_new_episode: print('Resetting hidden state') batch_size = depth.size(0) hidden = net.initial_hidden(batch_size=batch_size) if isinstance(hidden, list): self.hidden = [x.to(self.cfg.device) for x in hidden] else: self.hidden = hidden.to(self.cfg.device) if isinstance(net, BehaviorCNN): output = net(depth) elif isinstance(net, GraphNet): output = net(depth, graph_net_input) elif isinstance(net, BehaviorRNN) or isinstance(net, PhaseRNN): output, self.hidden = net(depth, self.hidden) else: raise ValueError('Validation on this network is not supported.') return output @staticmethod
MIT License
probml/pyprobml
scripts/vb_logreg.py
EBLogisticRegression._get_sigma
python
def _get_sigma(self, X):
    return np.asarray([np.sum(X**2 * s, axis=1) for s in self.sigma_])
Compute variance of predictive distribution
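A quick numeric check of what the expression computes per row: the sum over features of x_j**2 * s_j, i.e. the variance of the linear score when the posterior covariance is treated as diagonal (that reading is an interpretation; the values below are made up).

import numpy as np
X = np.array([[1.0, 2.0]])
s = np.array([0.5, 0.1])
np.sum(X**2 * s, axis=1)   # -> array([0.9]) = 1.0*0.5 + 4.0*0.1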
https://github.com/probml/pyprobml/blob/9d5a94449ee76c0ca37ca953c502864315ae7724/scripts/vb_logreg.py#L257-L259
import superimport import numpy as np from scipy.optimize import fmin_l_bfgs_b from sklearn.utils.optimize import newton_cg from scipy.special import expit, exprel from scipy.linalg import eigvalsh from sklearn.utils.multiclass import check_classification_targets from sklearn.linear_model.base import LinearClassifierMixin, BaseEstimator from sklearn.utils import check_X_y from scipy.linalg import solve_triangular from sklearn.linear_model.logistic import ( _logistic_loss_and_grad, _logistic_loss, _logistic_grad_hess,) class BayesianLogisticRegression(LinearClassifierMixin, BaseEstimator): def __init__(self, n_iter, tol, fit_intercept, verbose): self.n_iter = n_iter self.tol = tol self.fit_intercept = fit_intercept self.verbose = verbose def fit(self,X,y): X,y = check_X_y( X, y , dtype = np.float64) check_classification_targets(y) self.classes_ = np.unique(y) n_classes = len(self.classes_) n_samples, n_features = X.shape if self.fit_intercept: X = self._add_intercept(X) if n_classes < 2: raise ValueError("Need samples of at least 2 classes") if n_classes > 2: self.coef_, self.sigma_ = [0]*n_classes,[0]*n_classes self.intercept_ = [0]*n_classes else: self.coef_, self.sigma_, self.intercept_ = [0],[0],[0] for i in range(len(self.coef_)): if n_classes == 2: pos_class = self.classes_[1] else: pos_class = self.classes_[i] mask = (y == pos_class) y_bin = np.ones(y.shape, dtype=np.float64) y_bin[~mask] = self._mask_val coef_, sigma_ = self._fit(X,y_bin) if self.fit_intercept: self.intercept_[i],self.coef_[i] = self._get_intercept(coef_) else: self.coef_[i] = coef_ self.sigma_[i] = sigma_ self.coef_ = np.asarray(self.coef_) return self def predict_proba(self,X): scores = self.decision_function(X) if self.fit_intercept: X = self._add_intercept(X) sigma = self._get_sigma(X) ks = 1. / ( 1. + np.pi*sigma / 8)**0.5 probs = expit(scores.T*ks).T if probs.shape[1] == 1: probs = np.hstack([1 - probs, probs]) else: probs /= np.reshape(np.sum(probs, axis = 1), (probs.shape[0],1)) return probs def _add_intercept(self,X): raise NotImplementedError def _get_intercept(self,coef): raise NotImplementedError def _get_sigma(self,X): raise NotImplementedError class EBLogisticRegression(BayesianLogisticRegression): def __init__(self, n_iter = 50, tol = 1e-3,solver = 'lbfgs_b',n_iter_solver = 15, tol_solver = 1e-3, fit_intercept = True, alpha = 1e-6, verbose = False): super(EBLogisticRegression,self).__init__(n_iter, tol, fit_intercept, verbose) self.n_iter_solver = n_iter_solver self.tol_solver = tol_solver self.alpha = alpha if solver not in ['lbfgs_b','newton_cg']: raise ValueError(('Only "lbfgs_b" and "newton_cg" ' 'solvers are implemented')) self.solver = solver self._mask_val = -1. def _fit(self,X,y): alpha = self.alpha n_samples,n_features = X.shape w0 = np.zeros(n_features) for i in range(self.n_iter): alpha0 = alpha w, d = self._posterior(X, y, alpha, w0) mu_sq = np.sum(w**2) alpha = X.shape[1] / (mu_sq + np.sum(d)) delta_alpha = abs(alpha - alpha0) if delta_alpha < self.tol or i==self.n_iter-1: break coef_, sigma_ = self._posterior(X, y, alpha , w) self.alpha_ = alpha return coef_, sigma_ def _add_intercept(self,X): return np.hstack((X,np.ones([X.shape[0],1]))) def _get_intercept(self,coef): return coef[-1], coef[:-1]
MIT License
technige/py2neo
py2neo/__init__.py
ConnectionProfile.protocol
python
def protocol(self):
    return self.__protocol
The name of the underlying point-to-point protocol, derived from the URI scheme. This will either be ``'bolt'`` or ``'http'``, regardless of security and verification settings. If unspecified, and uninfluenced by environment variables, this will default to ``'bolt'``.
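Two illustrative profiles (the host is a placeholder) showing how a scheme collapses to a bare protocol name per the _apply_scheme logic in the context below:

ConnectionProfile("bolt+ssc://graph.example.com:7687").protocol   # 'bolt' (secure, unverified)
ConnectionProfile("https://graph.example.com:7473").protocol      # 'http' (secure, verified)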
https://github.com/technige/py2neo/blob/603abd4d5f672bde039d4362cbec77d0d471f034/py2neo/__init__.py#L402-L409
from __future__ import absolute_import, print_function __all__ = [ "__author__", "__copyright__", "__email__", "__license__", "__package__", "__version__", "DEFAULT_PROTOCOL", "DEFAULT_SECURE", "DEFAULT_VERIFY", "DEFAULT_USER", "DEFAULT_PASSWORD", "DEFAULT_HOST", "DEFAULT_BOLT_PORT", "DEFAULT_HTTP_PORT", "DEFAULT_HTTPS_PORT", "ConnectionProfile", "ServiceProfile", ] from os import getenv from py2neo.addressing import Address from py2neo.compat import Mapping, string_types, urlsplit from py2neo.meta import get_metadata try: from py2neo.database import * from py2neo.errors import * from py2neo.matching import * from py2neo.data import * except ImportError: pass else: __all__ += database.__all__ __all__ += errors.__all__ __all__ += matching.__all__ __all__ += data.__all__ metadata = get_metadata() __author__ = metadata["author"] __copyright__ = "2011, {}".format(metadata["author"]) __email__ = metadata["author_email"] __license__ = metadata["license"] __package__ = metadata["name"] __version__ = metadata["version"] NEO4J_URI = getenv("NEO4J_URI") NEO4J_AUTH = getenv("NEO4J_AUTH") NEO4J_SECURE = getenv("NEO4J_SECURE") NEO4J_VERIFY = getenv("NEO4J_VERIFY") DEFAULT_PROTOCOL = "bolt" DEFAULT_SECURE = False DEFAULT_VERIFY = True DEFAULT_USER = "neo4j" DEFAULT_PASSWORD = "password" DEFAULT_HOST = "localhost" DEFAULT_BOLT_PORT = 7687 DEFAULT_HTTP_PORT = 7474 DEFAULT_HTTPS_PORT = 7473 class ConnectionProfile(Mapping): _keys = ("secure", "verify", "scheme", "user", "password", "address", "auth", "host", "port", "port_number", "protocol", "uri") _hash_keys = ("protocol", "secure", "verify", "user", "password", "address") def __init__(self, profile=None, **settings): self.__protocol = DEFAULT_PROTOCOL self.__secure = DEFAULT_SECURE self.__verify = DEFAULT_VERIFY self.__user = DEFAULT_USER self.__password = DEFAULT_PASSWORD self.__address = Address.parse("") self._apply_env_vars() if profile is None: pass elif isinstance(profile, string_types): self._apply_uri(profile) elif isinstance(profile, self.__class__): self._apply_settings(**{k: profile[k] for k in self._hash_keys}) elif isinstance(profile, Mapping): self._apply_settings(**profile) else: raise TypeError("Profile %r is neither a ConnectionProfile " "nor a string URI" % profile) self._apply_settings(**settings) if not self.address.port: addr = list(self.address) if self.protocol == "http": addr[1] = DEFAULT_HTTPS_PORT if self.secure else DEFAULT_HTTP_PORT else: addr[1] = DEFAULT_BOLT_PORT self.__address = Address(addr) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.uri) def __str__(self): return "«{}»".format(self.uri) def __getitem__(self, key): if key in self._keys: return getattr(self, key) else: raise KeyError(key) def __len__(self): return len(self._keys) def __iter__(self): return iter(self._keys) def _apply_env_vars(self): if NEO4J_URI: self._apply_uri(NEO4J_URI) if NEO4J_AUTH: self._apply_settings(auth=NEO4J_AUTH) if NEO4J_SECURE: self._apply_settings(secure=(NEO4J_SECURE == "1")) if NEO4J_VERIFY: self._apply_settings(verify=(NEO4J_VERIFY == "1")) def _apply_uri(self, uri): settings = {} parsed = urlsplit(uri) if parsed.scheme is not None: self._apply_scheme(parsed.scheme) if "@" in parsed.netloc: settings["address"] = parsed.netloc.partition("@")[-1] else: settings["address"] = parsed.netloc if parsed.username: settings["user"] = parsed.username if parsed.password: settings["password"] = parsed.password self._apply_settings(**settings) def _apply_scheme(self, scheme): if scheme == "https": protocol, ext = "http", "s" 
else: protocol, _, ext = scheme.partition("+") if ext == "": self._apply_settings(protocol=protocol, secure=False, verify=True) elif ext == "s": self._apply_settings(protocol=protocol, secure=True, verify=True) elif ext == "ssc": self._apply_settings(protocol=protocol, secure=True, verify=False) else: raise ValueError("Unknown scheme extension %r" % ext) def _apply_settings(self, uri=None, scheme=None, protocol=None, secure=None, verify=None, address=None, host=None, port=None, port_number=None, auth=None, user=None, password=None, **other): if uri: self._apply_uri(uri) if scheme: self._apply_scheme(scheme) if protocol: self._apply_protocol(protocol) if secure is not None: self.__secure = secure if verify is not None: self.__verify = verify if isinstance(address, tuple): self.__address = Address(address) elif address: self.__address = Address.parse(address) if host and port: self.__address = Address.parse("%s:%s" % (host, port)) elif host: self.__address = Address.parse("%s:%s" % (host, self.port)) elif port: self.__address = Address.parse("%s:%s" % (self.host, port)) if isinstance(auth, tuple): self.__user, self.__password = auth elif auth: self.__user, _, self.__password = auth.partition(":") if user: self.__user = user if password: self.__password = password if other: raise ValueError("The following settings are not supported: %r" % other) def _apply_protocol(self, protocol): if protocol not in ("bolt", "http"): raise ValueError("Unknown protocol %r" % protocol) self.__protocol = protocol def __hash__(self): values = tuple(getattr(self, key) for key in self._hash_keys) return hash(values) def __eq__(self, other): self_values = tuple(getattr(self, key) for key in self._hash_keys) try: other_values = tuple(getattr(other, key) for key in self._hash_keys) except AttributeError: return False else: return self_values == other_values @property def secure(self): return self.__secure @property def verify(self): return self.__verify @property def scheme(self): if self.secure and self.verify: return "https" if self.protocol == "http" else self.protocol + "+s" elif self.secure: return self.protocol + "+ssc" else: return self.protocol @property def user(self): return self.__user @property def password(self): return self.__password @property def address(self): return self.__address @property def auth(self): return self.user, self.password @property def host(self): return self.address.host @property def port(self): return self.address.port @property def port_number(self): return self.address.port_number @property
Apache License 2.0
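A brief usage sketch for the ConnectionProfile class defined in the module above; the host, user, and password are hypothetical placeholders, and the NEO4J_* environment variables are assumed to be unset so they do not override the URI.

from py2neo import ConnectionProfile

# "bolt+ssc" selects the bolt protocol with TLS enabled but certificate
# verification disabled (hypothetical credentials and host).
profile = ConnectionProfile("bolt+ssc://alice:secret@graph.example.com:7687")
print(profile.protocol)  # 'bolt'
print(profile.secure)    # True
print(profile.verify)    # False
print(profile.auth)      # ('alice', 'secret')
print(profile.port)      # 7687

# Keyword settings are applied after the URI, and a missing port is filled in
# from the protocol defaults.
http_profile = ConnectionProfile("http://graph.example.com", user="bob")
print(http_profile.port)  # 7474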
lemonsaurus/blackbox
blackbox/utils/workflows.py
get_handlers_by_id
python
def get_handlers_by_id( id_: t.Union[str, list[str]], handlers: HandlerById[Handler] ) -> set[Handler]: if not isinstance(id_, (str, list)): raise TypeError ids = [id_] if isinstance(id_, str) else id_ match = set() for i in ids: match.update(handlers[i]) return match
Given ids and a mapping of id to handlers, return handlers matching the ids. `id_` can be a string or a list of strings corresponding to ids. Raises TypeError if id_ is not a string or a list. Raises KeyError if id_ does not correspond to a handler in `handlers`.
https://github.com/lemonsaurus/blackbox/blob/4d78910a7e0b995e641092f5ef1a2ec5ed40cc64/blackbox/utils/workflows.py#L66-L86
import dataclasses import typing as t from collections import defaultdict from itertools import chain from blackbox import exceptions from blackbox.handlers import BlackboxDatabase from blackbox.handlers import BlackboxNotifier from blackbox.handlers import BlackboxStorage from blackbox.handlers._base import BlackboxHandler Handler = t.TypeVar("Handler", bound=BlackboxHandler) HandlerById = t.Mapping[str, set[Handler]] @dataclasses.dataclass class Workflow: database: BlackboxDatabase storage_providers: set[BlackboxStorage] = dataclasses.field(default_factory=set) notifiers: set[BlackboxNotifier] = dataclasses.field(default_factory=set) components = (BlackboxDatabase, BlackboxNotifier, BlackboxStorage) HANDLER_MAPPING: dict[str, t.Type[BlackboxHandler]] = { handler.__name__.lower(): handler for handler in chain.from_iterable(component.__subclasses__() for component in components) } def get_configured_handlers(config: dict) -> dict: handler_dict = defaultdict(set) for handler_type, handler_config in config.items(): try: Handler = HANDLER_MAPPING[handler_type] except KeyError: raise exceptions.InvalidHandler(handler_type) for handler_id, handler_fields in handler_config.items(): handler_instance = Handler(**handler_fields, id=handler_id) handler_dict["all"].add(handler_instance) handler_dict[handler_type].add(handler_instance) handler_dict[handler_id].add(handler_instance) return handler_dict
MIT License
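A minimal sketch pairing get_handlers_by_id with get_configured_handlers from the same module; the handler types, ids, and field names in the config are hypothetical and only illustrate the shape of the mapping.

from blackbox.utils.workflows import get_configured_handlers, get_handlers_by_id

# Hypothetical configuration: one database handler and one storage handler.
config = {
    "postgres": {"main_db": {"connection_string": "postgresql://user:pass@host:5432/db"}},
    "s3": {"backups": {"bucket": "my-backups", "endpoint": "s3.example.com"}},
}
handlers = get_configured_handlers(config)

# Look up by a single id, or by a list of ids; the result is a set of handlers.
main_db = get_handlers_by_id("main_db", handlers)
selected = get_handlers_by_id(["main_db", "backups"], handlers)

# Anything other than a string or a list raises TypeError:
# get_handlers_by_id(42, handlers)  -> TypeError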
vlsida/openram
compiler/verify/run_script.py
run_script
python
def run_script(cell_name, script="lvs"): echo_cmd_output = OPTS.verbose_level > 1 cwd = os.getcwd() os.chdir(OPTS.openram_temp) errfile = "{0}{1}.{2}.err".format(OPTS.openram_temp, cell_name, script) outfile = "{0}{1}.{2}.out".format(OPTS.openram_temp, cell_name, script) resultsfile = "{0}{1}.{2}.report".format(OPTS.openram_temp, cell_name, script) scriptpath = '{0}run_{1}.sh'.format(OPTS.openram_temp, script) debug.info(2, "Starting {}".format(scriptpath)) start = time.time() with open(outfile, 'wb') as fo, open(errfile, 'wb') as fe: p = subprocess.Popen( [scriptpath], stdout=fo, stderr=fe, cwd=OPTS.openram_temp) if echo_cmd_output: tailo = subprocess.Popen([ 'tail', '-f', '--pid', str(p.pid), outfile, ]) taile = subprocess.Popen([ 'tail', '-f', '--pid', str(p.pid), errfile, ]) lastoutput = start while p.poll() == None: runningfor = time.time() - start outputdelta = time.time() - lastoutput if outputdelta > 30: lastoutput = time.time() debug.info(1, "Still running {} ({:.0f} seconds)".format(scriptpath, runningfor)) time.sleep(1) assert p.poll() != None, (p.poll(), p) p.wait() if echo_cmd_output: if tailo.poll() != None: tailo.kill() tailo.wait() if taile.poll() != None: taile.kill() taile.wait() debug.info(2, "Finished {} with {}".format(scriptpath, p.returncode)) os.chdir(cwd) return (outfile, errfile, resultsfile)
Run script and create output files.
https://github.com/vlsida/openram/blob/f66aac3264598eeae31225c62b6a4af52412d407/compiler/verify/run_script.py#L19-L76
import os import debug import subprocess import time from globals import OPTS
BSD 3-Clause New or Revised License
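A short sketch of calling run_script; it assumes OpenRAM's globals (OPTS) are already configured, that compiler/verify is on sys.path as OpenRAM's setup arranges, and that a run_lvs.sh script has been written into OPTS.openram_temp. The cell name is a placeholder.

from run_script import run_script

# Runs {OPTS.openram_temp}/run_lvs.sh and blocks until it finishes,
# logging a progress message every 30 seconds.
outfile, errfile, resultsfile = run_script("sram_2_16_1_scn4m_subm", script="lvs")

# stdout/stderr of the tool land in the .out/.err files; the calling verifier
# reads its results from the .report file.
print(outfile, errfile, resultsfile)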
alteryx/evalml
evalml/pipelines/component_graph.py
ComponentGraph.instantiate
python
def instantiate(self, parameters=None): if self._is_instantiated: raise ValueError( f"Cannot reinstantiate a component graph that was previously instantiated" ) parameters = parameters or {} param_set = set(s for s in parameters.keys() if s not in ["pipeline"]) diff = param_set.difference(set(self.component_instances.keys())) if len(diff): warnings.warn(ParameterNotUsedWarning(diff)) self._is_instantiated = True component_instances = {} for component_name, component_class in self.component_instances.items(): component_parameters = parameters.get(component_name, {}) if inspect.isclass(component_class): try: new_component = component_class( **component_parameters, random_seed=self.random_seed ) except (ValueError, TypeError) as e: self._is_instantiated = False err = "Error received when instantiating component {} with the following arguments {}".format( component_name, component_parameters ) raise ValueError(err) from e component_instances[component_name] = new_component elif isinstance(component_class, ComponentBase): component_instances[component_name] = component_class self.component_instances = component_instances return self
Instantiates all uninstantiated components within the graph using the given parameters. An error will be raised if a component is already instantiated but the parameters dict contains arguments for that component. Args: parameters (dict): Dictionary with component names as keys and dictionary of that component's parameters as values. An empty dictionary {} or None implies using all default values for component parameters. If a component in the component graph is already instantiated, it will not use any of its parameters defined in this dictionary. Defaults to None. Returns: self Raises: ValueError: If component graph is already instantiated or if a component errored while instantiating.
https://github.com/alteryx/evalml/blob/12ea29f4cb62948566804f624d37442c2e5aeeea/evalml/pipelines/component_graph.py#L135-L177
import inspect import warnings import networkx as nx import pandas as pd import woodwork as ww from networkx.algorithms.dag import topological_sort from networkx.exception import NetworkXUnfeasible from evalml.exceptions.exceptions import ( MethodPropertyNotFoundError, MissingComponentError, ParameterNotUsedWarning, ) from evalml.pipelines.components import ComponentBase, Estimator, Transformer from evalml.pipelines.components.utils import handle_component_class from evalml.utils import get_logger, import_or_raise, infer_feature_types logger = get_logger(__file__) class ComponentGraph: def __init__(self, component_dict=None, random_seed=0): self.random_seed = random_seed self.component_dict = component_dict or {} if not isinstance(self.component_dict, dict): raise ValueError( "component_dict must be a dictionary which specifies the components and edges between components" ) self._validate_component_dict() self.component_instances = {} self._is_instantiated = False for component_name, component_info in self.component_dict.items(): component_class = handle_component_class(component_info[0]) self.component_instances[component_name] = component_class self._validate_component_dict_edges() self.input_feature_names = {} self._feature_provenance = {} self._i = 0 self._compute_order = self.generate_order(self.component_dict) def _validate_component_dict(self): for _, component_inputs in self.component_dict.items(): if not isinstance(component_inputs, list): raise ValueError( "All component information should be passed in as a list" ) def _validate_component_dict_edges(self): for _, component_inputs in self.component_dict.items(): component_inputs = component_inputs[1:] has_feature_input = any( component_input.endswith(".x") or component_input == "X" for component_input in component_inputs ) num_target_inputs = sum( component_input.endswith(".y") or component_input == "y" for component_input in component_inputs ) if not has_feature_input: raise ValueError( "All components must have at least one input feature (.x/X) edge." ) if num_target_inputs != 1: raise ValueError( "All components must have exactly one target (.y/y) edge." ) def check_all_inputs_have_correct_syntax(edge): return not ( edge.endswith(".y") or edge == "y" or edge.endswith(".x") or edge == "X" ) if ( len( list(filter(check_all_inputs_have_correct_syntax, component_inputs)) ) != 0 ): raise ValueError( "All edges must be specified as either an input feature ('X'/.x) or input target ('y'/.y)." ) target_inputs = [ component for component in component_inputs if (component.endswith(".y")) ] if target_inputs: target_component_name = target_inputs[0][:-2] target_component_class = self.get_component(target_component_name) if not target_component_class.modifies_target: raise ValueError( f"{target_inputs[0]} is not a valid input edge because {target_component_name} does not return a target." ) @property def compute_order(self): return self._compute_order @property def default_parameters(self): defaults = {} for component in self.component_instances.values(): if component.default_parameters: defaults[component.name] = component.default_parameters return defaults
BSD 3-Clause New or Revised License
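A small sketch of building and instantiating a ComponentGraph; the component names and the Imputer parameter follow evalml's usual naming but should be treated as assumptions here.

from evalml.pipelines.component_graph import ComponentGraph

# Each value is [component, edges]: every component needs at least one
# feature edge (.x/X) and exactly one target edge (.y/y).
component_dict = {
    "Imputer": ["Imputer", "X", "y"],
    "Random Forest Classifier": ["Random Forest Classifier", "Imputer.x", "y"],
}
graph = ComponentGraph(component_dict, random_seed=0)

# Parameters are keyed by component name; unused keys trigger a
# ParameterNotUsedWarning, and calling instantiate twice raises ValueError.
graph.instantiate({"Imputer": {"numeric_impute_strategy": "median"}})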
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
FakeFile.st_mtime
python
def st_mtime(self) -> float: return self.stat_result.st_mtime
Return the modification time of the fake file.
https://github.com/jmcgeheeiv/pyfakefs/blob/589bae0c58298d92fea0e463ab5104166cd6e63c/pyfakefs/fake_filesystem.py#L369-L371
import errno import heapq import io import locale import os import random import sys import traceback import uuid from collections import namedtuple from doctest import TestResults from enum import Enum from stat import ( S_IFREG, S_IFDIR, S_ISLNK, S_IFMT, S_ISDIR, S_IFLNK, S_ISREG, S_IFSOCK ) from types import ModuleType, TracebackType from typing import ( List, Optional, Callable, Union, Any, Dict, Tuple, cast, AnyStr, overload, NoReturn, ClassVar, IO, Iterator, TextIO, Type ) from pyfakefs.deprecator import Deprecator from pyfakefs.extra_packages import use_scandir from pyfakefs.fake_scandir import scandir, walk, ScanDirIter from pyfakefs.helpers import ( FakeStatResult, BinaryBufferIO, TextBufferIO, is_int_type, is_byte_string, is_unicode_string, make_string_path, IS_PYPY, to_string, matching_string, real_encoding, now, AnyPath, to_bytes ) from pyfakefs import __version__ PERM_READ = 0o400 PERM_WRITE = 0o200 PERM_EXE = 0o100 PERM_DEF = 0o777 PERM_DEF_FILE = 0o666 PERM_ALL = 0o7777 _OpenModes = namedtuple( '_OpenModes', 'must_exist can_read can_write truncate append must_not_exist' ) _OPEN_MODE_MAP = { 'r': (True, True, False, False, False, False), 'w': (False, False, True, True, False, False), 'a': (False, False, True, False, True, False), 'r+': (True, True, True, False, False, False), 'w+': (False, True, True, True, False, False), 'a+': (False, True, True, False, True, False), 'x': (False, False, True, False, False, True), 'x+': (False, True, True, False, False, True) } AnyFileWrapper = Union[ "FakeFileWrapper", "FakeDirWrapper", "StandardStreamWrapper", "FakePipeWrapper" ] AnyString = Union[str, bytes] AnyFile = Union["FakeFile", "FakeDirectory"] if sys.platform.startswith('linux'): _MAX_LINK_DEPTH = 40 else: _MAX_LINK_DEPTH = 32 NR_STD_STREAMS = 3 if sys.platform == 'win32': USER_ID = 1 GROUP_ID = 1 else: USER_ID = os.getuid() GROUP_ID = os.getgid() class OSType(Enum): LINUX = "linux" MACOS = "macos" WINDOWS = "windows" class PatchMode(Enum): OFF = 1 AUTO = 2 ON = 3 def set_uid(uid: int) -> None: global USER_ID USER_ID = uid def set_gid(gid: int) -> None: global GROUP_ID GROUP_ID = gid def reset_ids() -> None: if sys.platform == 'win32': set_uid(1) set_gid(1) else: set_uid(os.getuid()) set_gid(os.getgid()) def is_root() -> bool: return USER_ID == 0 class FakeLargeFileIoException(Exception): def __init__(self, file_path: str) -> None: super(FakeLargeFileIoException, self).__init__( 'Read and write operations not supported for ' 'fake large file: %s' % file_path) def _copy_module(old: ModuleType) -> ModuleType: saved = sys.modules.pop(old.__name__, None) new = __import__(old.__name__) if saved is not None: sys.modules[old.__name__] = saved return new class FakeFile: stat_types = ( 'st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid', 'st_size', 'st_atime', 'st_mtime', 'st_ctime', 'st_atime_ns', 'st_mtime_ns', 'st_ctime_ns' ) def __init__(self, name: AnyStr, st_mode: int = S_IFREG | PERM_DEF_FILE, contents: Optional[AnyStr] = None, filesystem: Optional["FakeFilesystem"] = None, encoding: Optional[str] = None, errors: Optional[str] = None, side_effect: Optional[Callable[["FakeFile"], None]] = None): if filesystem is None: raise ValueError('filesystem shall not be None') self.filesystem: FakeFilesystem = filesystem self._side_effect: Optional[Callable] = side_effect self.name: AnyStr = name self.stat_result = FakeStatResult( filesystem.is_windows_fs, USER_ID, GROUP_ID, now()) if st_mode >> 12 == 0: st_mode |= S_IFREG self.stat_result.st_mode = st_mode self.st_size: int = 0 
self.encoding: Optional[str] = real_encoding(encoding) self.errors: str = errors or 'strict' self._byte_contents: Optional[bytes] = self._encode_contents(contents) self.stat_result.st_size = ( len(self._byte_contents) if self._byte_contents is not None else 0) self.epoch: int = 0 self.parent_dir: Optional[FakeDirectory] = None self.xattr: Dict = {} self.opened_as: AnyString = '' @property def byte_contents(self) -> Optional[bytes]: return self._byte_contents @property def contents(self) -> Optional[str]: if isinstance(self.byte_contents, bytes): return self.byte_contents.decode( self.encoding or locale.getpreferredencoding(False), errors=self.errors) return None @property def st_ctime(self) -> float: return self.stat_result.st_ctime @st_ctime.setter def st_ctime(self, val: float) -> None: self.stat_result.st_ctime = val @property def st_atime(self) -> float: return self.stat_result.st_atime @st_atime.setter def st_atime(self, val: float) -> None: self.stat_result.st_atime = val @property
Apache License 2.0
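A small sketch of reading st_mtime from a fake file; it assumes FakeFilesystem.create_file returns the created FakeFile, which is how the module is normally used.

from pyfakefs.fake_filesystem import FakeFilesystem

fs = FakeFilesystem()
fake_file = fs.create_file("/data/notes.txt", contents="hello")

# st_mtime mirrors stat_result.st_mtime and starts at the fake creation time.
print(fake_file.st_mtime)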
koalixswitzerland/koalixcrm
koalixcrm/plugin.py
PluginProcessor.resolve_name
python
def resolve_name(name, package, level): if not hasattr(package, 'rindex'): raise ValueError("'package' not set to a string") dot = len(package) for x in xrange(level, 1, -1): try: dot = package.rindex('.', 0, dot) except ValueError: raise ValueError("attempted relative import beyond top-level " "package") return "%s.%s" % (package[:dot], name)
Return the absolute name of the module to be imported.
https://github.com/koalixswitzerland/koalixcrm/blob/87d125379845d6ab990c19500d63cbed4051040a/koalixcrm/plugin.py#L26-L37
import sys from django.conf import settings class PluginProcessor(object): @staticmethod def converttorelativestring(pluginmodule, nameofinline): output = [] if len(nameofinline) != 0: output.append(pluginmodule.__name__ + "." + nameofinline[0]) return output else: return [] @staticmethod def getAllPlugins(): allpluginmodules = [] for plugin in settings.KOALIXCRM_PLUGINS: temp = __import__(plugin + ".admin") allpluginmodules.append(sys.modules[plugin + ".admin"]); return allpluginmodules @staticmethod
BSD 3-Clause New or Revised License
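A quick illustration of resolve_name with hypothetical package names; note the module targets Python 2 (it uses xrange) and importing it requires a Django installation.

from koalixcrm.plugin import PluginProcessor

# level=2 strips one trailing package component before appending the name.
print(PluginProcessor.resolve_name("admin", "myplugin.subpackage", 2))
# -> "myplugin.admin"

# Climbing past the top-level package raises
# ValueError("attempted relative import beyond top-level package").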
twitivity/twitivity
twitivity.py
Activity.webhooks
python
def webhooks(self) -> json: try: return self.api(method="GET", endpoint=f"all/webhooks.json").json() except Exception as e: raise e
Returns all environments, webhook URLs and their statuses for the authenticating app. Only one webhook URL can be registered to each environment.
https://github.com/twitivity/twitivity/blob/ca6d29413423409bc8e9c8841a21c6c3f86f60ea/twitivity.py#L98-L105
import json import hmac import os import hashlib import base64 import re import requests from typing import NoReturn from abc import ABC, abstractmethod from tweepy.error import TweepError from tweepy import OAuthHandler from flask import Flask, request class Activity: _protocol: str = "https:/" _host: str = "api.twitter.com" _version: str = "1.1" _product: str = "account_activity" _auth: OAuthHandler = OAuthHandler( os.environ["consumer_key"], os.environ["consumer_secret"] ) _auth.set_access_token( os.environ["access_token"], os.environ["access_token_secret"] ) def api(self, method: str, endpoint: str, data: dict = None) -> json: try: with requests.Session() as r: response = r.request( url="/".join( [ self._protocol, self._host, self._version, self._product, endpoint, ] ), method=method, auth=self._auth.apply_auth(), data=data, ) return response except TweepError: raise def register_webhook(self, callback_url: str) -> json: try: return self.api( method="POST", endpoint=f"all/{os.environ['env_name']}/webhooks.json", data={"url": callback_url}, ).json() except Exception as e: raise e def refresh(self, webhook_id: str) -> NoReturn: try: return self.api( method="PUT", endpoint=f"all/{os.environ['env_name']}/webhooks/{webhook_id}.json", ) except Exception as e: raise e def delete (self, webhook_id: str) -> NoReturn: try: return self.api( method="DELETE", endpoint=f"all/{os.environ['env_name']}/webhooks/{webhook_id}.json", ) except Exception as e: raise e def subscribe(self) -> NoReturn: try: return self.api( method="POST", endpoint=f"all/{os.environ['env_name']}/subscriptions.json", ) except Exception: raise
MIT License
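A small sketch of calling webhooks(); the credentials are placeholders and have to be present in the environment before twitivity is imported, because the OAuthHandler is built when the Activity class body runs.

import os

os.environ.update({
    "consumer_key": "...",         # placeholder
    "consumer_secret": "...",      # placeholder
    "access_token": "...",         # placeholder
    "access_token_secret": "...",  # placeholder
    "env_name": "prod",            # placeholder environment label
})

from twitivity import Activity

account_activity = Activity()
# Lists every environment with its registered webhook URL and status.
print(account_activity.webhooks())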
iscre4m/pycarddeck
pyCardDeck/deck.py
Deck.__str__
python
def __str__(self) -> str: if self.name: return self.name else: return 'Deck of cards'
Used for the human-readable representation of the object, invoked by str(Deck). This method is also called when you provide the instance as an argument to str.format(); you can pass your Deck directly and its name will be used. :return: Name of the deck if it has a name, or 'Deck of cards' if it has none :rtype: string
https://github.com/iscre4m/pycarddeck/blob/2171ca53852aeb690019a27f2e774ffb5efaff5c/pyCardDeck/deck.py#L477-L493
import logging import os import yaml import jsonpickle from typing import List, Union, Optional, Iterator from random import shuffle, randint, randrange from .errors import OutOfCards, NotACard, NoCards, CardNotFound, UnknownFormat from .cards import CardType log = logging.getLogger(__name__) class Deck: def __init__(self, cards: Optional[List[CardType]] = None, reshuffle: object = True, name: str = None, discard: Optional['Deck'] = None): self.name = name if cards is None: self._cards = [] else: self._cards = cards if discard is None: self._discard_pile = [] else: self._discard_pile = discard self._reshuffle = reshuffle self.set_file_location("exported_deck") def _get_card(self, position: str = "top") -> CardType: if len(self._cards): positions = { "top": 0, "bottom": len(self._cards) - 1, "random": randrange(len(self._cards)) } card = self._cards.pop(positions[position]) self.reshuffle_if_empty() log.debug('Card drawn from %s: %s', (position, card)) return card elif not self._reshuffle: log.debug('You tried to draw. No more cards to be drawn. Position: %s', position) raise OutOfCards('You tried to draw. No more cards to be drawn. Position: %s', position) else: log.debug('You tried to draw from an empty deck. Position: %s', position) raise NoCards('You tried to draw from an empty deck. Position: %s', position) def draw(self) -> CardType: return self._get_card("top") def draw_bottom(self) -> CardType: return self._get_card("bottom") def draw_random(self) -> CardType: return self._get_card("random") def draw_specific(self, specific_card: CardType) -> CardType: log.debug('Attempting to find card: %s', specific_card) if len(self._cards): for available_card in self._cards: if _card_compare(specific_card, available_card): card = available_card break else: log.debug('Specific card not found in the deck') raise CardNotFound('Specific card not found in the deck') self._cards.remove(card) self.reshuffle_if_empty() log.debug('Specific card drawn: %s', card) return card else: log.debug('You tried to draw a specific card from an empty deck') raise NoCards('You tried to draw a specific card from an empty deck') def card_exists(self, card: CardType) -> bool: found = False for available_card in self._cards: if _card_compare(card, available_card): found = True break log.debug('Card %s exists in the deck: %s', card, found) return found def shuffle(self) -> None: if len(self._cards): shuffle(self._cards) log.debug('Deck shuffled') else: log.warning('You tried to shuffle an empty deck') raise NoCards('You tried to shuffle an empty deck') def reshuffle_if_empty(self) -> None: if not len(self._cards) and self._reshuffle: self.shuffle_back() def shuffle_back(self) -> None: for card in self._discard_pile: self._cards.append(card) self.shuffle() if isinstance(self._discard_pile, Deck): self._discard_pile.clear() else: self._discard_pile = [] log.debug('Cards have been shuffled back from the discard pile') def discard(self, card: CardType) -> None: log.debug("Card being discarded: %s", card) if card or type(card) == int: if isinstance(self._discard_pile, Deck): self._discard_pile.add_single(card, 0) else: self._discard_pile.append(card) log.debug('Card %s discarded', card) else: log.warning('You tried to insert %s (rank(%s) into a discard pile', card, type(card).__name__) raise NotACard('You tried to insert {} (rank({}) into a discard pile' .format(card, type(card).__name__)) def clear(self) -> None: self._cards = [] def add_single(self, card: CardType, position: int = False) -> None: if position is not False: 
self._cards.insert(position, card) log.debug("Card %s inserted to position %i", card, position) log.debug(self._cards) else: self._cards.insert(randint(0, len(self._cards)), card) log.debug('Card %s shuffled into the deck', card) def add_many(self, cards: List[CardType]) -> None: for card in cards: self.add_single(card) log.debug('New cards shuffled into the deck') def show_top(self, number: int) -> List[CardType]: return self._cards[0:number] def set_file_location(self, location) -> None: self._save_location = os.path.abspath(os.path.expanduser(location)) def export(self, fmt: str, to_file: bool = False, location: str = None) -> str: format_stripped = fmt.lower().strip() if location: self.set_file_location(location) else: self.set_file_location("exported_deck") temp_location = self._save_location self._save_location = None exported = _get_exported_string(format_stripped, self) self._save_location = temp_location if to_file: with open(self._save_location, 'w') as target_file: target_file.writelines(exported) log.debug("File exported to: %s", self._save_location) return exported def load(self, to_load: str, is_file: bool = False) -> None: if is_file: self.set_file_location(to_load) with open(self._save_location, 'r') as file: loadable = file.read() else: loadable = to_load try: result = jsonpickle.decode(loadable) log.debug("loading JSON") except Exception: result = yaml.load(loadable) log.debug("loading YAML") try: del result.__dict__["_save_location"] self.__dict__.update(result.__dict__) except AttributeError: raise UnknownFormat def load_standard_deck(self) -> None: location = os.path.join(os.path.dirname(__file__), "standard_deck.yml") data = yaml.load(open(location)).__dict__ del data["_save_location"] self.__dict__.update(data) @property def cards_left(self) -> int: if len(self._cards): return len(self._cards) else: return 0 @property def discarded(self) -> int: return len(self._discard_pile) @property def json(self) -> str: return self.export("json", to_file=False) @property def yaml(self) -> str: return self.export("yaml") @property def empty(self) -> bool: if len(self._cards): return False else: return True @property def file_location(self) -> str: return self._save_location def __repr__(self) -> str: return 'Deck(cards={0}, discarded={3}, reshuffle={1}, name={2})' .format(self.cards_left, self._reshuffle, self.name, self.discarded)
MIT License
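A tiny example of how __str__ behaves for named and unnamed decks; the import path follows the file path shown above.

from pyCardDeck.deck import Deck

named = Deck(name="Poker deck")
anonymous = Deck()

print(str(named))                        # Poker deck
print(str(anonymous))                    # Deck of cards
print("Drawing from {}.".format(named))  # Drawing from Poker deck.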
beanbaginc/django-evolution
django_evolution/mutations.py
BaseModelMutation.mutate
python
def mutate(self, mutator, model): raise NotImplementedError
Schedule a model mutation on the mutator. This will instruct the mutator to perform one or more database mutations for a model. Those will be scheduled and later executed on the database, if not optimized out. Args: mutator (django_evolution.mutators.ModelMutator): The mutator to perform an operation on. model (MockModel): The model being mutated. Raises: django_evolution.errors.EvolutionNotImplementedError: The configured mutation is not supported on this type of database.
https://github.com/beanbaginc/django-evolution/blob/fb76e44a2361a69a440dca086c0cc67ac6a4300d/django_evolution/mutations.py#L522-L541
from __future__ import unicode_literals import inspect from functools import partial from django.db import models from django.db.utils import DEFAULT_DB_ALIAS from django_evolution.compat import six from django_evolution.compat.datastructures import OrderedDict from django_evolution.consts import UpgradeMethod from django_evolution.db import EvolutionOperationsMulti from django_evolution.db.sql_result import SQLResult from django_evolution.db.state import DatabaseState from django_evolution.errors import (CannotSimulate, SimulationFailure, EvolutionNotImplementedError) from django_evolution.mock_models import MockModel, MockRelated, create_field from django_evolution.signature import (AppSignature, ConstraintSignature, FieldSignature, IndexSignature, ProjectSignature) from django_evolution.utils.models import get_database_for_model_name class Simulation(object): def __init__(self, mutation, app_label, project_sig, database_state, legacy_app_label=None, database=DEFAULT_DB_ALIAS): assert isinstance(project_sig, ProjectSignature), 'project_sig must be a ProjectSignature instance' assert (database_state is None or isinstance(database_state, DatabaseState)), 'database_state must be None or a DatabaseState instance' self.mutation = mutation self.app_label = app_label self.legacy_app_label = legacy_app_label or app_label self.project_sig = project_sig self.database_state = database_state self.database = database def get_evolver(self): return EvolutionOperationsMulti(self.database, self.database_state).get_evolver() def get_app_sig(self): app_sig = self.project_sig.get_app_sig(self.app_label) if (app_sig is None and self.legacy_app_label is not None and self.legacy_app_label != self.app_label): app_sig = self.project_sig.get_app_sig(self.legacy_app_label) if app_sig: return app_sig self.fail('The application could not be found in the signature.') def get_model_sig(self, model_name): model_sig = self.get_app_sig().get_model_sig(model_name) if model_sig: return model_sig self.fail('The model could not be found in the signature.', model_name=model_name) def get_field_sig(self, model_name, field_name): field_sig = self.get_model_sig(model_name).get_field_sig(field_name) if field_sig: return field_sig self.fail('The field could not be found in the signature.', model_name=model_name, field_name=field_name) def fail(self, error, **error_vars): msg = '%s %s' % (self.mutation.simulation_failure_error, error) error_dict = { 'app_label': self.app_label, } error_dict.update( (key, getattr(self.mutation, value)) for key, value in six.iteritems(self.mutation.error_vars) ) error_dict.update(error_vars) raise SimulationFailure(msg % error_dict) class BaseMutation(object): simulation_failure_error = 'Cannot simulate the mutation.' 
error_vars = {} def generate_hint(self): return '%s(%s)' % (self.__class__.__name__, ', '.join(self.get_hint_params())) def get_hint_params(self): return [] def generate_dependencies(self, app_label, **kwargs): return {} def run_simulation(self, **kwargs): self.simulate(Simulation(self, **kwargs)) def simulate(self, simulation): raise NotImplementedError def mutate(self, mutator): raise NotImplementedError def is_mutable(self, app_label, project_sig, database_state, database): return False def serialize_value(self, value): if isinstance(value, six.string_types): value = repr(six.text_type(value)) if value.startswith('u'): value = value[1:] elif isinstance(value, list): value = '[%s]' % ', '.join( self.serialize_value(item) for item in value ) elif isinstance(value, tuple): if len(value) == 1: suffix = ',' else: suffix = '' value = '(%s%s)' % ( ', '.join( self.serialize_value(item) for item in value ), suffix, ) elif isinstance(value, dict): value = '{%s}' % ', '.join( '%s: %s' % (self.serialize_value(dict_key), self.serialize_value(dict_value)) for dict_key, dict_value in six.iteritems(value) ) elif inspect.isclass(value): if value.__module__.startswith('django.db.models'): prefix = 'models.' else: prefix = '' return prefix + value.__name__ elif hasattr(value, 'deconstruct'): path, args, kwargs = value.deconstruct() if path.startswith('django.db.models'): path = 'models.%s' % path.rsplit('.', 1)[-1] parts = ['%s(' % path] if args: parts.append(', '.join( self.serialize_value(arg) for arg in args )) if kwargs: parts.append(', '.join( self.serialize_attr(key, value) for key, value in six.iteritems(kwargs) )) parts.append(')') value = ''.join(parts) else: value = repr(value) return value def serialize_attr(self, attr_name, attr_value): return '%s=%s' % (attr_name, self.serialize_value(attr_value)) def __hash__(self): return id(self) def __eq__(self, other): return (type(self) is type(other) and self.generate_hint() == other.generate_hint()) def __str__(self): return self.generate_hint() def __repr__(self): return '<%s>' % self class BaseModelMutation(BaseMutation): error_vars = dict({ 'model_name': 'model_name', }, **BaseMutation.error_vars) def __init__(self, model_name): super(BaseModelMutation, self).__init__() self.model_name = model_name def evolver(self, model, database_state, database=None): if database is None: database = get_database_for_model_name(model.app_label, model.model_name) return EvolutionOperationsMulti(database, database_state).get_evolver()
BSD 3-Clause New or Revised License
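A schematic subclass showing where mutate() fits; this is not a real mutation shipped with django-evolution, just an outline of the override pattern the docstring describes, with the actual scheduling calls left out.

from django_evolution.mutations import BaseModelMutation

class NoOpModelMutation(BaseModelMutation):
    """Illustrative mutation that schedules nothing on the database."""

    def simulate(self, simulation):
        # A real mutation would update the signature via the simulation here.
        pass

    def mutate(self, mutator, model):
        # A real mutation would schedule one or more operations on the
        # mutator here; this sketch intentionally performs no work.
        pass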
tanwanirahul/django-batch-requests
batch_requests/settings.py
import_class
python
def import_class(class_path): module_name, class_name = class_path.rsplit(".", 1) module = import_module(module_name) claz = getattr(module, class_name) return claz
Imports and returns the class identified by the given dotted class path.
https://github.com/tanwanirahul/django-batch-requests/blob/9c5afc42f7542f466247f4ffed9c44e1c49fa20d/batch_requests/settings.py#L27-L34
from django.conf import settings from django.utils.importlib import import_module import multiprocessing DEFAULTS = { "HEADERS_TO_INCLUDE": ["HTTP_USER_AGENT", "HTTP_COOKIE"], "DEFAULT_CONTENT_TYPE": "application/json", "USE_HTTPS": False, "EXECUTE_PARALLEL": False, "CONCURRENT_EXECUTOR": "batch_requests.concurrent.executor.ThreadBasedExecutor", "NUM_WORKERS": multiprocessing.cpu_count() * 4, "ADD_DURATION_HEADER": True, "DURATION_HEADER_NAME": "batch_requests.duration", "MAX_LIMIT": 20 } USER_DEFINED_SETTINGS = getattr(settings, 'BATCH_REQUESTS', {})
MIT License
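A short example of import_class, reusing the executor path that already appears in DEFAULTS; note the module itself imports django.utils.importlib, so this assumes a Django version that still provides it.

from batch_requests.settings import import_class

ExecutorClass = import_class(
    "batch_requests.concurrent.executor.ThreadBasedExecutor")

# Equivalent to: from batch_requests.concurrent.executor import ThreadBasedExecutor
print(ExecutorClass.__name__)  # ThreadBasedExecutor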
decred/tinydecred
decred/decred/util/helpers.py
mktime
python
def mktime(year, month=None, day=None): if month: if day: return calendar.timegm( time.strptime( "%i-%s-%s" % (year, str(month).zfill(2), str(day).zfill(2)), "%Y-%m-%d", ) ) return calendar.timegm( time.strptime("%i-%s" % (year, str(month).zfill(2)), "%Y-%m") ) return calendar.timegm(time.strptime(str(year), "%Y"))
Make a timestamp from year, month, day. Args: year (int): the year. month (int), optional: the month. day (int), optional: the day.
https://github.com/decred/tinydecred/blob/f7f7d9f7da8d49d9ae9a72e5579b07a3b8572267/decred/decred/util/helpers.py#L50-L70
import calendar import configparser import logging from logging.handlers import RotatingFileHandler import os import platform import sys import time import traceback from urllib.parse import urlsplit, urlunsplit from appdirs import AppDirs def formatTraceback(err): return "".join(traceback.format_exception(None, err, err.__traceback__)) def mkdir(path): if os.path.isdir(path): return True if os.path.isfile(path): return False os.makedirs(path) return True
ISC License
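A few concrete values for mktime; calendar.timegm works in UTC, so these are midnight-UTC timestamps.

from decred.util.helpers import mktime

print(mktime(2020))         # 1577836800 -> 2020-01-01 00:00:00 UTC
print(mktime(2020, 5))      # 1588291200 -> 2020-05-01 00:00:00 UTC
print(mktime(2020, 5, 17))  # 1589673600 -> 2020-05-17 00:00:00 UTC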
googleapis/python-speech
google/cloud/speech_v1/services/speech/transports/grpc_asyncio.py
SpeechGrpcAsyncIOTransport.long_running_recognize
python
def long_running_recognize( self, ) -> Callable[ [cloud_speech.LongRunningRecognizeRequest], Awaitable[operations_pb2.Operation] ]: if "long_running_recognize" not in self._stubs: self._stubs["long_running_recognize"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1.Speech/LongRunningRecognize", request_serializer=cloud_speech.LongRunningRecognizeRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["long_running_recognize"]
r"""Return a callable for the long running recognize method over gRPC. Performs asynchronous speech recognition: receive results via the google.longrunning.Operations interface. Returns either an ``Operation.error`` or an ``Operation.response`` which contains a ``LongRunningRecognizeResponse`` message. For more information on asynchronous speech recognition, see the `how-to <https://cloud.google.com/speech-to-text/docs/async-recognize>`__. Returns: Callable[[~.LongRunningRecognizeRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server.
https://github.com/googleapis/python-speech/blob/cc97a580bb4e693a1c3e5170064164e0c5d8482b/google/cloud/speech_v1/services/speech/transports/grpc_asyncio.py#L280-L310
import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.api_core import operations_v1 from google.auth import credentials as ga_credentials from google.auth.transport.grpc import SslCredentials import packaging.version import grpc from grpc.experimental import aio from google.cloud.speech_v1.types import cloud_speech from google.longrunning import operations_pb2 from .base import SpeechTransport, DEFAULT_CLIENT_INFO from .grpc import SpeechGrpcTransport class SpeechGrpcAsyncIOTransport(SpeechTransport): _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "speech.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) def __init__( self, *, host: str = "speech.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: credentials = False self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsAsyncClient: if self._operations_client is None: self._operations_client = 
operations_v1.OperationsAsyncClient( self.grpc_channel ) return self._operations_client @property def recognize( self, ) -> Callable[ [cloud_speech.RecognizeRequest], Awaitable[cloud_speech.RecognizeResponse] ]: if "recognize" not in self._stubs: self._stubs["recognize"] = self.grpc_channel.unary_unary( "/google.cloud.speech.v1.Speech/Recognize", request_serializer=cloud_speech.RecognizeRequest.serialize, response_deserializer=cloud_speech.RecognizeResponse.deserialize, ) return self._stubs["recognize"] @property
Apache License 2.0
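A hedged sketch of driving this RPC through the public async client rather than the transport stub directly; the Cloud Storage URI is a placeholder, credentials are assumed to come from the environment, and the keyword arguments should be checked against the installed library version.

import asyncio
from google.cloud import speech_v1

async def transcribe() -> None:
    client = speech_v1.SpeechAsyncClient()
    config = speech_v1.RecognitionConfig(
        encoding=speech_v1.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code="en-US",
    )
    audio = speech_v1.RecognitionAudio(uri="gs://my-bucket/audio.raw")  # placeholder

    # Returns a long-running operation backed by the stub defined above.
    operation = await client.long_running_recognize(config=config, audio=audio)
    response = await operation.result()
    for result in response.results:
        print(result.alternatives[0].transcript)

asyncio.run(transcribe())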