Dataset columns:
  repository_name: string, length 7 to 107
  function_path: string, length 4 to 190
  function_identifier: string, length 1 to 236
  language: string, 1 class
  function: string, length 9 to 647k
  docstring: string, length 5 to 488k
  function_url: string, length 71 to 285
  context: string, length 0 to 2.51M
  license: string, 5 classes
mwojnars/hypertag
src/hypertag/core/dom.py
DOM.copy
python
def copy(self):
    dup = copy(self)
    dup.nodes = [node.copy() for node in self.nodes]
    return dup
Mostly a deep copy of self; the exceptions are nodes' attributes, which are shallow-copied, and their `tag` links, which are not copied at all.
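The shallow-versus-deep split described above can be illustrated without the Hypertag classes. A minimal standalone sketch (the Node class below is hypothetical, not part of hypertag):

from copy import copy

class Node:
    def __init__(self, attrs, children=()):
        self.attrs = attrs            # shared (shallow) after copy
        self.nodes = list(children)   # duplicated (deep) after copy

    def copy(self):
        dup = copy(self)                              # shallow copy of the node itself
        dup.nodes = [n.copy() for n in self.nodes]    # but recurse into children
        return dup

root = Node({'class': 'title'}, [Node({'id': 'x'})])
dup = root.copy()
assert dup.nodes is not root.nodes                    # node list is new
assert dup.nodes[0] is not root.nodes[0]              # children are new objects
assert dup.nodes[0].attrs is root.nodes[0].attrs      # attributes are still shared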
https://github.com/mwojnars/hypertag/blob/31b6a08cfaef57414ea553ee76b152e3fdc94d0f/src/hypertag/core/dom.py#L143-L149
import re, itertools from copy import copy from types import GeneratorType from hypertag.nifty.util import Object from hypertag.core.errors import VoidTagEx, TypeErrorEx def add_indent(text, indent, re_start = re.compile(r'\n(?=.)')): if not indent: return text return re_start.sub('\n' + indent, text) def del_indent(text, indent = None): if indent is None: indent = get_indent(text) if text.startswith(indent): text = text[len(indent):] return text.replace('\n' + indent, '\n') def get_indent(text): lines = text.split('\n') lines = list(filter(None, [l.rstrip() for l in lines])) if not lines: return '' for i, column in enumerate(zip(*lines)): if not column[0].isspace() or min(column) != max(column): return lines[0][:i] else: size = min(map(len, lines)) return lines[0][:size] class DOM: nodes = None def __init__(self, *nodes, **params): _strict = params.pop('_strict', True) if params: raise TypeErrorEx('unrecognized keyword argument "%s" in DOM.__init__()' % list(params.keys())[0]) self.nodes = self._flatten(nodes) if _strict else list(nodes) def __bool__(self): return bool(self.nodes) def __len__(self): return len(self.nodes) def __iter__(self): return iter(self.nodes) def __getitem__(self, pos): if isinstance(pos, int): return self.nodes[pos] if isinstance(pos, (int, slice)): return DOM(self.nodes[pos], _strict = False) return self.select(pos) @staticmethod def node(*args, **kwargs): return DOM(DOM.Node(*args, **kwargs)) @staticmethod def text(*args, **kwargs): return DOM(DOM.Text(*args, **kwargs)) @staticmethod def _flatten(nodes): result = [] for n in nodes: if n is None: continue if isinstance(n, (list, DOM, GeneratorType)): result += DOM._flatten(n) elif isinstance(n, DOM.Node): result.append(n) else: raise TypeErrorEx("found %s in a DOM, expected DOM.Node" % type(n)) return result def set_indent(self, indent): for n in self.nodes: n.set_indent(indent) def render(self): return ''.join(node.render() for node in self.nodes) def tree(self, indent = '', step = ' '): return ''.join(node.tree(indent, step) for node in self.nodes)
MIT License
pcah/python-clean-architecture
pca/utils/collections.py
OrderedSet.discard
python
def discard(self, key):
    if key in self:
        i = self.items.index(key)
        del self.items[i]
        del self.map[key]
        for k, v in self.map.items():
            if v >= i:
                self.map[k] = v - 1
Remove an element. Do not raise an exception if absent. The MutableSet mixin uses this to implement the .remove() method, which *does* raise an error when asked to remove a non-existent item.
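A brief usage sketch, assuming the OrderedSet class from this row's context:

s = OrderedSet(['a', 'b', 'c'])
s.discard('b')             # removes 'b' and renumbers the index map
s.discard('missing')       # silently does nothing
assert s.items == ['a', 'c']
assert s.index('c') == 1   # index shifted down after the removal

try:
    s.remove('missing')    # remove() comes from the MutableSet mixin...
except KeyError:
    pass                   # ...and it does raise for absent items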
https://github.com/pcah/python-clean-architecture/blob/20630d0b3b4c00f6503a26cc98c45df12bc31b3b/pca/utils/collections.py#L182-L194
import typing as t from collections.abc import MutableSet from functools import singledispatch SliceAll = slice(None) generator_type = type(e for e in ()) none_type = type(None) def sget(target, key: str, default: t.Any = None): key_iter = key.split(".") if hasattr(key, "split") else [key] value = target for part in key_iter: try: value = getattr(value, part) except (TypeError, AttributeError): try: value = value[part] except (TypeError, KeyError): try: value = value[int(part)] except (TypeError, ValueError, IndexError, KeyError): return default return value def is_iterable(obj): return hasattr(obj, "__iter__") and not isinstance(obj, str) def is_iterable_and_not_tuple(obj): return is_iterable(obj) and not isinstance(obj, tuple) class OrderedSet(MutableSet): def __init__(self, iterable=None): self.items = [] self.map = {} if iterable is not None: self |= iterable def __len__(self): return len(self.items) def __getitem__(self, index): if index == SliceAll: return self elif hasattr(index, "__index__") or isinstance(index, slice): result = self.items[index] if isinstance(result, list): return OrderedSet(result) else: return result elif is_iterable_and_not_tuple(index): return OrderedSet([self.items[i] for i in index]) else: raise TypeError("Don't know how to index an OrderedSet by %r" % index) def copy(self): return OrderedSet(self) def __getstate__(self): if len(self) == 0: return (None,) else: return list(self) def __setstate__(self, state): if state == (None,): self.__init__([]) else: self.__init__(state) def __contains__(self, key): return key in self.map def add(self, key): if key not in self.map: self.map[key] = len(self.items) self.items.append(key) return self.map[key] append = add def update(self, sequence): item_index = None try: for item in sequence: item_index = self.add(item) except TypeError: raise ValueError("Argument needs to be an iterable, got %s" % type(sequence)) return item_index def index(self, key): if is_iterable_and_not_tuple(key): return [self.index(subkey) for subkey in key] return self.map[key] def pop(self): if not self.items: raise KeyError("Set is empty") elem = self.items[-1] del self.items[-1] del self.map[elem] return elem
MIT License
drexly/openhgsenti
lib/django/contrib/gis/db/models/query.py
GeoQuerySet.perimeter
python
def perimeter(self, **kwargs):
    return self._distance_attribute('perimeter', None, **kwargs)
Returns the perimeter of the geometry field as a `Distance` object stored in a `perimeter` attribute on each element of this GeoQuerySet.
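A hedged usage sketch of this deprecated GeoQuerySet API; the Zipcode model is hypothetical and assumes the old GeoManager-based setup of that Django era:

from django.contrib.gis.db import models

class Zipcode(models.Model):              # hypothetical model
    code = models.CharField(max_length=5)
    poly = models.PolygonField()
    objects = models.GeoManager()         # needed for GeoQuerySet methods in this Django version

for zipcode in Zipcode.objects.all().perimeter():
    print(zipcode.code, zipcode.perimeter.m)   # `perimeter` is a Distance object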
https://github.com/drexly/openhgsenti/blob/d7806f58c81127d32091d9875a99ac13aef94a8a/lib/django/contrib/gis/db/models/query.py#L267-L272
import warnings from django.contrib.gis.db.models import aggregates from django.contrib.gis.db.models.fields import ( GeometryField, LineStringField, PointField, get_srid_info, ) from django.contrib.gis.db.models.lookups import GISLookup from django.contrib.gis.db.models.sql import ( AreaField, DistanceField, GeomField, GMLField, ) from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.measure import Area, Distance from django.db import connections from django.db.models.expressions import RawSQL from django.db.models.fields import Field from django.db.models.query import QuerySet from django.utils import six from django.utils.deprecation import ( RemovedInDjango20Warning, RemovedInDjango110Warning, ) class GeoQuerySet(QuerySet): def area(self, tolerance=0.05, **kwargs): procedure_args, geo_field = self._spatial_setup( 'area', field_name=kwargs.get('field_name')) s = {'procedure_args': procedure_args, 'geo_field': geo_field, 'setup': False, } connection = connections[self.db] backend = connection.ops if backend.oracle: s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s' s['procedure_args']['tolerance'] = tolerance s['select_field'] = AreaField('sq_m') elif backend.postgis or backend.spatialite: if backend.geography: s['select_field'] = AreaField('sq_m') elif not geo_field.geodetic(connection): s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection))) else: raise Exception('Area on geodetic coordinate systems not supported.') return self._spatial_attribute('area', s, **kwargs) def centroid(self, **kwargs): return self._geom_attribute('centroid', **kwargs) def collect(self, **kwargs): warnings.warn( "The collect GeoQuerySet method is deprecated. Use the Collect() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.Collect, **kwargs) def difference(self, geom, **kwargs): return self._geomset_attribute('difference', geom, **kwargs) def distance(self, geom, **kwargs): return self._distance_attribute('distance', geom, **kwargs) def envelope(self, **kwargs): return self._geom_attribute('envelope', **kwargs) def extent(self, **kwargs): warnings.warn( "The extent GeoQuerySet method is deprecated. Use the Extent() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.Extent, **kwargs) def extent3d(self, **kwargs): warnings.warn( "The extent3d GeoQuerySet method is deprecated. 
Use the Extent3D() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.Extent3D, **kwargs) def force_rhr(self, **kwargs): return self._geom_attribute('force_rhr', **kwargs) def geojson(self, precision=8, crs=False, bbox=False, **kwargs): backend = connections[self.db].ops if not backend.geojson: raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ ' 'support GeoJSON serialization.') if not isinstance(precision, six.integer_types): raise TypeError('Precision keyword must be set with an integer.') options = 0 if crs and bbox: options = 3 elif bbox: options = 1 elif crs: options = 2 s = {'desc': 'GeoJSON', 'procedure_args': {'precision': precision, 'options': options}, 'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s', } return self._spatial_attribute('geojson', s, **kwargs) def geohash(self, precision=20, **kwargs): s = {'desc': 'GeoHash', 'procedure_args': {'precision': precision}, 'procedure_fmt': '%(geo_col)s,%(precision)s', } return self._spatial_attribute('geohash', s, **kwargs) def gml(self, precision=8, version=2, **kwargs): backend = connections[self.db].ops s = {'desc': 'GML', 'procedure_args': {'precision': precision}} if backend.postgis: s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s' s['procedure_args'] = {'precision': precision, 'version': version} if backend.oracle: s['select_field'] = GMLField() return self._spatial_attribute('gml', s, **kwargs) def intersection(self, geom, **kwargs): return self._geomset_attribute('intersection', geom, **kwargs) def kml(self, **kwargs): s = {'desc': 'KML', 'procedure_fmt': '%(geo_col)s,%(precision)s', 'procedure_args': {'precision': kwargs.pop('precision', 8)}, } return self._spatial_attribute('kml', s, **kwargs) def length(self, **kwargs): return self._distance_attribute('length', None, **kwargs) def make_line(self, **kwargs): warnings.warn( "The make_line GeoQuerySet method is deprecated. Use the MakeLine() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs) def mem_size(self, **kwargs): return self._spatial_attribute('mem_size', {}, **kwargs) def num_geom(self, **kwargs): return self._spatial_attribute('num_geom', {}, **kwargs) def num_points(self, **kwargs): return self._spatial_attribute('num_points', {}, **kwargs)
Apache License 2.0
jasonrollins/shareplum
shareplum/site.py
_Site2007.list
python
def list(self, list_name, exclude_hidden_fields=False):
    return _List2007(
        self._session,
        list_name,
        self._url,
        self._verify_ssl,
        self.users,
        self.huge_tree,
        self.timeout,
        exclude_hidden_fields=exclude_hidden_fields,
        site_url=self.site_url,
    )
SharePoint Lists Web Service. From the Microsoft Developer Network: the Lists Web service provides methods for working with SharePoint lists, content types, list items, and files.
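A hedged usage sketch; the site URL and credentials are placeholders, and a reachable SharePoint 2007-style endpoint is assumed because the site object queries the server on construction:

from shareplum import Site
from shareplum.site import Version

site = Site('https://sharepoint.example.com/sites/MySite',   # placeholder URL
            version=Version.v2007,
            auth=('DOMAIN\\user', 'password'))               # any requests-compatible auth works
tasks = site.list('Task List', exclude_hidden_fields=True)   # returns a _List2007 wrapper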
https://github.com/jasonrollins/shareplum/blob/ca01e1e3aa136ebede67e04ebb31aa0aa51d6f54/shareplum/site.py#L369-L387
from typing import Any from typing import Dict from typing import List from typing import Optional import requests from requests.packages.urllib3.util.retry import Retry from requests_toolbelt import SSLAdapter from lxml import etree from .request_helper import get, post from .list import _List2007, _List365 from .folder import _Folder from .soap import Soap from .version import __version__ from enum import Enum class Version(Enum): v2007 = 1 v2010 = 2 v2013 = 3 v2016 = 4 v2019 = 5 v365 = 6 class _Site2007: def __init__(self, site_url, auth=None, authcookie=None, verify_ssl=True, ssl_version=None, huge_tree=False, timeout=None, retry=None): self.site_url = site_url self._verify_ssl = verify_ssl if retry is None: retry = Retry(total=5, read=5, connect=5, backoff_factor=0.3, status_forcelist=[500, 502, 503, 504]) http_adaptor = requests.adapters.HTTPAdapter(max_retries=retry) https_adaptor = http_adaptor self._session = requests.Session() if ssl_version is not None: https_adaptor = SSLAdapter(ssl_version, max_retries=retry) self._session.mount("https://", https_adaptor) self._session.mount("http://", http_adaptor) self._session.headers.update({"user-agent": "shareplum/%s" % __version__}) if authcookie is not None: self._session.cookies = authcookie else: self._session.auth = auth self.huge_tree = huge_tree self.timeout = timeout self.last_request = None self._services_url = { "Alerts": "/_vti_bin/Alerts.asmx", "Authentication": "/_vti_bin/Authentication.asmx", "Copy": "/_vti_bin/Copy.asmx", "Dws": "/_vti_bin/Dws.asmx", "Forms": "/_vti_bin/Forms.asmx", "Imaging": "/_vti_bin/Imaging.asmx", "DspSts": "/_vti_bin/DspSts.asmx", "Lists": "/_vti_bin/lists.asmx", "Meetings": "/_vti_bin/Meetings.asmx", "People": "/_vti_bin/People.asmx", "Permissions": "/_vti_bin/Permissions.asmx", "SiteData": "/_vti_bin/SiteData.asmx", "Sites": "/_vti_bin/Sites.asmx", "Search": "/_vti_bin/Search.asmx", "UserGroup": "/_vti_bin/usergroup.asmx", "Versions": "/_vti_bin/Versions.asmx", "Views": "/_vti_bin/Views.asmx", "WebPartPages": "/_vti_bin/WebPartPages.asmx", "Webs": "/_vti_bin/Webs.asmx", } self.site_info = self.get_site() self.users = self.get_users() self.version = "2007" def _url(self, service): return "".join([self.site_url, self._services_url[service]]) def _headers(self, soap_action): headers = { "Content-Type": "text/xml; charset=UTF-8", "SOAPAction": "http://schemas.microsoft.com/sharepoint/soap/" + soap_action, } return headers def add_list(self, list_name, description, template_id): template_ids = { "Announcements": "104", "Contacts": "105", "Custom List": "100", "Custom List in Datasheet View": "120", "DataSources": "110", "Discussion Board": "108", "Document Library": "101", "Events": "106", "Form Library": "115", "Issues": "1100", "Links": "103", "Picture Library": "109", "Survey": "102", "Tasks": "107", } if type(template_id) == int: template_id = str(template_id) elif type(template_id) == str: if template_id.isdigit(): pass else: template_id = template_ids[template_id] soap_request = Soap("AddList") soap_request.add_parameter("listName", list_name) soap_request.add_parameter("description", description) soap_request.add_parameter("templateID", template_id) self.last_request = str(soap_request) response = post(self._session, url=self._url("Lists"), headers=self._headers("AddList"), data=str(soap_request).encode("utf-8"), verify=self._verify_ssl, timeout=self.timeout) return response.text def delete_list(self, list_name): soap_request = Soap("DeleteList") soap_request.add_parameter("listName", list_name) 
self.last_request = str(soap_request) post(self._session, url=self._url("Lists"), headers=self._headers("DeleteList"), data=str(soap_request).encode("utf-8"), verify=self._verify_ssl, timeout=self.timeout) def get_form_collection(self, list_name): soap_request = Soap("GetFormCollection") soap_request.add_parameter("listName", list_name) self.last_request = str(soap_request) response = post(self._session, url=self._url("Forms"), headers=self._headers("GetFormCollection"), data=str(soap_request).encode("utf-8"), verify=self._verify_ssl, timeout=self.timeout) envelope = etree.fromstring(response.text.encode("utf-8"), parser=etree.XMLParser(huge_tree=self.huge_tree, recover=True)) items = envelope[0][0][0][0] data = [] for _item in items: data.append({k: v for (k, v) in _item.items()}) return data def get_site(self): soap_request = Soap("GetSite") soap_request.add_parameter("SiteUrl", self.site_url) self.last_request = str(soap_request) response = post(self._session, url=self._url("Sites"), headers=self._headers("GetSite"), data=str(soap_request).encode("utf-8"), verify=self._verify_ssl, timeout=self.timeout) envelope = etree.fromstring(response.text.encode("utf-8"), parser=etree.XMLParser(huge_tree=self.huge_tree, recover=True)) data = envelope[0][0][0] return data.text def get_list_templates(self): soap_request = Soap("GetListTemplates") soap_request.add_parameter("GetListTemplates") self.last_request = str(soap_request) response = post(self._session, url=self._url("Webs"), headers=self._headers("GetListTemplates"), data=str(soap_request).encode("utf-8"), verify=self._verify_ssl, timeout=self.timeout) envelope = etree.fromstring(response.text.encode("utf-8"), parser=etree.XMLParser(huge_tree=self.huge_tree, recover=True)) lists = envelope[0][0][0][0] data = [] for _list in lists: data.append({k: v for (k, v) in _list.items()}) return data def get_site_templates(self, lcid="1033"): soap_request = Soap("GetSiteTemplates") soap_request.add_parameter("LCID", lcid) self.last_request = str(soap_request) response = post(self._session, url=self._url("Sites"), headers=self._headers("GetSiteTemplates"), data=str(soap_request).encode("utf-8"), verify=self._verify_ssl, timeout=self.timeout) return response envelope = etree.fromstring(response.text.encode("utf-8"), parser=etree.XMLParser(huge_tree=self.huge_tree, recover=True)) lists = envelope[0][0][1] data = [] for _list in lists: data.append({k: v for (k, v) in _list.items()}) return data def get_list_collection(self): soap_request = Soap("GetListCollection") self.last_request = str(soap_request) response = post(self._session, url=self._url("SiteData"), headers=self._headers("GetListCollection"), data=str(soap_request).encode("utf-8"), verify=self._verify_ssl, timeout=self.timeout) envelope = etree.fromstring(response.text.encode("utf-8"), parser=etree.XMLParser(huge_tree=self.huge_tree, recover=True)) lists = envelope[0][0][1] data = [] for _list in lists: _list_data = {} for item in _list: key = item.tag.replace("{http://schemas.microsoft.com/sharepoint/soap/}", "") value = item.text _list_data[key] = value data.append(_list_data) return data def get_users(self, rowlimit=0): soap_request = Soap("GetListItems") soap_request.add_parameter("listName", "UserInfo") soap_request.add_parameter("rowLimit", str(rowlimit)) self.last_request = str(soap_request) response = post(self._session, url=self._url("Lists"), headers=self._headers("GetListItems"), data=str(soap_request).encode("utf-8"), verify=self._verify_ssl, timeout=self.timeout) try: envelope = 
etree.fromstring(response.text.encode("utf-8"), parser=etree.XMLParser(huge_tree=self.huge_tree, recover=True)) except Exception as e: raise requests.ConnectionError("GetUsers GetListItems response failed to parse correctly: " + str(e)) listitems = envelope[0][0][0][0][0] data = [] for row in listitems: data.append({key[4:]: value for (key, value) in row.items() if key[4:]}) return { "py": {i["ImnName"]: i["ID"] + ";#" + i["ImnName"] for i in data}, "sp": {i["ID"] + ";#" + i["ImnName"]: i["ImnName"] for i in data}, }
MIT License
m3dev/pptx-template
.eggs/python_pptx-0.6.6-py3.6.egg/pptx/dml/chtfmt.py
ChartFormat.line
python
def line(self):
    spPr = self._element.get_or_add_spPr()
    return LineFormat(spPr)
The |LineFormat| object providing access to the visual properties of this object, such as line color and line style.
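A short sketch with the python-pptx API; `chart` is assumed to be an existing chart object taken from a presentation:

from pptx.dml.color import RGBColor
from pptx.util import Pt

series = chart.plots[0].series[0]    # `chart` comes from an existing slide
line = series.format.line           # ChartFormat.line -> LineFormat
line.color.rgb = RGBColor(0x3C, 0x6E, 0xB4)
line.width = Pt(2.25)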
https://github.com/m3dev/pptx-template/blob/bccd95728fc27963dabdd53bd3a2ee92233d5176/.eggs/python_pptx-0.6.6-py3.6.egg/pptx/dml/chtfmt.py#L40-L46
from __future__ import ( absolute_import, division, print_function, unicode_literals ) from .fill import FillFormat from .line import LineFormat from ..shared import ElementProxy from ..util import lazyproperty class ChartFormat(ElementProxy): __slots__ = ('_fill', '_line') @lazyproperty def fill(self): spPr = self._element.get_or_add_spPr() return FillFormat.from_fill_parent(spPr) @lazyproperty
Apache License 2.0
softbankrobotics-research/qibullet
qibullet/simulation_manager.py
SimulationManager.resetSimulation
python
def resetSimulation(self, physics_client):
    self._clearInstance(physics_client)
    pybullet.resetSimulation(physicsClientId=physics_client)
Resets the simulated instance corresponding to the physics client id. All of the objects loaded in the simulation are destroyed, but the instance itself keeps running.
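A hedged usage sketch; spawnPepper and stopSimulation are assumed from qibullet's SimulationManager API (they are not shown in this excerpt):

from qibullet import SimulationManager

manager = SimulationManager()
client = manager.launchSimulation(gui=False)
manager.spawnPepper(client, spawn_ground_plane=True)

manager.resetSimulation(client)   # the robot and ground plane are destroyed,
                                  # but `client` still refers to a running instance
manager.stopSimulation(client)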
https://github.com/softbankrobotics-research/qibullet/blob/486e5943a9886a777eeacdc06e97e323ccd0cc31/qibullet/simulation_manager.py#L96-L103
import time import pybullet import threading import pybullet_data from qibullet.laser import Laser from qibullet.camera import Camera from qibullet.nao_virtual import NaoVirtual from qibullet.romeo_virtual import RomeoVirtual from qibullet.pepper_virtual import PepperVirtual from qibullet.robot_module import RobotModule from qibullet.helpers import GravityHelper class SimulationManager: def __init__(self): pass def launchSimulation( self, gui=True, use_shared_memory=False, auto_step=True): if gui: physics_client = pybullet.connect(pybullet.GUI) if auto_step: pybullet.setRealTimeSimulation( 1, physicsClientId=physics_client) pybullet.configureDebugVisualizer( pybullet.COV_ENABLE_RGB_BUFFER_PREVIEW, 0, physicsClientId=physics_client) pybullet.configureDebugVisualizer( pybullet.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0, physicsClientId=physics_client) pybullet.configureDebugVisualizer( pybullet.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0, physicsClientId=physics_client) else: if use_shared_memory: physics_client = pybullet.connect( pybullet.SHARED_MEMORY_SERVER) if auto_step: pybullet.setRealTimeSimulation( 1, physicsClientId=physics_client) else: physics_client = pybullet.connect(pybullet.DIRECT) if auto_step: threading.Thread( target=self._stepSimulation, args=[physics_client]).start() self.setGravity(physics_client, [0.0, 0.0, -9.81]) return physics_client
Apache License 2.0
xknx/xknx
xknx/remote_value/remote_value_sensor.py
_RemoteValueGeneric.payload_valid
python
def payload_valid(self, payload: DPTArray | DPTBinary | None) -> DPTArray | None:
    return (
        payload
        if isinstance(payload, DPTArray)
        and len(payload.value) == self.dpt_class.payload_length
        else None
    )
Test if telegram payload may be parsed.
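A standalone mirror of the check, not the xknx class itself; the expected payload length is passed in directly instead of coming from dpt_class:

from xknx.dpt import DPTArray, DPTBinary

def payload_valid(payload, payload_length):
    # only a DPTArray of exactly the expected length is accepted
    return (
        payload
        if isinstance(payload, DPTArray) and len(payload.value) == payload_length
        else None
    )

assert payload_valid(DPTArray((0x0C, 0x1A)), 2) is not None   # e.g. a 2-byte sensor payload
assert payload_valid(DPTArray((0x0C,)), 2) is None            # wrong length
assert payload_valid(DPTBinary(1), 2) is None                 # wrong payload type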
https://github.com/xknx/xknx/blob/87666cc9bd9da64a84305baeff84486097346111/xknx/remote_value/remote_value_sensor.py#L58-L65
from __future__ import annotations from abc import abstractmethod from typing import TYPE_CHECKING, TypeVar, Union from xknx.dpt import DPTArray, DPTBase, DPTBinary, DPTNumeric from xknx.exceptions import ConversionError from .remote_value import AsyncCallbackType, GroupAddressesType, RemoteValue if TYPE_CHECKING: from xknx.xknx import XKNX ValueType = TypeVar("ValueType") class _RemoteValueGeneric(RemoteValue[DPTArray, ValueType]): dpt_base_class: type[DPTBase] def __init__( self, xknx: XKNX, group_address: GroupAddressesType | None = None, group_address_state: GroupAddressesType | None = None, sync_state: bool | int | float | str = True, value_type: int | str | None = None, device_name: str | None = None, feature_name: str = "Value", after_update_cb: AsyncCallbackType | None = None, ): if value_type is None: raise ConversionError("no value type given", device_name=device_name) _dpt_class = self.dpt_base_class.parse_transcoder(value_type) if _dpt_class is None: raise ConversionError( "invalid value type", value_type=value_type, device_name=device_name ) self.dpt_class = _dpt_class super().__init__( xknx, group_address, group_address_state, sync_state=sync_state, device_name=device_name, feature_name=feature_name, after_update_cb=after_update_cb, )
MIT License
gregorch/ipet
ipet/concepts/Manager.py
Manager.chgManageableName
python
def chgManageableName(self, manageable, oldname, newname):
    if newname != oldname:
        if self.getManageable(newname) is not None:
            raise KeyError("An element of name %s is already listed" % (newname))
        del self.stringrepresentations[oldname]
        self.stringrepresentations[newname] = manageable
Changes a manageable's name, if possible.
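A brief usage sketch with plain strings as manageables (a string's representation is itself), assuming the Manager class from this row's context:

m = Manager(['alpha', 'beta'])
m.chgManageableName('beta', 'beta', 'gamma')   # re-keys 'beta' under the name 'gamma'
assert m.getManageable('gamma') == 'beta'
assert m.getManageable('beta') is None

try:
    m.chgManageableName('alpha', 'alpha', 'gamma')   # 'gamma' is already taken
except KeyError:
    pass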
https://github.com/gregorch/ipet/blob/e4135ff936d3aa447a960d854f9c51554e5ba7dc/ipet/concepts/Manager.py#L94-L102
from .Observer import Observable from .IPETMessageStream import Message class Manager(Observable): def __init__(self, listofmanageables=[], activate=False): self.stringrepresentations = {} self.activeset = set() for manageable in listofmanageables: self.addManageable(manageable) if activate: self.activate(manageable) def addManageable(self, manageable): stringrepresentation = self.getStringRepresentation(manageable) self.stringrepresentations[stringrepresentation] = manageable def getStringRepresentation(self, manageable): if type(manageable) is str: return manageable else: try: return manageable.getName() except AttributeError: return str(manageable) def getManageable(self, stringrepresentation): return self.stringrepresentations.get(stringrepresentation, None) def deleteManageable(self, manageable): for key, val in list(self.stringrepresentations.items()): if val == manageable: oldstringrepresentation = key break del self.stringrepresentations[oldstringrepresentation] self.deactivate([manageable]) def reinsertManageable(self, manageable): active = self.isActive(manageable) self.deleteManageable(manageable) self.addManageable(manageable) if active: self.activate([manageable]) def editObjectAttribute(self, manageable, attributename, newattribute): oldname = self.getStringRepresentation(manageable) manageable.editAttribute(attributename, newattribute) newname = self.getStringRepresentation(manageable) print(newname, newattribute) if oldname != newname: self.chgManageableName(manageable, oldname, newname) self.notify(Message("Changed attribute %s of %s to %s" % (attributename, newname, newattribute), Message.MESSAGETYPE_INFO))
MIT License
ingeniamc/ingenialink-python
ingenialink/ipb/servo.py
IPBServo.monitoring_set_mapped_register
python
def monitoring_set_mapped_register(self, channel, address, subnode, dtype, size):
    return lib.il_net_set_mapped_register(self._cffi_network, channel,
                                          address, subnode, dtype, size)
Set monitoring mapped register.

Args:
    channel (int): Identity channel number.
    address (int): Register address to map.
    subnode (int): Subnode of the register to map.
    dtype (REG_DTYPE): Data type of the register to map.
    size (int): Size of the register to map.

Returns:
    int: Result code.
https://github.com/ingeniamc/ingenialink-python/blob/6011931697e48456f5638c2848303aac2e5bcb75/ingenialink/ipb/servo.py#L937-L950
import os from .._ingenialink import lib, ffi from ingenialink.constants import * from ingenialink.exceptions import * from ingenialink.ipb.register import * from ingenialink.utils._utils import * from ingenialink.register import dtype_size from ingenialink.ipb.dictionary import IPBDictionary from ingenialink.servo import Servo, SERVO_MODE, SERVO_STATE, SERVO_UNITS_ACC, SERVO_UNITS_TORQUE, SERVO_UNITS_POS, SERVO_UNITS_VEL import io import numpy as np from xml.dom import minidom import xml.etree.ElementTree as ET import ingenialogger logger = ingenialogger.get_logger(__name__) PRODUCT_ID_REGISTERS = { 0: IPBRegister( identifier='', units='', subnode=0, address=0x06E1, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RO ), 1: IPBRegister( identifier='', units='', subnode=1, address=0x06E1, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RO ) } SERIAL_NUMBER_REGISTERS = { 0: IPBRegister( identifier='', units='', subnode=0, address=0x06E6, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RO ), 1: IPBRegister( identifier='', units='', subnode=1, address=0x06E6, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RO ) } SOFTWARE_VERSION_REGISTERS = { 0: IPBRegister( identifier='', units='', subnode=0, address=0x06E4, cyclic='CONFIG', dtype=REG_DTYPE.STR, access=REG_ACCESS.RO ), 1: IPBRegister( identifier='', units='', subnode=1, address=0x06E4, cyclic='CONFIG', dtype=REG_DTYPE.STR, access=REG_ACCESS.RO ) } REVISION_NUMBER_REGISTERS = { 0: IPBRegister( identifier='', units='', subnode=0, address=0x06E2, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RO ), 1: IPBRegister( identifier='', units='', subnode=1, address=0x06E2, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RO ) } DIST_NUMBER_SAMPLES = IPBRegister( identifier='', units='', subnode=0, address=0x00C4, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW ) DIST_DATA = IPBRegister( identifier='', units='', subnode=0, address=0x00B4, cyclic='CONFIG', dtype=REG_DTYPE.U16, access=REG_ACCESS.WO ) STORE_COCO_ALL = IPBRegister( identifier='', units='', subnode=0, address=0x06DB, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW ) RESTORE_COCO_ALL = IPBRegister( identifier='', units='', subnode=0, address=0x06DC, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW ) STATUS_WORD = IPBRegister( identifier='', units='', subnode=1, address=0x0011, cyclic='CONFIG', dtype=REG_DTYPE.U16, access=REG_ACCESS.RW ) STORE_MOCO_ALL_REGISTERS = { 1: IPBRegister( identifier='', units='', subnode=1, address=0x06DB, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW ), 2: IPBRegister( identifier='', units='', subnode=2, address=0x06DB, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW ), 3: IPBRegister( identifier='', units='', subnode=3, address=0x06DB, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW ) } RESTORE_MOCO_ALL_REGISTERS = { 1: IPBRegister( identifier='', units='', subnode=1, address=0x06DC, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW, reg_range=None ), 2: IPBRegister( identifier='', units='', subnode=2, address=0x06DC, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW, reg_range=None ), 3: IPBRegister( identifier='', units='', subnode=3, address=0x06DC, cyclic='CONFIG', dtype=REG_DTYPE.U32, access=REG_ACCESS.RW, reg_range=None ) } class IPBServo(Servo): def __init__(self, cffi_servo, cffi_net, target, dictionary_path=None): self._cffi_servo = cffi_servo self._cffi_network = cffi_net super(IPBServo, self).__init__(target) 
_dictionary_path = cstr(dictionary_path) if dictionary_path else ffi.NULL self.__dictionary = IPBDictionary(dictionary_path, self._cffi_servo) self.__observers_servo_state = {} self.__handlers_servo_state = {} self.__observers_emergency_state = {} if not hasattr(self, '_errors') or not self._errors: self._errors = self._get_all_errors(_dictionary_path) prod_name = '' if self.dictionary.part_number is None else self.dictionary.part_number self.full_name = '{} {}'.format(prod_name, self.name) @staticmethod def _get_all_errors(dictionary): errors = {} if str(dictionary) != "<cdata 'void *' NULL>": tree = ET.parse(dictionary) for error in tree.iter("Error"): label = error.find(".//Label") id = int(error.attrib['id'], 0) errors[id] = [ error.attrib['id'], error.attrib['affected_module'], error.attrib['error_type'].capitalize(), label.text ] return errors def _get_reg(self, reg, subnode): if isinstance(reg, IPBRegister): return reg elif isinstance(reg, str): _dict = self.dictionary if not _dict: raise ValueError('No dictionary loaded') if reg not in _dict.registers(subnode): raise_err(lib.IL_REGNOTFOUND, 'Register not found ({})'.format(reg)) return _dict.registers(subnode)[reg] else: raise TypeError('Invalid register') def raw_read(self, reg, subnode=1): return self.read(reg, subnode=subnode) def read(self, reg, subnode=1): _reg = self._get_reg(reg, subnode) t, f = self._raw_read[_reg.dtype] v = ffi.new(t) r = f(self._cffi_servo, _reg._reg, ffi.NULL, v) raise_err(r) value = self.extended_buffer if _reg.dtype == REG_DTYPE.STR else v[0] if isinstance(value, str): value = value.replace('\x00', '') return value def raw_write(self, reg, data, confirm=True, extended=0, subnode=1): self.write(reg, data, confirm, extended, subnode) def write(self, reg, data, confirm=True, extended=0, subnode=1): _reg = self._get_reg(reg, subnode) if isinstance(data, float) and _reg.dtype != REG_DTYPE.FLOAT: data = int(data) f = self._raw_write[_reg.dtype] r = f(self._cffi_servo, _reg._reg, ffi.NULL, data, confirm, extended) raise_err(r) def read_sdo(self, idx, subidx, dtype, slave=1): v = ffi.new('double *') r = lib.il_net_SDO_read(self._cffi_network, slave, idx, subidx, dtype, v) raise_err(r) return v[0] def read_string_sdo(self, idx, subidx, size, slave=1): v = ffi.new("char[" + str(size) + "]") r = lib.il_net_SDO_read_string(self._cffi_network, slave, idx, subidx, size, v) raise_err(r) return pstr(v) def write_sdo(self, idx, subidx, dtype, value, slave=1): r = lib.il_net_SDO_write(self._cffi_network, slave, idx, subidx, dtype, value) raise_err(r) def destroy(self): return lib.il_servo_destroy(self._cffi_servo) def reset(self): r = lib.il_servo_reset(self._cffi_servo) raise_err(r) def get_state(self, subnode=1): state = ffi.new('il_servo_state_t *') flags = ffi.new('int *') lib.il_servo_state_get(self._cffi_servo, state, flags, subnode) return SERVO_STATE(state[0]), flags[0] def enable(self, subnode=1, timeout=DEFAULT_PDS_TIMEOUT): r = lib.il_servo_enable(self._cffi_servo, subnode, timeout) raise_err(r) def disable(self, subnode=1, timeout=DEFAULT_PDS_TIMEOUT): r = lib.il_servo_disable(self._cffi_servo, subnode, timeout) raise_err(r) def fault_reset(self, subnode=1, timeout=DEFAULT_PDS_TIMEOUT): r = lib.il_servo_fault_reset(self._cffi_servo, subnode, timeout) raise_err(r) def switch_on(self, timeout=2.): r = lib.il_servo_switch_on(self._cffi_servo, to_ms(timeout)) raise_err(r) def homing_start(self): r = lib.il_servo_homing_start(self._cffi_servo) raise_err(r) def homing_wait(self, timeout): r = 
lib.il_servo_homing_wait(self._cffi_servo, to_ms(timeout)) raise_err(r) def store_parameters(self, subnode=None): if subnode is None: r = 0 try: self.write(reg=STORE_COCO_ALL, data=PASSWORD_STORE_ALL, subnode=0) logger.info('Store all successfully done.') except Exception: logger.warning('Store all COCO failed. Trying MOCO...') r = -1 if r < 0: if self.dictionary.subnodes > SINGLE_AXIS_MINIMUM_SUBNODES: for dict_subnode in self.dictionary.subnodes: self.write(reg=STORE_MOCO_ALL_REGISTERS[dict_subnode], data=PASSWORD_STORE_ALL, subnode=dict_subnode) logger.info('Store axis {} successfully done.'.format( dict_subnode)) else: self.write(reg=STORE_MOCO_ALL_REGISTERS[1], data=PASSWORD_STORE_ALL, subnode=1) logger.info('Store all successfully done.') elif subnode == 0: self.write(reg=STORE_COCO_ALL, data=PASSWORD_STORE_RESTORE_SUB_0, subnode=subnode) logger.info('Store subnode 0 successfully done.') elif subnode > 0 and subnode in STORE_MOCO_ALL_REGISTERS: self.write(reg=STORE_MOCO_ALL_REGISTERS[subnode], data=PASSWORD_STORE_ALL, subnode=subnode) logger.info('Store axis {} successfully done.'.format(subnode)) else: raise ILError('Invalid subnode.') sleep(1.5) def restore_parameters(self, subnode=None): if subnode is None: self.write(reg=RESTORE_COCO_ALL, data=PASSWORD_RESTORE_ALL, subnode=0) logger.info('Restore all successfully done.') elif subnode == 0: self.write(reg=RESTORE_COCO_ALL, data=PASSWORD_STORE_RESTORE_SUB_0, subnode=0) logger.info('Restore subnode 0 successfully done.') elif subnode > 0 and subnode in RESTORE_MOCO_ALL_REGISTERS: self.write(reg=RESTORE_MOCO_ALL_REGISTERS[subnode], data=PASSWORD_RESTORE_ALL, subnode=subnode) logger.info('Restore subnode {} successfully done.'.format(subnode)) else: raise ILError('Invalid subnode.') sleep(1.5) def is_alive(self): _is_alive = True try: self.read(STATUS_WORD) except ILError as e: _is_alive = True logger.error(e) return _is_alive def _store_comm(self): r = lib.il_servo_store_comm(self._cffi_servo) raise_err(r) def _store_app(self): r = lib.il_servo_store_app(self._cffi_servo) raise_err(r) def replace_dictionary(self, dictionary): r = lib.il_servo_dict_load(self._cffi_servo, cstr(dictionary)) if not hasattr(self, '_errors') or not self._errors: self._errors = self._get_all_errors(dictionary) raise_err(r) self.__dictionary = IPBDictionary(dictionary, self._cffi_servo) @staticmethod def __update_single_axis_dict(registers_category, registers, subnode): for register in registers: if subnode is not None and register.attrib['subnode'] != str( subnode) and subnode >= 0 and register in registers_category: registers_category.remove(register) cleanup_register(register) @staticmethod def __update_multiaxis_dict(device, axes_category, list_axis, subnode): for axis in list_axis: registers_category = axis.find('./Registers') registers = registers_category.findall('./Register') if subnode is not None and axis.attrib['subnode'] == str(subnode): for register in registers: cleanup_register(register) device.append(registers_category) device.remove(axes_category) break for register in registers: cleanup_register(register) def save_configuration(self, config_file, subnode=None): if subnode is not None and (not isinstance(subnode, int) or subnode < 0): raise ILError('Invalid subnode') prod_code, rev_number = get_drive_identification(self, subnode) r = lib.il_servo_dict_storage_read(self._cffi_servo) raise_err(r) self.dictionary.save(config_file) tree = ET.parse(config_file) xml_data = tree.getroot() body = xml_data.find('Body') device = 
xml_data.find('Body/Device') categories = xml_data.find('Body/Device/Categories') errors = xml_data.find('Body/Errors') if 'ProductCode' in device.attrib and prod_code is not None: device.attrib['ProductCode'] = str(prod_code) if 'RevisionNumber' in device.attrib and rev_number is not None: device.attrib['RevisionNumber'] = str(rev_number) registers_category = xml_data.find('Body/Device/Registers') if registers_category is None: axes_category = xml_data.find('Body/Device/Axes') list_axis = xml_data.findall('Body/Device/Axes/Axis') self.__update_multiaxis_dict(device, axes_category, list_axis, subnode) else: registers = xml_data.findall('Body/Device/Registers/Register') self.__update_single_axis_dict(registers_category, registers, subnode) device.remove(categories) body.remove(errors) image = xml_data.find('./DriveImage') if image is not None: xml_data.remove(image) xmlstr = minidom.parseString(ET.tostring(xml_data)).toprettyxml( indent=" ", newl='') config_file = io.open(config_file, "w", encoding='utf8') config_file.write(xmlstr) config_file.close() def load_configuration(self, config_file, subnode=None): if not os.path.isfile(config_file): raise FileNotFoundError('Could not find {}.'.format(config_file)) if subnode is not None and (not isinstance(subnode, int) or subnode < 0): raise ILError('Invalid subnode') if subnode is None: subnode = -1 r = lib.il_servo_dict_storage_write(self._cffi_servo, cstr(config_file), subnode) if not hasattr(self, '_errors') or not self._errors: self._errors = self._get_all_errors(config_file) raise_err(r) def reload_errors(self, dictionary): self._errors = self._get_all_errors(dictionary) def emcy_subscribe(self, cb): cb_handle = ffi.new_handle(cb) slot = lib.il_servo_emcy_subscribe( self._cffi_servo, lib._on_emcy_cb, cb_handle) if slot < 0: raise_err(slot) self.__observers_emergency_state[slot] = cb_handle return slot def emcy_unsubscribe(self, slot): lib.il_servo_emcy_unsubscribe(self._cffi_servo, slot) del self.__observers_emergency_state[slot] def subscribe_to_status(self, callback): if callback in self.__observers_servo_state.values(): logger.info('Callback already subscribed.') return cb_handle = ffi.new_handle(callback) slot = lib.il_servo_state_subscribe( self._cffi_servo, lib._on_state_change_cb, cb_handle) if slot < 0: raise_err(slot) self.__observers_servo_state[slot] = callback self.__handlers_servo_state[slot] = cb_handle def unsubscribe_from_status(self, callback): if callback not in self.__observers_servo_state.values(): logger.info('Callback not subscribed.') return for slot, cb in self.__observers_servo_state.items(): if cb == callback: lib.il_servo_state_unsubscribe(self._cffi_servo, slot) del self.__observers_servo_state[slot] del self.__handlers_servo_state[slot] return def _state_subs_stop(self, stop): r = lib.il_servo_state_subs_stop(self._cffi_servo, stop) if r < 0: raise ILError('Failed toggling servo state subscriptions.') def start_status_listener(self): self._state_subs_stop(0) def stop_status_listener(self): self._state_subs_stop(1) def disturbance_write_data(self, channels, dtypes, data_arr): if not isinstance(channels, list): channels = [channels] if not isinstance(dtypes, list): dtypes = [dtypes] if not isinstance(data_arr[0], list): data_arr = [data_arr] num_samples = len(data_arr[0]) self.write(DIST_NUMBER_SAMPLES, num_samples, subnode=0) sample_size = 0 for dtype_val in dtypes: sample_size += dtype_size(dtype_val) samples_for_write = DIST_FRAME_SIZE // sample_size number_writes = num_samples // samples_for_write rest_samples 
= num_samples % samples_for_write for i in range(number_writes): for index, channel in enumerate(channels): self.disturbance_channel_data( channel, dtypes[index], data_arr[index][i * samples_for_write:(i + 1) * samples_for_write]) self.disturbance_data_size = sample_size * samples_for_write self.write(DIST_DATA, sample_size * samples_for_write, False, 1, subnode=0) for index, channel in enumerate(channels): self.disturbance_channel_data( channel, dtypes[index], data_arr[index][number_writes * samples_for_write:num_samples]) self.disturbance_data_size = rest_samples * sample_size self.write(DIST_DATA, rest_samples * sample_size, False, 1, subnode=0) def wait_reached(self, timeout): r = lib.il_servo_wait_reached(self._cffi_servo, to_ms(timeout)) raise_err(r) def units_update(self): r = lib.il_servo_units_update(self._cffi_servo) raise_err(r) def units_factor(self, reg): return lib.il_servo_units_factor(self._cffi_servo, reg._reg) def monitoring_channel_data(self, channel, dtype): data_arr = [] size = int(self.monitoring_data_size) bytes_per_block = self.monitoring_get_bytes_per_block() if dtype == REG_DTYPE.U16: data_arr = lib.il_net_monitoring_channel_u16(self._cffi_network, channel) elif dtype == REG_DTYPE.S16: data_arr = lib.il_net_monitoring_channel_s16(self._cffi_network, channel) elif dtype == REG_DTYPE.U32: data_arr = lib.il_net_monitoring_channel_u32(self._cffi_network, channel) elif dtype == REG_DTYPE.S32: data_arr = lib.il_net_monitoring_channel_s32(self._cffi_network, channel) elif dtype == REG_DTYPE.FLOAT: data_arr = lib.il_net_monitoring_channel_flt(self._cffi_network, channel) ret_arr = [] for i in range(0, int(size / bytes_per_block)): ret_arr.append(data_arr[i]) return ret_arr def monitoring_remove_all_mapped_registers(self): return lib.il_net_remove_all_mapped_registers(self._cffi_network)
MIT License
ucasir/nprf
utils/pair_generator.py
PairGenerator.get_triplet_list
python
def get_triplet_list(self, qid_list, sample_size=10):
    triplet_list_global = []
    for qid in qid_list:
        relevance = self.relevance_dict.get(qid)
        relevance_posting = relevance.get_judged_docid_list()
        rel_0, rel_1, rel_2 = relevance_posting[0], relevance_posting[1], relevance_posting[2]
        rel_01_triplet_list = self.create_triplet_list(rel_0, rel_1, qid, sample_size)
        rel_12_triplet_list = self.create_triplet_list(rel_1, rel_2, qid, sample_size)
        curr_triplet_list = []
        if rel_01_triplet_list != None:
            curr_triplet_list.extend(rel_01_triplet_list)
        if rel_12_triplet_list != None:
            curr_triplet_list.extend(rel_12_triplet_list)
        curr_triplet_list = np.random.permutation(curr_triplet_list)
        triplet_list_global.extend(curr_triplet_list[: self.sample_perquery_limit])
    triplet_list_global = np.random.permutation(triplet_list_global)
    triplet_list_global = triplet_list_global[: self.sample_total_limit]
    return triplet_list_global
Deprecated, please use get_triplet_list_balanced
https://github.com/ucasir/nprf/blob/d385929b3249a003e017cba03b8669dc6a05037e/utils/pair_generator.py#L73-L96
import numpy as np import cPickle as pickle import os import sys import math import logging sys.path.append('../utils') from relevance_info import Relevance class PairGenerator(object): def __init__(self, relevance_dict_path, batch_size, shuffle, sample_perquery_limit, sample_total_limit): self.batch_size = batch_size self.shuffle = shuffle self.sample_perquery_limit = sample_perquery_limit self.sample_total_limit = sample_total_limit with open(relevance_dict_path, 'r') as f: self.relevance_dict = pickle.load(f) def generate_pair_batch(self, qid_list, sample_size): pass def get_feature_batch(self, triplet_list): pass def generate_list_batch(self, qid_list, topk): pass
Apache License 2.0
hewlettpackard/python-ilorest-library-old
src/redfish/rest/v1.py
RisObject.__init__
python
def __init__(self, d):
    super(RisObject, self).__init__()
    self.update(**dict((k, self.parse(value)) for k, value in d.iteritems()))
Initialize RisObject :param d: dictionary to be parsed :type d: dict
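A standalone Python 3 illustration of the same recursive attribute-dict pattern (the original module targets Python 2, hence d.iteritems()); the parse classmethod here is an assumption, since the original parse is not shown in this excerpt:

class AttrDict(dict):
    __getattr__ = dict.__getitem__

    def __init__(self, d):
        super().__init__()
        self.update(**{k: self.parse(v) for k, v in d.items()})

    @classmethod
    def parse(cls, value):
        # hypothetical helper: recurse into nested dicts
        return cls(value) if isinstance(value, dict) else value

resp = AttrDict.parse({'Name': 'iLO', 'Oem': {'Hp': {'Type': 'rest'}}})
assert resp.Oem.Hp.Type == 'rest'   # keys are reachable as attributes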
https://github.com/hewlettpackard/python-ilorest-library-old/blob/b00fd417024485a77c4f71f913135831d674a177/src/redfish/rest/v1.py#L88-L97
import os import sys import ssl import uuid import time import gzip import json import base64 import codecs import urllib import ctypes import hashlib import logging import httplib import platform try: import io except ImportError: pass from StringIO import StringIO from collections import (OrderedDict) import urlparse2 from redfish.hpilo.rishpilo import HpIloChifPacketExchangeError from redfish.hpilo.risblobstore2 import BlobStore2, Blob2OverrideError LOGGER = logging.getLogger(__name__) class RetriesExhaustedError(Exception): pass class InvalidCredentialsError(Exception): pass class ServerDownOrUnreachableError(Exception): pass class ChifDriverMissingOrNotFound(Exception): pass class DecompressResponseError(Exception): pass class JsonDecodingError(Exception): pass class RisObject(dict): __getattr__ = dict.__getitem__
Apache License 2.0
wheatoncs/lexos
lexos/models/content_analysis_model.py
ContentAnalysisModel.analyze
python
def analyze(self) -> (Optional[str], Optional[str]):
    dictionaries = self.count()
    if self.is_secure():
        formula_errors = self.save_formula()
        self.generate_scores()
        self.generate_averages()
        dataframe_unsorted = self.to_data_frame()
        dataframe = dataframe_unsorted.sort_values(
            by=[dataframe_unsorted.columns
                [self.content_analysis_option.sort_column]],
            ascending=self.content_analysis_option.sort_ascending)
        overview = dataframe.values.tolist()
        overview.insert(0, dataframe.columns.values.tolist())
        overview_csv = dataframe.to_csv()
        corpus_dataframe = pd.DataFrame(self.generate_corpus_results(
            dictionaries=dictionaries),
            columns=["Dictionary", "Phrase", "Count"])
        corpus_results = self.get_top_results(corpus_dataframe)
        corpus_csv = corpus_dataframe.to_csv()
        document_results = []
        for document_result in self.generate_document_results(
                dictionaries=dictionaries):
            dataframe = pd.DataFrame(
                document_result["table"],
                columns=["Dictionary", "Phrase", "Count"])
            document_results.append({
                "name": document_result["name"],
                "data": self.get_top_results(dataframe),
                "csv": dataframe.to_csv()
            })
    else:
        formula_errors = "Formula error: Invalid input"
        overview = ""
        overview_csv = ""
        corpus_results = ""
        corpus_csv = ""
        document_results = ""
    return overview, overview_csv, corpus_results, corpus_csv, \
        document_results, formula_errors
Perform the analysis. :return: The results of the analysis.
https://github.com/wheatoncs/lexos/blob/994be4e403053ebbef18e5758a100af616195706/lexos/models/content_analysis_model.py#L319-L373
import random from copy import deepcopy from typing import Optional import pandas as pd from lexos.helpers.definitions import count_phrase_in_text from lexos.receivers.content_analysis_receiver import ContentAnalysisReceiver, ContentAnalysisOption class ContentAnalysisModel(object): def __init__(self, test_options: Optional[ContentAnalysisOption] = None): self._test_options = test_options self._dictionaries = [] self._corpus = [] self._counters = [] self._formulas = [] self._scores = [] self._averages = [] self._formula = "" self._toggle_all = True def add_file(self, file_name: str, label: str, content: str): content = content.strip() total_word_counts = len(str(content).split(" ")) self._corpus.append(File(content=content, file_name=file_name, label=label, total_word_counts=total_word_counts)) def add_dictionary(self, file_name: str, label: str, content: str): new_list = str(content).split(", ") new_list.sort(key=lambda x: len(x.split()), reverse=True) self._dictionaries.append(Dictionary(content=new_list, file_name=file_name, label=label)) def get_active_dicts(self) -> list: return [dictionary for dictionary in self.dictionaries if dictionary.active] def count(self) -> list: self._counters = [] dictionaries = self.join_active_dicts() for file in deepcopy(self._corpus): dictionaries = count_phrases(dictionary=dictionaries, file=file) self.get_dictionary_counts(dictionaries) return dictionaries def generate_corpus_results(self, dictionaries: list) -> list: corpus_results = [] for phrase in dictionaries: count = 0 for i in phrase.file_counts: count += phrase.file_counts[i] corpus_results.append([phrase.dict_label, phrase.content, str(count)]) return corpus_results def generate_document_results(self, dictionaries: list) -> list: document_results = [] for file in self._corpus: result = {"name": file.label, "table": []} for phrase in dictionaries: result["table"].append([phrase.dict_label, phrase.content, str(phrase.file_counts[file.label])]) document_results.append(result) return document_results def get_dictionary_counts(self, dictionaries: list): counter = [] active_dicts = self.get_active_dicts() for dictionary in active_dicts: count = 0 for phrase in dictionaries: if phrase.dict_label == dictionary.label: count += phrase.count counter.append(count) if len(counter) == len(active_dicts): self._counters.append(counter) def generate_scores(self): self._scores = [] self._formulas = [] active_dicts = self.get_active_dicts() result = 0 for corpus_index, file in enumerate(self._corpus): new_formula = self._formula for active_dict_index, active_dict in enumerate(active_dicts): new_formula = new_formula.replace( "[" + active_dict.label + "]", str(self._counters[corpus_index][active_dict_index])) new_formula = new_formula.replace("()", "") try: result = eval(new_formula) except (ValueError, SyntaxError): pass self._scores.append(round( float(result) / file.total_word_count, ndigits=3)) self._formulas.append(result) def generate_averages(self): self._averages = [] scores_sum = 0 total_word_counts_sum = 0 formulas_sum = 0 active_dicts = self.get_active_dicts() for index, (score, formula, file) in enumerate(zip(self.scores, self._formulas, self._corpus)): scores_sum += score total_word_counts_sum += file.total_word_count formulas_sum += formula if len(self.scores) != 0: scores_avg = round( (float(scores_sum) / len(self.scores)), ndigits=3) else: scores_avg = 0 if len(self._corpus) != 0: average = (float(total_word_counts_sum) / (len(self._corpus))) total_word_counts_avg = round(average, ndigits=1) 
else: total_word_counts_avg = 0 if len(self._formulas) != 0: sums_avg = round((float(formulas_sum) / len(self._formulas)), ndigits=1) else: sums_avg = 0 for dict_index, _ in enumerate(active_dicts): cat_count = sum([counter[dict_index] for counter in self._counters]) if len(self._counters) != 0: self._averages.append(round( float(cat_count) / len(self._counters), ndigits=1)) else: self._averages.append(0) self._averages.append(sums_avg) self._averages.append(total_word_counts_avg) self._averages.append(scores_avg) def join_active_dicts(self) -> list: active_dicts = self.get_active_dicts() dictionaries = [Phrase(content=phrase, dict_label=dictionary.label) for dictionary in active_dicts for phrase in dictionary.content if phrase != ''] dictionaries.sort(key=lambda x: len(x.content.split()), reverse=True) return dictionaries def to_data_frame(self) -> pd.DataFrame: columns = ["Document Name"] + [dictionary.label for dictionary in self.get_active_dicts()] + ["Formula", "Word Count", "Score"] dataframe = pd.DataFrame(columns=columns) avg_column = pd.Series(["Averages"] + self._averages, index=columns) dataframe = dataframe.append(avg_column, ignore_index=True) for index, (file, formula, score, counters) in enumerate( zip(self._corpus, self._formulas, self._scores, self._counters)): column = pd.Series( [file.label] + counters + [formula] + [file.total_word_count] + [score], index=columns) dataframe = dataframe.append(column, ignore_index=True) return dataframe def is_secure(self) -> bool: formula = self._formula allowed_input = ["[" + dictionary.label + "]" for dictionary in self.get_active_dicts()] + ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", " ", "+", "-", "*", "/", "sin", "cos", "tan", "log", "sqrt", "(", ")"] for item in allowed_input: formula = formula.replace(item, "") if len(formula) == 0: return True return False def save_formula(self): if self._test_options is not None: formula = self._test_options.formula else: formula = self.content_analysis_option.formula if len(formula) == 0: self._formula = "0" else: formula = formula.replace("√", "sqrt").replace("^", "**") self._formula = formula return self.check_formula() def check_formula(self) -> str: error_msg = "Formula errors:<br>" is_error = False if self._formula.count("(") != self._formula.count(")"): error_msg += "Mismatched parenthesis<br>" is_error = True if "sin()" in self._formula: error_msg += "sin takes exactly one argument (0 given)<br>" is_error = True if "cos()" in self._formula: error_msg += "cos takes exactly one argument (0 given)<br>" is_error = True if "tan()" in self._formula: error_msg += "tan takes exactly one argument (0 given)<br>" is_error = True if "log()" in self._formula: error_msg += "log takes exactly one argument (0 given)<br>" is_error = True if is_error: return error_msg return "" def get_top_results(self, dataframe) -> list: dataframe.Count = pd.to_numeric(dataframe.Count, errors="coerce") dataframe = dataframe.sort_values(by="Count", ascending=False) return dataframe.head(100).values.tolist()
MIT License
voronind/vk
vk/session.py
APIBase.on_api_error_15
python
def on_api_error_15(self, request):
    logger.error('Authorization failed. Access token will be dropped')
    self.access_token = self.get_access_token()
    return self.send(request)
15. Access denied - due to scope
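A hedged sketch of how error 15 recovery ties into get_access_token: a subclass that can mint a fresh token. The issue_new_token callable is hypothetical:

import vk.session

class RefreshingAPI(vk.session.APIBase):
    def __init__(self, issue_new_token, **kwargs):
        super().__init__(**kwargs)
        self._issue_new_token = issue_new_token
        self.access_token = issue_new_token()

    def get_access_token(self):
        # called by on_api_error_15 after the stale token has been dropped
        return self._issue_new_token()

api = RefreshingAPI(issue_new_token=lambda: 'fresh-token', v='5.131')
# api.users.get(user_ids=1)  # an error-15 response would refresh the token and retry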
https://github.com/voronind/vk/blob/37f41c7634f67149d4dab8017be0adca5ea3dc24/vk/session.py#L86-L93
import re import urllib import logging import requests from .exceptions import VkAuthError, VkAPIError from .api import APINamespace from .utils import json_iter_parse, stringify logger = logging.getLogger('vk') class APIBase: METHOD_COMMON_PARAMS = {'v', 'lang', 'https', 'test_mode'} API_URL = 'https://api.vk.com/method/' CAPTCHA_URL = 'https://m.vk.com/captcha.php' def __new__(cls, *args, **kwargs): method_common_params = {key: kwargs.pop(key) for key in tuple(kwargs) if key in cls.METHOD_COMMON_PARAMS} api = object.__new__(cls) api.__init__(*args, **kwargs) return APINamespace(api, method_common_params) def __init__(self, timeout=10): self.timeout = timeout self.session = requests.Session() self.session.headers['Accept'] = 'application/json' self.session.headers['Content-Type'] = 'application/x-www-form-urlencoded' def send(self, request): logger.debug('Prepare API Method request') self.prepare_request(request) method_url = self.API_URL + request.method response = self.session.post(method_url, request.method_params, timeout=self.timeout) response.raise_for_status() for response_or_error in json_iter_parse(response.text): request.response = response_or_error if 'response' in response_or_error: return response_or_error['response'] elif 'error' in response_or_error: api_error = VkAPIError(request.response['error']) request.api_error = api_error return self.handle_api_error(request) def prepare_request(self, request): request.method_params['access_token'] = self.access_token def get_access_token(self): raise NotImplementedError def handle_api_error(self, request): logger.error('Handle API error: %s', request.api_error) api_error_handler_name = 'on_api_error_' + str(request.api_error.code) api_error_handler = getattr(self, api_error_handler_name, self.on_api_error) return api_error_handler(request) def on_api_error_14(self, request): request.method_params['captcha_key'] = self.get_captcha_key(request) request.method_params['captcha_sid'] = request.api_error.captcha_sid return self.send(request)
MIT License
districtdatalabs/yellowbrick-docs-zh
yellowbrick/base.py
ModelVisualizer.fit
python
def fit(self, X, y=None, **kwargs):
    self.estimator.fit(X, y)
    return self
Fits the wrapped estimator so that subclasses that override fit can ensure that the estimator is fit using super rather than a direct call down to the estimator. Score estimators tend to expect a fitted model.

Parameters
----------
X : ndarray or DataFrame of shape n x m
    A matrix of n instances with m features

y : ndarray or Series of length n
    An array or series of target or class values

kwargs: dict
    Keyword arguments passed to the drawing functionality or to the Scikit-Learn API. See visualizer specific details for how to use the kwargs to modify the visualization or fitting process.

Returns
-------
self : visualizer
    The fit method must always return self to support pipelines.
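A typical use through a concrete ModelVisualizer subclass, assuming the old yellowbrick API that this documentation snapshot covers (poof() renders the figure):

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import ClassificationReport

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

viz = ClassificationReport(LogisticRegression())
viz.fit(X_train, y_train)    # delegates to LogisticRegression.fit and returns the visualizer
viz.score(X_test, y_test)    # score() expects the wrapped estimator to be fitted
viz.poof()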
https://github.com/districtdatalabs/yellowbrick-docs-zh/blob/3118e67f2bed561a00885e6edb2cabb3520ad66b/yellowbrick/base.py#L279-L304
import matplotlib.pyplot as plt from .utils.wrapper import Wrapper from sklearn.base import BaseEstimator from .utils import get_model_name, isestimator from sklearn.model_selection import cross_val_predict as cvp class Visualizer(BaseEstimator): def __init__(self, ax=None, **kwargs): self.ax = ax self.size = kwargs.pop('size', None) self.color = kwargs.pop('color', None) self.title = kwargs.pop('title', None) @property def ax(self): if not hasattr(self, "_ax") or self._ax is None: self._ax = plt.gca() return self._ax @ax.setter def ax(self, ax): self._ax = ax @property def size(self): if not hasattr(self, "_size") or self._size is None: fig = plt.gcf() self._size = fig.get_size_inches()*fig.dpi return self._size @size.setter def size(self, size): self._size = size if self._size is not None: fig = plt.gcf() width, height = size width_in_inches = width / fig.get_dpi() height_in_inches = height / fig.get_dpi() fig.set_size_inches(width_in_inches, height_in_inches) def fit(self, X, y=None, **kwargs): return self def draw(self, **kwargs): raise NotImplementedError( "Visualizers must implement a drawing interface." ) def finalize(self, **kwargs): return self.ax def poof(self, outpath=None, **kwargs): if self._ax is None: return self.finalize() if outpath is not None: plt.savefig(outpath, **kwargs) else: plt.show() def set_title(self, title=None): title = self.title or title if title is not None: self.ax.set_title(title) class ModelVisualizer(Visualizer, Wrapper): def __init__(self, model, ax=None, **kwargs): self.estimator = model self.name = get_model_name(self.estimator) Wrapper.__init__(self, self.estimator) Visualizer.__init__(self, ax=ax, **kwargs)
Apache License 2.0
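The docstring stresses that `fit` must return `self` so the visualizer can participate in scikit-learn pipelines. A minimal sketch of that wrap-and-chain pattern without the yellowbrick or scikit-learn dependency (both classes below are stand-ins):

class DummyEstimator:
    def fit(self, X, y=None):
        self.n_samples_ = len(X)
        return self

class ModelWrapper:
    def __init__(self, estimator):
        self.estimator = estimator

    def fit(self, X, y=None, **kwargs):
        # Delegate to the wrapped estimator, then return self so calls chain.
        self.estimator.fit(X, y)
        return self

wrapper = ModelWrapper(DummyEstimator()).fit([[1], [2], [3]])
print(wrapper.estimator.n_samples_)  # 3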
squareslab/darjeeling
src/darjeeling/snippet.py
SnippetDatabase.lines_for_snippet
python
def lines_for_snippet(self, snippet: Snippet) -> Iterator[FileLine]: yield from self.__content_to_lines.get(snippet.content, [])
Returns an iterator over all lines at which a snippet appears.
https://github.com/squareslab/darjeeling/blob/9c234a984f9a7f829c3fb3976b620142b40ca9c8/src/darjeeling/snippet.py#L100-L102
__all__ = ('Snippet', 'SnippetDatabase', 'LineSnippet', 'LineSnippetDatabase', 'StatementSnippet', 'StatementSnippetDatabase') from typing import (Any, Collection, Dict, Generic, Iterator, MutableSet, Optional, FrozenSet, TypeVar) from collections import OrderedDict import abc import typing import attr from kaskara.analysis import Analysis as KaskaraAnalysis from loguru import logger from .core import FileLocationRange, FileLine if typing.TYPE_CHECKING: from .config import Config from .problem import Problem T = TypeVar('T', bound='Snippet') class Snippet(abc.ABC): @property @abc.abstractmethod def content(self) -> str: ... def __lt__(self, other: Any) -> bool: if not isinstance(other, Snippet): return False return self.content < other.content def __str__(self) -> str: return self.content def __eq__(self, other: Any) -> bool: return isinstance(other, Snippet) and self.content == other.content def __hash__(self) -> int: return hash(self.content) @attr.s(slots=True, frozen=True, eq=False, hash=False, str=False, auto_attribs=True) class LineSnippet(Snippet): content: str @attr.s(slots=True, frozen=True, eq=False, hash=False, str=False, auto_attribs=True) class StatementSnippet(Snippet): content: str kind: Optional[str] reads: FrozenSet[str] writes: FrozenSet[str] declares: FrozenSet[str] requires_syntax: FrozenSet[str] @property def requires_break(self) -> bool: return 'break' in self.requires_syntax @property def requires_continue(self) -> bool: return 'continue' in self.requires_syntax @property def uses(self) -> FrozenSet[str]: return self.reads | self.writes class SnippetDatabase(Generic[T], Collection[T], abc.ABC): def __init__(self) -> None: self.__content_to_snippet: OrderedDict[str, T] = OrderedDict() self.__filename_to_snippets: Dict[str, MutableSet[T]] = {} self.__content_to_lines: Dict[str, MutableSet[FileLine]] = OrderedDict() def __iter__(self) -> Iterator[T]: yield from self.__content_to_snippet.values() def __len__(self) -> int: return len(self.__content_to_snippet) def __contains__(self, snippet: Any) -> bool: return snippet.content in self.__content_to_snippet def in_file(self, filename: str) -> Iterator[T]: yield from self.__filename_to_snippets.get(filename, [])
Apache License 2.0
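`lines_for_snippet` is a thin lookup into the content-to-lines index that the database builds up. A self-contained sketch of such a reverse index (names are illustrative, not darjeeling's):

from collections import defaultdict

class SnippetIndex:
    def __init__(self):
        # Maps snippet content to the set of (filename, line) pairs where it occurs.
        self._content_to_lines = defaultdict(set)

    def record(self, content, filename, line):
        self._content_to_lines[content].add((filename, line))

    def lines_for(self, content):
        # Yield every location at which the snippet appears (nothing if unknown).
        yield from self._content_to_lines.get(content, ())

idx = SnippetIndex()
idx.record("return x;", "foo.c", 10)
idx.record("return x;", "bar.c", 42)
print(sorted(idx.lines_for("return x;")))  # [('bar.c', 42), ('foo.c', 10)]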
chapmanb/cloudbiolinux
cloudbio/custom/cloudman.py
install_cloudman
python
def install_cloudman(env): env.logger.debug("Installing CloudMan") _configure_cloudman(env, use_repo_autorun=False) install_nginx(env) install_proftpd(env) install_sge(env) install_novnc(env)
A meta method for installing all CloudMan components. Allows CloudMan and all of its dependencies to be installed via: fab -f fabfile.py -i <key> -H ubuntu@<IP> install_custom:cloudman
https://github.com/chapmanb/cloudbiolinux/blob/db84bce8b6ad1b7f418499281c3c2d9d2495af05/cloudbio/custom/cloudman.py#L20-L30
import os import contextlib from fabric.api import cd from fabric.contrib.files import settings, hide from cloudbio.custom.shared import (_make_tmp_dir, _setup_conf_file) from cloudbio.cloudman import (_configure_cloudman, _configure_novnc, _configure_desktop, _configure_ec2_autorun) from cloudbio.galaxy import _install_nginx CDN_ROOT_URL = "http://linuxcourse.rutgers.edu/rate/Clusters/download" REPO_ROOT_URL = "https://bitbucket.org/afgane/mi-deployment/raw/tip"
MIT License
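`install_cloudman` is a meta-installer: it runs the component installers in a fixed order against the same environment. A toy sketch of that composition pattern with stand-in installer functions (the real ones are fabric tasks):

def install_nginx(env): env.append("nginx")
def install_proftpd(env): env.append("proftpd")
def install_sge(env): env.append("sge")

def install_everything(env):
    # Run each component installer in order against the shared environment.
    for step in (install_nginx, install_proftpd, install_sge):
        step(env)
    return env

print(install_everything([]))  # ['nginx', 'proftpd', 'sge']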
mandiant/speakeasy
speakeasy/windows/winemu.py
WindowsEmulator._handle_invalid_fetch
python
def _handle_invalid_fetch(self, emu, address, size, value, ctx): if address == self.return_hook or address == self.exit_hook: self._unset_emu_hooks() return True if not self.curr_mod: self.curr_mod = self.get_module_from_addr(self.get_pc()) if self.curr_mod: impfunc = self.curr_mod.import_table.get(address) if impfunc: mod_name, func_name = impfunc self.handle_import_func(mod_name, func_name) self._unset_emu_hooks() return True for addr, mod, fn in self.dyn_imps: if addr == address: self.handle_import_func(mod, fn) self._unset_emu_hooks() return True for addr, mod, fn in self.callbacks: if addr == address: self.handle_import_func(mod, fn) self._unset_emu_hooks() return True if self.dispatch_handlers: rv = self.dispatch_seh(ddk.STATUS_ACCESS_VIOLATION, address) if rv: return True fakeout = address & 0xFFFFFFFFFFFFF000 self.mem_map(self.page_size, base=fakeout) error = self.get_error_info('invalid_fetch', address) self.curr_run.error = error self.tmp_maps.append((fakeout, self.page_size)) self.on_run_complete() return True
Called when an attempt is made to emulate an instruction fetched from an invalid address.
https://github.com/mandiant/speakeasy/blob/e9b68610babba287c7032a32d0df2833ad1c5d7e/speakeasy/windows/winemu.py#L984-L1030
import io import os import ntpath import traceback import shlex import speakeasy.winenv.arch as _arch from speakeasy.binemu import BinaryEmulator from speakeasy.profiler import MemAccess import speakeasy.common as common from speakeasy.profiler import Run import speakeasy.windows.common as winemu import speakeasy.windows.objman as objman from speakeasy.windows.regman import RegistryManager from speakeasy.windows.fileman import FileManager from speakeasy.windows.cryptman import CryptoManager from speakeasy.windows.netman import NetworkManager from speakeasy.windows.hammer import ApiHammer from speakeasy.windows.driveman import DriveManager import speakeasy.winenv.defs.nt.ddk as ddk import speakeasy.winenv.defs.windows.windows as windef from speakeasy.struct import EmuStruct from speakeasy.errors import WindowsEmuError DISASM_SIZE = 0x20 class WindowsEmulator(BinaryEmulator): def __init__(self, config, logger=None, exit_event=None, debug=False): super(WindowsEmulator, self).__init__(config, logger=logger) self.debug = debug self.arch = 0 self.modules = [] self.pic_buffers = [] self.curr_run = None self.restart_curr_run = False self.curr_mod = None self.runs = [] self.input = None self.exit_event = exit_event self.page_size = 4096 self.ptr_size = None self.user_modules = [] self.max_runs = 100 self.sys_modules = [] self.symbols = {} self.ansi_strings = [] self.unicode_strings = [] self.tmp_maps = [] self.impdata_queue = [] self.run_queue = [] self.suspended_runs = [] self.cd = '' self.emu_hooks_set = True self.api = None self.curr_process = None self.om = None self.dyn_imps = [] self.callbacks = [] self.mem_trace_hooks = [] self.kernel_mode = False self.virtual_mem_base = 0x50000 self.mem_tracing_enabled = False self.tmp_code_hook = None self.veh_handlers = [] self.run_complete = False self.emu_complete = False self.global_data = {} self.processes = [] self.child_processes = [] self.curr_thread = None self.curr_exception_code = 0 self.prev_pc = 0 self.unhandled_exception_filter = 0 self.fs_addr = 0 self.gs_addr = 0 self.return_hook = winemu.EMU_RETURN_ADDR self.exit_hook = winemu.EXIT_RETURN_ADDR self._parse_config(config) self.wintypes = windef self.regman = RegistryManager(self.get_registry_config()) self.fileman = FileManager(config, self) self.netman = NetworkManager(config=self.get_network_config()) self.driveman = DriveManager(config=self.get_drive_config()) self.cryptman = CryptoManager() self.hammer = ApiHammer(self) def _parse_config(self, config): def _normalize_image(img): if img['arch'].lower() in ('x86', 'i386'): img['arch'] = _arch.ARCH_X86 elif img['arch'].lower() in ('x64', 'amd64'): img['arch'] = _arch.ARCH_AMD64 else: raise WindowsEmuError('Unsupported image arch: %s' % (img['arch'])) super(WindowsEmulator, self)._parse_config(config) for umod in self.config_user_modules: for img in umod.get('images', []): _normalize_image(img) for proc in self.config_processes: for img in proc.get('images', []): _normalize_image(img) self.cd = self.config.get('current_dir', '') self.dispatch_handlers = self.exceptions.get('dispatch_handlers', True) self.mem_tracing_enabled = self.config_analysis.get('memory_tracing', False) self.do_strings = self.config_analysis.get('strings', False) self.registry_config = self.config.get('registry', {}) self.modules_always_exist = self.config_modules.get('modules_always_exist', False) self.functions_always_exist = self.config_modules.get('functions_always_exist', False) def get_registry_config(self): return self.registry_config def on_run_complete(self): 
raise NotImplementedError() def enable_code_hook(self): if not self.tmp_code_hook and not self.mem_tracing_enabled: self.tmp_code_hook = self.add_code_hook(cb=self._hook_code) if self.tmp_code_hook: self.tmp_code_hook.enable() def disable_code_hook(self): if self.tmp_code_hook: self.tmp_code_hook.disable() def _module_access_hook(self, emu, addr, size, ctx): symbol = self.get_symbol_from_address(addr) if symbol: mod_name, fn = symbol.split('.') self.handle_import_func(mod_name, fn) return True def set_mem_tracing_hooks(self): if not self.mem_tracing_enabled: return if len(self.mem_trace_hooks) > 0: return self.mem_trace_hooks = ( self.add_code_hook(cb=self._hook_code), self.add_mem_read_hook(cb=self._hook_mem_read), self.add_mem_write_hook(cb=self._hook_mem_write) ) def cast(self, obj, bytez): if not isinstance(obj, EmuStruct): raise WindowsEmuError('Invalid object for cast') return obj.cast(bytez) def _unset_emu_hooks(self): if self.emu_hooks_set: self.emu_eng.mem_map(winemu.EMU_RETURN_ADDR, winemu.EMU_RESERVE_SIZE) self.emu_hooks_set = False def file_open(self, path, create=False): return self.fileman.file_open(path, create) def pipe_open(self, path, mode, num_instances, out_size, in_size): return self.fileman.pipe_open(path, mode, num_instances, out_size, in_size) def does_file_exist(self, path): return self.fileman.does_file_exist(path) def file_create_mapping(self, hfile, name, size, prot): return self.fileman.file_create_mapping(hfile, name, size, prot) def file_get(self, handle): return self.fileman.get_file_from_handle(handle) def file_delete(self, path): return self.fileman.delete_file(path) def pipe_get(self, handle): return self.fileman.get_pipe_from_handle(handle) def get_file_manager(self): return self.fileman def get_network_manager(self): return self.netman def get_crypt_manager(self): return self.cryptman def get_drive_manager(self): return self.driveman def reg_open_key(self, path, create=False): return self.regman.open_key(path, create) def reg_get_subkeys(self, hkey): return self.regman.get_subkeys(hkey) def reg_get_key(self, handle=0, path=''): if path: return self.regman.get_key_from_path(path) return self.regman.get_key_from_handle(handle) def reg_create_key(self, path): return self.regman.create_key(path) def _set_emu_hooks(self): if not self.emu_hooks_set: self.mem_unmap(winemu.EMU_RETURN_ADDR, winemu.EMU_RESERVE_SIZE) self.emu_hooks_set = True def add_run(self, run): self.run_queue.append(run) def _exec_next_run(self): try: run = self.run_queue.pop(0) except IndexError: self.on_emu_complete() return None self.run_complete = False self.reset_stack(self.stack_base) return self._exec_run(run) def call(self, addr, params=[]): self.reset_stack(self.stack_base) run = Run() run.type = 'call_0x%x' % (addr) run.start_addr = addr run.args = params if not self.run_queue: self.add_run(run) self.start() else: self.add_run(run) def _exec_run(self, run): self.log_info("* exec: %s" % run.type) self.curr_run = run if self.profiler: self.profiler.add_run(run) self.runs.append(self.curr_run) stk_ptr = self.get_stack_ptr() self.set_func_args(stk_ptr, self.return_hook, *run.args) stk_ptr = self.get_stack_ptr() stk_map = self.get_address_map(stk_ptr) self.curr_run.stack = MemAccess(base=stk_map.base, size=stk_map.size) if run.process_context: if run.process_context != self.get_current_process(): self.alloc_peb(run.process_context) self.set_current_process(run.process_context) if run.thread: self.set_current_thread(run.thread) if not self.kernel_mode: thread = self.get_current_thread() 
if thread: self.init_teb(thread, self.curr_process.get_peb()) self.init_tls(thread) self.set_pc(run.start_addr) return run def mem_cast(self, obj, addr): size = obj.sizeof() struct_bytes = self.mem_read(addr, size) return self.cast(obj, struct_bytes) def mem_purge(self): self.purge_memory() def setup_user_shared_data(self): if self.get_arch() == _arch.ARCH_X86: self.mem_map(self.page_size, base=0xFFDF0000, tag='emu.struct.KUSER_SHARED_DATA') elif self.get_arch() == _arch.ARCH_AMD64: self.mem_map(self.page_size, base=0xFFFFF78000000000, tag='emu.struct.KUSER_SHARED_DATA') def resume(self, addr, count=-1): self.emu_eng.start(addr, timeout=self.timeout, count=count) def start(self): try: run = self.run_queue.pop(0) except IndexError: return self.run_complete = False self.set_hooks() self._set_emu_hooks() if self.profiler: self.profiler.set_start_time() self._exec_run(run) while True: try: self.curr_mod = self.get_module_from_addr(self.curr_run.start_addr) self.emu_eng.start(self.curr_run.start_addr, timeout=self.timeout, count=self.max_instructions) if self.profiler: if self.profiler.get_run_time() > self.timeout: self.log_error('* Timeout of %d sec(s) reached.' % (self.timeout)) except KeyboardInterrupt: self.log_error('* User exited.') return except Exception as e: if self.exit_event and self.exit_event.is_set(): return stack_trace = traceback.format_exc() try: mnem, op, instr = self.get_disasm(self.get_pc(), DISASM_SIZE) except Exception as dis_err: self.log_error(str(dis_err)) error = self.get_error_info(str(e), self.get_pc(), traceback=stack_trace) self.curr_run.error = error run = self.on_run_complete() if not run: break continue break self.on_emu_complete() def get_current_run(self): return self.curr_run def get_current_module(self): return self.curr_mod def get_dropped_files(self): if self.fileman: return self.fileman.get_dropped_files() def set_hooks(self): super(WindowsEmulator, self).set_hooks() def get_processes(self): if not self.processes: self.init_processes(self.config_processes) return self.processes def kill_process(self, proc): try: self.processes.remove(proc) except ValueError: pass def get_current_thread(self): return self.curr_thread def get_current_process(self): return self.curr_process def set_current_process(self, process): self.curr_process = process def set_current_thread(self, thread): self.curr_thread = thread def _setup_gdt(self, arch): GDT_SIZE = 0x1000 SEG_SIZE = 0x1000 ENTRY_SIZE = 0x8 num_gdt_entries = 31 fs_addr = 0 gs_addr = 0 gdt_addr = None def _make_entry(index, base, access, limit=0xFFFFF000): access = access | (winemu.GDT_ACCESS_BITS.PresentBit | winemu.GDT_ACCESS_BITS.DirectionConformingBit) entry = 0xFFFF & limit entry |= (0xFFFFFF & base) << 16 entry |= (0xFF & access) << 40 entry |= (0xFF & (limit >> 16)) << 48 entry |= (0xFF & winemu.GDT_ACCESS_BITS.ProtMode32) << 52 entry |= (0xFF & (base >> 24)) << 56 entry = entry.to_bytes(8, 'little') offset = index * ENTRY_SIZE self.mem_write(gdt_addr + offset, entry) def _create_selector(index, flags): return flags | (index << 3) gdt_addr, gdt_size = self.get_valid_ranges(GDT_SIZE) self.mem_map(gdt_size, base=gdt_addr, tag='emu.gdt') seg_addr, seg_size = self.get_valid_ranges(SEG_SIZE) self.mem_map(seg_size, base=seg_addr, tag='emu.segment.gdt') access = (winemu.GDT_ACCESS_BITS.Data | winemu.GDT_ACCESS_BITS.DataWritable | winemu.GDT_ACCESS_BITS.Ring3) _make_entry(16, 0, access) access = (winemu.GDT_ACCESS_BITS.Code | winemu.GDT_ACCESS_BITS.CodeReadable | winemu.GDT_ACCESS_BITS.Ring3) _make_entry(17, 0, 
access) access = (winemu.GDT_ACCESS_BITS.Data | winemu.GDT_ACCESS_BITS.DataWritable | winemu.GDT_ACCESS_BITS.Ring0) _make_entry(18, 0, access) self.reg_write(_arch.X86_REG_GDTR, (0, gdt_addr, num_gdt_entries * ENTRY_SIZE-1, 0x0)) selector = _create_selector(16, winemu.GDT_FLAGS.Ring3) self.reg_write(_arch.X86_REG_DS, selector) selector = _create_selector(17, winemu.GDT_FLAGS.Ring3) self.reg_write(_arch.X86_REG_CS, selector) selector = _create_selector(18, winemu.GDT_FLAGS.Ring0) self.reg_write(_arch.X86_REG_SS, selector) if _arch.ARCH_X86 == arch: fs_addr, fs_size = self.get_valid_ranges(SEG_SIZE) self.mem_map(fs_size, base=fs_addr, tag='emu.segment.fs') access = (winemu.GDT_ACCESS_BITS.Data | winemu.GDT_ACCESS_BITS.DataWritable | winemu.GDT_ACCESS_BITS.Ring3) _make_entry(19, fs_addr, access) selector = _create_selector(19, winemu.GDT_FLAGS.Ring3) self.reg_write(_arch.X86_REG_FS, selector) elif _arch.ARCH_AMD64 == arch: gs_addr, gs_size = self.get_valid_ranges(SEG_SIZE) self.mem_map(gs_size, base=gs_addr, tag='emu.segment.gs') access = (winemu.GDT_ACCESS_BITS.Data | winemu.GDT_ACCESS_BITS.DataWritable | winemu.GDT_ACCESS_BITS.Ring3) _make_entry(15, gs_addr, access, limit=SEG_SIZE) selector = _create_selector(15, winemu.GDT_FLAGS.Ring3) self.reg_write(_arch.X86_REG_GS, selector) self.fs_addr = fs_addr self.gs_addr = gs_addr return fs_addr, gs_addr def init_peb(self, user_mods, proc=None): p = proc if not p: p = self.curr_process p.init_peb(user_mods) self.mem_write(self.peb_addr, p.peb.address.to_bytes(self.get_ptr_size(), 'little')) return p.peb def init_teb(self, thread, peb): if self.get_arch() == _arch.ARCH_X86: thread.init_teb(self.fs_addr, peb.address) elif self.get_arch() == _arch.ARCH_AMD64: thread.init_teb(self.gs_addr, peb.address) def init_tls(self, thread): ptrsz = self.get_ptr_size() run = self.curr_run module = self.get_mod_from_addr(run.start_addr) if module: modname = module.emu_path tokens = modname.split("\\") modname = tokens[len(tokens) - 1] tls_dirp = module.OPTIONAL_HEADER.DATA_DIRECTORY[9].VirtualAddress tls_dirp += module.OPTIONAL_HEADER.ImageBase tls_dir = self.mem_read(tls_dirp, ptrsz) thread.init_tls(tls_dir, os.path.splitext(modname)[0]) return def load_pe(self, path=None, data=None, imp_id=winemu.IMPORT_HOOK_ADDR): if not data and not os.path.exists(path): raise WindowsEmuError('File: %s not found' % (path)) pe = winemu.PeFile(path=path, data=data, imp_id=imp_id, imp_step=4) pe_type = 'unknown' if pe.is_driver(): pe_type = 'driver' elif pe.is_dll(): pe_type = 'dll' elif pe.is_exe(): pe_type = 'exe' arch = 'unknown' if pe.arch == _arch.ARCH_AMD64: arch = 'x64' elif pe.arch == _arch.ARCH_X86: arch = 'x86' self.input = {'path': pe.path, 'sha256': pe.hash, 'size': pe.file_size, 'arch': arch, 'filetype': pe_type, 'emu_version': self.get_emu_version(), 'os_run': self.get_osver_string()} if self.profiler: self.profiler.add_input_metadata(self.input) return pe def map_pe(self, pe, mod_name='none', emu_path=''): image_size = pe.image_size base = pe.base ranges = self.get_valid_ranges(image_size, addr=base) base, size = ranges addr = self.mem_map(size, base=base, tag='emu.module.%s' % (mod_name)) self.modules.append((pe, ranges, emu_path)) return addr def get_sys_modules(self): if not self.sys_modules: self.sys_modules = self.init_sys_modules(self.config_system_modules) return self.sys_modules def get_user_modules(self): if not self.user_modules: self.user_modules = self.init_user_modules(self.config_user_modules) return self.user_modules def get_mod_from_addr(self, 
addr): if self.curr_mod: end = self.curr_mod.get_base() + self.curr_mod.get_image_size() if addr >= self.curr_mod.get_base() and addr <= end: return self.curr_mod sys_mods = self.get_sys_modules() for m in sys_mods: if addr >= m.get_base() and addr < m.get_base() + m.image_size: return m user_mods = self.get_user_modules() for m in user_mods: if addr >= m.get_base() and addr < m.get_base() + m.image_size: return m def get_system_root(self): sysroot = self.env.get('systemroot', 'C:\\WINDOWS\\system32') if not sysroot.endswith('\\'): sysroot += '\\' return sysroot def get_windows_dir(self): sysroot = self.env.get('windir', 'C:\\WINDOWS') if not sysroot.endswith('\\'): sysroot += '\\' return sysroot def get_cd(self): if not self.cd: self.cd = self.env.get('cd', 'C:\\WINDOWS\\system32') if not self.cd.endswith('\\'): self.cd += '\\' return self.cd def set_cd(self, cd): self.cd = cd def get_env(self): return self.env def set_env(self, var, val): return self.env.update({var.lower(): val}) def get_os_version(self): return self.osversion def get_object_from_addr(self, addr): return self.om.get_object_from_addr(addr) def get_object_from_id(self, id): return self.om.get_object_from_id(id) def get_object_from_name(self, name): return self.om.get_object_from_name(name) def get_object_from_handle(self, handle): obj = self.om.get_object_from_handle(handle) if obj: return obj obj = self.fileman.get_object_from_handle(handle) if obj: return obj def get_object_handle(self, obj): obj = self.om.objects.get(obj.address) if obj: return self.om.get_handle(obj) def add_object(self, obj): self.om.add_object(obj) def search_path(self, file_name): if '\\' in file_name: return file_name fp = self.get_cd() if not fp.endswith('\\'): fp += '\\' return fp + file_name def new_object(self, otype): return self.om.new_object(otype) def create_process(self, path=None, cmdline=None, image=None, child=False): if not path and cmdline: path = cmdline file_path = shlex.split(path, posix=False)[0] if file_path[0] == '\"' and file_path[len(file_path) - 1] == '\"': file_path = file_path[1:-1] p = self.om.new_object(objman.Process) mod_data = self.get_module_data_from_emu_file(file_path) if mod_data: p.pe_data = mod_data else: new_mod = self.init_module(name=file_path, emu_path=path) self.map_decoy(new_mod) p.pe = new_mod p.path = file_path p.cmdline = cmdline t = self.om.new_object(objman.Thread) t.process = p t.tid = self.om.new_id() p.threads.append(t) if child: self.child_processes.append(p) else: self.processes.append(p) return p def create_thread(self, addr, ctx, proc_obj, thread_type='thread', is_suspended=False): if len(self.run_queue) >= self.max_runs: return 0, None thread = self.om.new_object(objman.Thread) thread.process = proc_obj hnd = self.om.get_handle(thread) run = Run() run.type = thread_type run.start_addr = addr run.instr_cnt = 0 run.args = (ctx,) run.process_context = proc_obj run.thread = thread if not is_suspended: self.run_queue.append(run) else: self.suspended_runs.append(run) return hnd, thread def resume_thread(self, thread): for r in self.suspended_runs: if r.thread == thread: _run = self.suspended_runs.pop(self.suspended_runs.index(r)) self.run_queue.append(_run) return True return False def get_dyn_imports(self): return self.dyn_imps def get_process_peb(self, process): return process.peb def add_callback(self, mod_name, func_name): for addr, mod, fn in self.callbacks: if mod_name == mod and func_name == fn: return addr if not self.callbacks: curr_idx = winemu.EMU_CALLBACK_RESERVE 
self.callbacks.append((curr_idx, mod_name, func_name)) else: curr_idx = self.callbacks[-1][0] curr_idx += 1 self.callbacks.append((curr_idx, mod_name, func_name)) return curr_idx def get_proc(self, mod_name, func_name): for addr, mod, fn in self.dyn_imps: if mod_name == mod and func_name == fn: return addr if not self.dyn_imps: curr_idx = winemu.DYM_IMP_RESERVE self.dyn_imps.append((curr_idx, mod_name, func_name)) else: curr_idx = self.dyn_imps[-1][0] curr_idx += 1 self.dyn_imps.append((curr_idx, mod_name, func_name)) return curr_idx def handle_import_data(self, mod_name, sym, data_ptr=0): module, func = self.api.get_data_export_handler(mod_name, sym) if not func: module, func = self.api.get_export_func_handler(mod_name, sym) if not func: return None proc_addr = self.get_proc(mod_name, sym) return proc_addr data_addr = self.api.call_data_func(module, func, data_ptr) return data_addr
MIT License
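The invalid-fetch handler above treats execution at certain reserved addresses as calls into imported or callback functions and emulates the API instead of raising an error. A much-simplified sketch of that lookup, using made-up addresses rather than speakeasy's reserved ranges:

# Fake addresses handed out for dynamically resolved imports (illustrative values).
DYN_IMPORTS = {
    0xFEED0000: ("kernel32", "GetProcAddress"),
    0xFEED0001: ("kernel32", "LoadLibraryA"),
}

def handle_invalid_fetch(address):
    # If the "invalid" fetch lands on one of our stub addresses,
    # dispatch to the corresponding API handler instead of failing.
    hit = DYN_IMPORTS.get(address)
    if hit:
        module, func = hit
        return "emulate %s!%s" % (module, func)
    return "genuine invalid fetch at 0x%x" % address

print(handle_invalid_fetch(0xFEED0001))  # emulate kernel32!LoadLibraryA
print(handle_invalid_fetch(0x41414141))  # genuine invalid fetch at 0x41414141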
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/providers/kubernetes/kubernetes_disk.py
StorageClass._Delete
python
def _Delete(self): body = self._BuildBody() kubernetes_helper.DeleteResource(body)
Deletes the StorageClass.
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/providers/kubernetes/kubernetes_disk.py#L316-L319
import json import logging import re from absl import flags from perfkitbenchmarker import disk from perfkitbenchmarker import errors from perfkitbenchmarker import flag_util from perfkitbenchmarker import kubernetes_helper from perfkitbenchmarker import providers from perfkitbenchmarker import resource from perfkitbenchmarker import vm_util from perfkitbenchmarker.configs import option_decoders FLAGS = flags.FLAGS def CreateDisks(disk_specs, vm_name): scratch_disks = [] for disk_num, disk_spec in enumerate(disk_specs): disk_class = GetKubernetesDiskClass(disk_spec.disk_type) scratch_disk = disk_class(disk_num, disk_spec, vm_name) scratch_disk.Create() scratch_disks.append(scratch_disk) return scratch_disks class KubernetesDiskSpec(disk.BaseDiskSpec): CLOUD = providers.KUBERNETES @classmethod def _GetOptionDecoderConstructions(cls): result = super(KubernetesDiskSpec, cls)._GetOptionDecoderConstructions() result.update({ 'provisioner': (option_decoders.StringDecoder, {'default': None, 'none_ok': True}), 'parameters': (option_decoders.TypeVerifier, {'default': {}, 'valid_types': (dict,)}) }) return result @classmethod def _ApplyFlags(cls, config_values, flag_values): super(KubernetesDiskSpec, cls)._ApplyFlags(config_values, flag_values) if flag_values['k8s_volume_provisioner'].present: config_values['provisioner'] = flag_values.k8s_volume_provisioner if flag_values['k8s_volume_parameters'].present: config_values['parameters'] = config_values.get('parameters', {}) config_values['parameters'].update( flag_util.ParseKeyValuePairs(flag_values.k8s_volume_parameters)) def GetKubernetesDiskClass(volume_type): return resource.GetResourceClass(KubernetesDisk, K8S_VOLUME_TYPE=volume_type) class KubernetesDisk(disk.BaseDisk): RESOURCE_TYPE = 'KubernetesDisk' REQUIRED_ATTRS = ['K8S_VOLUME_TYPE'] def __init__(self, disk_num, disk_spec, name): super(KubernetesDisk, self).__init__(disk_spec) self.name = '%s-%s' % (name, disk_num) def _Create(self): return def _Delete(self): return def Attach(self, vm): return def Detach(self): return def SetDevicePath(self, vm): return def AttachVolumeMountInfo(self, volume_mounts): volume_mount = { 'mountPath': self.mount_point, 'name': self.name } volume_mounts.append(volume_mount) class EmptyDirDisk(KubernetesDisk): K8S_VOLUME_TYPE = 'emptyDir' def GetDevicePath(self): raise errors.Error('GetDevicePath not supported for Kubernetes local disk') def AttachVolumeInfo(self, volumes): local_volume = { 'name': self.name, 'emptyDir': {} } volumes.append(local_volume) class CephDisk(KubernetesDisk): K8S_VOLUME_TYPE = 'rbd' def __init__(self, disk_num, disk_spec, name): super(CephDisk, self).__init__(disk_num, disk_spec, name) self.ceph_secret = FLAGS.ceph_secret def _Create(self): cmd = ['rbd', '-p', FLAGS.rbd_pool, 'create', self.name, '--size', str(1024 * self.disk_size)] _, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False) if retcode != 0: raise Exception('Creating RBD image failed: %s' % stderr) cmd = ['rbd', 'map', FLAGS.rbd_pool + '/' + self.name] stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False) if retcode != 0: raise Exception('Mapping RBD image failed: %s' % stderr) rbd_device = stdout.rstrip() if '/dev/rbd' not in rbd_device: cmd = ['rbd', 'showmapped'] stdout, _, _ = vm_util.IssueCommand(cmd, raise_on_failure=False) for image_device in stdout.split('\n'): if self.name in image_device: pattern = re.compile('/dev/rbd.*') output = pattern.findall(image_device) rbd_device = output[0].rstrip() break cmd = ['/sbin/mkfs.ext4', 
rbd_device] stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False) if retcode != 0: raise Exception('Formatting partition failed: %s' % stderr) cmd = ['rbd', 'unmap', rbd_device] stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False) if retcode != 0: raise Exception('Unmapping block device failed: %s' % stderr) def _Delete(self): cmd = ['rbd', 'rm', FLAGS.rbd_pool + '/' + self.name] stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False) if retcode != 0: msg = 'Removing RBD image failed. Reattempting.' logging.warning(msg) raise Exception(msg) def AttachVolumeInfo(self, volumes): ceph_volume = { 'name': self.name, 'rbd': { 'monitors': FLAGS.ceph_monitors, 'pool': FLAGS.rbd_pool, 'image': self.name, 'keyring': FLAGS.ceph_keyring, 'user': FLAGS.rbd_user, 'fsType': 'ext4', 'readOnly': False } } if FLAGS.ceph_secret: ceph_volume['rbd']['secretRef'] = {'name': FLAGS.ceph_secret} volumes.append(ceph_volume) def SetDevicePath(self, vm): cmd = "mount | grep %s | tr -s ' ' | cut -f 1 -d ' '" % self.mount_point device, _ = vm.RemoteCommand(cmd) self.device_path = device.rstrip() def GetDevicePath(self): return self.device_path class PersistentVolumeClaim(resource.BaseResource): @vm_util.Retry(poll_interval=10, max_retries=100, log_errors=False) def _WaitForPVCBoundCompletion(self): exists_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'get', 'pvc', '-o=json', self.name] logging.info('Waiting for PVC %s', self.name) pvc_info, _, _ = vm_util.IssueCommand(exists_cmd, suppress_warning=True, raise_on_failure=False) if pvc_info: pvc_info = json.loads(pvc_info) pvc = pvc_info['status']['phase'] if pvc == 'Bound': logging.info('PVC is ready.') return raise Exception('PVC %s is not ready. Retrying to check status.' % self.name) def __init__(self, name, storage_class, size): super(PersistentVolumeClaim, self).__init__() self.name = name self.storage_class = storage_class self.size = size def _Create(self): body = self._BuildBody() kubernetes_helper.CreateResource(body) self._WaitForPVCBoundCompletion() def _Delete(self): body = self._BuildBody() kubernetes_helper.DeleteResource(body) def _BuildBody(self): body = { 'kind': 'PersistentVolumeClaim', 'apiVersion': 'v1', 'metadata': { 'name': self.name }, 'spec': { 'accessModes': ['ReadWriteOnce'], 'resources': { 'requests': { 'storage': '%sGi' % self.size } }, 'storageClassName': self.storage_class, } } return json.dumps(body) class StorageClass(resource.BaseResource): def __init__(self, name, provisioner, parameters): super(StorageClass, self).__init__() self.name = name self.provisioner = provisioner self.parameters = parameters def _CheckStorageClassExists(self): exists_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'get', 'sc', '-o=json', self.name] sc_info, _, _ = vm_util.IssueCommand(exists_cmd, suppress_warning=True, raise_on_failure=False) if sc_info: sc_info = json.loads(sc_info) sc_name = sc_info['metadata']['name'] if sc_name == self.name: logging.info('StorageClass already exists.') return True else: logging.info('About to create new StorageClass: %s', self.name) return False def _Create(self): body = self._BuildBody() if not self._CheckStorageClassExists(): kubernetes_helper.CreateResource(body)
Apache License 2.0
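`_Create` and `_Delete` both hand a JSON body built from the resource's fields to kubectl via `kubernetes_helper`. The StorageClass's own `_BuildBody` is cut off above, so the sketch below follows the standard Kubernetes StorageClass manifest shape rather than PKB's exact code; the field values are placeholders:

import json

def build_storage_class_body(name, provisioner, parameters):
    # Standard StorageClass manifest fields.
    body = {
        "kind": "StorageClass",
        "apiVersion": "storage.k8s.io/v1",
        "metadata": {"name": name},
        "provisioner": provisioner,
        "parameters": parameters,
    }
    return json.dumps(body)

print(build_storage_class_body("fast-ssd", "kubernetes.io/gce-pd", {"type": "pd-ssd"}))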
egnyte/python-egnyte
egnyte/events.py
Event.user
python
def user(self): return resources.User(self._client, id=self.actor)
Get a user object based on the event's actor attribute.
https://github.com/egnyte/python-egnyte/blob/569214d3030e8cdb097109e1ab2a82afae9a1623/egnyte/events.py#L54-L56
import time from egnyte import base, exc, resources class Event(base.Resource): _url_template = "pubapi/v1/events/%(id)s"
MIT License
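`Event.user` builds the related resource lazily from the event's `actor` id instead of embedding a full user object. A self-contained sketch of that lazy-link pattern (these classes are stand-ins, not the egnyte SDK):

class User:
    def __init__(self, client, id):
        self.client = client
        self.id = id

class Event:
    def __init__(self, client, actor):
        self._client = client
        self.actor = actor

    @property
    def user(self):
        # Construct the related User on demand from the actor id.
        return User(self._client, id=self.actor)

event = Event(client="api-client", actor=42)
print(event.user.id)  # 42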
ngageoint/sarpy
sarpy/io/product/sidd2_elements/ExploitationFeatures.py
ExploitationFeaturesProductType.__init__
python
def __init__(self, Resolution=None, Ellipticity=None, Polarizations=None, North=None, Extensions=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.Resolution = Resolution self.Ellipticity = Ellipticity self.Polarizations = Polarizations self.North = North self.Extensions = Extensions super(ExploitationFeaturesProductType, self).__init__(**kwargs)
Parameters ---------- Resolution : RowColDoubleType|numpy.ndarray|list|tuple Ellipticity : float Polarizations : List[ProcTxRcvPolarizationType] North : None|float Extensions : None|ParametersCollection|dict kwargs
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/io/product/sidd2_elements/ExploitationFeatures.py#L790-L813
__classification__ = "UNCLASSIFIED" __author__ = "Thomas McCullough" import logging import datetime from typing import Union, List import numpy from sarpy.compliance import string_types from sarpy.io.xml.base import Serializable, ParametersCollection from sarpy.io.xml.descriptors import SerializableDescriptor, ParametersDescriptor, FloatDescriptor, FloatModularDescriptor, StringDescriptor, StringEnumDescriptor, DateTimeDescriptor, SerializableListDescriptor from sarpy.io.complex.sicd_elements.blocks import POLARIZATION1_VALUES from sarpy.io.complex.sicd_elements.SCPCOA import GeometryCalculator from sarpy.io.complex.sicd_elements.SICD import SICDType from sarpy.geometry.geocoords import wgs_84_norm, ecf_to_geodetic from .base import DEFAULT_STRICT from .blocks import RowColIntType, RowColDoubleType, RangeAzimuthType, AngleMagnitudeType, RadarModeType logger = logging.getLogger(__name__) class ExploitationCalculator(object): def __init__(self, geometry_calculator, row_vector, col_vector): self.geometry_calculator = geometry_calculator self.ARPPos = geometry_calculator.ARP self.ARPVel = geometry_calculator.ARP_vel self.SCP = geometry_calculator.SCP self.slant_x = self._make_unit(self.ARPPos - self.SCP) self.slant_z = self._make_unit(numpy.cross(self.slant_x, self.ARPVel)) if self.slant_z.dot(self.ARPPos) < 0: self.slant_z *= -1 self.slant_y = self._make_unit(numpy.cross(self.slant_z, self.slant_x)) self.ETP = wgs_84_norm(self.SCP) self.row_vector = self._make_unit(row_vector) self.col_vector = self._make_unit(col_vector) self.normal_vector = self._make_unit(numpy.cross(self.row_vector, self.col_vector)) @staticmethod def _make_unit(vec): vec_norm = numpy.linalg.norm(vec) if vec_norm < 1e-6: logger.error( 'input vector to be normlaized has norm {}, this may be a mistake'.format(vec_norm)) return vec/vec_norm @property def AzimuthAngle(self): return self.geometry_calculator.AzimAng @property def SlopeAngle(self): return numpy.rad2deg(numpy.arccos(self.slant_z.dot(self.ETP))) @property def DopplerConeAngle(self): return self.geometry_calculator.DopplerConeAng @property def SquintAngle(self): return self.geometry_calculator.SquintAngle @property def GrazeAngle(self): return numpy.rad2deg(numpy.arcsin(self.slant_x.dot(self.ETP))) @property def TiltAngle(self): return numpy.rad2deg(numpy.arctan(self.ETP.dot(self.slant_y)/self.ETP.dot(self.slant_z))) @property def Shadow(self): shadow = self.ETP - self.slant_x/(self.slant_x.dot(self.ETP)) shadow_prime = shadow - (shadow.dot(self.normal_vector)/self.slant_z.dot(self.normal_vector))*self.slant_z return AngleMagnitudeType( Angle=numpy.rad2deg(numpy.arctan2(self.col_vector.dot(shadow_prime), self.row_vector.dot(shadow_prime))), Magnitude=numpy.linalg.norm(shadow_prime)) @property def Layover(self): layover = self.normal_vector - self.slant_z/(self.slant_z.dot(self.normal_vector)) return AngleMagnitudeType( Angle=numpy.rad2deg(numpy.arctan2(self.col_vector.dot(layover), self.row_vector.dot(layover))), Magnitude=numpy.linalg.norm(layover)) @property def North(self): lat, lon, hae = ecf_to_geodetic(self.SCP) lat_r = numpy.deg2rad(lat) lon_r = numpy.deg2rad(lon) north = numpy.array( [-numpy.sin(lat_r)*numpy.cos(lon_r), -numpy.sin(lat_r)*numpy.sin(lon_r), numpy.cos(lat_r)]) north_prime = north - self.slant_z*(north.dot(self.normal_vector)/self.slant_z.dot(self.normal_vector)) return numpy.rad2deg(numpy.arctan2(self.col_vector.dot(north_prime), self.row_vector.dot(north_prime))) @property def MultiPath(self): multipath = self.slant_x - self.slant_z*( 
self.slant_x.dot(self.normal_vector)/self.slant_z.dot(self.normal_vector)) return numpy.rad2deg(numpy.arctan2(self.col_vector.dot(multipath), self.row_vector.dot(multipath))) @property def GroundTrack(self): track = self.ARPVel - (self.ARPVel.dot(self.normal_vector))*self.normal_vector return numpy.rad2deg(numpy.arctan2(self.col_vector.dot(track), self.row_vector.dot(track))) def get_ground_plane_resolution(self, row_ss, col_ss): x_g = self.slant_x - (self.slant_x.dot(self.normal_vector))*self.normal_vector theta_r = -numpy.arctan2(self.col_vector.dot(x_g), self.row_vector.dot(x_g)) graze = numpy.deg2rad(self.GrazeAngle) tilt = numpy.deg2rad(self.TiltAngle) k_r1 = (numpy.cos(theta_r)/numpy.cos(graze))**2 + (numpy.sin(theta_r)**2*numpy.tan(graze)*numpy.tan(tilt) - numpy.sin(2*theta_r)/numpy.cos(graze))*numpy.tan(graze)*numpy.tan(tilt) k_r2 = (numpy.sin(theta_r)/numpy.cos(tilt))**2 k_c1 = (numpy.sin(theta_r)**2/numpy.cos(graze) - numpy.sin(2*theta_r)*numpy.tan(graze)*numpy.tan(tilt))/numpy.cos(graze) + (numpy.cos(theta_r)*numpy.tan(graze)*numpy.tan(tilt))**2 k_c2 = (numpy.cos(theta_r)/numpy.cos(tilt))**2 r2 = row_ss*row_ss c2 = col_ss*col_ss return float(numpy.sqrt(k_r1*r2 + k_r2*c2)), float(numpy.sqrt(k_c1*r2 + k_c2*c2)) @classmethod def from_sicd(cls, sicd, row_vector, col_vector): calculator = GeometryCalculator( sicd.GeoData.SCP.ECF.get_array(), sicd.SCPCOA.ARPPos.get_array(), sicd.SCPCOA.ARPVel.get_array()) return cls(calculator, row_vector, col_vector) def _extract_sicd_tx_rcv_pol(str_in): if str_in is None: return 'UNKNOWN', 'UNKNOWN' if not isinstance(str_in, string_types): raise TypeError('requires a string type input.') if str_in in ['OTHER', 'UNKNOWN']: return 'UNKNOWN', 'UNKNOWN' return str_in.split(':') class InputROIType(Serializable): _fields = ('Size', 'UpperLeft') _required = ('Size', 'UpperLeft') Size = SerializableDescriptor( 'Size', RowColIntType, _required, strict=DEFAULT_STRICT, docstring='Number of rows and columns extracted from the input.') UpperLeft = SerializableDescriptor( 'UpperLeft', RowColIntType, _required, strict=DEFAULT_STRICT, docstring='The upper-left pixel extracted from the input.') def __init__(self, Size=None, UpperLeft=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.Size = Size self.UpperLeft = UpperLeft super(InputROIType, self).__init__(**kwargs) class TxRcvPolarizationType(Serializable): _fields = ('TxPolarization', 'RcvPolarization', 'RcvPolarizationOffset') _required = ('TxPolarization', 'RcvPolarization') _numeric_format = {'RcvPolarizationOffset': '0.17E'} TxPolarization = StringEnumDescriptor( 'TxPolarization', POLARIZATION1_VALUES, _required, strict=DEFAULT_STRICT, docstring='Transmit polarization type.') RcvPolarization = StringEnumDescriptor( 'RcvPolarization', POLARIZATION1_VALUES, _required, strict=DEFAULT_STRICT, docstring='Receive polarization type.') RcvPolarizationOffset = FloatModularDescriptor( 'RcvPolarizationOffset', 180.0, _required, strict=DEFAULT_STRICT, docstring='Angle offset for the receive polarization defined at aperture center.') def __init__(self, TxPolarization=None, RcvPolarization=None, RcvPolarizationOffset=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.TxPolarization = TxPolarization self.RcvPolarization = RcvPolarization self.RcvPolarizationOffset = RcvPolarizationOffset super(TxRcvPolarizationType, 
self).__init__(**kwargs) @classmethod def from_sicd_value(cls, str_in): tx, rcv = _extract_sicd_tx_rcv_pol(str_in) return cls(TxPolarization=tx, RcvPolarization=rcv) class ProcTxRcvPolarizationType(Serializable): _fields = ('TxPolarizationProc', 'RcvPolarizationProc') _required = ('TxPolarizationProc', 'RcvPolarizationProc') TxPolarizationProc = StringEnumDescriptor( 'TxPolarizationProc', POLARIZATION1_VALUES, _required, strict=DEFAULT_STRICT, docstring='Transmit polarization type.') RcvPolarizationProc = StringEnumDescriptor( 'RcvPolarizationProc', POLARIZATION1_VALUES, _required, strict=DEFAULT_STRICT, docstring='Receive polarization type.') def __init__(self, TxPolarizationProc=None, RcvPolarizationProc=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.TxPolarizationProc = TxPolarizationProc self.RcvPolarizationProc = RcvPolarizationProc super(ProcTxRcvPolarizationType, self).__init__(**kwargs) @classmethod def from_sicd_value(cls, str_in): tx, rcv = _extract_sicd_tx_rcv_pol(str_in) return cls(TxPolarizationProc=tx, RcvPolarizationProc=rcv) class ExploitationFeaturesCollectionInformationType(Serializable): _fields = ( 'SensorName', 'RadarMode', 'CollectionDateTime', 'LocalDateTime', 'CollectionDuration', 'Resolution', 'InputROI', 'Polarizations') _required = ('SensorName', 'RadarMode', 'CollectionDateTime', 'CollectionDuration') _collections_tags = {'Polarizations': {'array': False, 'child_tag': 'Polarization'}} _numeric_format = {'CollectionDuration': '0.17E'} SensorName = StringDescriptor( 'SensorName', _required, strict=DEFAULT_STRICT, docstring='The name of the sensor.') RadarMode = SerializableDescriptor( 'RadarMode', RadarModeType, _required, strict=DEFAULT_STRICT, docstring='Radar collection mode.') CollectionDateTime = DateTimeDescriptor( 'CollectionDateTime', _required, strict=DEFAULT_STRICT, docstring='Collection date and time defined in Coordinated Universal Time (UTC). 
The seconds ' 'should be followed by a Z to indicate UTC.') CollectionDuration = FloatDescriptor( 'CollectionDuration', _required, strict=DEFAULT_STRICT, docstring='The duration of the collection (units = seconds).') Resolution = SerializableDescriptor( 'Resolution', RangeAzimuthType, _required, strict=DEFAULT_STRICT, docstring='Uniformly-weighted resolution (range and azimuth) processed in ' 'the slant plane.') InputROI = SerializableDescriptor( 'InputROI', InputROIType, _required, strict=DEFAULT_STRICT, docstring='ROI representing portion of input data used to make ' 'this product.') Polarizations = SerializableListDescriptor( 'Polarizations', TxRcvPolarizationType, _collections_tags, _required, strict=DEFAULT_STRICT, docstring='Transmit and receive polarization(s).') def __init__(self, SensorName=None, RadarMode=None, CollectionDateTime=None, LocalDateTime=None, CollectionDuration=None, Resolution=None, Polarizations=None, **kwargs): self._local_date_time = None if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.SensorName = SensorName self.RadarMode = RadarMode self.CollectionDateTime = CollectionDateTime self.CollectionDuration = CollectionDuration self.LocalDateTime = LocalDateTime self.Resolution = Resolution self.Polarizations = Polarizations super(ExploitationFeaturesCollectionInformationType, self).__init__(**kwargs) @property def LocalDateTime(self): return self._local_date_time @LocalDateTime.setter def LocalDateTime(self, value): if value is None: self._local_date_time = None return elif isinstance(value, datetime.datetime): value = value.isoformat('T') if isinstance(value, string_types): self._local_date_time = value else: logger.error( 'Attribute LocalDateTime of class ExploitationFeaturesCollectionInformationType\n\t' 'requires a datetime.datetime or string. Got unsupported type {}.\n\t' 'Setting value to None.'.format(type(value))) self._local_date_time = None @classmethod def from_sicd(cls, sicd): if not isinstance(sicd, SICDType): raise TypeError('Requires SICDType instance, got type {}'.format(type(sicd))) polarizations = [ TxRcvPolarizationType.from_sicd_value(entry.TxRcvPolarization) for entry in sicd.RadarCollection.RcvChannels] return cls(SensorName=sicd.CollectionInfo.CollectorName, RadarMode=RadarModeType(**sicd.CollectionInfo.RadarMode.to_dict()), CollectionDateTime=sicd.Timeline.CollectStart, CollectionDuration=sicd.Timeline.CollectDuration, Resolution=(sicd.Grid.Row.SS, sicd.Grid.Col.SS), Polarizations=polarizations) class ExploitationFeaturesCollectionGeometryType(Serializable): _fields = ('Azimuth', 'Slope', 'Squint', 'Graze', 'Tilt', 'DopplerConeAngle', 'Extensions') _required = () _collections_tags = {'Extensions': {'array': False, 'child_tag': 'Extension'}} _numeric_format = { 'Azimuth': '0.17E', 'Slope': '0.17E', 'Squint': '0.17E', 'Graze': '0.17E', 'Tilt': '0.17E', 'DopplerConeAngle': '0.17E'} Azimuth = FloatDescriptor( 'Azimuth', _required, strict=DEFAULT_STRICT, bounds=(0.0, 360.0), docstring='Angle clockwise from north indicating the ETP line of sight vector.') Slope = FloatDescriptor( 'Slope', _required, strict=DEFAULT_STRICT, bounds=(0.0, 90.0), docstring='Angle between the ETP at scene center and the range vector perpendicular to ' 'the direction of motion.') Squint = FloatModularDescriptor( 'Squint', 180.0, _required, strict=DEFAULT_STRICT, docstring='Angle from the ground track to platform velocity vector at nadir. 
' 'Left-look is positive, right-look is negative.') Graze = FloatDescriptor( 'Graze', _required, strict=DEFAULT_STRICT, bounds=(0.0, 90.0), docstring='Angle between the ETP and the line of sight vector.') Tilt = FloatModularDescriptor( 'Tilt', 180.0, _required, strict=DEFAULT_STRICT, docstring='Angle between the ETP and the cross range vector. ' 'Also known as the twist angle.') DopplerConeAngle = FloatDescriptor( 'DopplerConeAngle', _required, strict=DEFAULT_STRICT, bounds=(0.0, 180.0), docstring='The angle between the velocity vector and the radar line-of-sight vector. ' 'Also known as the slant plane squint angle.') Extensions = ParametersDescriptor( 'Extensions', _collections_tags, _required, strict=DEFAULT_STRICT, docstring='Exploitation feature extension related to geometry for a ' 'single input image.') def __init__(self, Azimuth=None, Slope=None, Squint=None, Graze=None, Tilt=None, DopplerConeAngle=None, Extensions=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.Azimuth = Azimuth self.Slope = Slope self.Squint = Squint self.Graze = Graze self.Tilt = Tilt self.DopplerConeAngle = DopplerConeAngle self.Extensions = Extensions super(ExploitationFeaturesCollectionGeometryType, self).__init__(**kwargs) @classmethod def from_calculator(cls, calculator): if not isinstance(calculator, ExploitationCalculator): raise TypeError( 'Requires input which is an instance of ExploitationCalculator, got type {}'.format(type(calculator))) return cls(Azimuth=calculator.AzimuthAngle, Slope=calculator.SlopeAngle, Graze=calculator.SlopeAngle, Tilt=calculator.TiltAngle, DopplerConeAngle=calculator.DopplerConeAngle, Squint=calculator.SquintAngle) class ExploitationFeaturesCollectionPhenomenologyType(Serializable): _fields = ('Shadow', 'Layover', 'MultiPath', 'GroundTrack', 'Extensions') _required = () _collections_tags = {'Extensions': {'array': False, 'child_tag': 'Extension'}} _numeric_format = {'MultiPath': '0.17E', 'GroundTrack': '0.17E'} Shadow = SerializableDescriptor( 'Shadow', AngleMagnitudeType, _required, strict=DEFAULT_STRICT, docstring='The phenomenon where vertical objects occlude radar ' 'energy.') Layover = SerializableDescriptor( 'Layover', AngleMagnitudeType, _required, strict=DEFAULT_STRICT, docstring='The phenomenon where vertical objects appear as ground objects with ' 'the same range/range rate.') MultiPath = FloatModularDescriptor( 'MultiPath', 180.0, _required, strict=DEFAULT_STRICT, docstring='This is a range dependent phenomenon which describes the energy from a ' 'single scatter returned to the radar via more than one path and results ' 'in a nominally constant direction in the ETP.') GroundTrack = FloatModularDescriptor( 'GroundTrack', 180.0, _required, strict=DEFAULT_STRICT, docstring='Counter-clockwise angle from increasing row direction to ground track ' 'at the center of the image.') Extensions = ParametersDescriptor( 'Extensions', _collections_tags, _required, strict=DEFAULT_STRICT, docstring='Exploitation feature extension related to geometry for a ' 'single input image.') def __init__(self, Shadow=None, Layover=None, MultiPath=None, GroundTrack=None, Extensions=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.Shadow = Shadow self.Layover = Layover self.MultiPath = MultiPath self.GroundTrack = GroundTrack self.Extensions = Extensions 
super(ExploitationFeaturesCollectionPhenomenologyType, self).__init__(**kwargs) @classmethod def from_calculator(cls, calculator): if not isinstance(calculator, ExploitationCalculator): raise TypeError( 'Requires input which is an instance of ExploitationCalculator, got type {}'.format(type(calculator))) return cls(Shadow=calculator.Shadow, Layover=calculator.Layover, MultiPath=calculator.MultiPath, GroundTrack=calculator.GroundTrack) class CollectionType(Serializable): _fields = ('Information', 'Geometry', 'Phenomenology', 'identifier') _required = ('Information', 'identifier') _set_as_attribute = ('identifier', ) Information = SerializableDescriptor( 'Information', ExploitationFeaturesCollectionInformationType, _required, strict=DEFAULT_STRICT, docstring='General collection information.') Geometry = SerializableDescriptor( 'Geometry', ExploitationFeaturesCollectionGeometryType, _required, strict=DEFAULT_STRICT, docstring='Key geometry parameters independent of product ' 'processing.') Phenomenology = SerializableDescriptor( 'Phenomenology', ExploitationFeaturesCollectionPhenomenologyType, _required, strict=DEFAULT_STRICT, docstring='Phenomenology related to both the geometry and the final ' 'product processing.') identifier = StringDescriptor( 'identifier', _required, strict=DEFAULT_STRICT, docstring='The exploitation feature identifier.') def __init__(self, Information=None, Geometry=None, Phenomenology=None, identifier=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.Information = Information self.Geometry = Geometry self.Phenomenology = Phenomenology self.identifier = identifier super(CollectionType, self).__init__(**kwargs) @classmethod def from_calculator(cls, calculator, sicd): if not isinstance(calculator, ExploitationCalculator): raise TypeError( 'Requires input which is an instance of ExploitationCalculator, got type {}'.format(type(calculator))) return cls(identifier=sicd.CollectionInfo.CoreName, Information=ExploitationFeaturesCollectionInformationType.from_sicd(sicd), Geometry=ExploitationFeaturesCollectionGeometryType.from_calculator(calculator), Phenomenology=ExploitationFeaturesCollectionPhenomenologyType.from_calculator(calculator)) class ExploitationFeaturesProductType(Serializable): _fields = ('Resolution', 'Ellipticity', 'Polarizations', 'North', 'Extensions') _required = ('Resolution', 'Ellipticity', 'Polarizations') _collections_tags = { 'Polarizations': {'array': False, 'child_tag': 'Polarization'}, 'Extensions': {'array': False, 'child_tag': 'Extension'}} _numeric_format = {'Ellipticity': '0.17E', 'North': '0.17E'} Resolution = SerializableDescriptor( 'Resolution', RowColDoubleType, _required, strict=DEFAULT_STRICT, docstring='Uniformly-weighted resolution projected into the Earth Tangent ' 'Plane (ETP).') Ellipticity = FloatDescriptor( 'Ellipticity', _required, strict=DEFAULT_STRICT, docstring="Ellipticity of the 2D-IPR at the ORP, measured in the *Earth Geodetic " "Tangent Plane (EGTP)*. 
Ellipticity is the ratio of the IPR ellipse's " "major axis to minor axis.") Polarizations = SerializableListDescriptor( 'Polarizations', ProcTxRcvPolarizationType, _collections_tags, _required, strict=DEFAULT_STRICT, docstring='Describes the processed transmit and receive polarizations for the ' 'product.') North = FloatModularDescriptor( 'North', 180.0, _required, strict=DEFAULT_STRICT, docstring='Counter-clockwise angle from increasing row direction to north at the center ' 'of the image.') Extensions = ParametersDescriptor( 'Extensions', _collections_tags, _required, strict=DEFAULT_STRICT, docstring='Exploitation feature extension related to geometry for a ' 'single input image.')
MIT License
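A hedged usage sketch for `ExploitationFeaturesProductType`, assuming the sarpy package is installed; every value below is a placeholder chosen only to satisfy the documented parameter types, and whether validation accepts them depends on sarpy's descriptor settings:

# Assumes sarpy is installed; all values are illustrative placeholders.
from sarpy.io.product.sidd2_elements.ExploitationFeatures import (
    ExploitationFeaturesProductType, ProcTxRcvPolarizationType)

product = ExploitationFeaturesProductType(
    Resolution=(1.25, 1.25),   # row/col resolution projected into the ETP
    Ellipticity=1.1,           # ratio of the 2D-IPR ellipse's major to minor axis
    Polarizations=[ProcTxRcvPolarizationType(TxPolarizationProc='V',
                                             RcvPolarizationProc='V')],
    North=0.0)                 # optional: angle to north at the image centre
print(product.Ellipticity)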
numberly/flask-stupe
flask_stupe/app.py
Stupeflask.register_converters
python
def register_converters(self, converter_list): for converter in converter_list: self.register_converter(converter)
Register multiple converters at once. See :meth:`register_converter`.
https://github.com/numberly/flask-stupe/blob/1aabda272d84c0f651c6302cb5eefe11c46567ef/flask_stupe/app.py#L47-L53
import os from pkgutil import iter_modules from flask import Blueprint, Flask from flask_stupe.config import Config from flask_stupe.converters import converters from flask_stupe.logging import log from flask_stupe.request import Request class Stupeflask(Flask): config_class = Config request_class = Request def __init__(self, *args, **kwargs): super(Stupeflask, self).__init__(*args, **kwargs) config_path = os.path.join(os.getcwd(), "config.py") log.info(" * Loading default config ({})".format(config_path)) self.config.from_pyfile(config_path, silent=True) log.info(" * Loading $CONFIG ({})".format(os.environ.get("CONFIG"))) self.config.from_envvar("CONFIG", silent=True) from_env = self.config.from_env() log.info(" * Overriden by environment: " + ", ".join(from_env)) self.register_converters(converters) def register_converter(self, converter, name=None): if not name: name = converter.__name__ if "Converter" in name: name = converter.__name__.replace("Converter", "") self.url_map.converters[name] = converter
MIT License
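A hedged usage sketch for `register_converters`, assuming flask-stupe (and with it Flask/werkzeug) is installed; `HexConverter` is a made-up example. Note the registered name has the `Converter` suffix stripped, per `register_converter` above:

# Assumes flask-stupe is installed; HexConverter is illustrative.
from werkzeug.routing import BaseConverter
from flask_stupe.app import Stupeflask

class HexConverter(BaseConverter):
    regex = "[0-9a-fA-F]+"

    def to_python(self, value):
        return int(value, 16)

app = Stupeflask(__name__)
app.register_converters([HexConverter])  # available in routes as "Hex"

@app.route("/item/<Hex:item_id>")
def item(item_id):
    return str(item_id)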
stiletto/bnw
bnw/handlers/command_vcard.py
cmd_vcard
python
def cmd_vcard(request, safe=None): reply = domish.Element((None, 'iq')) reply.addUniqueId() reply['type'] = 'get' reply.addElement('vCard', 'vcard-temp') send_raw(request.user['jid'], None, reply) return dict(ok=True, desc='vCard has been requested.')
Request user's vCard.
https://github.com/stiletto/bnw/blob/bce286de01fac6b4edff71f91d8b4ebb6825d0ed/bnw/handlers/command_vcard.py#L8-L15
from twisted.words.xish import domish from base import require_auth from bnw.xmpp.base import send_raw @require_auth
BSD 2-Clause Simplified License
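The command above builds a raw XMPP `<iq type='get'>` stanza carrying a `vcard-temp` payload and hands it to `send_raw`. A small sketch of constructing and inspecting such a stanza with Twisted's domish (assumes Twisted is installed; nothing is sent):

# Assumes Twisted is installed; this only builds the stanza.
from twisted.words.xish import domish

iq = domish.Element((None, 'iq'))
iq.addUniqueId()                 # random stanza id, as in cmd_vcard above
iq['type'] = 'get'
iq.addElement('vCard', 'vcard-temp')
print(iq.toXml())                # e.g. <iq id="..." type="get"><vCard xmlns="vcard-temp"/></iq>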
numba/pyculib
pyculib/nputil.py
colmajor
python
def colmajor(x, dtype, var): if not x.flags['F_CONTIGUOUS']: warnings.warn("%s is converted to column-major layout"%(var), warnings.PerformanceWarning, stacklevel=3) return np.asfortranarray(x, dtype=dtype) else: return astype(x, dtype, var, stacklevel=4)
Return `x` or a copy of `x`, with its dimension ordering converted to column-major, and its type converted to `dtype`. `var` is the name of `x` as seen by users of a public API, which may be used in a warning message.
https://github.com/numba/pyculib/blob/cb1b2a884e7fafa898ad5711172cda527b9875c8/pyculib/nputil.py#L32-L43
import numpy as np from pyculib import warnings promote = np.promote_types def alias(a, b): if a is b: return True elif a.base is None and b.base is None: return False else: return a.base is b or a is b.base or a.base is b.base def astype(x, dtype, var, stacklevel=3): if dtype != x.dtype: warnings.warn("%s (%s) is converted to %s"%(var, x.dtype, dtype), warnings.PerformanceWarning, stacklevel=stacklevel) return x.astype(dtype, copy=False)
BSD 2-Clause Simplified License
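A small sketch of colmajor on a row-major input, assuming pyculib and its CUDA dependencies are installed; the variable name "a" is passed so the warning can mention it.

# Hedged sketch: converting a C-ordered float32 array to Fortran-ordered float64.
import numpy as np
from pyculib.nputil import colmajor

a = np.arange(6, dtype=np.float32).reshape(2, 3)  # C-contiguous (row-major)
b = colmajor(a, np.float64, "a")                  # emits a PerformanceWarning and copies
print(b.flags['F_CONTIGUOUS'], b.dtype)           # True float64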
netmanchris/pyawair
pyawair/data.py
get_raw_data
python
def get_raw_data(auth, device_name=None, device_type=None, device_id=None): if device_type is None or device_id is None: awair_device = pyawair.objects.AwairDev(device_name, auth) device_type = awair_device.type() device_id = awair_device.id() base_url = "https://developer-apis.awair.is/v1/users/self/devices/" data_url = "/air-data/raw" data = pyawair.conn.get_data(auth, device_id, device_type, base_url, data_url) return data
Function to get raw (non-averaged) air data from a single specific Awair Device linked to your account :param auth: pyawair.auth.AwairAuth object which contains a valid authentication token :param device_type: str which matches the awair device type :param device_name: str which matches exactly to the name of a specific device :param device_id: str or int which matches the specific awair device internal id number :return: Object of Dict type which contains raw air data
https://github.com/netmanchris/pyawair/blob/5f0bbcfe79712fca467b116ef1dce77317a692b9/pyawair/data.py#L91-L111
from pyawair.devices import * import pyawair.conn import pyawair.objects def get_current_air_data(auth, device_name=None, device_type=None, device_id=None): if device_type is None or device_id is None: awair_device = pyawair.objects.AwairDev(device_name, auth) device_type = awair_device.type() device_id = awair_device.id() base_url = "https://developer-apis.awair.is/v1/users/self/devices/" data_url = "/air-data/latest" data = pyawair.conn.get_data(auth, device_id, device_type, base_url, data_url) return data def get_5_min_average(auth, device_name=None, device_type=None, device_id=None, limit=1000, desc=True): if device_type is None or device_id is None: awair_device = pyawair.objects.AwairDev(device_name, auth) device_type = awair_device.type() device_id = awair_device.id() base_url = "https://developer-apis.awair.is/v1/users/self/devices/" data_url = "/air-data/5-min-avg" if desc: desc_param = "true" else: desc_param = "false" args = "?limit={}&desc={}".format(limit, desc_param) data = pyawair.conn.get_data(auth, device_id, device_type, base_url, data_url, args) return data def get_15_min_average(auth, device_name=None, device_type=None, device_id=None, limit=1000, desc=True): if device_type is None or device_id is None: awair_device = pyawair.objects.AwairDev(device_name, auth) device_type = awair_device.type() device_id = awair_device.id() base_url = "https://developer-apis.awair.is/v1/users/self/devices/" data_url = "/air-data/15-min-avg" if desc: desc_param = "true" else: desc_param = "false" args = "?limit={}&desc={}".format(limit, desc_param) data = pyawair.conn.get_data(auth, device_id, device_type, base_url, data_url, args) return data
Apache License 2.0
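A hedged usage sketch for get_raw_data; the token string and device name are placeholders, and the AwairAuth constructor signature is assumed from the docstring rather than shown here.

# Hedged sketch: fetch raw air data by device name (token and name are placeholders).
import pyawair.auth
from pyawair.data import get_raw_data

auth = pyawair.auth.AwairAuth("YOUR-ACCESS-TOKEN")    # assumed constructor signature
raw = get_raw_data(auth, device_name="Bedroom Awair")  # type/id resolved via AwairDev
print(raw)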
alvenchen/nsfw-pytorch
src/train.py
parse_args
python
def parse_args(): parser = argparse.ArgumentParser(description='Head pose estimation using the Hopenet network.') parser.add_argument('--gpu', dest='gpu', help='GPU device id to use', nargs='+', default=[0, 1], type=int) parser.add_argument('--num_epochs', dest='num_epochs', help='Maximum number of training epochs.', default=100, type=int) parser.add_argument('--batch_size', dest='batch_size', help='Batch size.', default=64, type=int) parser.add_argument('--lr', dest='lr', help='Base learning rate.', default=0.1, type=float) parser.add_argument('--trainning_data_dir', dest='trainning_data_dir', help='Directory path for trainning data.', default='./data/train', type=str) parser.add_argument('--validation_data_dir', dest='validation_data_dir', help='Directory path for validation data.', default='./data/test', type=str) parser.add_argument('--save_path', dest='save_path', help='Path of model snapshot for save.', default='./models', type=str) parser.add_argument('--saved_model', help='Path of model snapshot for continue training.', default='./models/resnet50-19c8e357.pth', type=str) args = parser.parse_args() return args
Parse input arguments.
https://github.com/alvenchen/nsfw-pytorch/blob/7cd9d237e09e19054550e9831cba0a3c33cdddcf/src/train.py#L20-L42
import argparse import time from model import resnet from model.dpn import dpn92 import torch import torch.backends.cudnn as cudnn import torchvision from torchvision import transforms import torch.nn as nn from torch.autograd import Variable from model.utils import load_filtered_state_dict, SaveBestModel, AverageMeter, accuracy from data_wrapper import get_dataset, DataWrapper from tensorboardX import SummaryWriter
MIT License
peerchemist/cryptotik
cryptotik/kraken.py
Kraken.get_market_ticker
python
def get_market_ticker(self, pair): p = self.format_pair(pair) return self.api(self.url + "public/Ticker", params={'pair': p})[p]
Returns a simple current market status report.
https://github.com/peerchemist/cryptotik/blob/24ffd74c43ff1fc171081e135cb2b66b775af3f3/cryptotik/kraken.py#L129-L133
import hmac import hashlib import time import base64 import requests from cryptotik.common import (headers, ExchangeWrapper, NormalizedExchangeWrapper) from cryptotik.exceptions import (InvalidBaseCurrencyError, InvalidDelimiterError, APIError) from re import findall from decimal import Decimal from datetime import datetime class Kraken(ExchangeWrapper): url = 'https://api.kraken.com/0/' name = 'kraken' delimiter = "" headers = headers taker_fee, maker_fee = 0.00, 0.00 quote_order = 0 base_currencies = ['xbt', 'eur', 'usd', 'eth', 'cad', 'gbp', 'jpy'] @classmethod def format_pair(cls, pair): return "".join(findall(r"[^\W\d_]+|\d+", pair)).upper() def get_base_currencies(self): raise NotImplementedError def __init__(self, apikey=None, secret=None, timeout=None, proxy=None): if apikey and secret: self.apikey = apikey.encode('utf-8') self.secret = secret.encode('utf-8') if proxy: assert proxy.startswith('https'), {'Error': 'Only https proxies supported.'} self.proxy = {'https': proxy} if not timeout: self.timeout = (8, 15) else: self.timeout = timeout self.api_session = requests.Session() def _verify_response(self, response): if response.json()['error']: raise APIError(response.json()['error']) def _generate_signature(self, message): sig = hmac.new(base64.b64decode(self.secret), message, hashlib.sha512) return base64.b64encode(sig.digest()).decode() def api(self, url, params=None): try: result = self.api_session.get(url, headers=self.headers, params=params, timeout=self.timeout, proxies=self.proxy) result.raise_for_status() except requests.exceptions.HTTPError as e: print(e) self._verify_response(result) return result.json()['result'] def get_nonce(self): return int(1000 * time.time()) def private_api(self, url, params={}): urlpath = url[22:] data = params data['nonce'] = self.get_nonce() postdata = requests.compat.urlencode(data) encoded = (str(data['nonce']) + postdata).encode() message = urlpath.encode() + hashlib.sha256(encoded).digest() signature = self._generate_signature(message) try: result = self.api_session.post(url, data=data, headers={ 'API-Key': self.apikey, 'API-Sign': signature}, timeout=self.timeout, proxies=self.proxy) except requests.exceptions.HTTPError as e: print(e) self._verify_response(result) return result.json()['result'] def get_markets(self): markets = self.api(self.url + "public/AssetPairs") return [markets[i]['altname'].lower() for i in markets.keys()] def get_market_ohlcv_data(self, pair, interval, since=None): if str(interval) not in "1, 5, 15, 30, 60, 240, 1440, 10080, 21600".split(', '): raise APIError('Unsupported interval.') return self.api(self.url + 'public/OHLC', params={'pair': self.format_pair(pair), 'interval': interval, 'since': since})
BSD 3-Clause New or Revised License
fgnt/paderbox
paderbox/utils/nested.py
deflatten
python
def deflatten(d: dict, sep: Optional[str] = '.', maxdepth: int = -1): ret = {} if sep is not None: d = { tuple(k.split(sep, maxdepth)): v for k, v in d.items() } for keys, v in d.items(): sub_dict = ret for sub_key in keys[:-1]: if sub_key not in sub_dict: sub_dict[sub_key] = {} assert isinstance(sub_dict[sub_key], dict), ( f'Conflicting keys! {keys}' ) sub_dict = sub_dict[sub_key] assert keys[-1] not in sub_dict, f'Conflicting keys! {keys}' sub_dict[keys[-1]] = v return ret
Build a nested `dict` from a flat dict respecting a separator. Args: d: Flattened `dict` to reconstruct a `nested` dict from sep: The separator used in the keys of `d`. If `None`, `d.keys()` should only contain `tuple`s. maxdepth: Maximum depth to which nested conversion is performed >>> d_in = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]} >>> d = flatten(d_in) >>> for k, v in d.items(): print(k, v) a 1 c.a 2 c.b.x 5 c.b.y 10 d [1, 2, 3] >>> deflatten(d) {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]} >>> deflatten(d, maxdepth=1) {'a': 1, 'c': {'a': 2, 'b.x': 5, 'b.y': 10}, 'd': [1, 2, 3]} >>> deflatten(d, maxdepth=0) {'a': 1, 'c.a': 2, 'c.b.x': 5, 'c.b.y': 10, 'd': [1, 2, 3]} >>> d = flatten(d_in, sep='_') >>> for k, v in d.items(): print(k, v) a 1 c_a 2 c_b_x 5 c_b_y 10 d [1, 2, 3] >>> deflatten(d, sep='_') {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]} >>> deflatten({('a', 'b'): 'd', ('a', 'c'): 'e'}, sep=None) {'a': {'b': 'd', 'c': 'e'}} >>> deflatten({'a.b': 1, 'a': 2}) Traceback (most recent call last): ... AssertionError: Conflicting keys! ('a',) >>> deflatten({'a': 1, 'a.b': 2}) Traceback (most recent call last): ... AssertionError: Conflicting keys! ('a', 'b')
https://github.com/fgnt/paderbox/blob/c71538fe65235c5f5a46dae781ddbbd4be955c30/paderbox/utils/nested.py#L62-L124
import copy import collections import itertools import operator from typing import Optional, Any, Union, Sequence, Mapping, Tuple, Callable, Generator, Iterable, Iterator def flatten(d, sep: Optional[str] = '.', *, flat_type=dict): def inner(d, parent_key): items = {} for k, v in d.items(): new_key = parent_key + (k,) if isinstance(v, flat_type) and v: items.update(inner(v, new_key)) else: items[new_key] = v return items items = inner(d, ()) if sep is None: return items else: return { sep.join(k): v for k, v in items.items() }
MIT License
kriaga/health-checker
HealthChecker/venv/Lib/site-packages/selenium/webdriver/phantomjs/service.py
Service.service_url
python
def service_url(self): return "http://localhost:%d/wd/hub" % self.port
Gets the url of the GhostDriver Service
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/selenium/webdriver/phantomjs/service.py#L59-L63
import os import tempfile from selenium.webdriver.common import service class Service(service.Service): def __init__(self, executable_path, port=0, service_args=None, log_path=None): self.service_args = service_args if self.service_args is None: self.service_args = [] else: self.service_args = service_args[:] if not log_path: log_path = "ghostdriver.log" if not self._args_contain("--cookies-file="): self._cookie_temp_file_handle, self._cookie_temp_file = tempfile.mkstemp() self.service_args.append("--cookies-file=" + self._cookie_temp_file) else: self._cookie_temp_file = None service.Service.__init__(self, executable_path, port=port, log_file=open(log_path, 'w')) def _args_contain(self, arg): return len(list(filter(lambda x: x.startswith(arg), self.service_args))) > 0 def command_line_args(self): return self.service_args + ["--webdriver=%d" % self.port] @property
MIT License
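A small sketch showing the URL this property yields when the service is given an explicit port; it assumes a Selenium 3-era install that still ships the PhantomJS driver module, the binary path is a placeholder, and the PhantomJS process is not actually started here.

# Hedged sketch: service_url simply reflects the configured port.
from selenium.webdriver.phantomjs.service import Service

svc = Service(executable_path="/usr/local/bin/phantomjs", port=8910)
print(svc.service_url)   # http://localhost:8910/wd/hub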
luciferjack/python-mysql-pool
PyMysqlPool/mysql/connector/conversion.py
MySQLConverterBase.escape
python
def escape(self, buf): return buf
Escape buffer for sending to MySQL
https://github.com/luciferjack/python-mysql-pool/blob/7b812c6fc7f04255620cb86f272a2d8900c2240d/PyMysqlPool/mysql/connector/conversion.py#L106-L108
import datetime import time from decimal import Decimal from .catch23 import PY2, NUMERIC_TYPES, struct_unpack from .constants import FieldType, FieldFlag, CharacterSet from .custom_types import HexLiteral class MySQLConverterBase(object): def __init__(self, charset='utf8', use_unicode=True): self.python_types = None self.mysql_types = None self.charset = None self.charset_id = 0 self.use_unicode = None self.set_charset(charset) self.set_unicode(use_unicode) self._cache_field_types = {} def set_charset(self, charset): if charset == 'utf8mb4': charset = 'utf8' if charset is not None: self.charset = charset else: self.charset = 'utf8' self.charset_id = CharacterSet.get_charset_info(self.charset)[0] def set_unicode(self, value=True): self.use_unicode = value def to_mysql(self, value): type_name = value.__class__.__name__.lower() try: if isinstance(value, tuple): value_count = len(list(value)) if value_count == 1: for item in value: return "(" + str(item) + ")" else: value = value[:value_count] return value return getattr(self, "_{0}_to_mysql".format(type_name))(value) except AttributeError: return value def to_python(self, vtype, value): if (value == b'\x00' or value is None) and vtype[1] != FieldType.BIT: return None if not self._cache_field_types: self._cache_field_types = {} for name, info in FieldType.desc.items(): try: self._cache_field_types[info[0]] = getattr( self, '_{0}_to_python'.format(name)) except AttributeError: pass try: return self._cache_field_types[vtype[1]](value, vtype) except KeyError: return value
MIT License
pwwang/datar
datar/dplyr/order_by.py
with_order
python
def with_order( order: Iterable[Any], func: Callable, x: Iterable[Any], *args: Any, **kwargs: Any, ) -> Series: x = order_by(order, x) out = func(x, *args, **kwargs) if isinstance(out, Series): out = out.reset_index(drop=True) return order_by(order, out)
Control the argument and result of a window function Examples: >>> with_order([5,4,3,2,1], cumsum, [1,2,3,4,5]) >>> # 15, 14, 12, 9, 5 Args: order: An iterable to order the argument and result func: The window function x: The first argument for the function *args: and **kwargs: Other arguments for the function Returns: A Series of the ordered result
https://github.com/pwwang/datar/blob/4e2b5db026ad35918954576badef9951928c0cb1/datar/dplyr/order_by.py#L53-L82
from typing import Callable, Iterable, Any, Sequence from pandas import Series from pipda import register_func from ..core.contexts import Context from ..core.types import is_scalar @register_func(None, context=Context.EVAL) def order_by( order: Sequence[Any], data: Sequence[Any], ) -> Series: if is_scalar(order): order = [order] if is_scalar(data): data = [data] order = Series(order) if len(order) > 1 else Series(order, dtype=object) order = order.sort_values() out = Series(data) if len(data) > 1 else Series(data, dtype=object) return out[order.index] @register_func(None, context=Context.EVAL)
MIT License
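A direct-call sketch mirroring the docstring example above; numpy's cumsum stands in for the window function, and the plain call with concrete arguments is exactly what the docstring itself demonstrates.

# Hedged sketch reproducing the docstring example with numpy's cumsum.
import numpy as np
from datar.dplyr.order_by import with_order

res = with_order([5, 4, 3, 2, 1], np.cumsum, [1, 2, 3, 4, 5])
print(list(res))   # expected: [15, 14, 12, 9, 5]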
tensorflow/tensor2tensor
tensor2tensor/models/research/vqa_self_attention.py
iterative_encoder_decoder
python
def iterative_encoder_decoder(encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias, query, hparams): for _ in range(hparams.num_rec_steps): with tf.variable_scope("step", reuse=tf.AUTO_REUSE): encoder_output = image_question_encoder( encoder_input, encoder_self_attention_bias, hparams, query) decoder_output = decoder( query, encoder_output, None, encoder_decoder_attention_bias, hparams) encoder_input = encoder_output query = decoder_output return decoder_output
Iterative encoder decoder.
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/models/research/vqa_self_attention.py#L654-L678
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range from tensor2tensor.layers import common_attention from tensor2tensor.layers import common_hparams from tensor2tensor.layers import common_layers from tensor2tensor.layers import vqa_layers from tensor2tensor.models.research import vqa_attention from tensor2tensor.utils import registry import tensorflow.compat.v1 as tf from tensorflow.contrib.layers.python.layers import utils @registry.register_model class VqaSelfAttention(vqa_attention.VqaAttentionBaseline): def body(self, features): hp = self.hparams if hp.image_input_type == "image": image_feat = vqa_layers.image_embedding( features["inputs"], model_fn=eval(hp.image_model_fn), trainable=hp.train_resnet, is_training=hp.mode == tf.estimator.ModeKeys.TRAIN) else: image_feat = features["inputs"] image_feat = common_layers.flatten4d3d(image_feat) image_hidden_size = hp.image_hidden_size or hp.hidden_size if hp.image_feat_preprocess_proj: image_feat = common_layers.dense(image_feat, image_hidden_size) utils.collect_named_outputs("norms", "image_feat_after_proj", tf.norm(image_feat, axis=-1)) else: assert image_hidden_size == 2048 image_feat = tf.nn.dropout( image_feat, keep_prob=1.-hp.layer_prepostprocess_dropout) if hp.image_feat_encode: image_feat = image_encoder(image_feat, hp) utils.collect_named_outputs("norms", "image_feat_encoded", tf.norm(image_feat, axis=-1)) else: image_feat = common_layers.layer_norm(image_feat) utils.collect_named_outputs("norms", "image_feat_after_layer", tf.norm(image_feat, axis=-1)) question = common_layers.flatten4d3d(features["question"]) utils.collect_named_outputs("norms", "question_embedding", tf.norm(question, axis=-1)) question, question_self_attention_bias = prepare_question_encoder( question, hp) question = tf.nn.dropout( question, keep_prob=1.-hp.layer_prepostprocess_dropout) query = question_encoder(question, question_self_attention_bias, hp) utils.collect_named_outputs( "norms", "query_encode", tf.norm(query, axis=-1)) query = (query + tf.expand_dims( tf.squeeze(question_self_attention_bias, [1, 2]), axis=2)) query = tf.reduce_max(query, axis=1) utils.collect_named_outputs( "norms", "query_maxpool", tf.norm(query, axis=-1)) image_ave = attn(image_feat, query, hp) utils.collect_named_outputs("norms", "image_ave", tf.norm(image_ave, axis=-1)) if hp.multimodal_combine == "concat": image_question = tf.concat([image_ave, query], axis=1) elif hp.multimodal_combine == "sum": image_question = image_ave + query elif hp.multimodal_combine == "product": image_question = image_ave * query utils.collect_named_outputs("norms", "image_question", tf.norm(image_question, axis=-1)) image_question = tf.nn.dropout(image_question, 1. 
- hp.dropout) output = mlp(image_question, hp) utils.collect_named_outputs("norms", "output", tf.norm(output, axis=-1)) norm_tensors = utils.convert_collection_to_dict("norms") vqa_layers.summarize_tensors(norm_tensors, tag="norms/") return tf.expand_dims(tf.expand_dims(output, axis=1), axis=2) @registry.register_model class VqaCombinedSelfAttention(VqaSelfAttention): def body(self, features): hp = self.hparams if hp.image_input_type == "image": image_feat = vqa_layers.image_embedding( features["inputs"], model_fn=eval(hp.image_model_fn), trainable=hp.train_resnet, is_training=hp.mode == tf.estimator.ModeKeys.TRAIN) else: image_feat = features["inputs"] image_feat = common_layers.flatten4d3d(image_feat) image_hidden_size = hp.hidden_size image_feat = common_layers.dense(image_feat, image_hidden_size) utils.collect_named_outputs("norms", "image_feat_after_proj", tf.norm(image_feat, axis=-1)) question = common_layers.flatten4d3d(features["question"]) utils.collect_named_outputs("norms", "question_embedding", tf.norm(question, axis=-1)) (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias) = prepare_image_question_encoder( image_feat, question, hp) encoder_input = tf.nn.dropout( encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout) encoder_output = image_question_encoder( encoder_input, encoder_self_attention_bias, hp) utils.collect_named_outputs( "norms", "encoder_output", tf.norm(encoder_output, axis=-1)) query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5 query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0) batch_size = common_layers.shape_list(encoder_input)[0] query = tf.tile(query, [batch_size, 1, 1]) query = tf.nn.dropout( query, keep_prob=1.-hp.layer_prepostprocess_dropout) decoder_output = decoder( query, encoder_output, None, encoder_decoder_attention_bias, hp) utils.collect_named_outputs("norms", "decoder_output", tf.norm(decoder_output, axis=-1)) norm_tensors = utils.convert_collection_to_dict("norms") vqa_layers.summarize_tensors(norm_tensors, tag="norms/") return tf.expand_dims(decoder_output, axis=1) @registry.register_model class VqaIterativeCombinedSelfAttention(VqaSelfAttention): def body(self, features): hp = self.hparams if hp.image_input_type == "image": image_feat = vqa_layers.image_embedding( features["inputs"], model_fn=eval(hp.image_model_fn), trainable=hp.train_resnet, is_training=hp.mode == tf.estimator.ModeKeys.TRAIN) else: image_feat = features["inputs"] image_feat = common_layers.flatten4d3d(image_feat) image_hidden_size = hp.hidden_size image_feat = common_layers.dense(image_feat, image_hidden_size) utils.collect_named_outputs("norms", "image_feat_after_proj", tf.norm(image_feat, axis=-1)) question = common_layers.flatten4d3d(features["question"]) utils.collect_named_outputs("norms", "question_embedding", tf.norm(question, axis=-1)) (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias) = prepare_image_question_encoder( image_feat, question, hp) encoder_input = tf.nn.dropout( encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout) query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5 query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0) batch_size = common_layers.shape_list(encoder_input)[0] query = tf.tile(query, [batch_size, 1, 1]) query = tf.nn.dropout( query, keep_prob=1.-hp.layer_prepostprocess_dropout) decoder_output = iterative_encoder_decoder( encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias, query, hp) 
utils.collect_named_outputs("norms", "decoder_output", tf.norm(decoder_output, axis=-1)) norm_tensors = utils.convert_collection_to_dict("norms") vqa_layers.summarize_tensors(norm_tensors, tag="norms/") return tf.expand_dims(decoder_output, axis=1) def image_encoder(image_feat, hparams, name="image_encoder", save_weights_to=None, make_image_summary=True): x = image_feat image_hidden_size = hparams.image_hidden_size or hparams.hidden_size image_filter_size = hparams.image_filter_size or hparams.filter_size with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, None, hparams.attention_key_channels or image_hidden_size, hparams.attention_value_channels or image_hidden_size, image_hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.image_self_attention_type, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "image_feat_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "image_feat_self_attention_postprocess_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), image_filter_size, image_hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs( "norms", "image_feat_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "image_feat_ffn_postprocess_%d"%(layer), tf.norm(x, axis=-1)) return common_layers.layer_preprocess(x, hparams) def prepare_question_encoder(inputs, hparams): encoder_input = inputs encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( encoder_padding) encoder_self_attention_bias = ignore_padding if hparams.pos == "timing": encoder_input = common_attention.add_timing_signal_1d(encoder_input) elif hparams.pos == "emb": encoder_input = common_attention.add_positional_embedding( encoder_input, hparams.max_length, "inputs_positional_embedding", None) return (encoder_input, encoder_self_attention_bias) def question_encoder(question, question_self_attention_bias, hparams, name="question_encoder", save_weights_to=None, make_image_summary=True): x = question with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, question_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.question_self_attention_type, block_length=hparams.block_length, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "query_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "query_self_attention_postprocess_%d"%(layer), tf.norm(x, axis=-1)) with 
tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs( "norms", "query_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "query_ffn_postprocess_%d"%(layer), tf.norm(x, axis=-1)) return common_layers.layer_preprocess(x, hparams) def attn(image_feat, query, hparams, name="attn", save_weights_to=None, make_image_summary=True): with tf.variable_scope(name, "attn", values=[image_feat, query]): total_key_depth = hparams.attention_key_channels or hparams.hidden_size total_value_depth = hparams.attention_value_channels or hparams.hidden_size num_heads = hparams.num_heads query = tf.expand_dims(query, 1) q, k, v = common_attention.compute_qkv( query, image_feat, total_key_depth, total_value_depth, ) q = common_attention.split_heads(q, num_heads) k = common_attention.split_heads(k, num_heads) v = common_attention.split_heads(v, num_heads) if hparams.scale_dotproduct: key_depth_per_head = total_key_depth // num_heads q *= key_depth_per_head**-0.5 x = common_attention.dot_product_attention( q, k, v, None, dropout_rate=hparams.attention_dropout, image_shapes=None, save_weights_to=save_weights_to, make_image_summary=make_image_summary) x = common_attention.combine_heads(x) return tf.squeeze(x, axis=1) def mlp(feature, hparams, name="mlp"): with tf.variable_scope(name, "mlp", values=[feature]): num_mlp_layers = hparams.num_mlp_layers mlp_size = hparams.mlp_size for _ in range(num_mlp_layers): feature = common_layers.dense(feature, mlp_size, activation=None) utils.collect_named_outputs("norms", "mlp_feature", tf.norm(feature, axis=-1)) feature = common_layers.layer_norm(feature) feature = tf.nn.relu(feature) feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout) return feature def prepare_image_question_encoder(image_feat, question, hparams): encoder_input = tf.concat([image_feat, question], axis=1) encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( encoder_padding) encoder_self_attention_bias = ignore_padding encoder_decoder_attention_bias = ignore_padding if hparams.pos == "timing": question = common_attention.add_timing_signal_1d(question) elif hparams.pos == "emb": question = common_attention.add_positional_embedding( question, hparams.max_length, "inputs_positional_embedding", None) encoder_input = tf.concat([image_feat, question], axis=1) return (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias) def image_question_encoder(encoder_inputs, encoder_self_attention_bias, hparams, query=None, name="image_question_encoder", save_weights_to=None, make_image_summary=True): x = encoder_inputs with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, encoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, block_length=hparams.block_length, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) 
utils.collect_named_outputs( "norms", "encoder_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "encoder_self_attention_postprocess_%d"%(layer), tf.norm(x, axis=-1)) if query is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), query, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, block_length=hparams.block_length, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "encoder_decoder_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "encoder_decoder_attention_post_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs( "norms", "encoder_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "encoder_ffn_postprocess_%d"%(layer), tf.norm(x, axis=-1)) return common_layers.layer_preprocess(x, hparams) def decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, name="decoder", save_weights_to=None, make_image_summary=True,): x = decoder_input with tf.variable_scope(name): for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): layer_name = "layer_%d" % layer with tf.variable_scope(layer_name): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, make_image_summary=make_image_summary, ) utils.collect_named_outputs("norms", "decoder_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs("norms", "decoder_self_attention_post_%d"%(layer), tf.norm(x, axis=-1)) if encoder_output is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, save_weights_to=save_weights_to, make_image_summary=make_image_summary, ) utils.collect_named_outputs( "norms", "decoder_encoder_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "decoder_encoder_attention_post_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs("norms", 
"decoder_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs("norms", "decoder_ffn_post_%d"%(layer), tf.norm(x, axis=-1)) return common_layers.layer_preprocess(x, hparams)
Apache License 2.0
xrplf/xrpl-py
xrpl/core/binarycodec/types/currency.py
Currency.__init__
python
def __init__(self: Currency, buffer: Optional[bytes] = None) -> None: if buffer is not None: super().__init__(buffer) else: super().__init__(bytes(self.LENGTH)) code_bytes = self.buffer[12:15] if self.buffer[0] != 0: self._iso = None elif code_bytes.hex() == "000000": self._iso = "XRP" else: self._iso = _iso_code_from_hex(code_bytes)
Construct a Currency.
https://github.com/xrplf/xrpl-py/blob/3635339bfb579353e56f126bbcf303d931b26d65/xrpl/core/binarycodec/types/currency.py#L76-L93
from __future__ import annotations from typing import Optional, Type from typing_extensions import Final from xrpl.constants import HEX_CURRENCY_REGEX, ISO_CURRENCY_REGEX from xrpl.core.binarycodec.exceptions import XRPLBinaryCodecException from xrpl.core.binarycodec.types.hash160 import Hash160 _CURRENCY_CODE_LENGTH: Final[int] = 20 def _is_iso_code(value: str) -> bool: return bool(ISO_CURRENCY_REGEX.fullmatch(value)) def _iso_code_from_hex(value: bytes) -> Optional[str]: candidate_iso = value.decode("ascii") if candidate_iso == "XRP": raise XRPLBinaryCodecException( "Disallowed currency code: to indicate the currency " "XRP you must use 20 bytes of 0s" ) if _is_iso_code(candidate_iso): return candidate_iso return None def _is_hex(value: str) -> bool: return bool(HEX_CURRENCY_REGEX.fullmatch(value)) def _iso_to_bytes(iso: str) -> bytes: if not _is_iso_code(iso): raise XRPLBinaryCodecException(f"Invalid ISO code: {iso}") if iso == "XRP": return bytes(_CURRENCY_CODE_LENGTH) iso_bytes = iso.encode("ASCII") return bytes(12) + iso_bytes + bytes(5) class Currency(Hash160): LENGTH: Final[int] = 20 _iso: Optional[str] = None
ISC License
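A sketch of the byte-layout logic in the constructor above (xrpl-py installed); _iso is an internal attribute, read here only to illustrate how the 20-byte buffer is interpreted.

# Hedged sketch of Currency's buffer parsing.
from xrpl.core.binarycodec.types.currency import Currency

xrp = Currency(bytes(20))                      # 20 zero bytes denote the native currency
print(xrp._iso)                                # "XRP"

usd = Currency(bytes(12) + b"USD" + bytes(5))  # standard layout: ISO code at bytes 12-14
print(usd._iso)                                # "USD"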
mlflow/mlflow
mlflow/azureml/__init__.py
build_image
python
def build_image( model_uri, workspace, image_name=None, model_name=None, mlflow_home=None, description=None, tags=None, synchronous=True, ): from azureml.core.image import ContainerImage from azureml.core.model import Model as AzureModel absolute_model_path = _download_artifact_from_uri(model_uri) model_pyfunc_conf, _ = _load_pyfunc_conf_with_model(model_path=absolute_model_path) model_python_version = model_pyfunc_conf.get(pyfunc.PY_VERSION, None) if model_python_version is not None and Version(model_python_version) < Version("3.0.0"): raise MlflowException( message=( "Azure ML can only deploy models trained in Python 3 and above. See" " the following MLflow GitHub issue for a thorough explanation of this" " limitation and a workaround to enable support for deploying models" " trained in Python 2: https://github.com/mlflow/mlflow/issues/668" ), error_code=INVALID_PARAMETER_VALUE, ) tags = _build_tags( model_uri=model_uri, model_python_version=model_python_version, user_tags=tags ) if image_name is None: image_name = _get_mlflow_azure_resource_name() if model_name is None: model_name = _get_mlflow_azure_resource_name() with TempDir(chdr=True) as tmp: model_directory_path = tmp.path("model") tmp_model_path = os.path.join( model_directory_path, _copy_file_or_tree(src=absolute_model_path, dst=model_directory_path), ) registered_model = AzureModel.register( workspace=workspace, model_path=tmp_model_path, model_name=model_name, tags=tags, description=description, ) _logger.info( "Registered an Azure Model with name: `%s` and version: `%s`", registered_model.name, registered_model.version, ) execution_script_path = tmp.path("execution_script.py") _create_execution_script(output_path=execution_script_path, azure_model=registered_model) execution_script_path = os.path.basename(execution_script_path) if mlflow_home is not None: _logger.info( "Copying the specified mlflow_home directory: `%s` to a temporary location for" " container creation", mlflow_home, ) mlflow_home = os.path.join( tmp.path(), _copy_project(src_path=mlflow_home, dst_path=tmp.path()) ) image_file_dependencies = [mlflow_home] else: image_file_dependencies = None dockerfile_path = tmp.path("Dockerfile") _create_dockerfile(output_path=dockerfile_path, mlflow_path=mlflow_home) conda_env_path = None if pyfunc.ENV in model_pyfunc_conf: conda_env_path = os.path.join(tmp_model_path, model_pyfunc_conf[pyfunc.ENV]) image_configuration = ContainerImage.image_configuration( execution_script=execution_script_path, runtime="python", docker_file=dockerfile_path, dependencies=image_file_dependencies, conda_file=conda_env_path, description=description, tags=tags, ) image = ContainerImage.create( workspace=workspace, name=image_name, image_config=image_configuration, models=[registered_model], ) _logger.info( "Building an Azure Container Image with name: `%s` and version: `%s`", image.name, image.version, ) if synchronous: image.wait_for_creation(show_output=True) return image, registered_model
Register an MLflow model with Azure ML and build an Azure ML ContainerImage for deployment. The resulting image can be deployed as a web service to Azure Container Instances (ACI) or Azure Kubernetes Service (AKS). The resulting Azure ML ContainerImage will contain a webserver that processes model queries. For information about the input data formats accepted by this webserver, see the :ref:`MLflow deployment tools documentation <azureml_deployment>`. :param model_uri: The location, in URI format, of the MLflow model used to build the Azure ML deployment image. For example: - ``/Users/me/path/to/local/model`` - ``relative/path/to/local/model`` - ``s3://my_bucket/path/to/model`` - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` - ``models:/<model_name>/<model_version>`` - ``models:/<model_name>/<stage>`` For more information about supported URI schemes, see `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html# artifact-locations>`_. :param image_name: The name to assign the Azure Container Image that will be created. If unspecified, a unique image name will be generated. :param model_name: The name to assign the Azure Model will be created. If unspecified, a unique model name will be generated. :param workspace: The AzureML workspace in which to build the image. This is a `azureml.core.Workspace` object. :param mlflow_home: Path to a local copy of the MLflow GitHub repository. If specified, the image will install MLflow from this directory. Otherwise, it will install MLflow from pip. :param description: A string description to associate with the Azure Container Image and the Azure Model that will be created. For more information, see `<https://docs.microsoft.com/en-us/python/api/azureml-core/ azureml.core.image.container.containerimageconfig?view=azure-ml-py>`_ and `<https://docs.microsoft.com/en-us/python/api/azureml-core/ azureml.core.model.model?view=azure-ml-py#register>`_. :param tags: A collection of tags, represented as a dictionary of string key-value pairs, to associate with the Azure Container Image and the Azure Model that will be created. These tags are added to a set of default tags that include the model uri, and more. For more information, see `<https://docs.microsoft.com/en-us/python/api/azureml-core/ azureml.core.image.container.containerimageconfig?view-azure-ml-py>`_ and `<https://docs.microsoft.com/en-us/python/api/azureml-core/ azureml.core.model.model?view=azure-ml-py#register>`_. :param synchronous: If ``True``, this method blocks until the image creation procedure terminates before returning. If ``False``, the method returns immediately, but the returned image will not be available until the asynchronous creation process completes. Use the ``azureml.core.Image.wait_for_creation()`` function to wait for the creation process to complete. :return: A tuple containing the following elements in order: - An ``azureml.core.image.ContainerImage`` object containing metadata for the new image. - An ``azureml.core.model.Model`` object containing metadata for the new model. .. 
code-block:: python :caption: Example import mlflow.azureml from azureml.core import Workspace from azureml.core.webservice import AciWebservice, Webservice # Load or create an Azure ML Workspace workspace_name = "<Name of your Azure ML workspace>" subscription_id = "<Your Azure subscription ID>" resource_group = "<Name of the Azure resource group in which to create Azure ML resources>" location = "<Name of the Azure location (region) in which to create Azure ML resources>" azure_workspace = Workspace.create(name=workspace_name, subscription_id=subscription_id, resource_group=resource_group, location=location, create_resource_group=True, exist_ok=True) # Build an Azure ML Container Image for an MLflow model azure_image, azure_model = mlflow.azureml.build_image(model_uri="<model_uri>", workspace=azure_workspace, synchronous=True) # If your image build failed, you can access build logs at the following URI: print("Access the following URI for build logs: {}".format(azure_image.image_build_log_uri)) # Deploy the image to Azure Container Instances (ACI) for real-time serving webservice_deployment_config = AciWebservice.deploy_configuration() webservice = Webservice.deploy_from_image( image=azure_image, workspace=azure_workspace, name="<deployment-name>") webservice.wait_for_deployment()
https://github.com/mlflow/mlflow/blob/c4b8e849a3c718575e106f3499a12bea9b0b821e/mlflow/azureml/__init__.py#L32-L237
import sys import os import subprocess import logging import uuid from packaging.version import Version from mlflow import get_tracking_uri, get_registry_uri from mlflow import pyfunc from mlflow import register_model as mlflow_register_model from mlflow.exceptions import MlflowException from mlflow.models import Model from mlflow.models.model import MLMODEL_FILE_NAME from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE from mlflow.tracking.artifact_utils import _download_artifact_from_uri from mlflow.utils import get_unique_resource_id from mlflow.utils.annotations import deprecated from mlflow.utils.file_utils import TempDir, _copy_file_or_tree, _copy_project from mlflow.version import VERSION as mlflow_version from pathlib import Path _logger = logging.getLogger(__name__) @deprecated("the azureml deployment plugin, https://aka.ms/aml-mlflow-deploy", since="1.19.0")
Apache License 2.0
chaffelson/whoville
whoville/cloudbreak/models/blueprint_details.py
BlueprintDetails.blueprint_name
python
def blueprint_name(self): return self._blueprint_name
Gets the blueprint_name of this BlueprintDetails. :return: The blueprint_name of this BlueprintDetails. :rtype: str
https://github.com/chaffelson/whoville/blob/f71fda629c9fd50d0a482120165ea5abcc754522/whoville/cloudbreak/models/blueprint_details.py#L135-L142
from pprint import pformat from six import iteritems import re class BlueprintDetails(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'int', 'name': 'str', 'description': 'str', 'blueprint_name': 'str', 'blueprint_json': 'str' } attribute_map = { 'id': 'id', 'name': 'name', 'description': 'description', 'blueprint_name': 'blueprintName', 'blueprint_json': 'blueprintJson' } def __init__(self, id=None, name=None, description=None, blueprint_name=None, blueprint_json=None): self._id = None self._name = None self._description = None self._blueprint_name = None self._blueprint_json = None if id is not None: self.id = id if name is not None: self.name = name if description is not None: self.description = description if blueprint_name is not None: self.blueprint_name = blueprint_name if blueprint_json is not None: self.blueprint_json = blueprint_json @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def description(self): return self._description @description.setter def description(self, description): self._description = description @property
Apache License 2.0
meteomatics/python-connector-api
meteomatics/api.py
query_grid_png
python
def query_grid_png(filename, startdate, parameter_grid, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon, username, password, model=None, ens_select=None, interp_select=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET'): startdate = sanitize_datetime(startdate) url_params = dict() url_params['connector'] = VERSION if model is not None: url_params['model'] = model if ens_select is not None: url_params['ens_select'] = ens_select if interp_select is not None: url_params['interp_select'] = interp_select url = GRID_PNG_TEMPLATE.format( api_base_url=api_base_url, startdate=startdate.isoformat(), parameter_grid=parameter_grid, lat_N=lat_N, lon_W=lon_W, lat_S=lat_S, lon_E=lon_E, res_lat=res_lat, res_lon=res_lon, urlParams="&".join(["{}={}".format(k, v) for k, v in url_params.items()]) ) headers = {'Accept': 'image/png'} response = query_api(url, username, password, request_type=request_type, headers=headers) with open(filename, 'wb') as f: _logger.debug('Create File {}'.format(filename)) for chunk in response.iter_content(chunk_size=1024): f.write(chunk) return
Gets a png image generated by the Meteomatics API from grid data (see method query_grid) and saves it to the specified filename. request_type is one of 'GET'/'POST'
https://github.com/meteomatics/python-connector-api/blob/755df82f84b2c3aef5ed8382ae778d69a3a89b43/meteomatics/api.py#L552-L598
from __future__ import print_function import itertools import logging import os import warnings from functools import wraps from io import StringIO import isodate import pandas as pd import requests from urllib3.exceptions import InsecureRequestWarning from ._constants_ import DEFAULT_API_BASE_URL, VERSION, TIME_SERIES_TEMPLATE, GRID_TEMPLATE, POLYGON_TEMPLATE, GRID_TIME_SERIES_TEMPLATE, GRID_PNG_TEMPLATE, LIGHTNING_TEMPLATE, NETCDF_TEMPLATE, STATIONS_LIST_TEMPLATE, INIT_DATE_TEMPLATE, AVAILABLE_TIME_RANGES_TEMPLATE, NA_VALUES, LOGGERNAME from .binary_parser import BinaryParser from .binary_reader import BinaryReader from .exceptions import API_EXCEPTIONS, WeatherApiException from .parsing_util import all_entries_postal, build_coordinates_str_for_polygon, build_coordinates_str, build_coordinates_str_from_postal_codes, build_response_params, convert_grid_binary_response_to_df, convert_lightning_response_to_df, convert_polygon_response_to_df, parse_date_num, extract_user_statistics, parse_ens, parse_query_station_params, parse_query_station_timeseries_params, parse_time_series_params, parse_url_for_post_data, localize_datenum, sanitize_datetime, set_index_for_ts _logger = logging.getLogger(LOGGERNAME) class Config: _config = { "VERIFY_SSL": True } @staticmethod def get(item): return Config._config[item] @staticmethod def set(key, value): if key not in Config._config.keys(): raise KeyError("Key '{}' does not exist.".format(key)) Config._config[key] = value def handle_ssl(func): @wraps(func) def wrapper(*args, **kwargs): if not Config.get("VERIFY_SSL"): with warnings.catch_warnings(): warnings.simplefilter('ignore', InsecureRequestWarning) return func(*args, verify=False, **kwargs) return func(*args, **kwargs) return wrapper @handle_ssl def get_request(*args, **kwargs): return requests.get(*args, **kwargs) @handle_ssl def post_request(*args, **kwargs): return requests.post(*args, **kwargs) def create_path(_file): _path = os.path.dirname(_file) if not os.path.exists(_path) and len(_path) > 0: _logger.info("Create Path: {}".format(_path)) os.makedirs(_path) def query_api(url, username, password, request_type="GET", timeout_seconds=300, headers={'Accept': 'application/octet-stream'}): if request_type.lower() == "get": _logger.debug("Calling URL: {} (username = {})".format(url, username)) response = get_request(url, timeout=timeout_seconds, auth=(username, password), headers=headers) elif request_type.lower() == "post": url, data = parse_url_for_post_data(url) _logger.debug("Calling URL: {} (username = {})".format(url, username)) headers['Content-Type'] = "text/plain" response = post_request(url, timeout=timeout_seconds, auth=(username, password), headers=headers, data=data) else: raise ValueError('Unknown request_type: {}.'.format(request_type)) if response.status_code != requests.codes.ok: exc = API_EXCEPTIONS[response.status_code] raise exc(response.text) return response def query_user_features(username, password): response = get_request(DEFAULT_API_BASE_URL + '/user_stats_json', auth=(username, password)) if response.status_code != requests.codes.ok: exc = API_EXCEPTIONS[response.status_code] raise exc(response.text) return extract_user_statistics(response) def convert_time_series_binary_response_to_df(bin_input, coordinate_list, parameters, station=False, na_values=NA_VALUES): binary_parser = BinaryParser(BinaryReader(bin_input), na_values) df = binary_parser.parse(parameters, station, coordinate_list) df = df.apply(lambda col: parse_date_num(col) if col.name.endswith(":sql") else col) df = 
set_index_for_ts(df, station, coordinate_list) return df def query_station_list(username, password, source=None, parameters=None, startdate=None, enddate=None, location=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET', elevation=None, id=None): url_params_dict = parse_query_station_params(source, parameters, startdate, enddate, location, elevation, id) url = STATIONS_LIST_TEMPLATE.format( api_base_url=api_base_url, urlParams="&".join(["{}={}".format(k, v) for k, v in url_params_dict.items()]) ) response = query_api(url, username, password, request_type=request_type) sl = pd.read_csv(StringIO(response.text), sep=";") sl['lat'] = sl['Location Lat,Lon'].apply(lambda x: float(x.split(",")[0])) sl['lon'] = sl['Location Lat,Lon'].apply(lambda x: float(x.split(",")[1])) sl.drop('Location Lat,Lon', 1, inplace=True) return sl def query_station_timeseries(startdate, enddate, interval, parameters, username, password, model='mix-obs', latlon_tuple_list=None, wmo_ids=None, mch_ids=None, general_ids=None, hash_ids=None, metar_ids=None, temporal_interpolation=None, spatial_interpolation=None, on_invalid=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET', na_values=NA_VALUES): startdate = sanitize_datetime(startdate) enddate = sanitize_datetime(enddate) coordinates = build_coordinates_str(latlon_tuple_list, wmo_ids, metar_ids, mch_ids, general_ids, hash_ids) url_params_dict = parse_query_station_timeseries_params(model, on_invalid, temporal_interpolation, spatial_interpolation) url = TIME_SERIES_TEMPLATE.format( api_base_url=api_base_url, coordinates=coordinates, startdate=startdate.isoformat(), enddate=enddate.isoformat(), interval=isodate.duration_isoformat(interval), parameters=",".join(parameters), urlParams="&".join(["{}={}".format(k, v) for k, v in url_params_dict.items()]) ) headers = {'Accept': 'text/csv'} response = query_api(url, username, password, request_type=request_type, headers=headers) coordinates_list = coordinates.split("+") return convert_time_series_binary_response_to_df(response.content, coordinates_list, parameters, station=True, na_values=na_values) def query_special_locations_timeseries(startdate, enddate, interval, parameters, username, password, model='mix', postal_codes=None, temporal_interpolation=None, spatial_interpolation=None, on_invalid=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET', na_values=NA_VALUES): startdate = sanitize_datetime(startdate) enddate = sanitize_datetime(enddate) coordinates = build_coordinates_str_from_postal_codes(postal_codes) url_params = parse_query_station_timeseries_params(model, on_invalid, temporal_interpolation, spatial_interpolation) url = TIME_SERIES_TEMPLATE.format( api_base_url=api_base_url, coordinates=coordinates, startdate=startdate.isoformat(), enddate=enddate.isoformat(), interval=isodate.duration_isoformat(interval), parameters=",".join(parameters), urlParams="&".join(["{}={}".format(k, v) for k, v in url_params.items()]) ) headers = {'Accept': 'text/csv'} response = query_api(url, username, password, request_type=request_type, headers=headers) coordinates_list = coordinates.split("+") return convert_time_series_binary_response_to_df(response.content, coordinates_list, parameters, station=True, na_values=na_values) def query_time_series(coordinate_list, startdate, enddate, interval, parameters, username, password, model=None, ens_select=None, interp_select=None, on_invalid=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET', cluster_select=None, na_values=NA_VALUES, **kwargs): startdate = 
sanitize_datetime(startdate) enddate = sanitize_datetime(enddate) extended_params = parameters if ens_select is None else build_response_params(parameters, parse_ens(ens_select)) url_params = parse_time_series_params(model, ens_select, cluster_select, interp_select, on_invalid, kwargs) is_postal = all_entries_postal(coordinate_list) coordinate_list_str = '+'.join(coordinate_list) if is_postal else "+".join( ["{},{}".format(*latlon_tuple) for latlon_tuple in coordinate_list]) url = TIME_SERIES_TEMPLATE.format( api_base_url=api_base_url, coordinates=coordinate_list_str, startdate=startdate.isoformat(), enddate=enddate.isoformat(), interval=isodate.duration_isoformat(interval), parameters=",".join(parameters), urlParams="&".join(["{}={}".format(k, v) for k, v in url_params.items()]) ) response = query_api(url, username, password, request_type=request_type) df = convert_time_series_binary_response_to_df(response.content, coordinate_list, extended_params, na_values=na_values) return df def query_grid(startdate, parameter_grid, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon, username, password, model=None, ens_select=None, interp_select=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET', na_values=NA_VALUES, **kwargs): startdate = sanitize_datetime(startdate) url_params = parse_time_series_params(model=model, ens_select=ens_select, cluster_select=None, interp_select=interp_select, on_invalid=None, kwargs=kwargs) url = GRID_TEMPLATE.format( api_base_url=api_base_url, startdate=startdate.isoformat(), parameter_grid=parameter_grid, lat_N=lat_N, lon_W=lon_W, lat_S=lat_S, lon_E=lon_E, res_lat=res_lat, res_lon=res_lon, urlParams="&".join(["{}={}".format(k, v) for k, v in url_params.items()]) ) response = query_api(url, username, password, request_type=request_type) return convert_grid_binary_response_to_df(response.content, parameter_grid, na_values=na_values) def query_grid_unpivoted(valid_dates, parameters, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon, username, password, model=None, ens_select=None, interp_select=None, request_type='GET', na_values=NA_VALUES): idxcols = ['valid_date', 'lat', 'lon'] vd_dfs = [] for valid_date in valid_dates: vd_df = None for parameter in parameters: dmo = query_grid(valid_date, parameter, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon, username, password, model, ens_select, interp_select, request_type=request_type, na_values=na_values) df = pd.melt(dmo.reset_index(), id_vars='lat', var_name='lon', value_name=parameter) df['valid_date'] = valid_date df.lat = df.lat.apply(float) df.lon = df.lon.apply(float) if vd_df is None: vd_df = df else: vd_df = vd_df.merge(df, on=idxcols) vd_dfs.append(vd_df) data = pd.concat(vd_dfs) try: data.sort_values(idxcols, inplace=True) except AttributeError: data.sort(idxcols, inplace=True) data.set_index(idxcols, inplace=True) return data def query_grid_timeseries(startdate, enddate, interval, parameters, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon, username, password, model=None, ens_select=None, interp_select=None, on_invalid=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET', na_values=NA_VALUES): startdate = sanitize_datetime(startdate) enddate = sanitize_datetime(enddate) url_params = parse_time_series_params(model=model, ens_select=ens_select, cluster_select=None, interp_select=interp_select, on_invalid=on_invalid, kwargs=None) url = GRID_TIME_SERIES_TEMPLATE.format( api_base_url=api_base_url, startdate=startdate.isoformat(), enddate=enddate.isoformat(), interval=isodate.duration_isoformat(interval), lat_N=lat_N, 
lon_W=lon_W, lat_S=lat_S, lon_E=lon_E, res_lat=res_lat, res_lon=res_lon, parameters=",".join(parameters), urlParams="&".join(["{}={}".format(k, v) for k, v in url_params.items()]) ) response = query_api(url, username, password, request_type=request_type) lats = arange(lat_S, lat_N, res_lat) lons = arange(lon_W, lon_E, res_lon) latlon_tuple_list = list(itertools.product(lats, lons)) df = convert_time_series_binary_response_to_df(response.content, latlon_tuple_list, parameters, na_values=na_values) return df def query_polygon(latlon_tuple_lists, startdate, enddate, interval, parameters, aggregation, username, password, operator=None, model=None, ens_select=None, interp_select=None, on_invalid=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET', cluster_select=None, **kwargs): startdate = sanitize_datetime(startdate) enddate = sanitize_datetime(enddate) url_params_dict = parse_time_series_params(model=model, ens_select=ens_select, cluster_select=cluster_select, interp_select=interp_select, on_invalid=on_invalid, kwargs=kwargs) coordinates = build_coordinates_str_for_polygon(latlon_tuple_lists, aggregation, operator) url = POLYGON_TEMPLATE.format( api_base_url=api_base_url, coordinates_aggregation=coordinates, startdate=startdate.isoformat(), enddate=enddate.isoformat(), interval=isodate.duration_isoformat(interval), parameters=",".join(parameters), urlParams="&".join(["{}={}".format(k, v) for k, v in url_params_dict.items()]) ) response = query_api(url, username, password, request_type=request_type) df = convert_polygon_response_to_df(response.text) return df def query_lightnings(startdate, enddate, lat_N, lon_W, lat_S, lon_E, username, password, api_base_url=DEFAULT_API_BASE_URL, request_type='GET'): startdate = sanitize_datetime(startdate) enddate = sanitize_datetime(enddate) url = LIGHTNING_TEMPLATE.format( api_base_url=api_base_url, startdate=startdate.isoformat(), enddate=enddate.isoformat(), lat_N=lat_N, lon_W=lon_W, lat_S=lat_S, lon_E=lon_E ) headers = {'Accept': 'text/csv'} response = query_api(url, username, password, request_type=request_type, headers=headers) return convert_lightning_response_to_df(response.text) def query_netcdf(filename, startdate, enddate, interval, parameter_netcdf, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon, username, password, model=None, ens_select=None, interp_select=None, api_base_url=DEFAULT_API_BASE_URL, request_type='GET', cluster_select=None): startdate = sanitize_datetime(startdate) enddate = sanitize_datetime(enddate) url_params_dict = parse_time_series_params(model=model, ens_select=ens_select, cluster_select=cluster_select, interp_select=interp_select) url = NETCDF_TEMPLATE.format( api_base_url=api_base_url, startdate=startdate.isoformat(), enddate=enddate.isoformat(), interval=isodate.duration_isoformat(interval), parameter_netcdf=parameter_netcdf, lat_N=lat_N, lon_W=lon_W, lat_S=lat_S, lon_E=lon_E, res_lat=res_lat, res_lon=res_lon, urlParams="&".join(["{}={}".format(k, v) for k, v in url_params_dict.items()]) ) headers = {'Accept': 'application/netcdf'} response = query_api(url, username, password, request_type=request_type, headers=headers) create_path(filename) with open(filename, 'wb') as f: _logger.debug('Create File {}'.format(filename)) for chunk in response.iter_content(chunk_size=1024): f.write(chunk) return def query_init_date(startdate, enddate, interval, parameter, username, password, model, api_base_url=DEFAULT_API_BASE_URL): startdate = sanitize_datetime(startdate) enddate = sanitize_datetime(enddate) interval_string = 
"{}--{}:{}".format(startdate.isoformat(), enddate.isoformat(), isodate.duration_isoformat(interval)) url = INIT_DATE_TEMPLATE.format(api_base_url=api_base_url, model=model, interval_string=interval_string, parameter=parameter) headers = {'Accept': 'text/csv'} response = query_api(url, username, password, request_type='GET', headers=headers) try: df = pd.read_csv( StringIO(response.text), sep=";", header=0, encoding="utf-8", index_col=0, na_values=["0000-00-00T00:00:00Z"], parse_dates=[0, 1] ) except Exception: raise WeatherApiException(response.text) df = localize_datenum(df) return df def query_available_time_ranges(parameters, username, password, model, api_base_url=DEFAULT_API_BASE_URL): url = AVAILABLE_TIME_RANGES_TEMPLATE.format(api_base_url=api_base_url, model=model, parameters=",".join(parameters)) headers = {'Accept': 'text/csv'} response = query_api(url, username, password, request_type='GET', headers=headers) try: df = pd.read_csv( StringIO(response.text), sep=";", header=0, encoding="utf-8", index_col=0, na_values=["0000-00-00T00:00:00Z"], parse_dates=['min_date', 'max_date'] ) except Exception: raise WeatherApiException(response.text) return df
MIT License
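A minimal usage sketch for the query_grid_timeseries function defined in the record that ends above. The import path, the parameter name "t_2m:C" and the credentials are placeholders not confirmed by the record; start/end dates are datetime objects and the interval a timedelta, matching how the function serializes them with isodate.

    from datetime import datetime, timedelta

    # Placeholder import: the actual module name is not given in the record.
    from weather_client import query_grid_timeseries

    startdate = datetime(2021, 1, 1)
    enddate = startdate + timedelta(days=1)
    interval = timedelta(hours=1)        # serialized via isodate.duration_isoformat
    parameters = ["t_2m:C"]              # hypothetical parameter name

    df = query_grid_timeseries(
        startdate, enddate, interval, parameters,
        lat_N=50.0, lon_W=5.0, lat_S=45.0, lon_E=10.0,
        res_lat=0.5, res_lon=0.5,
        username="user", password="secret",  # placeholder credentials
    )
    print(df.head())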
harmon758/harmonbot
Discord/cogs/search.py
Search.baidu
python
async def baidu(self, ctx, *search: str): await ctx.embed_reply(f"[Baidu search for \"{' '.join(search)}\"](http://www.baidu.com/s?wd={'+'.join(search)})")
Search with Baidu
https://github.com/harmon758/harmonbot/blob/def3849beabdaea5e0f9c594dcf6d6d8980782bd/Discord/cogs/search.py#L77-L79
import discord from discord.ext import commands import functools import inspect import re import youtube_dl from utilities import checks def setup(bot): bot.add_cog(Search(bot)) class Search(commands.Cog): def __init__(self, bot): self.bot = bot for name, command in inspect.getmembers(self): if isinstance(command, commands.Command) and command.parent is None and name != "search": self.bot.add_command(command) self.search.add_command(command) command = commands.Command(self.youtube, aliases = ["yt"], checks = [checks.not_forbidden().predicate]) command.error(self.youtube_error) self.search.add_command(command) if (cog := self.bot.get_cog("Audio")) and (parent := getattr(cog, "audio")): command = commands.Command(self.youtube, name = "search", checks = [checks.not_forbidden().predicate]) command.error(self.youtube_error) parent.add_command(command) def cog_unload(self): if (cog := self.bot.get_cog("Audio")) and (parent := getattr(cog, "audio")): parent.remove_command("search") async def cog_check(self, ctx): return await checks.not_forbidden().predicate(ctx) @commands.group(invoke_without_command = True, case_insensitive = True) async def search(self, ctx): await ctx.embed_reply(":grey_question: Search what?") async def youtube(self, ctx, *, search: str): ydl = youtube_dl.YoutubeDL({"default_search": "auto", "noplaylist": True, "quiet": True}) func = functools.partial(ydl.extract_info, search, download = False) info = await self.bot.loop.run_in_executor(None, func) if not info.get("entries"): return await ctx.embed_reply(f"{ctx.bot.error_emoji} Video not found") await ctx.message.reply(info["entries"][0].get("webpage_url")) async def youtube_error(self, ctx, error): if isinstance(error, commands.CommandInvokeError) and isinstance(error.original, youtube_dl.utils.DownloadError): await ctx.embed_reply(f":no_entry: Error: {error.original}") @commands.command() async def amazon(self, ctx, *search: str): await ctx.embed_reply(f"[Amazon search for \"{' '.join(search)}\"](https://smile.amazon.com/s/?field-keywords={'+'.join(search)})") @commands.command() async def aol(self, ctx, *search: str): await ctx.embed_reply(f"[AOL search for \"{' '.join(search)}\"](https://search.aol.com/aol/search?q={'+'.join(search)})") @commands.command(name = "ask.com") async def ask_com(self, ctx, *search: str): await ctx.embed_reply(f"[Ask.com search for \"{' '.join(search)}\"](http://www.ask.com/web?q={'+'.join(search)})") @commands.command()
MIT License
nxt-dev/nxt
nxt/session.py
RPCServerProcess.terminate
python
def terminate(self): try: proxy = NxtClient() proxy.kill() except Exception as e: if getattr(e, 'errno', -1) == 10061 and self.terminal: try: logger.info('Telling rpc server to shutdown...') proxy = NxtClient() proxy.kill() self.server_log_file = '' except: logger.warning( 'Unable to tell rpc server to shutdown!') try: self.terminal.terminate() self.terminal = None logger.debug('RPC server has shutdown!') except: logger.warning('Unable to kill rpc process!') nxt_log.shutdown_log_socket()
Attempts to kill the rpc server. Note: The server is killed even if it isn't owned by this class! :return: None
https://github.com/nxt-dev/nxt/blob/272e41dbfc963178a3509761bf244739a9bc3bd5/nxt/session.py#L370-L395
import os import logging import subprocess import sys import time import socket from nxt import nxt_log from . import nxt_io from . import nxt_path from . import nxt_layer from nxt.remote import get_running_server_address from nxt.remote.client import NxtClient from .remote import contexts from nxt.stage import Stage from nxt.constants import NXT_DCC_ENV_VAR, is_standalone logger = logging.getLogger(__name__) class Session(object): def __init__(self): self._loaded_files = {} self.log_file = nxt_log.make_session_log() self.rpc_server = None @property def loaded_files(self): return self._loaded_files def new_file(self): new_graph = Stage(name=self.get_unused_graph_name('untitled')) self._loaded_files[new_graph.uid] = new_graph return new_graph def get_unused_graph_name(self, name): def test_name(n): for stage in self._loaded_files.values(): if n == stage._name: return False return True result_name = '' num_suffix = 0 potential_name = name while result_name == '': if test_name(potential_name): result_name = potential_name else: num_suffix += 1 potential_name = name + str(num_suffix) return result_name def load_file(self, filepath): try: layer_data = nxt_io.load_file_data(filepath) new_stage = Stage(layer_data=layer_data) except IOError: logger.exception('Failed to open: "{}"'.format(filepath)) raise self._loaded_files[new_stage.uid] = new_stage return new_stage def unload_file(self, uid): unload_key = [k for k in self.loaded_files.keys() if k == uid] uid_lst = [k for k in self.loaded_files.keys()] if not unload_key: err_format = ('Unable to find a graph with the uuid {uid} ' 'exsisting uuids are {uidLst}') raise LookupError(err_format.format(uid=uid, uidLst=uid_lst)) unload_key = unload_key[0] self.loaded_files.pop(unload_key) def save_file(self, graph_file_path, new_path=None): target_path = graph_file_path if new_path: target_path = os.path.normcase(new_path) current_path = None if graph_file_path: current_path = os.path.normcase(graph_file_path) if current_path in (k[0] for k in self._loaded_files.keys()): self._loaded_files[current_path].save_layer(filepath=target_path) if current_path != target_path: changed_graph = self._loaded_files.pop(current_path) self._loaded_files[target_path] = changed_graph return True else: return False def save_layer(self, layer, filepath=None): layer.save(filepath=filepath) def get_stage(self, path): norm_path = os.path.normcase(path) for uid, data in self._loaded_files.items(): if data.filepath == norm_path: return self._loaded_files[uid] else: return self.load_file(path) def execute_graph(self, filepath, start=None, parameters=None, context=None): stage = self.get_stage(filepath) if context is None: self.start_rpc_if_needed(stage) try: return stage.execute(start=start, parameters=parameters) finally: self.shutdown_rpc_server() else: self._start_rpc_server() proxy = NxtClient() try: cache_file = proxy.exec_in_headless(filepath, start, None, parameters, context) return nxt_layer.CacheLayer.load_from_filepath(cache_file) finally: self.shutdown_rpc_server() def execute_nodes(self, filepath, node_paths, parameters=None): stage = self.get_stage(filepath) self.start_rpc_if_needed(stage) comp_layer = stage.build_stage() try: return stage.execute_nodes(node_paths=node_paths, layer=comp_layer, parameters=parameters) finally: self.shutdown_rpc_server() def start_rpc_if_needed(self, stage): remote_node_name = contexts.REMOTE_CONTEXT_BUILTIN_NODE remote_path = nxt_path.join_node_paths(nxt_path.NODE_SEP, remote_node_name) sub_graph_node_name = 
contexts.SUB_GRAPH_BUILTIN_NODE sub_graph_path = nxt_path.join_node_paths(nxt_path.NODE_SEP, sub_graph_node_name) start_rpc = False for layer in stage._sub_layers: if layer.lookup(remote_path) or layer.lookup(sub_graph_path): start_rpc = True break if start_rpc: self._start_rpc_server() return logger.info('It was determined you do not need an rpc server.') def _start_rpc_server(self, custom_stdout=False, rpc_log_filepath=None, socket_log=False, stream_handler=None): logger.debug('Starting rpc server!') rpc_server = RPCServerProcess.start(use_custom_stdout=custom_stdout, stdout_filepath=rpc_log_filepath, socket_log=socket_log, stream_handler=stream_handler) self.rpc_server = rpc_server def shutdown_rpc_server(self): if not self.rpc_server: logger.debug('This session does not own an rpc server.') return logger.debug('Trying to shutdown rpc server...') try: self.rpc_server.terminate() except: logging.exception('Failed to shut down rpc server!') pass self.rpc_server = None class RPCServerProcess(object): STARTUP_TIMEOUT = 5 def __init__(self, use_custom_stdout=False, stdout_filepath=None, socket_log=False, stream_handler=None): self.terminal = None self.use_custom_stdout = use_custom_stdout self.server_log_file = stdout_filepath or '' self.socket_logging = socket_log self.stream_handler = stream_handler @classmethod def start(cls, use_custom_stdout=False, stdout_filepath=None, socket_log=False, stream_handler=None): if not is_standalone(): logger.warning('The nxt rpc server cannot be started unless nxt ' 'is launched as standalone.') return rpc_server = cls(use_custom_stdout=use_custom_stdout, stdout_filepath=stdout_filepath, socket_log=socket_log, stream_handler=stream_handler) if rpc_server.run(): return rpc_server def run(self): if self.is_running(): logger.info('Server already running somewhere...') return False elif not self.is_port_available(): _, port = get_running_server_address(as_str=False) raise OSError('Port {} is not available!'.format(port)) old_env_verbosity = os.environ.get(nxt_log.VERBOSE_ENV_VAR, None) if self.socket_logging: os.environ[nxt_log.VERBOSE_ENV_VAR] = 'socket' if self.use_custom_stdout: if not self.server_log_file: _ext = '.nxtlog' self.server_log_file = nxt_io.generate_temp_file(suffix=_ext) nxt_log.track_log_file(self.server_log_file) server_log_handle = open(self.server_log_file, 'w') nxt_log.startup_log_socket(stream_handler=self.stream_handler) else: server_log_handle = None call = [sys.executable, '-m', 'nxt.remote.server', self.server_log_file] logger.debug("Calling: ") logger.debug(str(call)) self.terminal = subprocess.Popen(call, stdout=server_log_handle, stderr=server_log_handle) count = 0 while count < self.STARTUP_TIMEOUT: logger.debug('Waiting on rpc server...') time.sleep(1) count += 1 if self.is_running(): break if count == self.STARTUP_TIMEOUT: raise OSError('Failed to start RPC server!') logger.debug('rpc server started') if old_env_verbosity is not None: os.environ[nxt_log.VERBOSE_ENV_VAR] = old_env_verbosity return True def is_running(self): is_running = False try: proxy = NxtClient() is_running = proxy.is_alive() except Exception as e: if getattr(e, 'errno', -1) == 10061 and self.terminal: poll = self.terminal.poll() if poll is None: is_running = True return is_running
MIT License
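A minimal sketch of driving the Session/RPCServerProcess pair shown above; the graph path is hypothetical and the "nxt.session" import path is assumed from the record's function_path (nxt/session.py).

    from nxt.session import Session   # import path assumed from nxt/session.py

    session = Session()
    # execute_graph() starts an rpc server only if the graph needs one and
    # calls shutdown_rpc_server() (which uses RPCServerProcess.terminate) when done.
    cache = session.execute_graph("/path/to/graph.nxt")   # hypothetical path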
dials/dials
algorithms/refinement/parameterisation/prediction_parameters.py
XYPhiPredictionParameterisation._goniometer_derivatives
python
def _goniometer_derivatives( self, isel, parameterisation=None, dS_dgon_p=None, reflections=None ): axis = self._axis.select(isel) fixed_rotation = self._fixed_rotation.select(isel) phi_calc = self._phi_calc.select(isel) h = self._h.select(isel) s1 = self._s1.select(isel) e_X_r = self._e_X_r.select(isel) e_r_s0 = self._e_r_s0.select(isel) UB = self._UB.select(isel) D = self._D.select(isel) if dS_dgon_p is None: dS_dgon_p = [ None if der is None else flex.mat3_double(len(isel), der.elems) for der in parameterisation.get_ds_dp(use_none_as_null=True) ] dphi_dp = [] dpv_dp = [] for der in dS_dgon_p: if der is None: dphi_dp.append(None) dpv_dp.append(None) continue tmp = fixed_rotation * (UB * h) dr = der * tmp.rotate_around_origin(axis, phi_calc) dphi = -1.0 * dr.dot(s1) / e_r_s0 dphi_dp.append(dphi) dpv_dp.append(D * (dr + e_X_r * dphi)) return dpv_dp, dphi_dp
helper function to extend the derivatives lists by derivatives of the goniometer parameterisations
https://github.com/dials/dials/blob/a2cb71bf410e179b92554bcce2e21388e1dc25d1/algorithms/refinement/parameterisation/prediction_parameters.py#L857-L904
from collections import namedtuple from scitbx import matrix, sparse from dials.algorithms.refinement import DialsRefineConfigError from dials.array_family import flex ParamSet = namedtuple( "ParamSet", ["beam_param", "xl_ori_param", "xl_uc_param", "det_param", "gonio_param"], ) class PredictionParameterisation: def __init__( self, experiments, detector_parameterisations=None, beam_parameterisations=None, xl_orientation_parameterisations=None, xl_unit_cell_parameterisations=None, goniometer_parameterisations=None, ): if detector_parameterisations is None: detector_parameterisations = [] if beam_parameterisations is None: beam_parameterisations = [] if xl_orientation_parameterisations is None: xl_orientation_parameterisations = [] if xl_unit_cell_parameterisations is None: xl_unit_cell_parameterisations = [] if goniometer_parameterisations is None: goniometer_parameterisations = [] self._experiments = experiments self._detector_parameterisations = detector_parameterisations self._beam_parameterisations = beam_parameterisations self._xl_orientation_parameterisations = xl_orientation_parameterisations self._xl_unit_cell_parameterisations = xl_unit_cell_parameterisations self._goniometer_parameterisations = goniometer_parameterisations self._update() def _update(self): self._length = self._len() if self._length == 0: raise DialsRefineConfigError("There are no free parameters for refinement") e2bp = { ids: i for i, p in enumerate(self._beam_parameterisations) for ids in p.get_experiment_ids() } e2xop = { ids: i for i, p in enumerate(self._xl_orientation_parameterisations) for ids in p.get_experiment_ids() } e2xucp = { ids: i for i, p in enumerate(self._xl_unit_cell_parameterisations) for ids in p.get_experiment_ids() } e2dp = { ids: i for i, p in enumerate(self._detector_parameterisations) for ids in p.get_experiment_ids() } e2gp = { ids: i for i, p in enumerate(self._goniometer_parameterisations) for ids in p.get_experiment_ids() } self._exp_to_param = { i: ParamSet( e2bp.get(i), e2xop.get(i), e2xucp.get(i), e2dp.get(i), e2gp.get(i) ) for i, _ in enumerate(self._experiments) } def get_detector_parameterisations(self): return self._detector_parameterisations def get_beam_parameterisations(self): return self._beam_parameterisations def get_crystal_orientation_parameterisations(self): return self._xl_orientation_parameterisations def get_crystal_unit_cell_parameterisations(self): return self._xl_unit_cell_parameterisations def get_goniometer_parameterisations(self): return self._goniometer_parameterisations def _len(self): length = 0 for model in self._detector_parameterisations: length += model.num_free() for model in self._beam_parameterisations: length += model.num_free() for model in self._xl_orientation_parameterisations: length += model.num_free() for model in self._xl_unit_cell_parameterisations: length += model.num_free() for model in self._goniometer_parameterisations: length += model.num_free() return length def __len__(self): return self._length def get_param_vals(self): global_p_list = [] if self._detector_parameterisations: det_plists = [x.get_param_vals() for x in self._detector_parameterisations] params = [x for l in det_plists for x in l] global_p_list.extend(params) if self._beam_parameterisations: src_plists = [x.get_param_vals() for x in self._beam_parameterisations] params = [x for l in src_plists for x in l] global_p_list.extend(params) if self._xl_orientation_parameterisations: xlo_plists = [ x.get_param_vals() for x in self._xl_orientation_parameterisations ] params = [x 
for l in xlo_plists for x in l] global_p_list.extend(params) if self._xl_unit_cell_parameterisations: xluc_plists = [ x.get_param_vals() for x in self._xl_unit_cell_parameterisations ] params = [x for l in xluc_plists for x in l] global_p_list.extend(params) if self._goniometer_parameterisations: gon_plists = [ x.get_param_vals() for x in self._goniometer_parameterisations ] params = [x for l in gon_plists for x in l] global_p_list.extend(params) return global_p_list def get_param_names(self): param_names = [] for p in self._detector_parameterisations: prefix = p.model_identifier param_names.extend([prefix + x for x in p.get_param_names()]) for p in self._beam_parameterisations: prefix = p.model_identifier param_names.extend([prefix + x for x in p.get_param_names()]) for p in self._xl_orientation_parameterisations: prefix = p.model_identifier param_names.extend([prefix + x for x in p.get_param_names()]) for p in self._xl_unit_cell_parameterisations: prefix = p.model_identifier param_names.extend([prefix + x for x in p.get_param_names()]) for p in self._goniometer_parameterisations: prefix = p.model_identifier param_names.extend([prefix + x for x in p.get_param_names()]) return param_names def _modify_parameters(self, vals, set_vals=False, set_esds=False, set_fix=False): assert [set_vals, set_esds, set_fix].count(True) == 1 assert len(vals) == len(self) it = iter(vals) for model in ( self._detector_parameterisations + self._beam_parameterisations + self._xl_orientation_parameterisations + self._xl_unit_cell_parameterisations + self._goniometer_parameterisations ): tmp = [next(it) for i in range(model.num_free())] if set_esds: model.set_param_esds(tmp) elif set_vals: model.set_param_vals(tmp) elif set_fix: current_fixes = model.get_fixed() free_indices = [i for i, e in enumerate(current_fixes) if not e] assert len(free_indices) == model.num_free() for i, fix in zip(free_indices, tmp): if fix: current_fixes[i] = True model.set_fixed(current_fixes) def set_param_vals(self, vals): self._modify_parameters(vals, set_vals=True) def set_param_esds(self, esds): self._modify_parameters(esds, set_esds=True) def fix_params(self, fix): self._modify_parameters(fix, set_fix=True) self._update() def calculate_model_state_uncertainties(self, var_cov): i = 0 for model in ( self._detector_parameterisations + self._beam_parameterisations + self._xl_orientation_parameterisations + self._xl_unit_cell_parameterisations + self._goniometer_parameterisations ): n = model.num_free() sub = var_cov.matrix_copy_block(i, i, n, n) state_covs = model.calculate_state_uncertainties(sub) if state_covs is None: continue if len(state_covs) == 1: model.set_state_uncertainties(state_covs[0]) else: for i_state, state_cov in enumerate(state_covs): model.set_state_uncertainties(state_cov, multi_state_elt=i_state) i += n def get_gradients(self, reflections, callback=None): self._nref = len(reflections) self._D = flex.mat3_double(self._nref) self._s0 = flex.vec3_double(self._nref) self._U = flex.mat3_double(self._nref) self._B = flex.mat3_double(self._nref) self._axis = flex.vec3_double(self._nref) self._fixed_rotation = flex.mat3_double(self._nref) self._setting_rotation = flex.mat3_double(self._nref) self._experiment_to_idx = [] for iexp, exp in enumerate(self._experiments): sel = reflections["id"] == iexp isel = sel.iselection() self._experiment_to_idx.append(isel) subref = reflections.select(sel) states = self._get_model_data_for_experiment(exp, subref) self._D.set_selected(sel, states["D"]) self._s0.set_selected(sel, 
states["s0"]) self._U.set_selected(sel, states["U"]) self._B.set_selected(sel, states["B"]) if exp.goniometer: self._setting_rotation.set_selected(sel, states["S"]) self._axis.set_selected(sel, exp.goniometer.get_rotation_axis_datum()) self._fixed_rotation.set_selected( sel, exp.goniometer.get_fixed_rotation() ) self._h = reflections["miller_index"].as_vec3_double() self._UB = self._U * self._B self._s1 = reflections["s1"] self._pv = self._D * self._s1 u, v, w = self._pv.parts() assert w.all_ne(0) self._w_inv = 1.0 / w self._u_w_inv = u * self._w_inv self._v_w_inv = v * self._w_inv self._iparam = 0 self._local_setup(reflections) results = [] results = self._grads_detector_loop(reflections, results, callback=callback) results = self._grads_beam_loop(reflections, results, callback=callback) results = self._grads_xl_orientation_loop( reflections, results, callback=callback ) results = self._grads_xl_unit_cell_loop(reflections, results, callback=callback) results = self._grads_goniometer_loop(reflections, results, callback=callback) return results @staticmethod def _extend_gradient_vectors(results, m, n, keys=("dX_dp", "dY_dp", "dZ_dp")): new_results = [] for i in range(n): result = {} for key in keys: result[key] = flex.double(m, 0.0) new_results.append(result) results.extend(new_results) return results def _get_model_data_for_experiment(self, experiment, reflections): D = flex.mat3_double(len(reflections)) panels = reflections["panel"] for ipanel, D_mat in enumerate([p.get_D_matrix() for p in experiment.detector]): sel = panels == ipanel D.set_selected(sel, D_mat) result = { "s0": experiment.beam.get_s0(), "U": matrix.sqr(experiment.crystal.get_U()), "B": matrix.sqr(experiment.crystal.get_B()), "D": D, } if experiment.goniometer: result["S"] = matrix.sqr(experiment.goniometer.get_setting_rotation()) return result def _detector_derivatives( self, isel, panel_id, parameterisation=None, dd_ddet_p=None, reflections=None ): pv = self._pv.select(isel) D = self._D.select(isel) if dd_ddet_p is None: dd_ddet_p = parameterisation.get_ds_dp( multi_state_elt=panel_id, use_none_as_null=True ) dd_ddet_p = [ None if e is None else flex.mat3_double(len(D), e.elems) for e in dd_ddet_p ] dpv_ddet_p = [ der if der is None else (D * (der * -1.0)) * pv for der in dd_ddet_p ] return dpv_ddet_p def _beam_derivatives( self, isel, parameterisation=None, ds0_dbeam_p=None, reflections=None ): pass def _xl_orientation_derivatives( self, isel, parameterisation=None, dU_dxlo_p=None, reflections=None ): pass def _xl_unit_cell_derivatives( self, isel, parameterisation=None, dB_dxluc_p=None, reflections=None ): pass def _goniometer_derivatives( self, isel, parameterisation=None, dS_dgon_p=None, reflections=None ): pass def _grads_detector_loop(self, reflections, results, callback=None): for dp in self._detector_parameterisations: isel = flex.size_t() for exp_id in dp.get_experiment_ids(): isel.extend(self._experiment_to_idx[exp_id]) detector = dp.get_model() panel = reflections["panel"].select(isel) results = self._extend_gradient_vectors( results, self._nref, dp.num_free(), keys=self._grad_names ) for panel_id, _ in enumerate(detector): sub_isel = isel.select(panel == panel_id) if len(sub_isel) == 0: continue dpv_ddet_p = self._detector_derivatives( sub_isel, panel_id, parameterisation=dp, reflections=reflections ) sub_w_inv = self._w_inv.select(sub_isel) sub_u_w_inv = self._u_w_inv.select(sub_isel) sub_v_w_inv = self._v_w_inv.select(sub_isel) dX_ddet_p, dY_ddet_p = self._calc_dX_dp_and_dY_dp_from_dpv_dp( sub_w_inv, 
sub_u_w_inv, sub_v_w_inv, dpv_ddet_p ) iparam = self._iparam for dX, dY in zip(dX_ddet_p, dY_ddet_p): if dX is not None: results[iparam][self._grad_names[0]].set_selected(sub_isel, dX) if dY is not None: results[iparam][self._grad_names[1]].set_selected(sub_isel, dY) iparam += 1 if callback is not None: iparam = self._iparam for i in range(dp.num_free()): results[iparam] = callback(results[iparam]) iparam += 1 self._iparam += dp.num_free() return results def _grads_model_loop( self, parameterisations, reflections, results, callback=None, derivatives_fn=None, ): for p in parameterisations: isel = flex.size_t() for exp_id in p.get_experiment_ids(): isel.extend(self._experiment_to_idx[exp_id]) results = self._extend_gradient_vectors( results, self._nref, p.num_free(), keys=self._grad_names ) if len(isel) == 0: if callback: for _ in range(p.num_free()): results[self._iparam] = callback(results[self._iparam]) self._iparam += 1 else: self._iparam += p.num_free() continue w_inv = self._w_inv.select(isel) u_w_inv = self._u_w_inv.select(isel) v_w_inv = self._v_w_inv.select(isel) dpv_dbeam_p, dAngle_dbeam_p = derivatives_fn( isel, parameterisation=p, reflections=reflections ) dX_dbeam_p, dY_dbeam_p = self._calc_dX_dp_and_dY_dp_from_dpv_dp( w_inv, u_w_inv, v_w_inv, dpv_dbeam_p ) for dX, dY, dAngle in zip(dX_dbeam_p, dY_dbeam_p, dAngle_dbeam_p): if dX is not None: results[self._iparam][self._grad_names[0]].set_selected(isel, dX) if dY is not None: results[self._iparam][self._grad_names[1]].set_selected(isel, dY) if dAngle is not None: results[self._iparam][self._grad_names[2]].set_selected( isel, dAngle ) if callback is not None: results[self._iparam] = callback(results[self._iparam]) self._iparam += 1 return results def _grads_beam_loop(self, reflections, results, callback=None): return self._grads_model_loop( self._beam_parameterisations, reflections, results, derivatives_fn=self._beam_derivatives, callback=callback, ) def _grads_xl_orientation_loop(self, reflections, results, callback=None): return self._grads_model_loop( self._xl_orientation_parameterisations, reflections, results, derivatives_fn=self._xl_orientation_derivatives, callback=callback, ) def _grads_xl_unit_cell_loop(self, reflections, results, callback=None): return self._grads_model_loop( self._xl_unit_cell_parameterisations, reflections, results, derivatives_fn=self._xl_unit_cell_derivatives, callback=callback, ) def _grads_goniometer_loop(self, reflections, results, callback=None): return self._grads_model_loop( self._goniometer_parameterisations, reflections, results, derivatives_fn=self._goniometer_derivatives, callback=callback, ) class SparseGradientVectorMixin: @staticmethod def _extend_gradient_vectors(results, m, n, keys=("dX_dp", "dY_dp", "dZ_dp")): new_results = [{key: sparse.matrix_column(m) for key in keys} for _ in range(n)] results.extend(new_results) return results class XYPhiPredictionParameterisation(PredictionParameterisation): _grad_names = ("dX_dp", "dY_dp", "dphi_dp") def _local_setup(self, reflections): self._phi_calc = reflections["xyzcal.mm"].parts()[2] q = self._fixed_rotation * (self._UB * self._h) self._r = self._setting_rotation * q.rotate_around_origin( self._axis, self._phi_calc ) self._e_X_r = (self._setting_rotation * self._axis).cross(self._r) self._e_r_s0 = (self._e_X_r).dot(self._s0) e_r_s0_mag = flex.abs(self._e_r_s0) try: assert flex.min(e_r_s0_mag) > 1.0e-6 except AssertionError as e: imin = flex.min_index(e_r_s0_mag) print("(e X r).s0 too small:") print("for", (e_r_s0_mag <= 
1.0e-6).count(True), "reflections") print("out of", len(e_r_s0_mag), "total") print("such as", reflections["miller_index"][imin]) print("with scattering vector", reflections["s1"][imin]) print("where r =", self._r[imin]) print("e =", self._axis[imin]) print("s0 =", self._s0[imin]) print("this reflection forms angle with the equatorial plane " "normal:") vecn = ( matrix.col(self._s0[imin]) .cross(matrix.col(self._axis[imin])) .normalize() ) print(matrix.col(reflections["s1"][imin]).accute_angle(vecn)) raise e def _beam_derivatives( self, isel, parameterisation=None, ds0_dbeam_p=None, reflections=None ): r = self._r.select(isel) e_X_r = self._e_X_r.select(isel) e_r_s0 = self._e_r_s0.select(isel) D = self._D.select(isel) if ds0_dbeam_p is None: ds0_dbeam_p = parameterisation.get_ds_dp(use_none_as_null=True) ds0_dbeam_p = [ None if e is None else flex.vec3_double(len(r), e.elems) for e in ds0_dbeam_p ] dphi_dp = [] dpv_dp = [] for der in ds0_dbeam_p: if der is None: dphi_dp.append(None) dpv_dp.append(None) continue dphi = (r.dot(der) / e_r_s0) * -1.0 dphi_dp.append(dphi) dpv_dp.append(D * (e_X_r * dphi + der)) return dpv_dp, dphi_dp def _xl_derivatives(self, isel, derivatives, b_matrix, parameterisation=None): axis = self._axis.select(isel) fixed_rotation = self._fixed_rotation.select(isel) setting_rotation = self._setting_rotation.select(isel) phi_calc = self._phi_calc.select(isel) h = self._h.select(isel) s1 = self._s1.select(isel) e_X_r = self._e_X_r.select(isel) e_r_s0 = self._e_r_s0.select(isel) if b_matrix: B = self._B.select(isel) else: U = self._U.select(isel) D = self._D.select(isel) if derivatives is None: derivatives = [ None if der is None else flex.mat3_double(len(isel), der.elems) for der in parameterisation.get_ds_dp(use_none_as_null=True) ] dphi_dp = [] dpv_dp = [] for der in derivatives: if der is None: dphi_dp.append(None) dpv_dp.append(None) continue if b_matrix: tmp = fixed_rotation * (der * B * h) else: tmp = fixed_rotation * (U * der * h) dr = setting_rotation * tmp.rotate_around_origin(axis, phi_calc) dphi = -1.0 * dr.dot(s1) / e_r_s0 dphi_dp.append(dphi) dpv_dp.append(D * (dr + e_X_r * dphi)) return dpv_dp, dphi_dp def _xl_orientation_derivatives( self, isel, parameterisation=None, dU_dxlo_p=None, reflections=None ): return self._xl_derivatives( isel, dU_dxlo_p, b_matrix=True, parameterisation=parameterisation ) def _xl_unit_cell_derivatives( self, isel, parameterisation=None, dB_dxluc_p=None, reflections=None ): return self._xl_derivatives( isel, dB_dxluc_p, b_matrix=False, parameterisation=parameterisation )
BSD 3-Clause New or Revised License
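A LaTeX sketch of the relations the _goniometer_derivatives method above implements, transcribed directly from the code rather than from dials documentation; S is the goniometer setting rotation, F the fixed rotation, e the rotation axis, R(e, phi) the scan rotation, and D the detector matrix.

    % dr = (dS/dp) R(e, phi) F U B h ; e_X_r in the code is (S e) x r
    \frac{\partial \mathbf{r}}{\partial p}
        = \frac{\partial S}{\partial p}\, R(\mathbf{e}, \phi)\, F\, U B\, \mathbf{h}
    \qquad
    \frac{\partial \phi}{\partial p}
        = -\,\frac{\frac{\partial \mathbf{r}}{\partial p} \cdot \mathbf{s}_1}
                  {(S\mathbf{e} \times \mathbf{r}) \cdot \mathbf{s}_0}
    \qquad
    \frac{\partial \mathbf{pv}}{\partial p}
        = D \left( \frac{\partial \mathbf{r}}{\partial p}
            + (S\mathbf{e} \times \mathbf{r})\, \frac{\partial \phi}{\partial p} \right)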
almost-matching-exactly/dame-flame-python-package
dame_flame/matching.py
FLAME.predict
python
def predict(self, input_data, pre_dame=False, C=0.1): self.input_data, self.holdout_data = data_cleaning.read_files( input_data, self.holdout_data) return_array = _FLAME(self.input_data.copy(deep=True), self.holdout_data.copy(deep=True), self.treatment_column_name, self.weight_array,self.outcome_column_name, self.adaptive_weights, self.alpha, self.repeats, self.verbose, self.want_pe, self.early_stop_iterations, self.stop_unmatched_c, self.early_stop_un_c_frac, self.stop_unmatched_t, self.early_stop_un_t_frac, self.early_stop_pe, self.early_stop_pe_frac, self.want_bf, self.missing_indicator, self.missing_data_replace, self.missing_holdout_replace, self.missing_holdout_imputations, self.missing_data_imputations, pre_dame, C) self.bf_each_iter = None self.pe_each_iter = None if self.missing_data_replace != 3 and not pre_dame: self.df_units_and_covars_matched = return_array[0] self.groups_per_unit = self.df_units_and_covars_matched['weights'] self.df_units_and_covars_matched = self.df_units_and_covars_matched.drop(columns=['weights']) self.units_per_group = return_array[1] if self.want_pe: self.pe_each_iter = return_array[2] if self.want_bf: self.bf_each_iter = return_array[-1] if (self.missing_data_replace != 3 and pre_dame==False): self.df_units_and_covars_matched = return_array[0] self.groups_per_unit = self.df_units_and_covars_matched['weights'] self.df_units_and_covars_matched = self.df_units_and_covars_matched.drop(columns = ['weights']) self.units_per_group = return_array[1] if (self.want_pe == True): self.pe_each_iter = return_array[2] if (self.want_bf == True): self.bf_each_iter = return_array[-1] elif pre_dame: self.df_units_and_covars_matched = return_array[0] self.df_units_and_covars_matched = self.df_units_and_covars_matched.append(return_array[-1][0], sort=True) if self.repeats == True: df_wrepeats = self.df_units_and_covars_matched df_no_repeats = df_wrepeats[~df_wrepeats.index.duplicated(keep='first')].copy() matched_twice_ind = df_wrepeats[df_wrepeats.index.duplicated()].index.unique() for index in matched_twice_ind: df_no_repeats.loc[index, 'weights'] = df_wrepeats.loc[index].copy()['weights'].sum() self.df_units_and_covars_matched = df_no_repeats self.df_units_and_covars_matched.replace(np.nan, "*") self.groups_per_unit = self.df_units_and_covars_matched['weights'] self.df_units_and_covars_matched = self.df_units_and_covars_matched.drop(columns = ['weights']) self.units_per_group = return_array[1] self.units_per_group += return_array[-1][1] if (self.want_pe == True): self.pe_each_iter = return_array[2] self.pe_each_iter += return_array[-1][2] if (self.want_bf == True): self.bf_each_iter = return_array[-2] self.bf_each_iter += return_array[-1][-2] else: self.df_units_and_covars_matched = [] self.groups_per_unit = [] self.df_units_and_covars_matched = [] self.units_per_group = [] self.pe_each_iter = [] self.bf_each_iter = [] for return_val in return_array: self.df_units_and_covars_matched.append(return_val[0].drop(columns = ['weights'])) self.groups_per_unit.append(return_val[0]['weights']) self.units_per_group.append(return_val[1]) if self.want_pe: self.pe_each_iter.append(return_val[2]) if self.want_bf: self.bf_each_iter.append(return_val[-1]) return self.df_units_and_covars_matched
Performs the match and returns the matched data. Parameters ---------- input_data: {string, dataframe}, required parameter The dataframe on which to perform the matching, or the location of the CSV with the dataframe. pre_dame (int, False): Indicates whether to switch to DAME, and if so after how many iterations. C (float, 0.1): The tradeoff between PE and BF in computing MQ
https://github.com/almost-matching-exactly/dame-flame-python-package/blob/b2e53a154ff514fbcbead2e8be36a8c769035015/dame_flame/matching.py#L193-L297
import numpy as np from . import data_cleaning from . import dame_algorithm from . import flame_algorithm from . import flame_dame_helpers class MatchParent: def __init__(self, adaptive_weights='ridge', alpha=0.1, repeats=True, verbose=2, early_stop_iterations=False, stop_unmatched_c=False, early_stop_un_c_frac=False, stop_unmatched_t=False, early_stop_un_t_frac=False, early_stop_pe=True, early_stop_pe_frac=0.05, missing_indicator=np.nan, missing_data_replace=0, missing_holdout_replace=0, missing_holdout_imputations=10, missing_data_imputations=1, want_pe=False, want_bf=False): self.adaptive_weights = adaptive_weights self.alpha = alpha self.repeats = repeats self.verbose = verbose self.missing_indicator = missing_indicator self.missing_data_replace = missing_data_replace self.missing_holdout_replace = missing_holdout_replace self.missing_holdout_imputations = missing_holdout_imputations self.missing_data_imputations = missing_data_imputations self.early_stop_iterations = early_stop_iterations self.stop_unmatched_c = stop_unmatched_c self.early_stop_un_c_frac = early_stop_un_c_frac self.stop_unmatched_t = stop_unmatched_t self.early_stop_un_t_frac = early_stop_un_t_frac self.early_stop_pe = early_stop_pe self.early_stop_pe_frac = early_stop_pe_frac self.want_pe = want_pe self.want_bf = want_bf def fit(self, holdout_data=False, treatment_column_name='treated', outcome_column_name='outcome', weight_array=False): self.holdout_data = holdout_data self.treatment_column_name = treatment_column_name self.outcome_column_name = outcome_column_name self.weight_array = weight_array class DAME(MatchParent): def predict(self, input_data): self.input_data, self.holdout_data = data_cleaning.read_files( input_data, self.holdout_data) return_array = _DAME(self.input_data.copy(deep=True), self.holdout_data.copy(deep=True), self.treatment_column_name, self.weight_array, self.outcome_column_name, self.adaptive_weights, self.alpha, self.repeats, self.verbose, self.want_pe, self.early_stop_iterations, self.stop_unmatched_c, self.early_stop_un_c_frac, self.stop_unmatched_t, self.early_stop_un_t_frac, self.early_stop_pe, self.early_stop_pe_frac, self.want_bf, self.missing_indicator, self.missing_data_replace, self.missing_holdout_replace, self.missing_holdout_imputations, self.missing_data_imputations) self.bf_each_iter = None self.pe_each_iter = None if (self.missing_data_replace != 3): self.df_units_and_covars_matched = return_array[0] self.groups_per_unit = self.df_units_and_covars_matched['weights'] self.df_units_and_covars_matched = self.df_units_and_covars_matched.drop(columns = ['weights']) self.units_per_group = return_array[1] if (self.want_pe == True): self.pe_each_iter = return_array[2] if (self.want_bf == True): self.bf_each_iter = return_array[-1] else: array_of_dfs = [] array_of_groups_per_unit = [] array_of_units_per_group = [] for arr in return_array: temp_df = arr[0] array_of_groups_per_unit.append(temp_df['weights']) array_of_dfs.append(temp_df.drop(columns=['weights'])) array_of_units_per_group.append(arr[1]) self.groups_per_unit = array_of_groups_per_unit self.df_units_and_covars_matched = array_of_dfs self.units_per_group = array_of_units_per_group return self.df_units_and_covars_matched class FLAME(MatchParent):
MIT License
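A minimal usage sketch for the FLAME matcher above. Only arguments visible in the record's constructor, fit() and predict() signatures are used; the toy dataframe is purely illustrative (real data would need more rows and covariates for meaningful matches).

    import pandas as pd
    from dame_flame.matching import FLAME   # import path assumed from dame_flame/matching.py

    # Hypothetical toy data: binary covariates plus 'treated' and 'outcome' columns.
    df = pd.DataFrame({
        "x1": [0, 1, 1, 0], "x2": [1, 1, 0, 0],
        "treated": [0, 1, 0, 1], "outcome": [1.2, 2.3, 0.7, 1.9],
    })

    model = FLAME(repeats=False, verbose=1)
    model.fit(holdout_data=df, treatment_column_name="treated",
              outcome_column_name="outcome")
    matched = model.predict(df)   # matched units and their covariates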
macbre/index-digest
indexdigest/database.py
Database._get_information_schema_where
python
def _get_information_schema_where(self, table_name): return "WHERE TABLE_SCHEMA='{db}' AND TABLE_NAME='{table_name}'".format( db=self._connection_params['db'], table_name=table_name)
:type table_name str :rtype: str
https://github.com/macbre/index-digest/blob/b1f07b4730a424b74e3398010361e8e4a4282300/indexdigest/database.py#L253-L260
import logging import re from collections import OrderedDict, defaultdict from warnings import filterwarnings import MySQLdb from MySQLdb.cursors import DictCursor from MySQLdb._exceptions import OperationalError, ProgrammingError from indexdigest.schema import Column, Index from indexdigest.utils import parse_dsn, memoize, IndexDigestError class IndexDigestQueryError(IndexDigestError): class DatabaseBase: def __init__(self, host, user, passwd, db, port=3306): self.logger = logging.getLogger(__name__) self.query_logger = logging.getLogger(__name__ + '.query') self._connection_params = dict(host=host, port=port, user=user, passwd=passwd, db=db) self._connection = None self.db_name = db filterwarnings('ignore', category=MySQLdb.Warning) self._queries = [] @classmethod def connect_dsn(cls, dsn): parsed = parse_dsn(dsn) return cls(**parsed) @property def connection(self): if self._connection is None: self.logger.info('Lazy connecting to %s:%i and using %s database', self._connection_params['host'], self._connection_params['port'], self._connection_params['db']) self._connection = MySQLdb.connect(**self._connection_params) return self._connection def get_queries(self): return self._queries def query(self, sql, cursor_class=None): self.query_logger.info('%s', sql) cursor = self.connection.cursor(cursorclass=cursor_class) try: try: sql = sql.encode('utf8') except UnicodeDecodeError: pass cursor.execute(sql) except (OperationalError, ProgrammingError) as ex: (code, message) = ex.args self.query_logger.error('Database error #%d: %s', code, message) raise IndexDigestQueryError(message) from ex self._queries.append(sql) return cursor def query_row(self, sql): return self.query(sql).fetchone() def query_dict_row(self, sql): return self.query(sql, cursor_class=DictCursor).fetchone() def query_dict_rows(self, sql): for row in self.query(sql, cursor_class=DictCursor): yield row def query_field(self, sql): return self.query_row(sql)[0] def query_list(self, sql): for row in self.query(sql): yield str(row[0]) def query_key_value(self, sql): res = OrderedDict() for row in self.query(sql): res[row[0]] = row[1] return res class Database(DatabaseBase): @memoize def get_server_version(self): return self.query_field('SELECT VERSION()') def get_server_hostname(self): return self.get_variables(like='hostname').get('hostname') @memoize def get_tables(self): return sorted(self.query_list( 'SELECT TABLE_NAME FROM information_schema.tables ' 'WHERE table_schema = "{}" and TABLE_TYPE = "BASE TABLE"'. format(self.db_name) )) def get_variables(self, like=None): sql = 'SHOW VARIABLES' if like is not None: sql += ' LIKE "{}%"'.format(like) return self.query_key_value(sql) @memoize def explain_query(self, sql): return list(self.query_dict_rows('EXPLAIN {}'.format(sql))) @memoize def get_table_schema(self, table_name): schema = str(self.query_row('SHOW CREATE TABLE `{}`'.format(table_name))[1]) schema = re.sub(r'/\*!50100[^*]+\*/', '', schema) return schema.rstrip()
MIT License
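A standalone sketch of the WHERE clause that _get_information_schema_where builds above; the database and table names are placeholders.

    # Same format string as in the method; db/table values are hypothetical.
    db, table_name = "app_db", "users"
    where = "WHERE TABLE_SCHEMA='{db}' AND TABLE_NAME='{table_name}'".format(
        db=db, table_name=table_name)
    print(where)   # WHERE TABLE_SCHEMA='app_db' AND TABLE_NAME='users'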
netflix/pygenie
pygenie/jobs/hive.py
HiveJob._parameter_file
python
def _parameter_file(self): param_file = "" for name, value in self._parameters.items(): value = text_type(value) param_file = '{p}SET hivevar:{name}={value};\n' .format(p=param_file, name=name, value=value) return param_file.strip()
Takes specified parameters and creates a string for the parameter file.
https://github.com/netflix/pygenie/blob/96058a7586e001478048d84d5e8c7a6415c66f81/pygenie/jobs/hive.py#L98-L110
from __future__ import absolute_import, division, print_function, unicode_literals import logging import os from six import text_type from collections import OrderedDict from ..utils import unicodify from .core import GenieJob from .utils import (add_to_repr, arg_list, arg_string, is_file) logger = logging.getLogger('com.netflix.genie.jobs.hive') class HiveJob(GenieJob): DEFAULT_SCRIPT_NAME = 'script.hive' def __init__(self, conf=None): super(HiveJob, self).__init__(conf=conf) self._parameters = OrderedDict() self._property_files = list() self._script = None @property def cmd_args(self): if self._command_arguments is not None: return self._command_arguments filename = HiveJob.DEFAULT_SCRIPT_NAME if is_file(self._script): filename = os.path.basename(self._script) self._add_dependency(self._script) elif self._script is not None: self._add_dependency({'name': filename, 'data': self._script}) param_str = self._parameter_file if param_str: self._add_dependency({ 'name': '_hive_parameters.txt', 'data': param_str }) props_str = ' '.join([ '--hiveconf {name}={value}'.format(name=k, value=v) for k, v in self._command_options.get('--hiveconf', {}).items() ]) prop_file_str = ' '.join(['-i {}'.format(os.path.basename(f)) for f in self._property_files]) if self._property_files else '' return '{prop_file} {props} {params} -f {filename} {post_cmd_args}' .format(prop_file=prop_file_str, props=props_str, filename=filename, params='-i _hive_parameters.txt' if param_str else '', post_cmd_args=' '.join(self._post_cmd_args)) .strip() @property
Apache License 2.0
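A standalone sketch of the parameter-file string that _parameter_file assembles from self._parameters above; the hivevar names and values are placeholders.

    from collections import OrderedDict

    # Mirrors the loop in _parameter_file with a hypothetical parameters dict.
    parameters = OrderedDict([("schema", "prod"), ("ds", "2021-01-01")])
    param_file = ""
    for name, value in parameters.items():
        param_file = '{p}SET hivevar:{name}={value};\n'.format(
            p=param_file, name=name, value=value)
    print(param_file.strip())
    # SET hivevar:schema=prod;
    # SET hivevar:ds=2021-01-01;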
terrainbento/terrainbento
tests/test_advanced_output_intervals_generic.py
generate_previous_list
python
def generate_previous_list(next_list): previous_list = [None] + next_list[:-1] idx_last_valid = 1 for i in range(1, len(previous_list)): if previous_list[i] is None: previous_list[i] = previous_list[idx_last_valid] else: idx_last_valid = i assert len(next_list) == len(previous_list) return previous_list
Generate the expected list of previous values given a list of next values. Lags the next list, holds the last valid number, and adds None to the front. e.g. [0,1,2,None,None,None] -> [None, 0,1,2,2,2]
https://github.com/terrainbento/terrainbento/blob/3758d3526a3a134e2cee5263ccff5d51d3ea13d1/tests/test_advanced_output_intervals_generic.py#L25-L44
import itertools import os.path import pytest from terrainbento.output_writers import ( GenericOutputWriter, OutputIteratorSkipWarning, ) class ClockModel: def __init__(self, clock): self.clock = clock def to_floats(int_list): return [None if i is None else float(i) for i in int_list]
MIT License
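A self-contained check of the docstring example above; the helper is copied verbatim so the snippet runs on its own.

    def generate_previous_list(next_list):
        # Copied from the test helper above for a self-contained check.
        previous_list = [None] + next_list[:-1]
        idx_last_valid = 1
        for i in range(1, len(previous_list)):
            if previous_list[i] is None:
                previous_list[i] = previous_list[idx_last_valid]
            else:
                idx_last_valid = i
        assert len(next_list) == len(previous_list)
        return previous_list

    # Matches the example in the docstring.
    assert generate_previous_list([0, 1, 2, None, None, None]) == [None, 0, 1, 2, 2, 2]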
ohmu/poni
poni/cloudbase.py
Provider.get_provider_key
python
def get_provider_key(cls, cloud_prop): raise NoProviderMethod(cls, "get_provider_key")
Return the cloud provider key for the given cloud properties. A unique provider key can be returned based on, for example, the region of the specified data-center. Returns a minimum unique key value needed to uniquely describe the cloud Provider. Can be e.g. (provider_type, data_center_id), like with AWS-EC2. The return value also needs to be hash()able.
https://github.com/ohmu/poni/blob/6bcccf2853c63f3658c490e1068f5489c11ce754/poni/cloudbase.py#L46-L57
from . import errors class NoProviderMethod(NotImplementedError): def __init__(self, obj, func): name = (obj if isinstance(obj, type) else obj.__class__).__name__ NotImplementedError.__init__(self, "{0} does not implement {1}".format(name, func)) class Provider(object): def __init__(self, provider_id, cloud_prop): self.provider_id = provider_id self._provider_key = self.get_provider_key(cloud_prop) def __eq__(self, other): if not other or not isinstance(other, Provider): return False return self._provider_key == other._provider_key def __ne__(self, other): if not other or not isinstance(other, Provider): return True return self._provider_key != other._provider_key def __hash__(self): return hash(("cloudbase.Provider", self._provider_key)) def required_prop(self, cloud_prop, prop_name): value = cloud_prop.get(prop_name) if value is None: raise errors.CloudError("'cloud.{0}' property required by {1} not defined".format( prop_name, self.provider_id)) return value @classmethod
Apache License 2.0
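A minimal sketch of subclassing Provider and supplying get_provider_key as the docstring above describes; DummyProvider and the "region" property are hypothetical, and the "poni.cloudbase" import path is assumed from the record's poni/cloudbase.py.

    from poni import cloudbase   # import path assumed from poni/cloudbase.py

    class DummyProvider(cloudbase.Provider):
        """Hypothetical provider used only to illustrate the interface."""
        @classmethod
        def get_provider_key(cls, cloud_prop):
            # A provider is identified by a hashable key, e.g. type plus region.
            return ("dummy", cloud_prop.get("region", "default"))

    provider = DummyProvider("dummy", {"region": "eu-1"})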
funcwj/aps
aps/transform/utils.py
_pytorch_istft
python
def _pytorch_istft(transform: th.Tensor, frame_len: int, frame_hop: int, window: th.Tensor, n_fft: int = 512, return_polar: bool = False, normalized: bool = False, onesided: bool = True, center: bool = False, eps: float = EPSILON) -> th.Tensor: if TORCH_VERSION < 1.7: raise RuntimeError("Can not use this function as TORCH_VERSION < 1.7") transform_dim = transform.dim() if transform_dim == 3: transform = th.unsqueeze(transform, 0) if transform_dim != 4: raise RuntimeError(f"Expect 4D tensor, but got {transform_dim}D") if return_polar: real = transform[..., 0] * th.cos(transform[..., 1]) imag = transform[..., 0] * th.sin(transform[..., 1]) transform = th.stack([real, imag], -1) stft = th.view_as_complex(transform) wav = th.istft(stft, n_fft, hop_length=frame_hop, win_length=window.shape[-1], window=window, center=center, normalized=normalized, onesided=onesided, return_complex=False) return wav
Wrapper of PyTorch iSTFT function Args: transform (Tensor): results of STFT frame_len: length of the frame frame_hop: hop size between frames window: window tensor n_fft: number of the FFT size return_polar: keep same with _pytorch_stft center: same definition with the parameter in librosa.stft normalized: use normalized DFT kernel onesided: output onesided STFT Return: wav (Tensor): synthetic audio
https://github.com/funcwj/aps/blob/d7208bca3a2f04e751fcc6e2d3c56964eeb179a5/aps/transform/utils.py#L417-L468
import math import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as tf import librosa.filters as filters from aps.const import EPSILON, TORCH_VERSION from typing import Optional, Tuple if TORCH_VERSION >= 1.7: from torch.fft import fft as fft_func else: pass def export_jit(transform: nn.Module) -> nn.Module: export_out = [module for module in transform if module.exportable()] return nn.Sequential(*export_out) def init_window(wnd: str, frame_len: int, device: th.device = "cpu") -> th.Tensor: def sqrthann(frame_len, periodic=True): return th.hann_window(frame_len, periodic=periodic)**0.5 if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]: raise RuntimeError(f"Unknown window type: {wnd}") wnd_tpl = { "sqrthann": sqrthann, "hann": th.hann_window, "hamm": th.hamming_window, "blackman": th.blackman_window, "bartlett": th.bartlett_window, "rect": th.ones } if wnd != "rect": c = wnd_tpl[wnd](frame_len, periodic=True) else: c = wnd_tpl[wnd](frame_len) return c.to(device) def init_kernel(frame_len: int, frame_hop: int, window: th.Tensor, round_pow_of_two: bool = True, normalized: bool = False, inverse: bool = False, mode: str = "librosa") -> Tuple[th.Tensor, th.Tensor]: if mode not in ["librosa", "kaldi"]: raise ValueError(f"Unsupported mode: {mode}") if round_pow_of_two or mode == "kaldi": fft_size = 2**math.ceil(math.log2(frame_len)) else: fft_size = frame_len if mode == "librosa" and fft_size != frame_len: lpad = (fft_size - frame_len) // 2 window = tf.pad(window, (lpad, fft_size - frame_len - lpad)) if normalized: S = fft_size**0.5 else: S = 1 if TORCH_VERSION >= 1.7: K = fft_func(th.eye(fft_size) / S, dim=-1) K = th.stack([K.real, K.imag], dim=-1) else: I = th.stack([th.eye(fft_size), th.zeros(fft_size, fft_size)], dim=-1) K = th.fft(I / S, 1) if mode == "kaldi": K = K[:frame_len] if inverse and not normalized: K = K / fft_size K = th.transpose(K, 0, 2) K = th.reshape(K, (fft_size * 2, 1, K.shape[-1])) return K.to(window.device), window def mel_filter(frame_len: int, round_pow_of_two: bool = True, num_bins: Optional[int] = None, sr: int = 16000, num_mels: int = 80, fmin: float = 0.0, fmax: Optional[float] = None, norm: bool = False) -> th.Tensor: if num_bins is None: N = 2**math.ceil( math.log2(frame_len)) if round_pow_of_two else frame_len else: N = (num_bins - 1) * 2 freq_upper = sr // 2 if fmax is None: fmax = freq_upper else: fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper) fmin = max(0, fmin) mel = filters.mel(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm="slaney" if norm else None) return th.tensor(mel, dtype=th.float32) def speed_perturb_filter(src_sr: int, dst_sr: int, cutoff_ratio: float = 0.95, num_zeros: int = 64) -> th.Tensor: if src_sr == dst_sr: raise ValueError( f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}") gcd = math.gcd(src_sr, dst_sr) src_sr = src_sr // gcd dst_sr = dst_sr // gcd if src_sr == 1 or dst_sr == 1: raise ValueError("do not support integer downsample/upsample") zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio padding = 1 + int(num_zeros / zeros_per_block) times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) - np.arange(src_sr)[None, :, None] / float(src_sr) - np.arange(2 * padding + 1)[None, None, :] + padding) window = np.heaviside(1 - np.abs(times / padding), 0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi)) weight = np.sinc( times * zeros_per_block) * window * zeros_per_block / float(src_sr) return th.tensor(weight, dtype=th.float32) def 
splice_feature(feats: th.Tensor, lctx: int = 1, rctx: int = 1, op: str = "cat") -> th.Tensor: if lctx + rctx == 0: return feats if op not in ["cat", "stack"]: raise ValueError(f"Unknown op for feature splicing: {op}") ctx = [] T = feats.shape[-2] for c in range(-lctx, rctx + 1): idx = th.arange(c, c + T, device=feats.device, dtype=th.int64) idx = th.clamp(idx, min=0, max=T - 1) ctx.append(th.index_select(feats, -2, idx)) if op == "cat": splice = th.cat(ctx, -1) else: splice = th.stack(ctx, -1) return splice def _forward_stft(wav: th.Tensor, kernel: th.Tensor, window: th.Tensor, return_polar: bool = False, pre_emphasis: float = 0, frame_hop: int = 256, onesided: bool = False, center: bool = False, eps: float = EPSILON) -> th.Tensor: wav_dim = wav.dim() if wav_dim not in [2, 3]: raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D") N, S = wav.shape[0], wav.shape[-1] wav = wav.view(-1, 1, S) if center: pad = kernel.shape[-1] // 2 wav = tf.pad(wav, (pad, pad), mode="reflect") kernel = kernel * window if pre_emphasis > 0: frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0) frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1] frames[:, 0] *= (1 - pre_emphasis) packed = th.matmul(kernel[:, 0][None, ...], frames) else: packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0) if wav_dim == 3: packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1]) real, imag = th.chunk(packed, 2, dim=-2) if onesided: num_bins = kernel.shape[0] // 4 + 1 real = real[..., :num_bins, :] imag = imag[..., :num_bins, :] if return_polar: mag = (real**2 + imag**2 + eps)**0.5 pha = th.atan2(imag, real) return th.stack([mag, pha], dim=-1) else: return th.stack([real, imag], dim=-1) def _inverse_stft(transform: th.Tensor, kernel: th.Tensor, window: th.Tensor, return_polar: bool = False, frame_hop: int = 256, onesided: bool = False, center: bool = False, eps: float = EPSILON) -> th.Tensor: transform_dim = transform.dim() if transform_dim == 3: transform = th.unsqueeze(transform, 0) if transform_dim != 4: raise RuntimeError(f"Expect 4D tensor, but got {transform_dim}D") if return_polar: real = transform[..., 0] * th.cos(transform[..., 1]) imag = transform[..., 0] * th.sin(transform[..., 1]) else: real, imag = transform[..., 0], transform[..., 1] if onesided: reverse = range(kernel.shape[0] // 4 - 1, 0, -1) real = th.cat([real, real[:, reverse]], 1) imag = th.cat([imag, -imag[:, reverse]], 1) packed = th.cat([real, imag], dim=1) wav = tf.conv_transpose1d(packed, kernel * window, stride=frame_hop, padding=0) num_frames = packed.shape[-1] win_length = window.shape[0] win = th.repeat_interleave(window[..., None]**2, num_frames, dim=-1) I = th.eye(win_length, device=win.device)[:, None] denorm = tf.conv_transpose1d(win[None, ...], I, stride=frame_hop, padding=0) if center: pad = kernel.shape[-1] // 2 wav = wav[..., pad:-pad] denorm = denorm[..., pad:-pad] wav = wav / (denorm + eps) return wav.squeeze(1) def _pytorch_stft(wav: th.Tensor, frame_len: int, frame_hop: int, n_fft: int = 512, return_polar: bool = False, window: str = "sqrthann", normalized: bool = False, onesided: bool = True, center: bool = False, eps: float = EPSILON) -> th.Tensor: if TORCH_VERSION < 1.7: raise RuntimeError("Can not use this function as TORCH_VERSION < 1.7") wav_dim = wav.dim() if wav_dim not in [2, 3]: raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D") N, S = wav.shape[0], wav.shape[-1] wav = wav.view(-1, S) stft = th.stft(wav, n_fft, hop_length=frame_hop, 
win_length=window.shape[-1], window=window, center=center, normalized=normalized, onesided=onesided, return_complex=False) if wav_dim == 3: stft = stft.view(N, -1, stft.shape[-3], stft.shape[-2]) if not return_polar: return stft real, imag = stft[..., 0], stft[..., 1] mag = (real**2 + imag**2 + eps)**0.5 pha = th.atan2(imag, real) return th.stack([mag, pha], dim=-1)
Apache License 2.0
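A round-trip sketch for the STFT/iSTFT wrappers above (they require torch >= 1.7); the frame sizes are placeholders and the "aps.transform.utils" import path is assumed from the record's function_path.

    import torch as th
    from aps.transform.utils import init_window, _pytorch_stft, _pytorch_istft

    wav = th.randn(1, 16000)                 # one second of fake 16 kHz audio
    window = init_window("hann", 400)        # 25 ms frames at 16 kHz
    stft = _pytorch_stft(wav, 400, 160, n_fft=512, window=window,
                         onesided=True, center=True)
    recon = _pytorch_istft(stft, 400, 160, window, n_fft=512,
                           onesided=True, center=True)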
ayust/kitnirc
kitnirc/client.py
Client.userinfo
python
def userinfo(self, username, realname=None): realname = realname or username _log.info("Requesting user info update: username=%s realname=%s", username, realname) self.send("USER", username, socket.getfqdn(), self.server.host, ":%s" % realname) self.user.username = username self.user.realname = realname
Set the username and realname for this connection. Note: this should only be called once, on connect. (The default on-connect routine calls this automatically.)
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/client.py#L348-L362
import logging import re import socket try: import ssl as _ssl _hush_pyflakes = [_ssl] del _hush_pyflakes except ImportError: _ssl = None from kitnirc.events import NUMERIC_EVENTS from kitnirc.user import User _log = logging.getLogger(__name__) class Channel(object): def __init__(self, name): self.name = name.lower() self.topic = None self.members = {} self.modes = {} def __str__(self): return self.name def __repr__(self): return "kitnirc.client.Channel(%r)" % self.name def add_user(self, user): if not isinstance(user, User): user = User(user) if user.nick in self.members: _log.warning("Ignoring request to add user '%s' to channel '%s' " "because that user is already in the member list.", user, self.name) return self.members[user.nick] = user _log.debug("Added '%s' to channel '%s'", user, self.name) def remove_user(self, user): if not isinstance(user, User): user = User(user) if user.nick not in self.members: _log.warning("Ignoring request to remove user '%s' from channel " "'%s' because that user is already not in the member " "list.", user, self.name) return del self.members[user.nick] _log.debug("Removed '%s' from channel '%s'", user, self.name) class Host(object): def __init__(self, host, port): self.host = host self.original_host = host self.port = port self.password = None self.motd = "" self._motd = [] self._whois = {} self.channels = {} self.features = dict() self.user_modes = set() self.channel_modes = set() self.version = None self.created = None def __str__(self): return self.host def __repr__(self): return "kitnirc.client.Host(%r, %r)" % (self.host, self.port) def add_channel(self, channel): if not isinstance(channel, Channel): channel = Channel(channel) if channel.name in self.channels: _log.warning("Ignoring request to add a channel that has already " "been added: '%s'", channel) return self.channels[channel.name] = channel _log.info("Entered channel %s.", channel) def remove_channel(self, channel): if isinstance(channel, Channel): channel = channel.name channel = channel.lower() if channel not in self.channels: _log.warning("Ignoring request to remove a channel that hasn't " "been added: '%s'", channel) return del self.channels[channel] _log.info("Left channel %s.", channel) def get_channel(self, channel): if isinstance(channel, Channel): channel = channel.name channel = channel.lower() if channel not in self.channels: _log.warning("Ignoring request to get a channel that hasn't " "been added: '%s'", channel) return None return self.channels[channel] def in_channel(self, channel): channel = str(channel).lower() return channel in self.channels class Client(object): def __init__(self, host=None, port=6667): if host: self.server = Host(host, port) else: self.server = None self.connected = False self.socket = None self._stop = False self._buffer = "" self.event_handlers = { "PASSWORD": [], "CONNECTED": [on_connect], "LINE": [on_line], "RAWLINE": [], "ACTIVITY": [], "WELCOME": [], "PRIVMSG": [], "NOTICE": [], "MOTD": [], "JOIN": [], "PART": [], "QUIT": [], "KICK": [], "MEMBERS": [], "MODE": [], "WHOIS": [], "TOPIC": [], "INVITE": [], } def add_handler(self, event, handler): if event not in self.event_handlers: _log.info("Adding event handler for new event %s.", event) self.event_handlers[event] = [handler] else: self.event_handlers[event].append(handler) def dispatch_event(self, event, *args): if event not in self.event_handlers: _log.error("Dispatch requested for unknown event '%s'", event) return False elif event != "LINE": _log.debug("Dispatching event %s %r", event, args) try: 
for handler in self.event_handlers[event]: if handler(self, *args): return True except Exception as e: _log.exception("Error while processing event '%s': %r", event, e) if event == "LINE": return self.dispatch_event("RAWLINE", *args) return False def connect(self, nick, username=None, realname=None, password=None, host=None, port=6667, ssl=None): if host: self.server = Host(host, port) if self.server is None: _log.error("Can't connect() without a host specified.") return self.user = User(nick) self.user.username = username or nick self.user.realname = realname or username or nick _log.info("Connecting to %s as %s ...", self.server.host, nick) self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if ssl and _ssl: ssl_kwargs = ssl if isinstance(ssl, dict) else {} self.socket = _ssl.wrap_socket(self.socket, **ssl_kwargs) elif ssl: _log.error("SSL requested but no SSL support available!") return self.socket.connect((self.server.host, self.server.port)) self.connected = True _log.info("Connected to %s.", self.server.host) suppress_password = self.dispatch_event("PASSWORD") if password and not suppress_password: _log.info("Sending server password.") self.socket.send("PASS %s\r\n" % password) self.server.password = password self.dispatch_event('CONNECTED') def disconnect(self, msg="Shutting down..."): if not self.connected: _log.warning("Disconnect requested from non-connected client (%s)", self.server.host) return _log.info("Disconnecting from %s ...", self.server.host) self._stop = True self.send("QUIT", ":" + msg) try: self.socket.close() except socket.error: pass def run(self): self._stop = False while not self._stop: try: self._buffer += self.socket.recv(4096) except socket.error: raise lines = self._buffer.split("\n") self._buffer = lines.pop() for line in lines: line = line.rstrip("\r") _log.debug("%s --> %s", self.server.host, line) self.dispatch_event("LINE", line) self.dispatch_event("ACTIVITY") def ping(self): self.send("PING " + self.server.host) def send(self, *args): msg = " ".join(a.nick if isinstance(a, User) else str(a) for a in args) if "\n" in msg: raise ValueError("Cannot send() a newline. Args: %s" % repr(args)) _log.debug("%s <-- %s", self.server.host, msg) self.socket.send(msg + "\r\n") def nick(self, nick): _log.info("Requesting nick change to '%s'", nick) self.send("NICK", nick)
MIT License
google-research/ott
ott/tools/soft_sort.py
ranks
python
def ranks(inputs: jnp.ndarray,
          axis: int = -1,
          num_targets: int = None,
          **kwargs) -> jnp.ndarray:
    return apply_on_axis(_ranks, inputs, axis, num_targets, **kwargs)
r"""Applies the soft trank operator on input tensor. Args: inputs: a jnp.ndarray<float> of any shape. axis: the axis on which to apply the soft ranks operator. num_targets: num_targets defines the number of targets used to compute a composite ranks for each value in ``inputs``: that soft rank will be a convex combination of values in [0,...,``(num_targets-2)/num_targets``,1] specified by the optimal transport between values in ``inputs`` towards those values. If not specified, ``num_targets`` is set by default to be the size of the slices of the input that are sorted. **kwargs: keyword arguments passed on to lower level functions. Of interest to the user are ``squashing_fun``, which will redistribute the values in ``inputs`` to lie in [0,1] (sigmoid of whitened values by default) to solve the optimal transport problem; ``cost_fn``, used in ``PointCloud``, that defines the ground cost function to transport from ``inputs`` to the ``num_targets`` target values (squared Euclidean distance by default, see ``pointcloud.py`` for more details); ``epsilon`` values as well as other parameters to shape the ``sinkhorn`` algorithm. Returns: A jnp.ndarray<float> of the same shape as inputs, with the ranks.
https://github.com/google-research/ott/blob/03afbebdb55ec37891f90e4d89376c4838cf4770/ott/tools/soft_sort.py#L168-L195
import functools from typing import Callable, Optional import jax import jax.numpy as jnp import numpy as np from ott.tools import transport def transport_for_sort( inputs: jnp.ndarray, weights: jnp.ndarray, target_weights: jnp.ndarray, squashing_fun: Callable[[jnp.ndarray], jnp.ndarray] = None, epsilon: float = 1e-2, **kwargs) -> jnp.ndarray: shape = inputs.shape if len(shape) > 2 or (len(shape) == 2 and shape[1] != 1): raise ValueError( 'Shape ({shape}) not supported. The input should be one-dimensional.') x = jnp.expand_dims(jnp.squeeze(inputs), axis=1) if squashing_fun is None: squashing_fun = lambda z: jax.nn.sigmoid((z - jnp.mean(z)) / (jnp.std(z) + 1e-10)) x = squashing_fun(x) a = jnp.squeeze(weights) b = jnp.squeeze(target_weights) num_targets = b.shape[0] y = jnp.linspace(0.0, 1.0, num_targets)[:, jnp.newaxis] return transport.Transport(x, y, a=a, b=b, epsilon=epsilon, **kwargs) def apply_on_axis(op, inputs, axis, *args, **kwargs): op_inner = functools.partial(op, **kwargs) axis = (axis,) if isinstance(axis, int) else axis num_points = np.prod(np.array(inputs.shape)[tuple([axis])]) permutation = np.arange(len(inputs.shape)) axis = tuple(permutation[a] for a in axis) permutation = tuple(sorted(set(permutation) - set(axis)) + sorted(axis)) inputs = jnp.transpose(inputs, permutation) batch_fn = jax.vmap(op_inner, in_axes=(0,) + (None,) * len(args)) result = batch_fn(jnp.reshape(inputs, (-1, num_points)), *args) shrink = len(axis) result = jnp.reshape(result, inputs.shape[:-shrink] + result.shape[-1:]) permutation = tuple(range(len(result.shape))) rank = len(result.shape) - 1 axis = min(axis) permutation = permutation[:axis] + (rank,) + permutation[axis:-1] result = jnp.transpose(result, permutation) return result def _sort(inputs: jnp.ndarray, topk, num_targets, **kwargs) -> jnp.ndarray: num_points = inputs.shape[0] a = jnp.ones((num_points,)) / num_points if 0 < topk < num_points: start_index = 1 b = jnp.concatenate([ jnp.array([(num_points - topk) / num_points]), jnp.ones(topk, dtype=inputs.dtype) / num_points ]) else: num_targets = num_points if num_targets is None else num_targets start_index = 0 b = jnp.ones((num_targets,)) / num_targets ot = transport_for_sort(inputs, a, b, **kwargs) out = 1.0 / b * ot.apply(inputs, axis=0) return out[start_index:] def sort(inputs: jnp.ndarray, axis: int = -1, topk: int = -1, num_targets: int = None, **kwargs) -> jnp.ndarray: return apply_on_axis(_sort, inputs, axis, topk, num_targets, **kwargs) def _ranks(inputs: jnp.ndarray, num_targets, **kwargs) -> jnp.ndarray: num_points = inputs.shape[0] num_targets = num_points if num_targets is None else num_targets a = jnp.ones((num_points,)) / num_points b = jnp.ones((num_targets,)) / num_targets ot = transport_for_sort(inputs, a, b, **kwargs) out = 1.0 / a * ot.apply(jnp.arange(num_targets), axis=1) return jnp.reshape(out, inputs.shape)
Apache License 2.0
electronick1/stairs
stairs/core/app/components_interface.py
ComponentsMixin.consumer
python
def consumer(self):
    def _handler_wrap(func) -> Consumer:
        return Consumer(app=self, handler=func)

    return _handler_wrap
A component which doesn't change pipeline data and behaves as a standalone
function. A consumer is useful when you need to save or load data somewhere.

Consumers are executed simultaneously with pipelines by the cli command:

    python manage.py pipelines:run

or by executing pipelines directly:

    project.run_pipelines(pipelines_to_run)

If you want to achieve true fault tolerance, use the consumer as a worker
(`as_worker=True`) inside a pipeline and perform only one db transaction
inside it.
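A hedged sketch of how this decorator might be wired into a stairs app; the import path, app name and field names below are illustrative assumptions, not taken from the repository.

from stairs import App  # assumed import path

app = App("example_app")

@app.consumer()
def save_result(doc_id, score):
    # Consumers receive pipeline data but do not modify it; a typical use is
    # persisting results, e.g. writing (doc_id, score) to a database here.
    print(doc_id, score)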
https://github.com/electronick1/stairs/blob/add7ee711c1d8ae3e529941aee14b4e38969a3b9/stairs/core/app/components_interface.py#L333-L356
from typing import Union from stairs.core.producer import Producer from stairs.core.producer.batch import BatchProducer from stairs.core.consumer import Consumer from stairs.core.consumer.standalone import StandAloneConsumer from stairs.core.consumer.iter import ConsumerIter from stairs.core.pipeline import Pipeline class ComponentsMixin: components = None def producer(self, *pipelines: Pipeline, single_transaction=False, repeat_on_signal=None, repeat_times=None): def _producer_handler_wrap(handler) -> Producer: producer = Producer(app=self, handler=handler, default_callbacks=list(pipelines or []), single_transaction=single_transaction, repeat_on_signal=repeat_on_signal, repeat_times=repeat_times) return producer return _producer_handler_wrap def batch_producer(self, producer: Producer, repeat_on_signal=None, repeat_times=None) -> BatchProducer: def _batch_producer_handler_wrap(handler): batch_producer = BatchProducer(app=self, handler=handler, simple_producer=producer, repeat_on_signal=repeat_on_signal, repeat_times=repeat_times) return batch_producer return _batch_producer_handler_wrap def producer_redirect(self, based_on: Producer, *pipelines: Pipeline): def _producer_redirect_handler_wrap(handler) -> Producer: redirect_handler = based_on.redirect_handler(handler) producer = Producer(app=self, handler=redirect_handler, default_callbacks=list(pipelines), single_transaction=based_on.single_transaction, repeat_on_signal=based_on.repeat_on_signal, repeat_times=based_on.repeat_times) return producer return _producer_redirect_handler_wrap def spark_producer(self, *pipelines: Pipeline): from stairs.core.producer.spark import SparkProducer def _spark_producer_handler_wrap(handler) -> SparkProducer: producer = SparkProducer(app=self, handler=handler, default_callbacks=list(pipelines)) return producer return _spark_producer_handler_wrap def pipeline(self, config=None, before=None, after=None): def _pipeline_handler_wrap(func) -> Pipeline: return Pipeline(self, func, config, before_callbacks=before, after_callbacks=after) return _pipeline_handler_wrap
Apache License 2.0
facebookresearch/fbpcs
fbpcs/private_computation/service/prepare_data_stage_service.py
PrepareDataStageService.run_async
python
async def run_async(
    self,
    pc_instance: PrivateComputationInstance,
    server_ips: Optional[List[str]] = None,
) -> PrivateComputationInstance:
    output_path = pc_instance.data_processing_output_path
    combine_output_path = output_path + "_combine"
    self._logger.info(f"[{self}] Starting id spine combiner service")
    await self._run_combiner_service(
        pc_instance, combine_output_path, self._log_cost_to_s3
    )
    self._logger.info("Finished running CombinerService, starting to reshard")
    await self._run_sharder_service(pc_instance, combine_output_path)
    if self._update_status_to_complete:
        pc_instance.status = pc_instance.current_stage.completed_status
    return pc_instance
Runs the private computation prepare data stage.

Args:
    pc_instance: the private computation instance to run prepare data with
    server_ips: ignored

Returns:
    An updated version of pc_instance
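A rough sketch of how a caller might drive this async stage; it assumes ``onedocker_svc``, ``binary_config_map`` and ``pc_instance`` objects are already built elsewhere and is illustrative only.

import asyncio

# All three arguments are assumed to be constructed beforehand.
stage = PrepareDataStageService(onedocker_svc, binary_config_map, log_cost_to_s3=True)
updated_instance = asyncio.run(stage.run_async(pc_instance))
print(updated_instance.status)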
https://github.com/facebookresearch/fbpcs/blob/2eec5e8f84cb6993ecd1fcaf88fb822b9c08ccc2/fbpcs/private_computation/service/prepare_data_stage_service.py#L74-L115
import asyncio import logging import math from typing import DefaultDict from typing import List, Optional from fbpcp.service.onedocker import OneDockerService from fbpcp.util.typing import checked_cast from fbpcs.data_processing.attribution_id_combiner.attribution_id_spine_combiner_cpp import ( CppAttributionIdSpineCombinerService, ) from fbpcs.data_processing.lift_id_combiner.lift_id_spine_combiner_cpp import ( CppLiftIdSpineCombinerService, ) from fbpcs.data_processing.sharding.sharding import ShardType from fbpcs.data_processing.sharding.sharding_cpp import CppShardingService from fbpcs.onedocker_binary_config import OneDockerBinaryConfig from fbpcs.onedocker_binary_names import OneDockerBinaryNames from fbpcs.pid.service.pid_service.pid_stage import PIDStage from fbpcs.private_computation.entity.private_computation_instance import ( PrivateComputationGameType, ) from fbpcs.private_computation.entity.private_computation_instance import ( PrivateComputationInstance, ) from fbpcs.private_computation.entity.private_computation_instance import ( PrivateComputationInstanceStatus, ) from fbpcs.private_computation.service.private_computation_service_data import ( PrivateComputationServiceData, ) from fbpcs.private_computation.service.private_computation_stage_service import ( PrivateComputationStageService, ) class PrepareDataStageService(PrivateComputationStageService): def __init__( self, onedocker_svc: OneDockerService, onedocker_binary_config_map: DefaultDict[str, OneDockerBinaryConfig], is_validating: bool = False, log_cost_to_s3: bool = False, update_status_to_complete: bool = False ) -> None: self._onedocker_svc = onedocker_svc self._onedocker_binary_config_map = onedocker_binary_config_map self._is_validating = is_validating self._log_cost_to_s3 = log_cost_to_s3 self._update_status_to_complete = update_status_to_complete self._logger: logging.Logger = logging.getLogger(__name__)
MIT License
whyliam/whyliam.workflows.youdao
workflow/workflow.py
Workflow.__init__
python
def __init__(self, default_settings=None, update_settings=None,
             input_encoding='utf-8', normalization='NFC',
             capture_args=True, libraries=None, help_url=None):
    self._default_settings = default_settings or {}
    self._update_settings = update_settings or {}
    self._input_encoding = input_encoding
    self._normalizsation = normalization
    self._capture_args = capture_args
    self.help_url = help_url
    self._workflowdir = None
    self._settings_path = None
    self._settings = None
    self._bundleid = None
    self._debugging = None
    self._name = None
    self._cache_serializer = 'cpickle'
    self._data_serializer = 'cpickle'
    self._info = None
    self._info_loaded = False
    self._logger = None
    self._items = []
    self._alfred_env = None
    self._version = UNSET
    self._last_version_run = UNSET
    self._search_pattern_cache = {}
    self.magic_prefix = 'workflow:'
    self.magic_arguments = {}
    self._register_default_magic()
    if libraries:
        sys.path = libraries + sys.path
Create new :class:`Workflow` object.
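A small construction sketch, assuming the module is importable as ``workflow`` (the usual Alfred-Workflow layout); the settings keys and library path are placeholders.

from workflow import Workflow  # assumed package layout

wf = Workflow(default_settings={'query_count': 0},  # written on first run
              input_encoding='utf-8',
              libraries=['./lib'])                  # prepended to sys.path
print(wf.magic_prefix)  # 'workflow:'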
https://github.com/whyliam/whyliam.workflows.youdao/blob/2dfa7f1de56419dab1c2e70c1a27e5e13ba25a5c/workflow/workflow.py#L939-L988
from __future__ import print_function, unicode_literals import binascii import cPickle from copy import deepcopy import json import logging import logging.handlers import os import pickle import plistlib import re import shutil import string import subprocess import sys import time import unicodedata try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET from util import AcquisitionError from util import ( atomic_writer, LockFile, uninterruptible, ) UNSET = object() ICON_ROOT = '/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources' ICON_ACCOUNT = os.path.join(ICON_ROOT, 'Accounts.icns') ICON_BURN = os.path.join(ICON_ROOT, 'BurningIcon.icns') ICON_CLOCK = os.path.join(ICON_ROOT, 'Clock.icns') ICON_COLOR = os.path.join(ICON_ROOT, 'ProfileBackgroundColor.icns') ICON_COLOUR = ICON_COLOR ICON_EJECT = os.path.join(ICON_ROOT, 'EjectMediaIcon.icns') ICON_ERROR = os.path.join(ICON_ROOT, 'AlertStopIcon.icns') ICON_FAVORITE = os.path.join(ICON_ROOT, 'ToolbarFavoritesIcon.icns') ICON_FAVOURITE = ICON_FAVORITE ICON_GROUP = os.path.join(ICON_ROOT, 'GroupIcon.icns') ICON_HELP = os.path.join(ICON_ROOT, 'HelpIcon.icns') ICON_HOME = os.path.join(ICON_ROOT, 'HomeFolderIcon.icns') ICON_INFO = os.path.join(ICON_ROOT, 'ToolbarInfo.icns') ICON_NETWORK = os.path.join(ICON_ROOT, 'GenericNetworkIcon.icns') ICON_NOTE = os.path.join(ICON_ROOT, 'AlertNoteIcon.icns') ICON_SETTINGS = os.path.join(ICON_ROOT, 'ToolbarAdvanced.icns') ICON_SWIRL = os.path.join(ICON_ROOT, 'ErasingIcon.icns') ICON_SWITCH = os.path.join(ICON_ROOT, 'General.icns') ICON_SYNC = os.path.join(ICON_ROOT, 'Sync.icns') ICON_TRASH = os.path.join(ICON_ROOT, 'TrashIcon.icns') ICON_USER = os.path.join(ICON_ROOT, 'UserIcon.icns') ICON_WARNING = os.path.join(ICON_ROOT, 'AlertCautionIcon.icns') ICON_WEB = os.path.join(ICON_ROOT, 'BookmarkIcon.icns') ASCII_REPLACEMENTS = { 'À': 'A', 'Á': 'A', 'Â': 'A', 'Ã': 'A', 'Ä': 'A', 'Å': 'A', 'Æ': 'AE', 'Ç': 'C', 'È': 'E', 'É': 'E', 'Ê': 'E', 'Ë': 'E', 'Ì': 'I', 'Í': 'I', 'Î': 'I', 'Ï': 'I', 'Ð': 'D', 'Ñ': 'N', 'Ò': 'O', 'Ó': 'O', 'Ô': 'O', 'Õ': 'O', 'Ö': 'O', 'Ø': 'O', 'Ù': 'U', 'Ú': 'U', 'Û': 'U', 'Ü': 'U', 'Ý': 'Y', 'Þ': 'Th', 'ß': 'ss', 'à': 'a', 'á': 'a', 'â': 'a', 'ã': 'a', 'ä': 'a', 'å': 'a', 'æ': 'ae', 'ç': 'c', 'è': 'e', 'é': 'e', 'ê': 'e', 'ë': 'e', 'ì': 'i', 'í': 'i', 'î': 'i', 'ï': 'i', 'ð': 'd', 'ñ': 'n', 'ò': 'o', 'ó': 'o', 'ô': 'o', 'õ': 'o', 'ö': 'o', 'ø': 'o', 'ù': 'u', 'ú': 'u', 'û': 'u', 'ü': 'u', 'ý': 'y', 'þ': 'th', 'ÿ': 'y', 'Ł': 'L', 'ł': 'l', 'Ń': 'N', 'ń': 'n', 'Ņ': 'N', 'ņ': 'n', 'Ň': 'N', 'ň': 'n', 'Ŋ': 'ng', 'ŋ': 'NG', 'Ō': 'O', 'ō': 'o', 'Ŏ': 'O', 'ŏ': 'o', 'Ő': 'O', 'ő': 'o', 'Œ': 'OE', 'œ': 'oe', 'Ŕ': 'R', 'ŕ': 'r', 'Ŗ': 'R', 'ŗ': 'r', 'Ř': 'R', 'ř': 'r', 'Ś': 'S', 'ś': 's', 'Ŝ': 'S', 'ŝ': 's', 'Ş': 'S', 'ş': 's', 'Š': 'S', 'š': 's', 'Ţ': 'T', 'ţ': 't', 'Ť': 'T', 'ť': 't', 'Ŧ': 'T', 'ŧ': 't', 'Ũ': 'U', 'ũ': 'u', 'Ū': 'U', 'ū': 'u', 'Ŭ': 'U', 'ŭ': 'u', 'Ů': 'U', 'ů': 'u', 'Ű': 'U', 'ű': 'u', 'Ŵ': 'W', 'ŵ': 'w', 'Ŷ': 'Y', 'ŷ': 'y', 'Ÿ': 'Y', 'Ź': 'Z', 'ź': 'z', 'Ż': 'Z', 'ż': 'z', 'Ž': 'Z', 'ž': 'z', 'ſ': 's', 'Α': 'A', 'Β': 'B', 'Γ': 'G', 'Δ': 'D', 'Ε': 'E', 'Ζ': 'Z', 'Η': 'E', 'Θ': 'Th', 'Ι': 'I', 'Κ': 'K', 'Λ': 'L', 'Μ': 'M', 'Ν': 'N', 'Ξ': 'Ks', 'Ο': 'O', 'Π': 'P', 'Ρ': 'R', 'Σ': 'S', 'Τ': 'T', 'Υ': 'U', 'Φ': 'Ph', 'Χ': 'Kh', 'Ψ': 'Ps', 'Ω': 'O', 'α': 'a', 'β': 'b', 'γ': 'g', 'δ': 'd', 'ε': 'e', 'ζ': 'z', 'η': 'e', 'θ': 'th', 'ι': 'i', 'κ': 'k', 'λ': 'l', 'μ': 'm', 'ν': 'n', 'ξ': 'x', 'ο': 'o', 'π': 'p', 'ρ': 'r', 'ς': 's', 'σ': 's', 
'τ': 't', 'υ': 'u', 'φ': 'ph', 'χ': 'kh', 'ψ': 'ps', 'ω': 'o', 'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ж': 'Zh', 'З': 'Z', 'И': 'I', 'Й': 'I', 'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N', 'О': 'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'Kh', 'Ц': 'Ts', 'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Shch', 'Ъ': "'", 'Ы': 'Y', 'Ь': "'", 'Э': 'E', 'Ю': 'Iu', 'Я': 'Ia', 'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e', 'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'i', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n', 'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'kh', 'ц': 'ts', 'ч': 'ch', 'ш': 'sh', 'щ': 'shch', 'ъ': "'", 'ы': 'y', 'ь': "'", 'э': 'e', 'ю': 'iu', 'я': 'ia', 'ᴦ': 'G', 'ᴧ': 'L', 'ᴨ': 'P', 'ᴩ': 'R', 'ᴪ': 'PS', 'ẞ': 'Ss', 'Ỳ': 'Y', 'ỳ': 'y', 'Ỵ': 'Y', 'ỵ': 'y', 'Ỹ': 'Y', 'ỹ': 'y', } DUMB_PUNCTUATION = { '‘': "'", '’': "'", '‚': "'", '“': '"', '”': '"', '„': '"', '–': '-', '—': '-' } INITIALS = string.ascii_uppercase + string.digits split_on_delimiters = re.compile('[^a-zA-Z0-9]').split MATCH_STARTSWITH = 1 MATCH_CAPITALS = 2 MATCH_ATOM = 4 MATCH_INITIALS_STARTSWITH = 8 MATCH_INITIALS_CONTAIN = 16 MATCH_INITIALS = 24 MATCH_SUBSTRING = 32 MATCH_ALLCHARS = 64 MATCH_ALL = 127 DEFAULT_UPDATE_FREQUENCY = 1 class KeychainError(Exception): class PasswordNotFound(KeychainError): class PasswordExists(KeychainError): def isascii(text): try: text.encode('ascii') except UnicodeEncodeError: return False return True class SerializerManager(object): def __init__(self): self._serializers = {} def register(self, name, serializer): getattr(serializer, 'load') getattr(serializer, 'dump') self._serializers[name] = serializer def serializer(self, name): return self._serializers.get(name) def unregister(self, name): if name not in self._serializers: raise ValueError('No such serializer registered : {0}'.format( name)) serializer = self._serializers[name] del self._serializers[name] return serializer @property def serializers(self): return sorted(self._serializers.keys()) class JSONSerializer(object): @classmethod def load(cls, file_obj): return json.load(file_obj) @classmethod def dump(cls, obj, file_obj): return json.dump(obj, file_obj, indent=2, encoding='utf-8') class CPickleSerializer(object): @classmethod def load(cls, file_obj): return cPickle.load(file_obj) @classmethod def dump(cls, obj, file_obj): return cPickle.dump(obj, file_obj, protocol=-1) class PickleSerializer(object): @classmethod def load(cls, file_obj): return pickle.load(file_obj) @classmethod def dump(cls, obj, file_obj): return pickle.dump(obj, file_obj, protocol=-1) manager = SerializerManager() manager.register('cpickle', CPickleSerializer) manager.register('pickle', PickleSerializer) manager.register('json', JSONSerializer) class Item(object): def __init__(self, title, subtitle='', modifier_subtitles=None, arg=None, autocomplete=None, valid=False, uid=None, icon=None, icontype=None, type=None, largetext=None, copytext=None, quicklookurl=None): self.title = title self.subtitle = subtitle self.modifier_subtitles = modifier_subtitles or {} self.arg = arg self.autocomplete = autocomplete self.valid = valid self.uid = uid self.icon = icon self.icontype = icontype self.type = type self.largetext = largetext self.copytext = copytext self.quicklookurl = quicklookurl @property def elem(self): attr = {} if self.valid: attr['valid'] = 'yes' else: attr['valid'] = 'no' if self.autocomplete is not None: attr['autocomplete'] = self.autocomplete for name in ('uid', 'type'): value = getattr(self, name, 
None) if value: attr[name] = value root = ET.Element('item', attr) ET.SubElement(root, 'title').text = self.title ET.SubElement(root, 'subtitle').text = self.subtitle for mod in ('cmd', 'ctrl', 'alt', 'shift', 'fn'): if mod in self.modifier_subtitles: ET.SubElement(root, 'subtitle', {'mod': mod}).text = self.modifier_subtitles[mod] if self.arg: ET.SubElement(root, 'arg').text = self.arg if self.icon: if self.icontype: attr = dict(type=self.icontype) else: attr = {} ET.SubElement(root, 'icon', attr).text = self.icon if self.largetext: ET.SubElement(root, 'text', {'type': 'largetype'}).text = self.largetext if self.copytext: ET.SubElement(root, 'text', {'type': 'copy'}).text = self.copytext if self.quicklookurl: ET.SubElement(root, 'quicklookurl').text = self.quicklookurl return root class Settings(dict): def __init__(self, filepath, defaults=None): super(Settings, self).__init__() self._filepath = filepath self._nosave = False self._original = {} if os.path.exists(self._filepath): self._load() elif defaults: for key, val in defaults.items(): self[key] = val self.save() def _load(self): data = {} with LockFile(self._filepath, 0.5): with open(self._filepath, 'rb') as fp: data.update(json.load(fp)) self._original = deepcopy(data) self._nosave = True self.update(data) self._nosave = False @uninterruptible def save(self): if self._nosave: return data = {} data.update(self) with LockFile(self._filepath, 0.5): with atomic_writer(self._filepath, 'wb') as fp: json.dump(data, fp, sort_keys=True, indent=2, encoding='utf-8') def __setitem__(self, key, value): if self._original.get(key) != value: super(Settings, self).__setitem__(key, value) self.save() def __delitem__(self, key): super(Settings, self).__delitem__(key) self.save() def update(self, *args, **kwargs): super(Settings, self).update(*args, **kwargs) self.save() def setdefault(self, key, value=None): ret = super(Settings, self).setdefault(key, value) self.save() return ret class Workflow(object): item_class = Item
MIT License
wagtail/wagtail-live
src/wagtail_live/publishers/redis/bus.py
RedisBus.run
python
async def run(self):
    self._running = asyncio.Event()
    await self._running.wait()
    await self.pubsub.run()
Retrieves messages from Redis and dispatches them as long as the server is running.
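A sketch of starting the bus inside an asyncio program; the ``broadcast`` callable's signature and the Redis URL are placeholders, and the surrounding publisher wiring is omitted.

import asyncio

async def broadcast(message):
    # Placeholder: forward the Redis message to connected clients.
    print(message)

async def main():
    bus = RedisBus("redis://localhost:6379/0", broadcast)
    await bus.run()  # blocks for as long as the server is running

asyncio.run(main())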
https://github.com/wagtail/wagtail-live/blob/3395f473c3c34f8932d8b0ea6de56745d446e3bd/src/wagtail_live/publishers/redis/bus.py#L42-L50
import asyncio from collections import defaultdict import aioredis from ..utils import get_redis_url class RedisBus: def __init__(self, url, broadcast): redis = aioredis.from_url(get_redis_url(), decode_responses=True) self.pubsub = redis.pubsub(ignore_subscribe_messages=True) self.broadcast = broadcast self.channel_groups = defaultdict(set) self._running = None
BSD 3-Clause New or Revised License
mic-dkfz/nnunet
nnunet/training/network_training/nnUNetTrainer.py
nnUNetTrainer.predict_preprocessed_data_return_seg_and_softmax
python
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                     mirror_axes: Tuple[int] = None,
                                                     use_sliding_window: bool = True, step_size: float = 0.5,
                                                     use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                     pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                     verbose: bool = True,
                                                     mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    if pad_border_mode == 'constant' and pad_kwargs is None:
        pad_kwargs = {'constant_values': 0}
    if do_mirroring and mirror_axes is None:
        mirror_axes = self.data_aug_params['mirror_axes']
    if do_mirroring:
        assert self.data_aug_params["do_mirror"], "Cannot do mirroring as test time augmentation when training was done without mirroring"
    valid = list((SegmentationNetwork, nn.DataParallel))
    assert isinstance(self.network, tuple(valid))
    current_mode = self.network.training
    self.network.eval()
    ret = self.network.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes,
                                  use_sliding_window=use_sliding_window, step_size=step_size,
                                  patch_size=self.patch_size, regions_class_order=self.regions_class_order,
                                  use_gaussian=use_gaussian, pad_border_mode=pad_border_mode,
                                  pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose,
                                  mixed_precision=mixed_precision)
    self.network.train(current_mode)
    return ret
:param data:
:param do_mirroring:
:param mirror_axes:
:param use_sliding_window:
:param step_size:
:param use_gaussian:
:param pad_border_mode:
:param pad_kwargs:
:param all_in_gpu:
:param verbose:
:return:
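A hedged calling sketch: ``trainer`` is assumed to be an initialised nnUNetTrainer with loaded weights, and ``data`` an already-preprocessed array of shape (channels, x, y, z).

seg, softmax = trainer.predict_preprocessed_data_return_seg_and_softmax(
    data,
    do_mirroring=True,          # only allowed if training used mirroring
    use_sliding_window=True,
    step_size=0.5,
    use_gaussian=True,
    mixed_precision=True,
)
# The two returned arrays match the Tuple[np.ndarray, np.ndarray] annotation:
# the segmentation map and the per-class softmax output.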
https://github.com/mic-dkfz/nnunet/blob/96d44c2fc1ce5e18da4cd54bf52882047c37982e/nnunet/training/network_training/nnUNetTrainer.py#L483-L524
import shutil from collections import OrderedDict from multiprocessing import Pool from time import sleep from typing import Tuple, List import matplotlib import numpy as np import torch from batchgenerators.utilities.file_and_folder_operations import * from torch import nn from torch.optim import lr_scheduler import nnunet from nnunet.configuration import default_num_threads from nnunet.evaluation.evaluator import aggregate_scores from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.postprocessing.connected_components import determine_postprocessing from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, default_2D_augmentation_params, get_default_augmentation, get_patch_size from nnunet.training.dataloading.dataset_loading import load_dataset, DataLoader3D, DataLoader2D, unpack_dataset from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss from nnunet.training.network_training.network_trainer import NetworkTrainer from nnunet.utilities.nd_softmax import softmax_helper from nnunet.utilities.tensor_utilities import sum_tensor matplotlib.use("agg") class nnUNetTrainer(NetworkTrainer): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super(nnUNetTrainer, self).__init__(deterministic, fp16) self.unpack_data = unpack_data self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.stage = stage self.experiment_name = self.__class__.__name__ self.plans_file = plans_file self.output_folder = output_folder self.dataset_directory = dataset_directory self.output_folder_base = self.output_folder self.fold = fold self.plans = None if self.dataset_directory is not None and isdir(self.dataset_directory): self.gt_niftis_folder = join(self.dataset_directory, "gt_segmentations") else: self.gt_niftis_folder = None self.folder_with_preprocessed_data = None self.dl_tr = self.dl_val = None self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = self.net_num_pool_op_kernel_sizes = self.net_conv_kernel_sizes = None self.basic_generator_patch_size = self.data_aug_params = self.transpose_forward = self.transpose_backward = None self.batch_dice = batch_dice self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) self.online_eval_foreground_dc = [] self.online_eval_tp = [] self.online_eval_fp = [] self.online_eval_fn = [] self.classes = self.do_dummy_2D_aug = self.use_mask_for_norm = self.only_keep_largest_connected_component = self.min_region_size_per_class = self.min_size_per_class = None self.inference_pad_border_mode = "constant" self.inference_pad_kwargs = {'constant_values': 0} self.update_fold(fold) self.pad_all_sides = None self.lr_scheduler_eps = 1e-3 self.lr_scheduler_patience = 30 self.initial_lr = 3e-4 self.weight_decay = 3e-5 self.oversample_foreground_percent = 0.33 self.conv_per_stage = None self.regions_class_order = None def update_fold(self, fold): if fold is not None: if isinstance(fold, str): assert fold == "all", "if 
self.fold is a string then it must be \'all\'" if self.output_folder.endswith("%s" % str(self.fold)): self.output_folder = self.output_folder_base self.output_folder = join(self.output_folder, "%s" % str(fold)) else: if self.output_folder.endswith("fold_%s" % str(self.fold)): self.output_folder = self.output_folder_base self.output_folder = join(self.output_folder, "fold_%s" % str(fold)) self.fold = fold def setup_DA_params(self): if self.threeD: self.data_aug_params = default_3D_augmentation_params if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size def initialize(self, training=True, force_load_plans=False): maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() if training: self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: self.print_to_log_file("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) self.print_to_log_file("done") else: self.print_to_log_file( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() self.was_initialized = True def initialize_network(self): net_numpool = len(self.net_num_pool_op_kernel_sizes) if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool, self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) self.network.inference_apply_nonlin = softmax_helper if torch.cuda.is_available(): self.network.cuda() def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, amsgrad=True) self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2, patience=self.lr_scheduler_patience, verbose=True, threshold=self.lr_scheduler_eps, threshold_mode="abs") def plot_network_architecture(self): try: from batchgenerators.utilities.file_and_folder_operations import join import hiddenlayer as hl if torch.cuda.is_available(): g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)).cuda(), transforms=None) else: g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)), transforms=None) g.save(join(self.output_folder, "network_architecture.pdf")) del g except Exception as e: self.print_to_log_file("Unable to plot network architecture:") self.print_to_log_file(e) self.print_to_log_file("\nprinting the network instead:\n") self.print_to_log_file(self.network) self.print_to_log_file("\n") finally: if torch.cuda.is_available(): torch.cuda.empty_cache() def save_debug_information(self): dct = OrderedDict() for k in self.__dir__(): if not k.startswith("__"): if not callable(getattr(self, k)): dct[k] = str(getattr(self, k)) del dct['plans'] del dct['intensity_properties'] del dct['dataset'] del dct['dataset_tr'] del dct['dataset_val'] save_json(dct, join(self.output_folder, "debug.json")) import shutil shutil.copy(self.plans_file, join(self.output_folder_base, "plans.pkl")) def run_training(self): self.save_debug_information() super(nnUNetTrainer, self).run_training() def load_plans_file(self): self.plans = load_pickle(self.plans_file) def process_plans(self, plans): if self.stage is None: assert len(list(plans['plans_per_stage'].keys())) == 1, "If self.stage is None then there can be only one stage in the plans file. That seems to not be the " "case. 
Please specify which stage of the cascade must be trained" self.stage = list(plans['plans_per_stage'].keys())[0] self.plans = plans stage_plans = self.plans['plans_per_stage'][self.stage] self.batch_size = stage_plans['batch_size'] self.net_pool_per_axis = stage_plans['num_pool_per_axis'] self.patch_size = np.array(stage_plans['patch_size']).astype(int) self.do_dummy_2D_aug = stage_plans['do_dummy_2D_data_aug'] if 'pool_op_kernel_sizes' not in stage_plans.keys(): assert 'num_pool_per_axis' in stage_plans.keys() self.print_to_log_file("WARNING! old plans file with missing pool_op_kernel_sizes. Attempting to fix it...") self.net_num_pool_op_kernel_sizes = [] for i in range(max(self.net_pool_per_axis)): curr = [] for j in self.net_pool_per_axis: if (max(self.net_pool_per_axis) - j) <= i: curr.append(2) else: curr.append(1) self.net_num_pool_op_kernel_sizes.append(curr) else: self.net_num_pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes'] if 'conv_kernel_sizes' not in stage_plans.keys(): self.print_to_log_file("WARNING! old plans file with missing conv_kernel_sizes. Attempting to fix it...") self.net_conv_kernel_sizes = [[3] * len(self.net_pool_per_axis)] * (max(self.net_pool_per_axis) + 1) else: self.net_conv_kernel_sizes = stage_plans['conv_kernel_sizes'] self.pad_all_sides = None self.intensity_properties = plans['dataset_properties']['intensityproperties'] self.normalization_schemes = plans['normalization_schemes'] self.base_num_features = plans['base_num_features'] self.num_input_channels = plans['num_modalities'] self.num_classes = plans['num_classes'] + 1 self.classes = plans['all_classes'] self.use_mask_for_norm = plans['use_mask_for_norm'] self.only_keep_largest_connected_component = plans['keep_only_largest_region'] self.min_region_size_per_class = plans['min_region_size_per_class'] self.min_size_per_class = None if plans.get('transpose_forward') is None or plans.get('transpose_backward') is None: print("WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. " "You should rerun preprocessing. We will proceed and assume that both transpose_foward " "and transpose_backward are [0, 1, 2]. 
If that is not correct then weird things will happen!") plans['transpose_forward'] = [0, 1, 2] plans['transpose_backward'] = [0, 1, 2] self.transpose_forward = plans['transpose_forward'] self.transpose_backward = plans['transpose_backward'] if len(self.patch_size) == 2: self.threeD = False elif len(self.patch_size) == 3: self.threeD = True else: raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size)) if "conv_per_stage" in plans.keys(): self.conv_per_stage = plans['conv_per_stage'] else: self.conv_per_stage = 2 def load_dataset(self): self.dataset = load_dataset(self.folder_with_preprocessed_data) def get_basic_generators(self): self.load_dataset() self.do_split() if self.threeD: dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') else: dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') return dl_tr, dl_val def preprocess_patient(self, input_files): from nnunet.training.model_restore import recursive_find_python_class preprocessor_name = self.plans.get('preprocessor_name') if preprocessor_name is None: if self.threeD: preprocessor_name = "GenericPreprocessor" else: preprocessor_name = "PreprocessorFor2D" print("using preprocessor", preprocessor_name) preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")], preprocessor_name, current_module="nnunet.preprocessing") assert preprocessor_class is not None, "Could not find preprocessor %s in nnunet.preprocessing" % preprocessor_name preprocessor = preprocessor_class(self.normalization_schemes, self.use_mask_for_norm, self.transpose_forward, self.intensity_properties) d, s, properties = preprocessor.preprocess_test_case(input_files, self.plans['plans_per_stage'][self.stage][ 'current_spacing']) return d, s, properties def preprocess_predict_nifti(self, input_files: List[str], output_file: str = None, softmax_ouput_file: str = None, mixed_precision: bool = True) -> None: print("preprocessing...") d, s, properties = self.preprocess_patient(input_files) print("predicting...") pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params["do_mirror"], mirror_axes=self.data_aug_params['mirror_axes'], use_sliding_window=True, step_size=0.5, use_gaussian=True, pad_border_mode='constant', pad_kwargs={'constant_values': 0}, verbose=True, all_in_gpu=False, mixed_precision=mixed_precision)[1] pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward]) if 'segmentation_export_params' in self.plans.keys(): force_separate_z = self.plans['segmentation_export_params']['force_separate_z'] interpolation_order = self.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = 
self.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 print("resampling to original spacing and nifti export...") save_segmentation_nifti_from_softmax(pred, output_file, properties, interpolation_order, self.regions_class_order, None, None, softmax_ouput_file, None, force_separate_z=force_separate_z, interpolation_order_z=interpolation_order_z) print("done")
Apache License 2.0
nipy/nipy
nipy/labs/spatial_models/discrete_domain.py
DiscreteDomain.representative_feature
python
def representative_feature(self, fid, method):
    f = self.get_feature(fid)
    if method == "mean":
        return np.mean(f, 0)
    if method == "min":
        return np.min(f, 0)
    if method == "max":
        return np.max(f, 0)
    if method == "median":
        return np.median(f, 0)
Compute a statistical representative of the within-domain feature.

Parameters
----------
fid: string,
    feature id
method: string,
    method used to compute the representative,
    to be chosen among 'mean', 'max', 'median', 'min'
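An illustrative sketch of summarising a feature over a domain built from a binary mask, using the helpers shown in the context; the array sizes are arbitrary.

import numpy as np

mask = np.ones((4, 4, 4))                 # toy 3D mask
domain = domain_from_binary_array(mask)   # 64 elements
signal = np.random.rand(64, 3)            # one 3-vector per element
domain.set_feature('signal', signal)
mean_signal = domain.representative_feature('signal', 'mean')  # shape (3,)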
https://github.com/nipy/nipy/blob/d16d268938dcd5c15748ca051532c21f57cf8a22/nipy/labs/spatial_models/discrete_domain.py#L567-L585
from __future__ import absolute_import import numpy as np import scipy.sparse as sp from nibabel import load, Nifti1Image, save from nipy.io.nibcompat import get_header, get_affine from nipy.algorithms.graph import (WeightedGraph, wgraph_from_coo_matrix, wgraph_from_3d_grid) from nipy.externals.six import string_types def smatrix_from_3d_array(mask, nn=18): ijk = np.array(np.where(mask)).T return smatrix_from_3d_idx(ijk, nn) def smatrix_from_3d_idx(ijk, nn=18): G = wgraph_from_3d_grid(ijk, nn) return G.to_coo_matrix() def smatrix_from_nd_array(mask, nn=0): idx = np.array(np.where(mask)).T return smatrix_from_nd_idx(idx, nn) def smatrix_from_nd_idx(idx, nn=0): n = idx.shape[0] dim = idx.shape[1] nidx = idx - idx.min(0) eA = [] eB = [] for d in range(dim): mi = nidx.max(0) + 2 a = np.hstack((1, np.cumprod(mi[:dim - 1]))) v1 = np.dot(nidx, a) assert(np.size(v1) == np.size(np.unique(v1))) o1 = np.argsort(v1) sv1 = v1[o1] nz = np.squeeze(np.nonzero(sv1[:n - 1] - sv1[1:] == - 1)) nz = np.reshape(nz, np.size(nz)) eA.append(o1[nz]) eB.append(o1[nz + 1]) nidx = np.roll(nidx, 1, 1) eA = np.concatenate(eA) eB = np.concatenate(eB) E = 2 * np.size(eA) if E == 0: return sp.coo_matrix((n, n)) edges = np.vstack((np.hstack((eA, eB)), np.hstack((eB, eA)))).T weights = np.ones(E) G = WeightedGraph(n, edges, weights) return G.to_coo_matrix() def array_affine_coord(mask, affine): idx = np.array(np.where(mask)).T return idx_affine_coord(idx, affine) def idx_affine_coord(idx, affine): size = idx.shape[0] hidx = np.hstack((idx, np.ones((size, 1)))) coord = np.dot(hidx, affine.T)[:, 0:-1] return coord def reduce_coo_matrix(mat, mask): G = wgraph_from_coo_matrix(mat) K = G.subgraph(mask) if K is None: return None return K.to_coo_matrix() def domain_from_binary_array(mask, affine=None, nn=0): dim = len(mask.shape) if affine is None: affine = np.eye(dim + 1) mask = mask > 0 vol = np.absolute(np.linalg.det(affine)) * np.ones(np.sum(mask)) coord = array_affine_coord(mask, affine) topology = smatrix_from_nd_array(mask) return StructuredDomain(dim, coord, vol, topology) def domain_from_image(mim, nn=18): if isinstance(mim, string_types): iim = load(mim) else: iim = mim return domain_from_binary_array(iim.get_data(), get_affine(iim), nn) def grid_domain_from_binary_array(mask, affine=None, nn=0): dim = len(mask.shape) shape = mask.shape if affine is None: affine = np.eye(dim + 1) mask = mask > 0 ijk = np.array(np.where(mask)).T vol = np.absolute(np.linalg.det(affine[:3, 0:3])) * np.ones(np.sum(mask)) topology = smatrix_from_nd_idx(ijk, nn) return NDGridDomain(dim, ijk, shape, affine, vol, topology) def grid_domain_from_image(mim, nn=18): if isinstance(mim, string_types): iim = load(mim) else: iim = mim return grid_domain_from_binary_array(iim.get_data(), get_affine(iim), nn) def grid_domain_from_shape(shape, affine=None): dim = len(shape) if affine is None: affine = np.eye(dim + 1) rect = np.ones(shape) ijk = np.array(np.where(rect)).T vol = (np.absolute(np.linalg.det(affine[:3, 0:3])) * np.ones(int(np.sum(rect)))) topology = smatrix_from_nd_idx(ijk, 0) return NDGridDomain(dim, ijk, shape, affine, vol, topology) class MeshDomain(object): def __init__(self, coord, triangles): self.coord = coord self.triangles = triangles self.V = len(coord) def area(self): E = len(self.triangles) narea = np.zeros(self.V) def _area(a, b): c = np.array([a[1] * b[2] - a[2] * b[1], - a[0] * b[2] + a[2] * b[0], a[0] * b[1] - a[1] * b[0]]) return np.sqrt((c ** 2).sum()) for e in range(E): i, j, k = self.triangles[e] a = self.coord[i] - 
self.coord[k] b = self.coord[j] - self.coord[k] ar = _area(a, b) narea[i] += ar narea[j] += ar narea[k] += ar narea /= 6 return narea def topology(self): E = len(self.triangles) edges = np.zeros((3 * E, 2)) weights = np.ones(3 * E) for i in range(E): sa, sb, sc = self.triangles[i] edges[3 * i] = np.array([sa, sb]) edges[3 * i + 1] = np.array([sa, sc]) edges[3 * i + 2] = np.array([sb, sc]) G = WeightedGraph(self.V, edges, weights) G = G.symmeterize() G = G.cut_redundancies() G.set_euclidian(self.coord) return G.to_coo_matrix() def domain_from_mesh(mesh): if isinstance(mesh, string_types): from nibabel.gifti import read mesh_ = read(mesh) else: mesh_ = mesh if len(mesh_.darrays) == 2: cor, tri = mesh_.darrays elif len(mesh_.darrays) == 3: cor, nor, tri = mesh_.darrays else: raise Exception("%d arrays in gifti file (case not handled)" % len(mesh_.darrays)) mesh_dom = MeshDomain(cor.data, tri.data) vol = mesh_dom.area() topology = mesh_dom.topology() dim = 2 return StructuredDomain(dim, mesh_dom.coord, vol, topology) class DiscreteDomain(object): def __init__(self, dim, coord, local_volume, id='', referential=''): self.dim = dim self.size = coord.shape[0] if np.size(coord) == coord.shape[0]: coord = np.reshape(coord, (np.size(coord), 1)) if np.size(coord) == 0: self.em_dim = dim else: self.em_dim = coord.shape[1] if self.em_dim < dim: raise ValueError('Embedding dimension cannot be smaller than dim') self.coord = coord if np.size(local_volume) != self.size: raise ValueError("Inconsistent Volume size") if (local_volume < 0).any(): raise ValueError('Volume should be positive') self.local_volume = np.ravel(local_volume) self.id = id self.referential = referential self.features = {} def copy(self): new_dom = DiscreteDomain(self.dim, self.coord.copy(), self.local_volume.copy(), self.id, self.referential) for fid in list(self.features.keys()): new_dom.set_feature(fid, self.get_feature(fid).copy()) return new_dom def get_coord(self): return self.coord def get_volume(self): return self.local_volume def connected_components(self): if self.topology is not None: return wgraph_from_coo_matrix(self.topology).cc() else: return [] def mask(self, bmask, id=''): if bmask.size != self.size: raise ValueError('Invalid mask size') svol = self.local_volume[bmask] scoord = self.coord[bmask] DD = DiscreteDomain(self.dim, scoord, svol, id, self.referential) for fid in list(self.features.keys()): f = self.features.pop(fid) DD.set_feature(fid, f[bmask]) return DD def set_feature(self, fid, data, override=True): if data.shape[0] != self.size: raise ValueError('Wrong data size') if (fid in self.features) & (override == False): return self.features.update({fid: data}) def get_feature(self, fid): return self.features[fid]
BSD 3-Clause New or Revised License
alexa/alexa-apis-for-python
ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/catalog/catalog_entity.py
CatalogEntity.__eq__
python
def __eq__(self, other):
    if not isinstance(other, CatalogEntity):
        return False
    return self.__dict__ == other.__dict__
Returns true if both objects are equal
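A tiny worked example of the equality semantics (attribute-wise comparison via ``__dict__``); the field values are arbitrary.

a = CatalogEntity(name='colors', description='Names of colors')
b = CatalogEntity(name='colors', description='Names of colors')
c = CatalogEntity(name='animals')

assert a == b          # identical attribute dictionaries
assert not (a == c)    # differing attributes compare unequal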
https://github.com/alexa/alexa-apis-for-python/blob/bfe5e694daaca71bfb1a4199ca8d2514f1cac6c9/ask-smapi-model/ask_smapi_model/v1/skill/interaction_model/catalog/catalog_entity.py#L104-L110
import pprint import re import six import typing from enum import Enum if typing.TYPE_CHECKING: from typing import Dict, List, Optional, Union, Any from datetime import datetime class CatalogEntity(object): deserialized_types = { 'name': 'str', 'description': 'str' } attribute_map = { 'name': 'name', 'description': 'description' } supports_multiple_types = False def __init__(self, name=None, description=None): self.__discriminator_value = None self.name = name self.description = description def to_dict(self): result = {} for attr, _ in six.iteritems(self.deserialized_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x.value if isinstance(x, Enum) else x, value )) elif isinstance(value, Enum): result[attr] = value.value elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else (item[0], item[1].value) if isinstance(item[1], Enum) else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
tektoncd/experimental
sdk/python/tekton_pipeline/models/v1beta1_embedded_task.py
V1beta1EmbeddedTask.kind
python
def kind(self, kind):
    self._kind = kind
Sets the kind of this V1beta1EmbeddedTask.

:param kind: The kind of this V1beta1EmbeddedTask.  # noqa: E501
:type: str
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/v1beta1_embedded_task.py#L184-L192
import pprint import re import six from tekton_pipeline.configuration import Configuration class V1beta1EmbeddedTask(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'description': 'str', 'kind': 'str', 'metadata': 'V1beta1PipelineTaskMetadata', 'params': 'list[V1beta1ParamSpec]', 'resources': 'V1beta1TaskResources', 'results': 'list[V1beta1TaskResult]', 'sidecars': 'list[V1beta1Sidecar]', 'spec': 'K8sIoApimachineryPkgRuntimeRawExtension', 'step_template': 'V1Container', 'steps': 'list[V1beta1Step]', 'volumes': 'list[V1Volume]', 'workspaces': 'list[V1beta1WorkspaceDeclaration]' } attribute_map = { 'api_version': 'apiVersion', 'description': 'description', 'kind': 'kind', 'metadata': 'metadata', 'params': 'params', 'resources': 'resources', 'results': 'results', 'sidecars': 'sidecars', 'spec': 'spec', 'step_template': 'stepTemplate', 'steps': 'steps', 'volumes': 'volumes', 'workspaces': 'workspaces' } def __init__(self, api_version=None, description=None, kind=None, metadata=None, params=None, resources=None, results=None, sidecars=None, spec=None, step_template=None, steps=None, volumes=None, workspaces=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._description = None self._kind = None self._metadata = None self._params = None self._resources = None self._results = None self._sidecars = None self._spec = None self._step_template = None self._steps = None self._volumes = None self._workspaces = None self.discriminator = None if api_version is not None: self.api_version = api_version if description is not None: self.description = description if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if params is not None: self.params = params if resources is not None: self.resources = resources if results is not None: self.results = results if sidecars is not None: self.sidecars = sidecars if spec is not None: self.spec = spec if step_template is not None: self.step_template = step_template if steps is not None: self.steps = steps if volumes is not None: self.volumes = volumes if workspaces is not None: self.workspaces = workspaces @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def description(self): return self._description @description.setter def description(self, description): self._description = description @property def kind(self): return self._kind @kind.setter
Apache License 2.0
flask-admin/flask-admin
flask_admin/contrib/fileadmin/__init__.py
BaseFileAdmin._get_breadcrumbs
python
def _get_breadcrumbs(self, path):
    accumulator = []
    breadcrumbs = []
    for n in path.split(self._separator):
        accumulator.append(n)
        breadcrumbs.append((n, self._separator.join(accumulator)))
    return breadcrumbs
Returns a list of tuples, each containing a folder name and the path accumulated up to that folder while traversing down `path`.
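A standalone worked example of the expansion, for illustration only, with the storage separator assumed to be '/'.

path = 'docs/images/logo'
accumulator, breadcrumbs = [], []
for n in path.split('/'):
    accumulator.append(n)
    breadcrumbs.append((n, '/'.join(accumulator)))
print(breadcrumbs)
# [('docs', 'docs'), ('images', 'docs/images'), ('logo', 'docs/images/logo')]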
https://github.com/flask-admin/flask-admin/blob/e39f786374ce0e60db93f583efacb4672de0025c/flask_admin/contrib/fileadmin/__init__.py#L784-L794
import warnings from datetime import datetime import os import os.path as op import platform import re import shutil from operator import itemgetter from flask import flash, redirect, abort, request, send_file from werkzeug.utils import secure_filename from wtforms import fields, validators from flask_admin import form, helpers from flask_admin._compat import urljoin, as_unicode, quote from flask_admin.base import BaseView, expose from flask_admin.actions import action, ActionsMixin from flask_admin.babel import gettext, lazy_gettext class LocalFileStorage(object): def __init__(self, base_path): self.base_path = as_unicode(base_path) self.separator = os.sep if not self.path_exists(self.base_path): raise IOError('FileAdmin path "%s" does not exist or is not accessible' % self.base_path) def get_base_path(self): return op.normpath(self.base_path) def make_dir(self, path, directory): os.mkdir(op.join(path, directory)) def get_files(self, path, directory): items = [] for f in os.listdir(directory): fp = op.join(directory, f) rel_path = op.join(path, f) is_dir = self.is_dir(fp) size = op.getsize(fp) last_modified = op.getmtime(fp) items.append((f, rel_path, is_dir, size, last_modified)) return items def delete_tree(self, directory): shutil.rmtree(directory) def delete_file(self, file_path): os.remove(file_path) def path_exists(self, path): return op.exists(path) def rename_path(self, src, dst): os.rename(src, dst) def is_dir(self, path): return op.isdir(path) def send_file(self, file_path): return send_file(file_path) def read_file(self, path): with open(path, 'rb') as f: return f.read() def write_file(self, path, content): with open(path, 'w') as f: return f.write(content) def save_file(self, path, file_data): file_data.save(path) class BaseFileAdmin(BaseView, ActionsMixin): can_upload = True can_download = True can_delete = True can_delete_dirs = True can_mkdir = True can_rename = True allowed_extensions = None editable_extensions = tuple() list_template = 'admin/file/list.html' upload_template = 'admin/file/form.html' upload_modal_template = 'admin/file/modals/form.html' mkdir_template = 'admin/file/form.html' mkdir_modal_template = 'admin/file/modals/form.html' rename_template = 'admin/file/form.html' rename_modal_template = 'admin/file/modals/form.html' edit_template = 'admin/file/form.html' edit_modal_template = 'admin/file/modals/form.html' form_base_class = form.BaseForm rename_modal = False upload_modal = False mkdir_modal = False edit_modal = False possible_columns = 'name', 'rel_path', 'is_dir', 'size', 'date' column_list = 'name', 'size', 'date' column_sortable_list = column_list default_sort_column = None default_desc = 0 column_labels = dict((column, column.capitalize()) for column in column_list) date_format = '%Y-%m-%d %H:%M:%S' def __init__(self, base_url=None, name=None, category=None, endpoint=None, url=None, verify_path=True, menu_class_name=None, menu_icon_type=None, menu_icon_value=None, storage=None): self.base_url = base_url self.storage = storage self.init_actions() self._on_windows = platform.system() == 'Windows' if (self.allowed_extensions and not isinstance(self.allowed_extensions, set)): self.allowed_extensions = set(self.allowed_extensions) if (self.editable_extensions and not isinstance(self.editable_extensions, set)): self.editable_extensions = set(self.editable_extensions) super(BaseFileAdmin, self).__init__(name, category, endpoint, url, menu_class_name=menu_class_name, menu_icon_type=menu_icon_type, menu_icon_value=menu_icon_value) def 
is_accessible_path(self, path): return True def get_base_path(self): return self.storage.get_base_path() def get_base_url(self): return self.base_url def get_upload_form(self): class UploadForm(self.form_base_class): upload = fields.FileField(lazy_gettext('File to upload')) def __init__(self, *args, **kwargs): super(UploadForm, self).__init__(*args, **kwargs) self.admin = kwargs['admin'] def validate_upload(self, field): if not self.upload.data: raise validators.ValidationError(gettext('File required.')) filename = self.upload.data.filename if not self.admin.is_file_allowed(filename): raise validators.ValidationError(gettext('Invalid file type.')) return UploadForm def get_edit_form(self): class EditForm(self.form_base_class): content = fields.TextAreaField(lazy_gettext('Content'), (validators.required(),)) return EditForm def get_name_form(self): def validate_name(self, field): regexp = re.compile(r'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$') if not regexp.match(field.data): raise validators.ValidationError(gettext('Invalid name')) class NameForm(self.form_base_class): name = fields.StringField(lazy_gettext('Name'), validators=[validators.Required(), validate_name]) path = fields.HiddenField() return NameForm def get_delete_form(self): class DeleteForm(self.form_base_class): path = fields.HiddenField(validators=[validators.Required()]) return DeleteForm def get_action_form(self): class ActionForm(self.form_base_class): action = fields.HiddenField() url = fields.HiddenField() return ActionForm def upload_form(self): upload_form_class = self.get_upload_form() if request.form: formdata = request.form.copy() formdata.update(request.files) return upload_form_class(formdata, admin=self) elif request.files: return upload_form_class(request.files, admin=self) else: return upload_form_class(admin=self) def name_form(self): name_form_class = self.get_name_form() if request.form: return name_form_class(request.form) elif request.args: return name_form_class(request.args) else: return name_form_class() def edit_form(self): edit_form_class = self.get_edit_form() if request.form: return edit_form_class(request.form) else: return edit_form_class() def delete_form(self): delete_form_class = self.get_delete_form() if request.form: return delete_form_class(request.form) else: return delete_form_class() def action_form(self): action_form_class = self.get_action_form() if request.form: return action_form_class(request.form) else: return action_form_class() def is_file_allowed(self, filename): ext = op.splitext(filename)[1].lower() if ext.startswith('.'): ext = ext[1:] if self.allowed_extensions and ext not in self.allowed_extensions: return False return True def is_file_editable(self, filename): ext = op.splitext(filename)[1].lower() if ext.startswith('.'): ext = ext[1:] if not self.editable_extensions or ext not in self.editable_extensions: return False return True def is_in_folder(self, base_path, directory): return op.normpath(directory).startswith(base_path) def save_file(self, path, file_data): self.storage.save_file(path, file_data) def validate_form(self, form): return helpers.validate_form_on_submit(form) def _get_dir_url(self, endpoint, path=None, **kwargs): if not path: return self.get_url(endpoint, **kwargs) else: if self._on_windows: path = path.replace('\\', '/') kwargs['path'] = path return self.get_url(endpoint, **kwargs) def _get_file_url(self, path, **kwargs): if self._on_windows: path = path.replace('\\', '/') if self.is_file_editable(path): route = 
'.edit' else: route = '.download' return self.get_url(route, path=path, **kwargs) def _normalize_path(self, path): base_path = self.get_base_path() if path is None: directory = base_path path = '' else: path = op.normpath(path) if base_path: directory = self._separator.join([base_path, path]) else: directory = path directory = op.normpath(directory) if not self.is_in_folder(base_path, directory): abort(404) if not self.storage.path_exists(directory): abort(404) return base_path, directory, path def is_action_allowed(self, name): if name == 'delete' and not self.can_delete: return False elif name == 'edit' and len(self.editable_extensions) == 0: return False return True def on_rename(self, full_path, dir_base, filename): pass def on_edit_file(self, full_path, path): pass def on_file_upload(self, directory, path, filename): pass def on_mkdir(self, parent_dir, dir_name): pass def before_directory_delete(self, full_path, dir_name): pass def before_file_delete(self, full_path, filename): pass def on_directory_delete(self, full_path, dir_name): pass def on_file_delete(self, full_path, filename): pass def is_column_visible(self, column): return column in self.column_list def is_column_sortable(self, column): return column in self.column_sortable_list def column_label(self, column): return self.column_labels[column] def timestamp_format(self, timestamp): return datetime.fromtimestamp(timestamp).strftime(self.date_format) def _save_form_files(self, directory, path, form): filename = self._separator.join([directory, secure_filename(form.upload.data.filename)]) if self.storage.path_exists(filename): secure_name = self._separator.join([path, secure_filename(form.upload.data.filename)]) raise Exception(gettext('File "%(name)s" already exists.', name=secure_name)) else: self.save_file(filename, form.upload.data) self.on_file_upload(directory, path, filename) @property def _separator(self): return self.storage.separator
BSD 3-Clause New or Revised License
qecsim/qecsim
src/qecsim/cli.py
run_ftp
python
def run_ftp(code, time_steps, error_model, decoder, error_probabilities, max_failures, max_runs,
            measurement_error_probability, output, random_seed):
    code.validate()
    logger.info('RUN STARTING: code={}, time_steps={}, error_model={}, decoder={}, error_probabilities={}, '
                'max_failures={}, max_runs={}, measurement_error_probability={}, random_seed={}.'
                .format(code, time_steps, error_model, decoder, error_probabilities, max_failures, max_runs,
                        measurement_error_probability, random_seed))
    data = []
    for error_probability in error_probabilities:
        runs_data = app.run_ftp(code, time_steps, error_model, decoder, error_probability,
                                measurement_error_probability=measurement_error_probability,
                                max_runs=max_runs, max_failures=max_failures, random_seed=random_seed)
        data.append(runs_data)
    logger.info('RUN COMPLETE: data={}'.format(data))
    _write_data(output, data)
Simulate fault-tolerant (time-periodic) quantum error correction.

Arguments:

\b
CODE                   Stabilizer code in format name(<args>)
#CODE_PARAMETERS#

\b
TIME_STEPS             Number of time steps as INT >= 1

\b
ERROR_MODEL            Error model in format name(<args>)
#ERROR_MODEL_PARAMETERS#

\b
DECODER                Decoder in format name(<args>)
#DECODER_PARAMETERS#

\b
ERROR_PROBABILITY...   One or more probabilities as FLOAT in [0.0, 1.0]

Examples:
qecsim run-ftp -r5 "rotated_planar(13,13)" 13 "generic.bit_phase_flip" "rotated_planar.smwpm" 0.1 0.2
qecsim run-ftp -r5 -m0.05 "rotated_toric(6,6)" 4 "generic.bit_phase_flip" "rotated_toric.smwpm" 0.1
qecsim run-ftp -r5 -o"data.json" "rotated_planar(7,7)" 7 "generic.depolarizing" "rotated_planar.smwpm" 0.1
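The same sweep can be driven programmatically against qecsim.app, which is what this command callback does internally. A minimal sketch, assuming code, error_model and decoder objects have already been constructed from qecsim's model classes (their constructors are not shown in this record, and the helper name below is hypothetical):

    from qecsim import app

    def sweep(code, time_steps, error_model, decoder, error_probabilities, max_runs=5):
        # Hypothetical helper mirroring the CLI loop: one app.run_ftp call per error probability.
        return [app.run_ftp(code, time_steps, error_model, decoder, p, max_runs=max_runs)
                for p in error_probabilities]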
https://github.com/qecsim/qecsim/blob/24d6b8a320b292461b66b68fe4fba40c9ddc2257/src/qecsim/cli.py#L286-L338
import ast import inspect import json import logging import re import click import pkg_resources import qecsim from qecsim import app from qecsim import util from qecsim.model import ATTR_CLI_DESCRIPTION logger = logging.getLogger(__name__) class _ConstructorParamType(click.ParamType): name = 'constructor' def __init__(self, constructors): self._constructors = constructors def get_metavar(self, param): return '[{}]'.format('|'.join(sorted(self._constructors.keys()))) def get_missing_message(self, param): return '(choose from {})'.format(', '.join(sorted(self._constructors.keys()))) def convert(self, value, param, ctx): constructor_match = re.fullmatch(r''' # match 'toric(3,3)' as {'constructor_name': 'toric', 'constructor_args': '3,3'} (?P<constructor_name>[\w.]+) # capture constructor_name, e.g. 'toric' (?:\(\s* # skip opening parenthesis and leading whitespace (?P<constructor_args>.*?) # capture constructor_args, e.g. '3,3' ,?\s*\))? # skip trailing comma, trailing whitespace and closing parenthesis ''', value, re.VERBOSE) if constructor_match is None: self.fail('{} (format as name(<args>))'.format(value), param, ctx) constructor_name = constructor_match.group('constructor_name') if constructor_name in self._constructors.keys(): constructor = self._constructors[constructor_name] else: self.fail('{} (choose from {})'.format(value, ', '.join(sorted(self._constructors.keys()))), param, ctx) constructor_args = constructor_match.group('constructor_args') if constructor_args: try: arguments = ast.literal_eval(constructor_args + ',') except Exception as ex: self.fail('{} (failed to parse arguments "{}")'.format(value, ex), param, ctx) else: arguments = tuple() try: return constructor(*arguments) except Exception as ex: self.fail('{} (failed to construct "{}")'.format(value, ex), param, ctx) def __repr__(self): return '{}({!r})'.format(type(self).__name__, self._constructors) def _model_argument(model_type): def _decorator(func): entry_point_id = 'qecsim.cli.{}.{}s'.format(func.__name__, model_type) entry_points = sorted(pkg_resources.iter_entry_points(entry_point_id), key=lambda ep: ep.name) constructors = {ep.name: ep.load() for ep in entry_points} func = click.argument(model_type, type=_ConstructorParamType(constructors), metavar=model_type.upper())(func) model_definition_list = [(name, getattr(cls, ATTR_CLI_DESCRIPTION, '')) for name, cls in constructors.items()] formatter = click.HelpFormatter() formatter.indent() if model_definition_list: formatter.write_dl(model_definition_list) model_doc_placeholder = '#{}_PARAMETERS#'.format(model_type.upper()) func.__doc__ = inspect.getdoc(func).replace(model_doc_placeholder, formatter.getvalue()) return func return _decorator def _validate_error_probability(ctx, param, value): if not (0 <= value <= 1): raise click.BadParameter('{} is not in [0.0, 1.0]'.format(value), ctx, param) return value def _validate_error_probabilities(ctx, param, value): for v in value: _validate_error_probability(ctx, param, v) return value def _validate_measurement_error_probability(ctx, param, value): if not (value is None or (0 <= value <= 1)): raise click.BadParameter('{} is not in [0.0, 1.0]'.format(value), ctx, param) return value @click.group() @click.version_option(version=qecsim.__version__, prog_name='qecsim') def cli(): util.init_logging() @cli.command() @_model_argument('code') @_model_argument('error_model') @_model_argument('decoder') @click.argument('error_probabilities', required=True, nargs=-1, type=float, metavar='ERROR_PROBABILITY...', 
callback=_validate_error_probabilities) @click.option('--max-failures', '-f', type=click.IntRange(min=1), metavar='INT', help='Maximum number of failures for each probability.') @click.option('--max-runs', '-r', type=click.IntRange(min=1), metavar='INT', help='Maximum number of runs for each probability. [default: 1 if max-failures unspecified]') @click.option('--output', '-o', default='-', type=click.Path(allow_dash=True), metavar='FILENAME', help='Output file. (Writes to log if file exists).') @click.option('--random-seed', '-s', type=click.IntRange(min=0), metavar='INT', help='Random seed for qubit error generation. (Re-applied for each probability).') def run(code, error_model, decoder, error_probabilities, max_failures, max_runs, output, random_seed): code.validate() logger.info('RUN STARTING: code={}, error_model={}, decoder={}, error_probabilities={}, max_failures={}, ' 'max_runs={}, random_seed={}.' .format(code, error_model, decoder, error_probabilities, max_failures, max_runs, random_seed)) data = [] for error_probability in error_probabilities: runs_data = app.run(code, error_model, decoder, error_probability, max_runs=max_runs, max_failures=max_failures, random_seed=random_seed) data.append(runs_data) logger.info('RUN COMPLETE: data={}'.format(data)) _write_data(output, data) @cli.command() @_model_argument('code') @click.argument('time_steps', type=click.IntRange(min=1), metavar='TIME_STEPS') @_model_argument('error_model') @_model_argument('decoder') @click.argument('error_probabilities', required=True, nargs=-1, type=float, metavar='ERROR_PROBABILITY...', callback=_validate_error_probabilities) @click.option('--max-failures', '-f', type=click.IntRange(min=1), metavar='INT', help='Maximum number of failures for each probability.') @click.option('--max-runs', '-r', type=click.IntRange(min=1), metavar='INT', help='Maximum number of runs for each probability. [default: 1 if max_failures unspecified]') @click.option('--measurement-error-probability', '-m', type=float, default=None, callback=_validate_measurement_error_probability, help='Measurement error probability [default: 0.0 if TIME_STEPS == 1 else ERROR_PROBABILITY].') @click.option('--output', '-o', default='-', type=click.Path(allow_dash=True), metavar='FILENAME', help='Output file. (Writes to log if file exists).') @click.option('--random-seed', '-s', type=click.IntRange(min=0), metavar='INT', help='Random seed for qubit error generation. (Re-applied for each probability).')
BSD 3-Clause New or Revised License
quantipy/quantipy3
quantipy/sandbox/pptx/PptxPainterClass.py
PptxPainter.draft_autochart
python
def draft_autochart(self, dataframe, chart_type):
    valid_chart_types = ['pie', 'bar_clustered', 'bar_stacked_100', 'bar',
                         'column', 'column_clustered', 'line']
    if not isinstance(chart_type, str):
        raise ValueError('The chart_type argument must be a string')
    if chart_type not in valid_chart_types:
        error_msg = 'Invalid chart_type {}. Valid chart types are {}'
        raise ValueError(error_msg.format(chart_type, valid_chart_types))
    if chart_type == 'pie':
        draft = copy.deepcopy(self.chart_pie)
    elif chart_type == 'bar_clustered' or chart_type == 'bar':
        draft = copy.deepcopy(self.chart_bar)
        if len(dataframe.columns) > 1:
            draft['has_legend'] = True
    elif chart_type == 'column_clustered' or chart_type == 'column':
        draft = copy.deepcopy(self.chart_column)
        if len(dataframe.columns) > 1:
            draft['has_legend'] = True
    elif chart_type == 'line':
        draft = copy.deepcopy(self.chart_line)
        if len(dataframe.columns) > 1:
            draft['has_legend'] = True
    elif chart_type == 'bar_stacked_100':
        draft = copy.deepcopy(self.chart_bar_stacked100)
    else:
        draft = copy.deepcopy(self.chart_bar)
    draft['dataframe'] = dataframe
    return draft
Simplified caller for method draft_chart that won't require the settings dict, but will
instead pick the default chart settings for the requested chart type.

Parameters
----------
dataframe: pandas.core.frame.DataFrame
chart_type: str
    A string corresponding to the keys in Dict "chart_type_dct" from "enumerations.py"

Returns
-------
self.chart
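A minimal usage sketch (the presentation path, layout index and DataFrame contents are placeholders): build a categories-by-series DataFrame, ask draft_autochart for the default settings, then queue the draft the way the rest of this class does via queue_chart:

    import pandas as pd
    from quantipy.sandbox.pptx.PptxPainterClass import PptxPainter  # path assumed from this record

    df = pd.DataFrame({'Wave 1': [0.4, 0.35, 0.25],
                       'Wave 2': [0.5, 0.3, 0.2]},
                      index=['Yes', 'No', 'Maybe'])

    painter = PptxPainter('template.pptx', slide_layout=1)  # placeholder file and layout
    draft = painter.draft_autochart(df, 'bar_clustered')    # copies the self.chart_bar defaults
    # draft['has_legend'] is True here because the DataFrame has more than one column.
    painter.queue_chart(settings=draft)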
https://github.com/quantipy/quantipy3/blob/8c64fde4130529990f0f9eada45a577d51a6537e/quantipy/sandbox/pptx/PptxPainterClass.py#L636-L685
import re from lxml import etree import warnings from pptx import Presentation from pptx.chart.data import CategoryChartData from pptx.util import ( Emu, Pt, Cm, Inches) try: from pptx import table except: from pptx.shapes import table from pptx.chart.data import ChartData from pptx.enum.chart import XL_CHART_TYPE from pptx.dml.color import RGBColor from .enumerations import ( fill_type_dct, data_label_pos_dct, legend_pos_dct, tick_label_pos_dct, tick_mark_pos_dct, vertical_alignment_pos_dct, paragraph_alignment_pos_dct, theme_color_index_dct, chart_type_dct ) from .PptxDefaultsClass import PptxDefaults from .PptxChainClass import float2String import pandas as pd import copy def chartdata_from_dataframe(df, number_format="0%", xl_number_format='0.00%'): def get_parent(sub_categories, line, pos): for subcat in sub_categories: if subcat.label == line[pos]: return subcat cd = CategoryChartData(number_format=number_format) if isinstance(df.index, pd.MultiIndex): cats = [] for line in df.index.unique().tolist(): for l, lvl in enumerate(line): if l == 0: if not any([lvl == cat.label for cat in cats]): cats.append(cd.add_category(lvl)) else: parent = get_parent(cats, line, 0) if l > 1: for i in range(1, l): parent = get_parent(parent.sub_categories, line, i) sub_categories = parent.sub_categories seen = [lvl == subcat.label for subcat in sub_categories] if not any(seen): parent.add_sub_category(lvl) else: categories = tuple(df.index.values.tolist()) cd.categories = categories for col in df.columns: values = [ value if value == value else None for value in df[col].values.tolist() ] series = (col, tuple(values)) cd.add_series(*series, number_format=xl_number_format) return cd def return_slide_layout_by_name(pptx, slide_layout_name): for slide_layout in pptx.slide_layouts: if slide_layout.name == slide_layout_name: return slide_layout else: raise Exception( 'Slide layout: {sld_layout} not found\n'.format( sld_layout=slide_layout_name)) def convertable(obj, func): try: func(obj) return True except ValueError: return False class PptxPainter(object): def __init__(self, path_to_presentation, slide_layout=None, shape_properties=None): self.presentation = Presentation(path_to_presentation) if slide_layout is None: self.default_slide_layout = None else: self.default_slide_layout = self.set_slide_layout(slide_layout) if shape_properties: self._shape_properties = shape_properties else: self._shape_properties = PptxDefaults() self.textbox = self._shape_properties.textbox self.textbox_header = self._shape_properties.textbox_header self.textbox_footer = self._shape_properties.textbox_footer self.chart = self._shape_properties.chart self.table = self._shape_properties.table self.side_table = self._shape_properties.side_table charts = self._shape_properties.charts self.chart_bar = charts['bar'] self.chart_bar_stacked100 = charts['bar_stacked100'] self.chart_line = charts['line'] self.chart_column = charts['column'] self.chart_pie = charts['pie'] self.slide_kwargs = { 'textboxs': {}, 'charts': {}, 'tables': {}, 'side_tables': {}, } @staticmethod def get_plot_values(plot): series = [ {series.name: [str(s) for s in series.values]} for series in plot.series ] return series def show_data_labels(self, plot, decimals=0): data_labels = plot.data_labels number_format = data_labels.number_format font = data_labels.font plot_values = self.get_plot_values(plot) for s, series in enumerate(plot_values): values = [ value for value in list(series.values())[0] if convertable(value, float) ] for v, value in enumerate(values): if 
value is not None: if number_format == '0%': value = round(float(value) * 100, decimals) str_value = float2String(value) + '%' else: str_value = str(value) else: str_value = "" point = plot.series[s].points[v] data_label = point.data_label frame = data_label.text_frame frame.text = str_value pgraph = frame.paragraphs[0] for run in pgraph.runs: run.font.bold = font.bold run.font.italic = font.italic run.font.name = font.name run.font.size = font.size run.font.underline = font.underline def edit_datalabel(self, plot, series, point, text, prepend=False, append=False, rgb=None): data_label = plot.series[series].points[point].data_label frame = data_label.text_frame run = frame.paragraphs[0].runs[0] original_text = frame.text if prepend: run.text = '{}{}'.format(text, original_text) elif append: run.text = '{}{}'.format(original_text, text) else: run.text = text if rgb is not None: run.font.color.rgb = RGBColor(*rgb) def queue_slide_items(self, pptx_chain, slide_items, decimal_separator='.', pct_decimals=0, decimals=2, ): valid_slide_items = ['chart','table','side_table'] slide_items = re.sub(' +', '', slide_items) draft = self.draft_textbox_header(pptx_chain.question_text) self.queue_textbox(settings=draft) draft = self.draft_textbox_footer(pptx_chain.base_text) self.queue_textbox(settings=draft) slide_items = slide_items.split('+') for slide_item in slide_items: if slide_item.startswith('table'): cell_items = slide_item.split(':')[1] pptx_frame = pptx_chain.chart_df.get(cell_items).to_table(pct_decimals=pct_decimals, decimals=decimals, decimal_separator=decimal_separator, ) if not pptx_frame().empty: table_draft = self.draft_table(pptx_frame()) self.queue_table(settings=table_draft) if slide_item.startswith('side_table'): cell_items = slide_item.split(':')[1] pptx_frame = pptx_chain.chart_df.get(cell_items).to_table(pct_decimals=pct_decimals, decimals=decimals, decimal_separator=decimal_separator, ) if not pptx_frame().empty: side_table_draft = self.draft_side_table(pptx_frame()) pct_index = [index for index, value in enumerate(pptx_frame.cell_items) if 'is_c_pct' in value] if pct_index: side_table_draft['values_suffix'] = '%' side_table_draft['values_suffix_columns'] = pct_index self.queue_side_table(settings=side_table_draft) if slide_item.startswith('chart'): sig_test = False cell_items = slide_item.split(':')[1] ''' Makes no sense to actually have 'test' as a cell_item. 
Will remove it from cell_items and set flag sig_test as True ''' cell_items = cell_items.split(',') if 'test' in cell_items: sig_test = True pptx_chain.add_test_letter_to_column_labels() pptx_chain.chart_df = pptx_chain.prepare_dataframe() cell_items.remove('test') cell_items = ','.join(cell_items) pptx_frame = pptx_chain.chart_df.get(cell_items) if not pptx_frame().empty: chart_draft = self.draft_autochart(pptx_frame(), pptx_chain.chart_type) if sig_test: chart_draft['sig_test_visible'] = True chart_draft['sig_test_results'] = pptx_chain.sig_test self.queue_chart(settings=chart_draft) self._check_shapes() return None def _check_shapes(self, adjust='chart'): table_max_left=12240000 table_width=0 for table, table_settings in self.slide_kwargs['side_tables'].items(): if table_settings['left'] < table_max_left: table_max_left = table_settings['left'] table_width = table_settings['width'] for chart, chart_settings in self.slide_kwargs['charts'].items(): if chart_settings['left'] + chart_settings['width'] > table_max_left: chart_settings['width'] -= table_width def clear_tables(self): self.clear_queue('tables') def clear_side_tables(self): self.clear_queue('side_tables') def clear_charts(self): self.clear_queue('charts') def clear_textboxes(self): self.clear_queue('textboxs') def clear_queue(self, key): if key=='all': for item in list(self.slide_kwargs.keys()): self.slide_kwargs[item].clear() elif key=='charts': self.slide_kwargs['charts'].clear() elif key=='textboxes': self.slide_kwargs['textboxs'].clear() elif key=='tables': self.slide_kwargs['tables'].clear() elif key=='side_tables': self.slide_kwargs['side_tables'].clear() def set_slide_layout(self, slide_layout): if isinstance(slide_layout, int): return self.presentation.slide_layouts[slide_layout] else: return return_slide_layout_by_name(self.presentation, slide_layout) def add_slide(self, slide_layout=None): if slide_layout is None: if self.default_slide_layout is None: raise ValueError('No slide layout found! Specify a slide layout or set a default slide layout') else: slide_layout = self.default_slide_layout else: slide_layout = self.set_slide_layout(slide_layout=slide_layout) return self.presentation.slides.add_slide(slide_layout) def draft_textbox(self, settings=None, text=None): if settings: draft = copy.deepcopy(settings) else: draft = copy.deepcopy(self.textbox) draft['text'] = text return draft def draft_textbox_header(self, text=None): draft = copy.deepcopy(self.textbox_header) draft['text'] = text return draft def draft_textbox_footer(self, text=None): draft = copy.deepcopy(self.textbox_footer) draft['text'] = text return draft def draft_chart(self, dataframe, settings=None): if settings: draft = copy.deepcopy(settings) else: draft = copy.deepcopy(self.chart) draft['dataframe'] = dataframe return draft
MIT License
ekimekim/factoriocalc
factoriocalc/calculator.py
Process.rescale
python
def rescale(self, new_throughput): return type(self)(self.item, self.recipe, new_throughput, self.per_process_outputs)
Return a new Process with a modified throughput
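A small illustrative sketch (the item name and throughput values are placeholders): rescaling keeps the item, recipe and per-process outputs and only swaps the throughput.

    from fractions import Fraction
    from factoriocalc.calculator import Process  # module path assumed from this record

    base = Process('iron-plate', None, Fraction(10))  # an input-style Process, placeholder values
    doubled = base.rescale(Fraction(20))
    assert doubled.throughput == Fraction(20)
    assert doubled.item == base.item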
https://github.com/ekimekim/factoriocalc/blob/18583ee0ea16a12c061b272db68469edee86606d/factoriocalc/calculator.py#L48-L50
from fractions import Fraction from .util import line_limit, is_liquid class Process(object): def __init__(self, item, recipe, throughput, outputs=None): self.item = item self.recipe = recipe self.throughput = throughput if outputs: self.per_process_outputs = outputs elif self.recipe and self.recipe.is_virtual: self.per_process_outputs = {} else: self.per_process_outputs = {item: 1} @property def is_input(self): return self.recipe is None def buildings(self): return None if self.is_input else self.throughput / self.recipe.throughput def inputs(self): return {} if self.is_input else { k: v * self.throughput for k, v in self.recipe.inputs.items() } def outputs(self): return {k: v * self.throughput for k, v in self.per_process_outputs.items()}
MIT License
tomorrowisanotherday/tensorgo
tensorgo/benchmark/cifar10/cifar10.py
_add_loss_summaries
python
def _add_loss_summaries(total_loss):
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    for l in losses + [total_loss]:
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
Add summaries for losses in CIFAR-10 model.

Generates moving average for all losses and associated summaries for
visualizing the performance of the network.

Args:
    total_loss: Total loss from loss().
Returns:
    loss_averages_op: op for generating moving averages of losses.
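This helper is typically consumed by the training op, with the returned averaging op made a control dependency of the gradient computation so the moving averages update before each step. A hedged sketch of that pattern (the optimizer choice and function name are illustrative, not taken from this file):

    def train_step(total_loss, learning_rate):
        # Generate moving averages of all losses and their summaries first.
        loss_averages_op = _add_loss_summaries(total_loss)

        # Only compute and apply gradients after the averages have been updated.
        with tf.control_dependencies([loss_averages_op]):
            opt = tf.train.GradientDescentOptimizer(learning_rate)
            grads = opt.compute_gradients(total_loss)
        return opt.apply_gradients(grads)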
https://github.com/tomorrowisanotherday/tensorgo/blob/ff5a3c082bd584d6987db2b7a0cdc1e90cb95437/tensorgo/benchmark/cifar10/cifar10.py#L298-L322
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import re import sys import tarfile from six.moves import urllib import tensorflow as tf import cifar10_input FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_integer('batch_size', 128, """Number of images to process in a batch.""") tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data', """Path to the CIFAR-10 data directory.""") tf.app.flags.DEFINE_boolean('use_fp16', False, """Train the model using fp16.""") IMAGE_SIZE = cifar10_input.IMAGE_SIZE NUM_CLASSES = cifar10_input.NUM_CLASSES NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL MOVING_AVERAGE_DECAY = 0.9999 NUM_EPOCHS_PER_DECAY = 350.0 LEARNING_RATE_DECAY_FACTOR = 0.1 INITIAL_LEARNING_RATE = 0.1 TOWER_NAME = 'tower' DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz' def _activation_summary(x): tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name) tf.summary.histogram(tensor_name + '/activations', x) tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) def _variable_on_cpu(name, shape, initializer): with tf.device('/cpu:0'): dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) return var def _variable_with_weight_decay(name, shape, stddev, wd): dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = _variable_on_cpu( name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if wd is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var def distorted_inputs(): if not FLAGS.data_dir: raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') images, labels = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return images, labels def inputs(eval_data): if not FLAGS.data_dir: raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') images, labels = cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return images, labels def inference(images): with tf.variable_scope('conv1') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0) conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0)) pre_activation = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv1) pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1') with tf.variable_scope('conv2') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0) conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1)) pre_activation = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv2) norm2 = tf.nn.lrn(conv2, 4, bias=1.0, 
alpha=0.001 / 9.0, beta=0.75, name='norm2') pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') with tf.variable_scope('local3') as scope: reshape = tf.reshape(pool2, [FLAGS.batch_size, -1]) dim = reshape.get_shape()[1].value weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1)) local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name) _activation_summary(local3) with tf.variable_scope('local4') as scope: weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1)) local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name) _activation_summary(local4) with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=1/192.0, wd=0.0) biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0)) softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name) _activation_summary(softmax_linear) return softmax_linear def loss(logits, labels): labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) return tf.add_n(tf.get_collection('losses'), name='total_loss')
MIT License
thingsboard/python_tb_rest_client
tb_rest_client/models/models_pe/sign_up_request.py
SignUpRequest.last_name
python
def last_name(self): return self._last_name
Gets the last_name of this SignUpRequest. # noqa: E501 :return: The last_name of this SignUpRequest. # noqa: E501 :rtype: str
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/sign_up_request.py#L139-L146
import pprint import re import six class SignUpRequest(object): swagger_types = { 'app_secret': 'str', 'email': 'str', 'first_name': 'str', 'last_name': 'str', 'password': 'str', 'pkg_name': 'str', 'recaptcha_response': 'str' } attribute_map = { 'app_secret': 'appSecret', 'email': 'email', 'first_name': 'firstName', 'last_name': 'lastName', 'password': 'password', 'pkg_name': 'pkgName', 'recaptcha_response': 'recaptchaResponse' } def __init__(self, app_secret=None, email=None, first_name=None, last_name=None, password=None, pkg_name=None, recaptcha_response=None): self._app_secret = None self._email = None self._first_name = None self._last_name = None self._password = None self._pkg_name = None self._recaptcha_response = None self.discriminator = None if app_secret is not None: self.app_secret = app_secret if email is not None: self.email = email if first_name is not None: self.first_name = first_name if last_name is not None: self.last_name = last_name if password is not None: self.password = password if pkg_name is not None: self.pkg_name = pkg_name if recaptcha_response is not None: self.recaptcha_response = recaptcha_response @property def app_secret(self): return self._app_secret @app_secret.setter def app_secret(self, app_secret): self._app_secret = app_secret @property def email(self): return self._email @email.setter def email(self, email): self._email = email @property def first_name(self): return self._first_name @first_name.setter def first_name(self, first_name): self._first_name = first_name @property
Apache License 2.0
charlesshang/detectron-pytorch
libs/nets/model.py
detection_model.get_summaries
python
def get_summaries(self, is_training=True):
    summaries = []
    for key, var in self._score_summaries.items():
        summaries.append(self._add_scalar_summary(key, var))
    self._score_summaries = {}
    self._hist_summaries = {}
    if is_training:
        for k, var in dict(self.named_parameters()).items():
            if var.requires_grad:
                summaries.append(self._add_scalar_summary('Params/' + k, var))
                summaries.append(self._add_scalar_summary('Grads/' + k, var.grad))
    return summaries
Run the summary operator: feed the placeholders with corresponding network outputs (activations)
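The returned items are tensorboardX Summary protobufs (produced by _add_scalar_summary), so a training loop can hand them to a writer. A rough sketch, assuming a tensorboardX FileWriter is available and that model and step are defined by the surrounding training loop (both are placeholders here):

    import tensorboardX as tbx

    writer = tbx.FileWriter('./logs')  # log directory is a placeholder
    for summary in model.get_summaries(is_training=True):
        if summary is not None:  # _add_scalar_summary may return None for unsupported types
            writer.add_summary(summary, global_step=step)
    writer.flush()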
https://github.com/charlesshang/detectron-pytorch/blob/468a8050330db4de1e22509c8b741ad236a55d88/libs/nets/model.py#L534-L554
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import tensorboardX as tbx import libs.configs.config as cfg from .focal_loss import FocalLoss from .smooth_l1_loss import smooth_l1_loss from libs.layers.box import decoding_box, apply_nms from libs.nets.utils import everything2numpy, everything2cuda class detection_model(nn.Module): def __init__(self, backbone, num_classes, num_anchors, is_training=True, maxpool5=True): super(detection_model, self).__init__() self.backbone = backbone self.num_classes = num_classes self.num_anchors = num_anchors self.is_training = is_training self.rpn_activation = cfg.class_activation self.rpn_outs = [] self.loss_dict = [] self.with_segment = cfg.with_segment self._score_summaries = {} self._hist_summaries = {} self.global_step = 0 self.anchors = None self.maxpool5 = maxpool5 if is_training: self.rpn_cls_loss_func = FocalLoss(gamma=2, alpha=0.25, activation=self.rpn_activation) if cfg.use_focal_loss else nn.CrossEntropyLoss() def forward(self, input, gt_boxes_list, anchors_np): pass def _objectness(self, probs, activation=None): activation = self.rpn_activation if activation is None else activation if activation == 'softmax': return 1. - probs[:, 0] elif activation == 'sigmoid': return probs.max(dim=1)[0] else: raise ValueError('Unknown activation funtion %s' % self.activation) def _rerange(self, rpn_outs, last_dimension=None): last_dimension = self.num_classes if last_dimension is None else last_dimension n = rpn_outs[0][0].size()[0] c = rpn_outs[0][0].size()[1] cb = rpn_outs[0][1].size()[1] rpn_logit = [rpn[0].view(n, c, -1) for rpn in rpn_outs] rpn_box = [rpn[1].view(n, cb, -1) for rpn in rpn_outs] rpn_logit = torch.cat(rpn_logit, dim=2) rpn_box = torch.cat(rpn_box, dim=2) rpn_logit = rpn_logit.permute(0, 2, 1).contiguous().view(-1, last_dimension) num_endpoints = rpn_logit.size()[0] rpn_box = rpn_box.permute(0, 2, 1).contiguous().view(num_endpoints, -1) return rpn_logit, rpn_box def _stage_one_results(self, rpn_box, rpn_prob, anchors, top_n=2000, overlap_threshold=0.7, top_n_post_nms=None): boxes, probs, img_ids, anchors = self._decode_and_choose_top_n_stage1(rpn_box, rpn_prob, anchors, top_n=top_n) boxes, probs, img_ids, anchors = self._apply_nms_in_batch(boxes, probs, img_ids, anchors, activation=self.rpn_activation, overlap_threshold=overlap_threshold) if top_n_post_nms is not None: return boxes[:top_n_post_nms], probs[:top_n_post_nms], img_ids[:top_n_post_nms] return boxes, probs, img_ids def _thresholding(self, boxes, probs, batch_ids, score_threshold=0.1): objness = self._objectness(probs) inds = objness.data.ge(score_threshold).nonzero().view(-1) if inds.numel() == 0: _, inds = objness.sort(dim=0, descending=True) inds = inds[:10] boxes = boxes[inds] probs = probs[inds] batch_ids = batch_ids[inds] return boxes, probs, batch_ids def build_losses_rpn(self, rpn_logits, rpn_box, rpn_prob, rpn_labels, rpn_bboxes, rpn_bbwghts): rpn_labels = rpn_labels.view(-1).long() assert rpn_logits.size()[0] == rpn_box.size()[0] == rpn_labels.size()[0], 'Dimension dont match %d vs %d vs %d' % (rpn_logits.size()[0], rpn_box.size()[0], rpn_labels.size()[0]) if cfg.use_focal_loss: rpn_logits, rpn_labels = self._sample_valid(rpn_logits, rpn_labels) else: rpn_logits, rpn_labels = self._sample_OHEM(rpn_logits, rpn_labels, rpn_prob, rpn_box, bg_fg_ratio=3) rpn_cls_loss = self.rpn_cls_loss_func(rpn_logits, rpn_labels) rpn_bbwghts 
= rpn_bbwghts.view(-1, 4) rpn_bboxes = rpn_bboxes.view(-1, 4) bb_nums = torch.sum(rpn_bbwghts.data.gt(0).float()) bbwght_outside = (rpn_bbwghts > 0.0001).float() / max(bb_nums, 1.0) rpn_box_loss = smooth_l1_loss(rpn_box, rpn_bboxes, rpn_bbwghts, bbwght_outside, sigma=1.0) return rpn_cls_loss, rpn_box_loss def build_losses_rpn_faster_rcnn(self, rpn_logits, rpn_box, rpn_prob, rpn_labels, rpn_bboxes, rpn_bbwghts): rpn_labels = rpn_labels.view(-1).long() assert rpn_logits.size()[0] == rpn_box.size()[0] == rpn_labels.size()[0], 'Dimension dont match %d vs %d vs %d' % (rpn_logits.size()[0], rpn_box.size()[0], rpn_labels.size()[0]) rpn_logits, rpn_labels, all_rpn_labels = self._sample_faster_rcnn(rpn_logits, rpn_labels, rpn_prob, rpn_box, rpn_batch_size=256, rpn_fg_fraction=0.5) rpn_cls_loss = F.cross_entropy(rpn_logits, rpn_labels, ignore_index=-1) rpn_bbwghts = rpn_bbwghts.view(-1, 4) rpn_bboxes = rpn_bboxes.view(-1, 4) bb_nums = all_rpn_labels.eq(1).sum().item() bbwght_outside = all_rpn_labels.eq(1).float() / max(bb_nums * 4, 4.0) bbwght_outside = bbwght_outside.view(-1, 1) rpn_box_loss = smooth_l1_loss(rpn_box, rpn_bboxes, rpn_bbwghts, bbwght_outside, sigma=1.0) return rpn_cls_loss, rpn_box_loss def build_losses(self, outputs, targets): pass def loss(self): pass def cls_loss(self): return self.loss_dict['rpn_cls_loss'] def box_loss(self): return self.loss_dict['rpn_box_loss'] def _gather_fg(self, labels, boxes, logits): fg_inds = labels.data.ge(1).nonzero().view(-1) if fg_inds.numel() > 0: return labels[fg_inds], boxes[fg_inds], logits[fg_inds], fg_inds else: return None, None, None, fg_inds def _gather_bg(self, labels, probs, logits, top_n=2000): bg_inds = labels.data.eq(0).nonzero().view(-1) probs = probs[bg_inds] logits = logits[bg_inds] objness = self._objectness(probs) _, inds = objness.sort(dim=0, descending=True) top_n = min(top_n, inds.size(0)) inds = inds[:top_n] return probs[inds], logits[inds], bg_inds[inds.data] def _sample_OHEM(self, rpn_logits, rpn_label, rpn_prob, rpn_boxes, bg_fg_ratio=3): rpn_prob.detach() fg_labels, fg_boxes, fg_logits, fg_inds = self._gather_fg(rpn_label, rpn_boxes, rpn_logits) fg_num = fg_inds.numel() top_n = max(fg_num * bg_fg_ratio, 16) bg_probs, bg_logits, bg_inds = self._gather_bg(rpn_label, rpn_prob, rpn_logits, top_n=top_n) bg_num = bg_inds.numel() bg_objness = self._objectness(bg_probs) if fg_inds is not None: chosen_inds = torch.cat((fg_inds, bg_inds), dim=0) else: chosen_inds = bg_inds labels = rpn_label[chosen_inds] if self.global_step % cfg.log_image == 0 and fg_num > 1: c = rpn_logits.size(1) sampled_fg_losses = 0.5 * torch.abs(self._to_one_hot(fg_labels, c) - rpn_prob[fg_inds]).sum(dim=1) self._score_summaries['Sample/PosLoss'] = sampled_fg_losses self._score_summaries['Sample/PosLossMax'] = sampled_fg_losses.max() bg_probs_all, _, _ = self._gather_bg(rpn_label, rpn_prob, rpn_logits, top_n=float('inf')) bg_objness_all = 1. 
- bg_probs_all[:, 0] self._score_summaries['Sample/NegLoss'] = bg_objness_all self._score_summaries['Sample/NegLoss_SampledMax'] = bg_objness.max() self._score_summaries['Sample/NegLoss_Sampled'] = bg_objness self._score_summaries['Sample/FG_nums'] = fg_num self._score_summaries['Sample/BG_nums'] = bg_num self.global_step += 1 logits = rpn_logits[chosen_inds] return logits.contiguous(), labels.contiguous() def _sample_faster_rcnn_OHEM(self, rpn_logits, rpn_label, rpn_prob, rpn_boxes, rpn_batch_size=256, rpn_fg_fraction=0.5): rpn_prob.detach() fg_inds = rpn_label.data.ge(1).nonzero().view(-1) fg_num = fg_inds.numel() fg_num_ = min(int(rpn_batch_size * rpn_fg_fraction), fg_num) if fg_num_ > 0: inds = torch.randperm(fg_num)[:fg_num_] fg_inds = fg_inds[inds] bg_inds = rpn_label.data.eq(0).nonzero().view(-1) bg_num = bg_inds.numel() bg_num_ = min(rpn_batch_size - fg_num_, bg_num) bg_probs, bg_logits, bg_inds = self._gather_bg(rpn_label, rpn_prob, rpn_logits, top_n=bg_num_) chosen_inds = torch.cat((fg_inds, bg_inds), dim=0) labels = rpn_label[chosen_inds] logits = rpn_logits[chosen_inds] all_labels = torch.zeros_like(rpn_label) - 1 all_labels[fg_inds] = 1 all_labels[bg_inds] = 0 if self.global_step % cfg.log_image == 0 and fg_num > 1: self._score_summaries['Sample/FG_nums_total'] = fg_num self._score_summaries['Sample/BG_nums_total'] = bg_num self._score_summaries['Sample/FG_nums_train'] = fg_num_ self._score_summaries['Sample/BG_nums_train'] = bg_num_ self.global_step += 1 return logits.contiguous(), labels.contiguous(), all_labels def _sample_faster_rcnn(self, rpn_logits, rpn_label, rpn_prob, rpn_boxes, rpn_batch_size=256, rpn_fg_fraction=0.5): rpn_prob.detach() fg_inds = rpn_label.data.ge(1).nonzero().view(-1) fg_num = fg_inds.numel() fg_num_ = min(int(rpn_batch_size * rpn_fg_fraction), fg_num) if fg_num_ > 0: inds = torch.randperm(fg_num)[:fg_num_] fg_inds = fg_inds[inds] bg_inds = rpn_label.data.eq(0).nonzero().view(-1) bg_num = bg_inds.numel() bg_num_ = min(rpn_batch_size - fg_num_, bg_num) if bg_num_ > 0: inds = torch.randperm(bg_num)[:bg_num_] bg_inds = bg_inds[inds] chosen_inds = torch.cat((fg_inds, bg_inds), dim=0) labels = rpn_label[chosen_inds] logits = rpn_logits[chosen_inds] all_labels = torch.zeros_like(rpn_label) - 1 all_labels[fg_inds] = 1 all_labels[bg_inds] = 0 if self.global_step % cfg.log_image == 0 and fg_num > 1: self._score_summaries['Sample/FG_nums_total'] = fg_num self._score_summaries['Sample/BG_nums_total'] = bg_num self._score_summaries['Sample/FG_nums_train'] = fg_num_ self._score_summaries['Sample/BG_nums_train'] = bg_num_ self.global_step += 1 return logits.contiguous(), labels.contiguous(), all_labels def _sample_valid(self, rpn_logits, rpn_labels): valid_inds = rpn_labels.data.ge(0).nonzero().view(-1) logits, labels = rpn_logits[valid_inds], rpn_labels[valid_inds] return logits.contiguous(), labels.contiguous() def _decode_and_choose_top_n_stage1(self, rpn_box, rpn_prob, anchors, top_n=1000): objness = self._objectness(rpn_prob) _, inds = objness.sort(dim=0, descending=True) inds = inds[:top_n] selected_boxes = rpn_box[inds] selected_probs = rpn_prob[inds] anchor_ids = inds % anchors.size(0) selected_anchors = anchors[anchor_ids] selected_boxes = decoding_box(selected_boxes, selected_anchors, box_encoding=cfg.rpn_box_encoding) selected_img_ids = inds / anchors.size(0) return selected_boxes, selected_probs, selected_img_ids, selected_anchors def _decoding_and_thresholding_stage1(self, rpn_box, rpn_prob, anchors, score_threshold=0.3, max_dets=100): selected_boxes, 
selected_probs, selected_img_ids, selected_anchors = self._decode_and_choose_top_n_stage1(rpn_box, rpn_prob, anchors, top_n=max_dets * 3) objness = self._objectness(selected_probs) inds = objness.data.ge(score_threshold).nonzero().view(-1) if inds.numel() == 0: _, inds = objness.sort(dim=0, descending=True) inds = inds[:1] selected_boxes = selected_boxes[inds] selected_probs = selected_probs[inds] selected_img_ids = selected_img_ids[inds] selected_anchors = selected_anchors[inds] return selected_boxes, selected_probs, selected_img_ids, selected_anchors @staticmethod def _apply_nms_in_batch(boxes, probs, img_ids, anchors, activation, overlap_threshold=0.5): objness = probs.max(dim=1)[0] if activation == 'sigmoid' else 1. - probs[:, 0] nmax = img_ids.max().cpu().data.numpy() nmin = img_ids.min().cpu().data.numpy() all_keeps = [] for i in range(nmin, nmax + 1): inds = img_ids.data.eq(i).nonzero().view(-1) if inds.numel() > 0: keeps = apply_nms(boxes[inds][:, :4], objness[inds], overlap_threshold=overlap_threshold) all_keeps.append(inds[keeps]) all_keeps = torch.cat(all_keeps, dim=0) if len(all_keeps) > 1 else all_keeps[0] return boxes[all_keeps], probs[all_keeps], img_ids[all_keeps], anchors[all_keeps] @staticmethod def to_Dets(boxes, probs, img_ids): boxes, probs, img_ids = everything2numpy([boxes, probs, img_ids]) Dets = [] for i in range(0, cfg.batch_size): inds = np.where(img_ids == i)[0] probs_ = probs[inds] boxes_ = boxes[inds] if probs_.shape[1] == 2: cls_ids = np.ones((probs_.shape[0], ), dtype=np.int32) cls_probs = probs_[:, 1] else: cls_ids = probs_[:, 1:].argmax(axis=1) + 1 cls_probs = probs_[np.arange(probs_.shape[0]), cls_ids] dets = np.concatenate((boxes_.reshape(-1, 4), cls_probs[:, np.newaxis], cls_ids[:, np.newaxis]), axis=1) Dets.append(dets) return Dets @staticmethod def to_Dets_sigmoid(boxes, probs, img_ids): boxes, probs, img_ids = everything2numpy([boxes, probs, img_ids]) Dets = [] for i in range(0, cfg.batch_size): inds = np.where(img_ids == i)[0] probs_ = probs[inds] boxes_ = boxes[inds] if probs_.ndim == 1 or probs_.shape[1] == 1: cls_ids = np.ones((probs_.shape[0], ), dtype=np.int32) cls_probs = probs_.view(-1) else: cls_ids = probs_.argmax(axis=1) + 1 cls_probs = probs_.max(axis=1) dets = np.concatenate((boxes_.reshape(-1, 4), cls_probs[:, np.newaxis], cls_ids[:, np.newaxis]), axis=1) Dets.append(dets) return Dets @staticmethod def to_Dets2(boxes, probs, img_ids, score_threshold=0.1): boxes, probs, img_ids = everything2numpy([boxes, probs, img_ids]) Dets = [] for i in range(0, cfg.batch_size): inds = np.where(img_ids == i)[0] probs_ = probs[inds] boxes_ = boxes[inds] if probs_.shape[1] == 2: cls_ids = np.ones((probs_.shape[0], ), dtype=np.int32) cls_probs = probs_[:, 1] dets = np.concatenate((boxes_.reshape(-1, 4), cls_probs[:, np.newaxis], cls_ids[:, np.newaxis]), axis=1) else: d0_inds, d1_inds = np.where(probs_[:, 1:] > score_threshold) if d0_inds.size > 0: cls_ids = d1_inds + 1 cls_probs = probs_[d0_inds, cls_ids] boxes_ = boxes_[d0_inds, :] dets = np.concatenate((boxes_.reshape(-1, 4), cls_probs[:, np.newaxis], cls_ids[:, np.newaxis]), axis=1) else: cls_ids = probs_[:, 1:].argmax(axis=1) + 1 cls_probs = probs_[np.arange(probs_.shape[0]), cls_ids] dets = np.concatenate((boxes_.reshape(-1, 4), cls_probs[:, np.newaxis], cls_ids[:, np.newaxis]), axis=1) Dets.append(dets) return Dets @staticmethod def to_Dets2_sigmoid(boxes, probs, img_ids, score_threshold=0.1): boxes, probs, img_ids = everything2numpy([boxes, probs, img_ids]) Dets = [] for i in range(0, 
cfg.batch_size): inds = np.where(img_ids == i)[0] probs_ = probs[inds] boxes_ = boxes[inds] if probs_.ndim == 1 or probs_.shape[1] == 1: cls_ids = np.ones((probs_.shape[0], ), dtype=np.int32) cls_probs = probs_.view(-1) dets = np.concatenate((boxes_.reshape(-1, 4), cls_probs[:, np.newaxis], cls_ids[:, np.newaxis]), axis=1) else: d0_inds, d1_inds = np.where(probs_ > score_threshold) if d0_inds.size > 0: cls_ids = d1_inds + 1 cls_probs = probs_[d0_inds, d1_inds] boxes_ = boxes_[d0_inds, :] dets = np.concatenate((boxes_.reshape(-1, 4), cls_probs[:, np.newaxis], cls_ids[:, np.newaxis]), axis=1) else: cls_ids = probs_.argmax(axis=1) + 1 cls_probs = probs_[np.arange(probs_.shape[0]), cls_ids - 1] dets = np.concatenate((boxes_.reshape(-1, 4), cls_probs[:, np.newaxis], cls_ids[:, np.newaxis]), axis=1) Dets.append(dets) return Dets def get_final_results(self, outputs, anchors, **kwargs): pass def get_final_results_stage1(self, rpn_box, rpn_prob, anchors, score_threshold=0.1, max_dets=100, overlap_threshold=0.5): selected_boxes, selected_probs, selected_img_ids, selected_anchors = self._decoding_and_thresholding_stage1(rpn_box, rpn_prob, anchors, score_threshold=score_threshold, max_dets=max_dets * 3) selected_boxes, selected_probs, selected_img_ids, selected_anchors = self._apply_nms_in_batch(selected_boxes, selected_probs, selected_img_ids, selected_anchors, activation=self.rpn_activation, overlap_threshold=overlap_threshold) if self.rpn_activation == 'softmax': Dets = self.to_Dets2(selected_boxes, selected_probs, selected_img_ids, score_threshold) elif self.rpn_activation == 'sigmoid': Dets = self.to_Dets2_sigmoid(selected_boxes, selected_probs, selected_img_ids, score_threshold) else: raise ValueError('Unknown activation function %s' % self.rpn_activation) return Dets def get_pos_anchors(self, score_threshold=0.1, max_dets=100): _, selected_probs, selected_img_ids, selected_anchors = self._decoding_and_thresholding_stage1(score_threshold=score_threshold, max_dets=max_dets) if self.rpn_activation == 'softmax': Dets = self.to_Dets(selected_anchors, selected_probs, selected_img_ids) elif self.rpn_activation == 'sigmoid': Dets = self.to_Dets_sigmoid(selected_anchors, selected_probs, selected_img_ids) else: raise ValueError('Unknown activation function %s' % self.rpn_activation) return Dets def _to_one_hot(self, y, num_classes): c = num_classes + 1 if self.rpn_activation == 'sigmoid' else num_classes y_ = torch.FloatTensor(y.size()[0], c).zero_() y_ = y_.scatter_(1, y.view(-1, 1).data.cpu(), 1.0).cuda() if self.rpn_activation == 'sigmoid': y_ = y_[:, 1:] if y.is_cuda: y_ = y_.cuda() return y_ def de_frozen_backbone(self): self.backbone.de_frozen() def _add_scalar_summary(self, key, tensor): if isinstance(tensor, torch.Tensor): return tbx.summary.scalar(key + '/L1', torch.abs(tensor).mean().data.cpu().numpy()) elif isinstance(tensor, float) or isinstance(tensor, int): return tbx.summary.scalar(key, tensor) def _add_hist_summary(self, key, tensor): return tbx.summary.histogram(key, tensor.data.cpu().numpy(), bins='auto')
Apache License 2.0
clovaai/ext_portrait_segmentation
models/SINet.py
S2module.forward
python
def forward(self, input):
    output1 = self.c1(input)
    output1 = channel_shuffle(output1, self.group_n)
    for i in range(self.group_n):
        var_name = 'd{}'.format(i + 1)
        result_d = self.__dict__["_modules"][var_name](output1)
        if i == 0:
            combine = result_d
        else:
            combine = torch.cat([combine, result_d], 1)
    if self.add:
        combine = input + combine
    output = self.BR(combine)
    return output
:param input: input feature map :return: transformed feature map
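A small shape-check sketch of the module this forward belongs to, using the constructor signature shown in the surrounding file (the channel counts, spatial size and config values are arbitrary):

    import torch
    from models.SINet import S2module  # import path assumed from this record's function_path

    block = S2module(nIn=48, nOut=48, add=True, config=[[3, 1], [5, 2]])
    x = torch.randn(2, 48, 56, 56)  # N, C, H, W
    y = block(x)                    # grouped 1x1 conv -> channel shuffle -> parallel S2blocks -> residual add
    assert y.shape == x.shape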
https://github.com/clovaai/ext_portrait_segmentation/blob/9bc1bada1cb7bd17a3a80a2964980f4b4befef5b/models/SINet.py#L326-L347
import torch import torch.nn as nn import torch.nn.functional as F BN_moment = 0.1 def channel_shuffle(x, groups): batchsize, num_channels, height, width = x.data.size() channels_per_group = num_channels // groups x = x.view(batchsize, groups, channels_per_group, height, width) x = torch.transpose(x, 1, 2).contiguous() x = x.view(batchsize, -1, height, width) return x class CBR(nn.Module): def __init__(self, nIn, nOut, kSize, stride=1): super().__init__() padding = int((kSize - 1) / 2) self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False) self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum= BN_moment) self.act = nn.PReLU(nOut) def forward(self, input): output = self.conv(input) output = self.bn(output) output = self.act(output) return output class separableCBR(nn.Module): def __init__(self, nIn, nOut, kSize, stride=1): super().__init__() padding = int((kSize - 1) / 2) self.conv = nn.Sequential( nn.Conv2d(nIn, nIn, (kSize, kSize), stride=stride, padding=(padding, padding), groups=nIn, bias=False), nn.Conv2d(nIn, nOut, kernel_size=1, stride=1, bias=False), ) self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum= BN_moment) self.act = nn.PReLU(nOut) def forward(self, input): output = self.conv(input) output = self.bn(output) output = self.act(output) return output class SqueezeBlock(nn.Module): def __init__(self, exp_size, divide=4.0): super(SqueezeBlock, self).__init__() if divide > 1: self.dense = nn.Sequential( nn.Linear(exp_size, int(exp_size / divide)), nn.PReLU(int(exp_size / divide)), nn.Linear(int(exp_size / divide), exp_size), nn.PReLU(exp_size), ) else: self.dense = nn.Sequential( nn.Linear(exp_size, exp_size), nn.PReLU(exp_size) ) def forward(self, x): batch, channels, height, width = x.size() out = torch.nn.functional.avg_pool2d(x, kernel_size=[height, width]).view(batch, -1) out = self.dense(out) out = out.view(batch, channels, 1, 1) return out * x class SEseparableCBR(nn.Module): def __init__(self, nIn, nOut, kSize, stride=1, divide=2.0): super().__init__() padding = int((kSize - 1) / 2) self.conv = nn.Sequential( nn.Conv2d(nIn, nIn, (kSize, kSize), stride=stride, padding=(padding, padding), groups=nIn, bias=False), SqueezeBlock(nIn,divide=divide), nn.Conv2d(nIn, nOut, kernel_size=1, stride=1, bias=False), ) self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum= BN_moment) self.act = nn.PReLU(nOut) def forward(self, input): output = self.conv(input) output = self.bn(output) output = self.act(output) return output class BR(nn.Module): def __init__(self, nOut): super().__init__() self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum= BN_moment) self.act = nn.PReLU(nOut) def forward(self, input): output = self.bn(input) output = self.act(output) return output class CB(nn.Module): def __init__(self, nIn, nOut, kSize, stride=1): super().__init__() padding = int((kSize - 1) / 2) self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False) self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum= BN_moment) def forward(self, input): output = self.conv(input) output = self.bn(output) return output class C(nn.Module): def __init__(self, nIn, nOut, kSize, stride=1,group=1): super().__init__() padding = int((kSize - 1) / 2) self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False, groups=group) def forward(self, input): output = self.conv(input) return output class S2block(nn.Module): def __init__(self, nIn, nOut, config): super().__init__() kSize = config[0] avgsize = 
config[1] self.resolution_down = False if avgsize >1: self.resolution_down = True self.down_res = nn.AvgPool2d(avgsize, avgsize) self.up_res = nn.UpsamplingBilinear2d(scale_factor=avgsize) self.avgsize = avgsize padding = int((kSize - 1) / 2 ) self.conv = nn.Sequential( nn.Conv2d(nIn, nIn, kernel_size=(kSize, kSize), stride=1, padding=(padding, padding), groups=nIn, bias=False), nn.BatchNorm2d(nIn, eps=1e-03, momentum=BN_moment)) self.act_conv1x1 = nn.Sequential( nn.PReLU(nIn), nn.Conv2d(nIn, nOut, kernel_size=1, stride=1, bias=False), ) self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum=BN_moment) def forward(self, input): if self.resolution_down: input = self.down_res(input) output = self.conv(input) output = self.act_conv1x1(output) if self.resolution_down: output = self.up_res(output) return self.bn(output) class S2module(nn.Module): def __init__(self, nIn, nOut, add=True, config= [[3,1],[5,1]]): super().__init__() print("This module has " + str(config)) group_n = len(config) n = int(nOut / group_n) n1 = nOut - group_n * n self.c1 = C(nIn, n, 1, 1, group=group_n) for i in range(group_n): var_name = 'd{}'.format(i + 1) if i == 0: self.__dict__["_modules"][var_name] = S2block(n, n + n1, config[i]) else: self.__dict__["_modules"][var_name] = S2block(n, n, config[i]) self.BR = BR(nOut) self.add = add self.group_n = group_n
MIT License
ccpgames/pypackage
pypackage/context.py
ManifestContext.__enter__
python
def __enter__(self):
    keys = ["package_data", "data_files"]
    if not any([getattr(self.config, key, None) for key in keys]):
        return self

    add_to_manifest = []
    for _, files in getattr(self.config, "package_data", {}).items():
        for file_ in files:
            include_line = "include {}".format(file_)
            if include_line not in self.previously_existing:
                add_to_manifest.append(include_line)

    try:
        for _, files in getattr(self.config, "data_files", []):
            for file_ in files:
                include_line = "include {}".format(file_)
                if include_line not in self.previously_existing:
                    add_to_manifest.append(include_line)
    except ValueError:
        raise SystemExit("Malformed data_files: {!r}".format(
            getattr(self.config, "data_files", [])
        ))

    if add_to_manifest:
        self._clean = True
        with open("MANIFEST.in", "a") as openmanifest:
            openmanifest.write("{}\n".format("\n".join(add_to_manifest)))

    return self
Write the MANIFEST.in file if there are data files in use.
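A usage sketch of the context manager, assuming a config object whose package_data attribute is populated (the attribute values are made up, and the class's __exit__ behaviour is not shown in this excerpt):

    from pypackage.context import ManifestContext  # module path assumed from this record

    class FakeConfig(object):
        package_data = {"mypkg": ["data/defaults.json"]}
        data_files = []

    class FakeOptions(object):
        pass

    with ManifestContext(FakeConfig(), FakeOptions()):
        # While inside the block, MANIFEST.in contains "include data/defaults.json"
        # (appended only if that include line was not already present).
        pass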
https://github.com/ccpgames/pypackage/blob/fa414bece96fa38a2471e380881f5ae4b20794de/pypackage/context.py#L58-L88
import os class SetupContext(object): def __init__(self, config, options): self.config = config self.options = options def __enter__(self): with open("setup.py", "w") as opensetup: opensetup.write("{}\n".format(str(self.config))) return self def __exit__(self, *args): if not any([self.options.interactive, self.options.metadata, self.options.setup]): os.remove("setup.py") class ManifestContext(object): def __init__(self, config, options): self.config = config self.options = options self._clean = False try: with open("MANIFEST.in", "r") as openmanifest: self.previously_existing = openmanifest.read().splitlines() except: self.previously_existing = []
MIT License
ducted/duct
duct/service.py
DuctService.setupSources
python
def setupSources(self, config):
    sources = config.get('sources', [])

    for source in sources:
        src = self.createSource(source)
        self.setupTriggers(source, src)
        self.sources.append(src)
Sets up source objects from the given config
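For orientation, a hedged sketch of the config shape this method consumes, inferred from createSource and setupTriggers in the context below; the dotted source path and thresholds are placeholders, not real duct modules or rules.

from duct.service import DuctService

config = {
    'sources': [{
        'source': 'duct.sources.generic.Example',  # hypothetical dotted path
        'interval': 10.0,
        'critical': {'.*': '> 4'},                  # placeholder trigger rule
    }]
}
service = DuctService(config)  # __init__ calls setupSources(config) for us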
https://github.com/ducted/duct/blob/a175700a1794c324a1799cee43633eca16f0f9cc/duct/service.py#L144-L152
import time import sys import os import importlib import re import copy from twisted.application import service from twisted.internet import task, reactor, defer from twisted.python import log class DuctService(service.Service): def __init__(self, config): self.running = 0 self.sources = [] self.lastEvents = {} self.outputs = {} self.evCache = {} self.critical = {} self.warn = {} self.hostConnectorCache = {} self.eventCounter = 0 self.factory = None self.protocol = None self.watchdog = None self.config = config if os.path.exists('/var/lib/duct'): sys.path.append('/var/lib/duct') self.debug = float(self.config.get('debug', False)) self.ttl = float(self.config.get('ttl', 60.0)) self.stagger = float(self.config.get('stagger', 0.2)) self.server = self.config.get('server', None) self.port = int(self.config.get('port', 5555)) self.proto = self.config.get('proto', 'tcp') self.inter = self.config.get('interval', 60.0) if self.debug: print("config:", repr(config)) self.setupSources(self.config) def setupOutputs(self, config): if self.server: if self.proto == 'tcp': defaultOutput = { 'output': 'duct.outputs.riemann.RiemannTCP', 'server': self.server, 'port': self.port } else: defaultOutput = { 'output': 'duct.outputs.riemann.RiemannUDP', 'server': self.server, 'port': self.port } outputs = config.get('outputs', [defaultOutput]) else: outputs = config.get('outputs', []) for output in outputs: if 'debug' not in output: output['debug'] = self.debug cl = output['output'].split('.')[-1] path = '.'.join(output['output'].split('.')[:-1]) outputObj = getattr( importlib.import_module(path), cl)(output, self) name = output.get('name', None) if name in self.outputs: self.outputs[name].append(outputObj) else: self.outputs[name] = [outputObj] reactor.callLater(0, outputObj.createClient) def createSource(self, source): if source.get('path'): path = source['path'] if path not in sys.path: sys.path.append(path) cl = source['source'].split('.')[-1] path = '.'.join(source['source'].split('.')[:-1]) sourceObj = getattr(importlib.import_module(path), cl) if 'debug' not in source: source['debug'] = self.debug if 'ttl' not in source.keys(): source['ttl'] = self.ttl if 'interval' not in source.keys(): source['interval'] = self.inter return sourceObj(source, self.sendEvent, self) def setupTriggers(self, source, sobj): if source.get('critical'): self.critical[sobj] = [(re.compile(key), val) for key, val in source['critical'].items()] if source.get('warning'): self.warn[sobj] = [(re.compile(key), val) for key, val in source['warning'].items()]
MIT License
tektoncd/experimental
sdk/python/tekton_pipeline/models/pod_template.py
PodTemplate.runtime_class_name
python
def runtime_class_name(self):
    return self._runtime_class_name
Gets the runtime_class_name of this PodTemplate.  # noqa: E501

RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14. # noqa: E501

:return: The runtime_class_name of this PodTemplate.  # noqa: E501
:rtype: str
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/pod_template.py#L366-L374
import pprint import re import six from tekton_pipeline.configuration import Configuration class PodTemplate(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'affinity': 'V1Affinity', 'automount_service_account_token': 'bool', 'dns_config': 'V1PodDNSConfig', 'dns_policy': 'str', 'enable_service_links': 'bool', 'host_aliases': 'list[V1HostAlias]', 'host_network': 'bool', 'image_pull_secrets': 'list[V1LocalObjectReference]', 'node_selector': 'dict(str, str)', 'priority_class_name': 'str', 'runtime_class_name': 'str', 'scheduler_name': 'str', 'security_context': 'V1PodSecurityContext', 'tolerations': 'list[V1Toleration]', 'volumes': 'list[V1Volume]' } attribute_map = { 'affinity': 'affinity', 'automount_service_account_token': 'automountServiceAccountToken', 'dns_config': 'dnsConfig', 'dns_policy': 'dnsPolicy', 'enable_service_links': 'enableServiceLinks', 'host_aliases': 'hostAliases', 'host_network': 'hostNetwork', 'image_pull_secrets': 'imagePullSecrets', 'node_selector': 'nodeSelector', 'priority_class_name': 'priorityClassName', 'runtime_class_name': 'runtimeClassName', 'scheduler_name': 'schedulerName', 'security_context': 'securityContext', 'tolerations': 'tolerations', 'volumes': 'volumes' } def __init__(self, affinity=None, automount_service_account_token=None, dns_config=None, dns_policy=None, enable_service_links=None, host_aliases=None, host_network=None, image_pull_secrets=None, node_selector=None, priority_class_name=None, runtime_class_name=None, scheduler_name=None, security_context=None, tolerations=None, volumes=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._affinity = None self._automount_service_account_token = None self._dns_config = None self._dns_policy = None self._enable_service_links = None self._host_aliases = None self._host_network = None self._image_pull_secrets = None self._node_selector = None self._priority_class_name = None self._runtime_class_name = None self._scheduler_name = None self._security_context = None self._tolerations = None self._volumes = None self.discriminator = None if affinity is not None: self.affinity = affinity if automount_service_account_token is not None: self.automount_service_account_token = automount_service_account_token if dns_config is not None: self.dns_config = dns_config if dns_policy is not None: self.dns_policy = dns_policy if enable_service_links is not None: self.enable_service_links = enable_service_links if host_aliases is not None: self.host_aliases = host_aliases if host_network is not None: self.host_network = host_network if image_pull_secrets is not None: self.image_pull_secrets = image_pull_secrets if node_selector is not None: self.node_selector = node_selector if priority_class_name is not None: self.priority_class_name = priority_class_name if runtime_class_name is not None: self.runtime_class_name = runtime_class_name if scheduler_name is not None: self.scheduler_name = scheduler_name if security_context is not None: self.security_context = security_context if tolerations is not None: self.tolerations = tolerations if volumes is not None: self.volumes = volumes @property def affinity(self): return self._affinity @affinity.setter def affinity(self, affinity): self._affinity = affinity @property def 
automount_service_account_token(self): return self._automount_service_account_token @automount_service_account_token.setter def automount_service_account_token(self, automount_service_account_token): self._automount_service_account_token = automount_service_account_token @property def dns_config(self): return self._dns_config @dns_config.setter def dns_config(self, dns_config): self._dns_config = dns_config @property def dns_policy(self): return self._dns_policy @dns_policy.setter def dns_policy(self, dns_policy): self._dns_policy = dns_policy @property def enable_service_links(self): return self._enable_service_links @enable_service_links.setter def enable_service_links(self, enable_service_links): self._enable_service_links = enable_service_links @property def host_aliases(self): return self._host_aliases @host_aliases.setter def host_aliases(self, host_aliases): self._host_aliases = host_aliases @property def host_network(self): return self._host_network @host_network.setter def host_network(self, host_network): self._host_network = host_network @property def image_pull_secrets(self): return self._image_pull_secrets @image_pull_secrets.setter def image_pull_secrets(self, image_pull_secrets): self._image_pull_secrets = image_pull_secrets @property def node_selector(self): return self._node_selector @node_selector.setter def node_selector(self, node_selector): self._node_selector = node_selector @property def priority_class_name(self): return self._priority_class_name @priority_class_name.setter def priority_class_name(self, priority_class_name): self._priority_class_name = priority_class_name @property
Apache License 2.0
mblayman/homeschool
homeschool/courses/models.py
Course.belongs_to
python
def belongs_to(self, user):
    grade_levels = GradeLevel.objects.filter(
        school_year__school__admin=user
    ).values_list("id", flat=True)
    return Course.objects.filter(id=self.id, grade_levels__in=grade_levels).exists()
Check if the course belongs to the user.
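A hedged sketch of how such a check is typically wired into a Django view; the view name and URL parameter are hypothetical and not taken from the homeschool project itself.

from django.http import Http404
from homeschool.courses.models import Course

def course_detail(request, course_id):
    course = Course.objects.get(id=course_id)
    if not course.belongs_to(request.user):
        raise Http404()
    ...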
https://github.com/mblayman/homeschool/blob/2647b958051cab8c55eda62478af51724bcb02bc/homeschool/courses/models.py#L51-L56
from typing import Optional from django.conf import settings from django.db import models from django.utils.functional import cached_property from hashid_field import HashidAutoField from ordered_model.models import OrderedModel, OrderedModelQuerySet from homeschool.core.models import DaysOfWeekModel from homeschool.schools.models import GradeLevel from homeschool.users.models import User from .exceptions import NoSchoolYearError class Course(DaysOfWeekModel): id = HashidAutoField(primary_key=True, salt=f"course{settings.HASHID_FIELD_SALT}") name = models.CharField(max_length=256) grade_levels = models.ManyToManyField( "schools.GradeLevel", related_name="courses", through="courses.GradeLevelCoursesThroughModel", ) default_task_duration = models.IntegerField( default=30, help_text="The default task duration in minutes" ) is_active = models.BooleanField( default=True, help_text="Is this course active in the schedule?" ) @property def is_running(self): return self.days_of_week != self.NO_DAYS @cached_property def has_many_grade_levels(self): return self.grade_levels.count() > 1 @cached_property def school_year(self): grade_level = self.grade_levels.select_related("school_year").first() if grade_level: return grade_level.school_year raise NoSchoolYearError("The course has no school year.")
MIT License
sassoftware/python-sasctl
src/sasctl/_services/model_repository.py
ModelRepository.default_repository
python
def default_repository(cls):
    all_repos = cls.list_repositories()
    if all_repos:
        repo = all_repos[0]
        for r in all_repos:
            if r.name in ('Repository 1', 'Public'):
                repo = r
                break
        return repo
Get the built-in default repository.

Returns
-------
RestObj
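A hedged usage sketch, assuming an authenticated sasctl session is already established; the import path simply mirrors the function_path above.

from sasctl._services.model_repository import ModelRepository

repo = ModelRepository.default_repository()
print(repo.name)  # 'Public' or 'Repository 1' when present, otherwise the first repository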
https://github.com/sassoftware/python-sasctl/blob/ab6387b86a26f6b0b08fbb36d0c94fe18be59b5f/src/sasctl/_services/model_repository.py#L433-L454
from warnings import warn from .service import Service from ..core import current_session, get, delete, sasctl_command, HTTPError FUNCTIONS = { 'Analytical', 'Classification', 'Clustering', 'Forecasting', 'Prediction', 'Text categorization', 'Text extraction', 'Text sentiment', 'Text topics', 'Sentiment', } def _get_filter(x): return dict(properties='(name, %s)' % x) class ModelRepository(Service): _SERVICE_ROOT = '/modelRepository' list_repositories, _, update_repository, delete_repository = Service._crud_funcs( '/repositories', 'repository', get_filter=_get_filter ) list_projects, get_project, update_project, delete_project = Service._crud_funcs( '/projects', 'project', get_filter=_get_filter ) list_models, get_model, update_model, delete_model = Service._crud_funcs( '/models', 'model', get_filter=_get_filter ) @classmethod def get_astore(cls, model): link = cls.get_model_link(model, 'analyticStore', refresh=True) if link is not None: return link.get('href') @classmethod def get_model_link(cls, model, rel, refresh=False): if isinstance(model, dict): link = cls.get_link(model, rel) if link is not None or not refresh: return link model = cls.get_model(model, refresh=refresh) return cls.get_link(model, rel) @classmethod def get_score_code(cls, model): link = cls.get_model_link(model, 'scoreCode', refresh=True) if link is not None: scorecode_uri = link.get('href') return get( scorecode_uri, headers={'Accept': 'text/vnd.sas.models.score.code.ds2package'}, ) @classmethod def get_model_contents(cls, model): link = cls.get_model_link(model, 'contents', refresh=True) contents = cls.request_link(link, 'contents') if isinstance(contents, list): return contents return [contents] @classmethod @sasctl_command('get', 'repositories') def get_repository(cls, repository, refresh=False): if isinstance(repository, dict) and all( k in repository for k in ('id', 'name') ): if refresh: repository = repository['id'] else: return repository if cls.is_uuid(repository): try: return cls.get('/repositories/{id}'.format(id=repository)) except HTTPError as e: if e.code != 403: raise e results = cls.list_repositories() for result in results: if result['name'] == str(repository) or result['id'] == str(repository): try: if cls.get_link(result, 'self'): return cls.request_link(result, 'self') id_ = result.get('id', result['name']) return cls.get('/repositories/{id}'.format(id=id_)) except HTTPError as e: if e.code != 403: raise e return result @classmethod def create_model( cls, model, project, description=None, modeler=None, function=None, algorithm=None, tool=None, score_code_type=None, training_table=None, event_prob_variable=None, event_target_value=None, is_champion=False, is_challenger=False, location=None, target_variable=None, is_retrainable=False, is_immutable=False, properties=None, input_variables=None, output_variables=None, ): properties = properties or {} if isinstance(model, str): model = {'name': model} p = cls.get_project(project) if p is None: raise ValueError("Unable to find project '%s'" % project) model['projectId'] = p['id'] model['modeler'] = modeler or model.get('modeler') or current_session().username model['description'] = description or model.get('description') model['function'] = function or model.get('function') model['algorithm'] = algorithm or model.get('algorithm') model['tool'] = tool or model.get('tool') model['champion'] = is_champion or model.get('champion') if is_champion: model['role'] = 'champion' elif is_challenger: model['role'] = 'challenger' model.setdefault( 'properties', 
[{'name': k, 'value': v} for k, v in properties.items()] ) model['scoreCodeType'] = score_code_type or model.get('scoreCodeType') model['trainTable'] = training_table or model.get('trainTable') model[ 'classificationEventProbabilityVariableName' ] = event_prob_variable or model.get( 'classificationEventProbabilityVariableName' ) model['classificationTargetEventValue'] = event_target_value or model.get( 'classificationTargetEventValue' ) model['location'] = location or model.get('location') model['targetVariable'] = target_variable or model.get('targetVariable') model['retrainable'] = is_retrainable or model.get('retrainable') model['immutable'] = is_immutable or model.get('immutable') model['inputVariables'] = input_variables or model.get('inputVariables', []) model['outputVariables'] = output_variables or model.get('outputVariables', []) model['version'] = '2' return cls.post( '/models', json=model, headers={'Content-Type': 'application/vnd.sas.models.model+json'}, ) @classmethod def add_model_content(cls, model, file, name, role=None, content_type=None): if cls.is_uuid(model): id_ = model elif isinstance(model, dict) and 'id' in model: id_ = model['id'] else: model = cls.get_model(model) id_ = model['id'] if content_type is None and isinstance(file, bytes): content_type = 'application/octet-stream' if content_type is not None: files = {name: (name, file, content_type)} else: files = {name: file.read()} metadata = {'role': role, 'name': name} try: return cls.post( '/models/{}/contents'.format(id_), files=files, data=metadata ) except HTTPError as e: if e.code == 409: model_contents = cls.get_model_contents(id_) for item in model_contents: if item.name == name: cls.delete('/models/{}/contents/{}'.format(id_, item.id)) return cls.post( '/models/{}/contents'.format(id_), files=files, data=metadata, ) else: raise e @classmethod
Apache License 2.0
takelab/podium
podium/experimental/models/impl/simple_trainers.py
SimpleTrainer._check_kwargs
python
def _check_kwargs(self, **kwargs):
    if self.MAX_EPOCH_KEY not in kwargs:
        raise ValueError(
            f"Missing training parameter: {self.MAX_EPOCH_KEY} "
            "(used for determining stop criterion)"
        )
Method checks if kwargs contains necessary training parameters.

Parameters
----------
kwargs : dict
    training parameters
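A hedged illustration of the guard. Whether SimpleTrainer can be constructed without arguments is an assumption made only for this sketch; the check itself inspects nothing but the keyword names.

from podium.experimental.models.impl.simple_trainers import SimpleTrainer

trainer = SimpleTrainer()              # assumes no constructor arguments are needed here
trainer._check_kwargs(max_epoch=5)     # ok: MAX_EPOCH_KEY is present
trainer._check_kwargs(batch_size=32)   # raises ValueError: missing 'max_epoch'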
https://github.com/takelab/podium/blob/11ef32d889e483d4d77a44b61e0b5da956ee3a54/podium/experimental/models/impl/simple_trainers.py#L39-L52
from podium.datasets import Iterator from podium.experimental.models.trainer import AbstractTrainer class SimpleTrainer(AbstractTrainer): MAX_EPOCH_KEY = "max_epoch" def train( self, model, dataset, feature_transformer, label_transform_fun, max_epoch, iterator=None, ): if iterator is None: iterator = Iterator() for _ in range(max_epoch): for batch in iterator(dataset): x = feature_transformer.transform(batch) y = label_transform_fun(batch) model.fit(X=x, y=y)
BSD 3-Clause New or Revised License
hpac/elaps
elaps/signature.py
ArgWithMin.__cmp__
python
def __cmp__(self, other):
    return Arg.__cmp__(self, other) or cmp((self.minstr, self.maxstr),
                                           (other.minstr, other.maxstr))
Compare with other.
https://github.com/hpac/elaps/blob/390bbe8cbeb056ef57adbc91cdf5bcd1f7cbe187/elaps/signature.py#L414-L417
import numbers from elaps import symbolic named_attributes = ("lower", "upper", "symm", "herm", "spd", "hpd", "work") datatype_prefixes = { "i": "integer", "s": "single precision", "d": "double precision", "c": "single precision complex", "z": "double precision complex" } class Signature(list): def __init__(self, *args, **kwargs): list.__init__(self, args) self.flopsstr = None self.flops = None if not isinstance(self[0], Name): self[0] = Name(self[0]) self.init_lambdas(kwargs) self.argtypelookup = {} def init_lambdas(self, kwargs): lambdaargs = ", ".join(arg.name for arg in self) if "complexity" in kwargs and "flops" not in kwargs: kwargs["flops"] = kwargs["complexity"] if "flops" in kwargs: self.flopsstr = kwargs["flops"] self.flops = eval("lambda %s: %s" % (lambdaargs, kwargs["flops"]), symbolic.__dict__) for arg in self: arg.min = None arg.max = None if isinstance(arg, ArgWithMin): if arg.minstr: arg.min = eval("lambda %s: %s" % (lambdaargs, arg.minstr), symbolic.__dict__) if arg.maxstr: arg.max = eval("lambda %s: %s" % (lambdaargs, arg.maxstr), symbolic.__dict__) arg.properties = lambda *args: () if arg.propertiesstr: lambdarhs = arg.propertiesstr for attrname in named_attributes: lambdarhs = lambdarhs.replace(attrname, repr(attrname)) arg.properties = eval("lambda %s: filter(None, (%s,))" % (lambdaargs, lambdarhs), symbolic.__dict__) self.check_lambdas() def check_lambdas(self): args = range(len(self)) if self.flops: try: self.flops(*args) except NameError as e: raise NameError("Unknown argument %r used in flops" % str(e).split("'")[1]) for arg in self: if arg.min: try: arg.min(*args) except NameError as e: raise NameError("Unknown argument %r used in min for %s" % (str(e).split("'")[1], arg)) if arg.max: try: arg.max(*args) except NameError as e: raise NameError("Unknown argument %r used in max for %s" % (str(e).split("'")[1], arg)) if arg.properties: try: arg.properties(*args) except NameError as e: raise NameError("Unknown argument or property %r " "used in properties for %s" % (str(e).split("'")[1], arg)) def __str__(self): return "%s(%s)" % (self[0], ", ".join(arg.name for arg in self[1:])) def __repr__(self): args = map(repr, [str(self[0])] + self[1:]) if self.flops: args.append("flops=%r" % self.flopsstr) return "%s(%s)" % (type(self).__name__, ", ".join(args)) def __call__(self, *args, **kwargs): if len(args) == 0: args = tuple(arg.default() for arg in self[1:]) return Call(self, *args, **kwargs) def __getattr__(self, name): try: return self[self.argpos(name)] except: pass return list.__getattr__(self, name) def argpos(self, name): for argid, arg in enumerate(self): if arg.name == name: return argid raise IndexError("Unknown argument: %s" % name) def argsbytype(self, type_, *types): if types: return sorted(set(self.argsbytype(type_) + self.argsbytype(*types))) if type_ not in self.argtypelookup: self.argtypelookup[type_] = [i for i, arg in enumerate(self) if isinstance(arg, type_)] return self.argtypelookup[type_] def dataargs(self): return self.argsbytype(Data) def datatype(self): return self[self.dataargs()[0]].typename class BasicCall(list): def __init__(self, sig, *args): if not args: args = tuple("" if arg == "char*" else 0 for arg in sig[1:]) if len(sig) != 1 + len(args): raise TypeError("%s takes %d arguments (%d given)" % (sig[0], len(sig) - 1, len(args))) list.__init__(self, (str(sig[0]),) + args) self.__dict__["sig"] = sig def __str__(self): return "%s(%s)" % (self[0], ", ".join(map(str, self[1:]))) def __repr__(self): args = map(repr, [self.sig] + self[1:]) return 
"%s(%s)" % (type(self).__name__, ", ".join(args)) def __copy__(self): return type(self)(self.sig, *self[1:]) def copy(self): return self.__copy__() class Call(BasicCall): def __init__(self, sig, *args, **kwargs): if not isinstance(sig, Signature): raise TypeError("a Signature is required as first argument") BasicCall.__init__(self, sig, *args) for arg, val in kwargs.iteritems(): setattr(self, arg, val) def __getattr__(self, name): try: return self[self.sig.argpos(name)] except: pass return BasicCall.__getattr__(self, name) def __setattr__(self, name, value): try: self[self.sig.argpos(name)] = value return value except: pass list.__setattr__(self, name, value) def argdict(self): return dict((arg.name, val) for arg, val in zip(self.sig, self)) def restrict_once(self): l = list(self) for i, arg in enumerate(self.sig): if self[i] is not None and arg.min: try: self[i] = max(self[i], arg.min(*l)) except TypeError: pass if self[i] is not None and arg.max: try: self[i] = min(self[i], arg.max(*l)) except TypeError: pass def restrict(self): calls = [] while self[1:] not in calls: calls.append(self[1:]) self.restrict_once() def complete_once(self): l = list(self) for i, arg in enumerate(self.sig): if self[i] is None: if arg.min: try: self[i] = arg.min(*l) except TypeError: pass else: self[i] = arg.default() def complete(self): calls = [] while self[1:] not in calls: calls.append(self[1:]) self.complete_once() def properties(self, argid=None): if argid: return self.sig[argid].properties(*self) return tuple(arg.properties(*self) for arg in self.sig) def flops(self): if self.sig.flops is not None: return self.sig.flops(*self) return None def format_sampler(self): return [arg.format_sampler(val) for arg, val in zip(self.sig, self)] class Arg(object): class __metaclass__(type): def __repr__(cls): return cls.__name__ def __init__(self, name, attr=None): self.name = name self.propertiesstr = attr def __repr__(self): args = [self.name] if self.propertiesstr: args.append(self.propertiesstr) args = map(repr, args) return "%s(%s)" % (type(self).__name__, ", ".join(args)) def __str__(self): return str(self.name) def __cmp__(self, other): return cmp(type(self), type(other)) or cmp( (self.name, self.propertiesstr), (other.name, other.propertiesstr) ) @staticmethod def format_sampler(val): return val class Name(Arg): def __cmp__(self, other): if self.name == other: return 0 return Arg.__cmp__(self, other) def default(self): return self.name class Flag(Arg): def __init__(self, name, flags, attr=None): Arg.__init__(self, name, attr) self.flags = flags def __repr__(self): args = [self.name, self.flags] if self.propertiesstr: args.append(self.propertiesstr) args = map(repr, args) return "%s(%s)" % (type(self).__name__, ", ".join(args)) def __cmp__(self, other): return Arg.__cmp__(self, other) or cmp(self.flags, other.flags) def default(self): return self.flags[0] def _create_Flag(classname, defaultname, flags): def __init__(self, name=defaultname, attr=None): Flag.__init__(self, name, flags, attr) def __repr__(self): args = [] if self.name != defaultname: args.append(self.name) if self.propertiesstr: args.append(self.propertiesstr) args = map(repr, args) elif self.propertiesstr: args.append("attr=%r" % self.propertiesstr) return "%s(%s)" % (type(self).__name__, ", ".join(args)) globals()[classname] = type(classname, (Flag,), { "__init__": __init__, "__repr__": __repr__ }) _create_Flag("Side", "side", ("L", "R")) _create_Flag("Uplo", "uplo", ("L", "U")) _create_Flag("Trans", "trans", ("N", "T")) _create_Flag("cTrans", 
"trans", ("N", "T", "C")) _create_Flag("Diag", "diag", ("N", "U")) class ArgWithMin(Arg): def __init__(self, name, min=None, attr=None, max=None): Arg.__init__(self, name, attr) self.minstr = min self.maxstr = max def __repr__(self): args = [self.name] if self.minstr: args.append(self.minstr) if self.propertiesstr: if not self.minstr: args.append(None) args.append(self.propertiesstr) if self.maxstr: if not self.minstr: args.append(None) if not self.propertiesstr: args.append(None) args.append(self.maxstr) args = map(repr, args) return "%s(%s)" % (type(self).__name__, ", ".join(args))
BSD 3-Clause New or Revised License
ibm-security/ibmsecurity
ibmsecurity/isam/base/activation.py
get
python
def get(isamAppliance, id=None, check_mode=False, force=False):
    if force is True or check(isamAppliance, id) is True:
        return isamAppliance.invoke_get("Retrieve a specified activation offering",
                                        "/isam/capabilities/{0}/v1".format(id))

    return isamAppliance.create_return_object()
Retrieve a specified activation offering
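A hedged call sketch, assuming isamAppliance is an already-connected appliance object as used throughout ibmsecurity; the offering id and the shape of the returned dict are placeholders based on the surrounding module.

import ibmsecurity.isam.base.activation as activation

ret = activation.get(isamAppliance, id='wga')  # 'wga' is a placeholder offering id
print(ret['data'])                              # assumes the usual ibmsecurity return dict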
https://github.com/ibm-security/ibmsecurity/blob/da098f7d555e571a99a0d7cd47a51add483feb6f/ibmsecurity/isam/base/activation.py#L15-L23
import logging

import ibmsecurity.utilities.tools

logger = logging.getLogger(__name__)


def get_all(isamAppliance, check_mode=False, force=False):
    return isamAppliance.invoke_get("Retrieving activations",
                                    "/isam/capabilities/v1")
Apache License 2.0
sebascuri/hucrl
exps/inverted_pendulum/util.py
solve_mpo
python
def solve_mpo( dynamical_model, action_cost, num_iter, num_sim_steps, batch_size, num_gradient_steps, num_trajectories, num_action_samples, num_episodes, epsilon, epsilon_mean, epsilon_var, regularization, lr, ): reward_model = PendulumReward(action_cost) freeze_parameters(dynamical_model) value_function = NNValueFunction( dim_state=(2,), layers=[64, 64], biased_head=False, input_transform=StateTransform(), ) policy = NNPolicy( dim_state=(2,), dim_action=(1,), layers=[64, 64], biased_head=False, squashed_output=True, input_transform=StateTransform(), ) init_distribution = torch.distributions.Uniform( torch.tensor([-np.pi, -0.05]), torch.tensor([np.pi, 0.05]) ) mpo = MBMPO( dynamical_model, reward_model, policy, value_function, gamma=0.99, epsilon=epsilon, epsilon_mean=epsilon_mean, epsilon_var=epsilon_var, regularization=regularization, num_action_samples=num_action_samples, criterion=nn.MSELoss, ) optimizer = optim.Adam([p for p in mpo.parameters() if p.requires_grad], lr=lr) test_state = torch.tensor(np.array([np.pi, 0.0]), dtype=torch.get_default_dtype()) policy_losses, value_losses, kl_div, returns, entropy = [], [], [], [], [] model_rewards, trajectory = 0, None for _ in range(num_episodes): with gpytorch.settings.fast_pred_var(), gpytorch.settings.detach_test_caches(): vloss_, ploss_, kl_div_, return_, entropy_, = train_mpo( mpo, init_distribution, optimizer, num_iter=num_iter, num_trajectories=num_trajectories, num_simulation_steps=num_sim_steps, num_gradient_steps=num_gradient_steps, batch_size=batch_size, num_subsample=1, ) policy_losses += ploss_ value_losses += vloss_ returns += return_ entropy += entropy_ kl_div += kl_div_ test_policy_on_model( mpo.dynamical_model, mpo.reward_model, mpo.policy, test_state ) _, trajectory = test_policy_on_model( mpo.dynamical_model, mpo.reward_model, lambda x: ( mpo.policy(x)[0][: mpo.dynamical_model.dim_action], torch.zeros(1), ), test_state, policy_str="Expected Policy", ) model_rewards, _ = test_policy_on_model( mpo.dynamical_model, mpo.reward_model, mpo.policy, test_state ) environment = SystemEnvironment( InvertedPendulum(mass=0.3, length=0.5, friction=0.005, step_size=1 / 80), reward=reward_model, ) environment_rewards, trajectory = test_policy_on_environment( environment, mpo.policy, test_state ) environment_rewards, _ = test_policy_on_environment( environment, lambda x: ( mpo.policy(x)[0][: mpo.dynamical_model.dim_action], torch.zeros(1), ), test_state, policy_str="Expected Policy", ) plot_values_and_policy( value_function, policy, trajectory=trajectory, num_entries=[200, 200], bounds=[(-2 * np.pi, 2 * np.pi), (-12, 12)], ) plot_returns_entropy_kl(returns, entropy, kl_div) plot_learning_losses(policy_losses, value_losses, horizon=20) return model_rewards
Solve MPO optimization problem.
https://github.com/sebascuri/hucrl/blob/f9cb24a7fdbf8cb2e9c667ec8d254d7d102694b8/exps/inverted_pendulum/util.py#L287-L425
import argparse from typing import List import gpytorch import numpy as np import torch import torch.distributions import torch.nn as nn import torch.optim as optim from rllib.dataset.transforms import ActionScaler, DeltaState, MeanFunction from rllib.dataset.utilities import stack_list_of_tuples from rllib.environment.system_environment import SystemEnvironment from rllib.environment.systems import InvertedPendulum from rllib.model.abstract_model import AbstractModel from rllib.policy import NNPolicy from rllib.reward.utilities import tolerance from rllib.util.neural_networks.utilities import freeze_parameters from rllib.util.rollout import rollout_model, rollout_policy from rllib.value_function import NNValueFunction from torch.distributions import MultivariateNormal from tqdm import tqdm from exps.inverted_pendulum.plotters import ( plot_learning_losses, plot_returns_entropy_kl, plot_trajectory_states_and_rewards, plot_values_and_policy, ) from exps.util import get_mb_mpo_agent, get_mpc_agent from hucrl.algorithms.mbmpo import MBMPO from hucrl.environment.hallucination_wrapper import HallucinationWrapper class StateTransform(nn.Module): extra_dim = 1 def forward(self, states_): angle, angular_velocity = torch.split(states_, 1, dim=-1) states_ = torch.cat( (torch.cos(angle), torch.sin(angle), angular_velocity), dim=-1 ) return states_ def inverse(self, states_): cos, sin, angular_velocity = torch.split(states_, 1, dim=-1) angle = torch.atan2(sin, cos) states_ = torch.cat((angle, angular_velocity), dim=-1) return states_ def large_state_termination(state, action, next_state=None): if not isinstance(state, torch.Tensor): state = torch.tensor(state) if not isinstance(action, torch.Tensor): action = torch.tensor(action) done = torch.any(torch.abs(state) > 200, dim=-1) | torch.any( torch.abs(action) > 200, dim=-1 ) return ( torch.zeros(*done.shape, 2) .scatter_(dim=-1, index=(~done).long().unsqueeze(-1), value=-float("inf")) .squeeze(-1) ) class PendulumReward(AbstractModel): def __init__(self, action_cost=0): super().__init__(dim_state=(2,), dim_action=(1,), model_kind="rewards") self.action_cost = action_cost self.reward_offset = 0 def forward(self, state, action, next_state): if not isinstance(state, torch.Tensor): state = torch.tensor(state, dtype=torch.get_default_dtype()) if not isinstance(action, torch.Tensor): action = torch.tensor(action, dtype=torch.get_default_dtype()) cos_angle = torch.cos(state[..., 0]) velocity = state[..., 1] angle_tolerance = tolerance(cos_angle, lower=0.95, upper=1.0, margin=0.1) velocity_tolerance = tolerance(velocity, lower=-0.5, upper=0.5, margin=0.5) state_cost = angle_tolerance * velocity_tolerance action_tolerance = tolerance(action[..., 0], lower=-0.1, upper=0.1, margin=0.1) action_cost = self.action_cost * (action_tolerance - 1) cost = state_cost + action_cost return cost.unsqueeze(-1), torch.zeros(1) class PendulumModel(AbstractModel): def __init__( self, mass, length, friction, step_size=1 / 80, noise: MultivariateNormal = None ): super().__init__(dim_state=(2,), dim_action=(1,)) self.mass = mass self.length = length self.friction = friction self.step_size = step_size self.noise = noise def forward(self, state, action): action = torch.clamp(action, -1.0, 1.0) mass = self.mass gravity = 9.81 length = self.length friction = self.friction inertia = mass * length ** 2 dt = self.step_size angle, angular_velocity = torch.split(state, 1, dim=-1) for _ in range(1): x_ddot = ( (gravity / length) * torch.sin(angle) + action * (1 / inertia) - (friction / 
inertia) * angular_velocity ) angle = angle + dt * angular_velocity angular_velocity = angular_velocity + dt * x_ddot next_state = torch.cat((angle, angular_velocity), dim=-1) if self.noise is None: return next_state, torch.zeros(1) else: return next_state + self.noise.mean, self.noise.covariance_matrix def test_policy_on_model( dynamical_model, reward_model, policy, test_state, policy_str="Sampled Policy" ): with torch.no_grad(): trajectory = rollout_model( dynamical_model, reward_model, policy, max_steps=400, initial_state=test_state.unsqueeze(0).unsqueeze(1), ) trajectory = stack_list_of_tuples(trajectory) states = trajectory.state[:, 0] rewards = trajectory.reward plot_trajectory_states_and_rewards(states, rewards) model_rewards = torch.sum(rewards).item() print(f"Model with {policy_str} Cumulative reward: {model_rewards:.2f}") return model_rewards, trajectory def test_policy_on_environment( environment, policy, test_state, policy_str="Sampled Policy" ): environment.state = test_state.numpy() environment.initial_state = lambda: test_state.numpy() trajectory = rollout_policy(environment, policy, max_steps=400, render=False)[0] trajectory = stack_list_of_tuples(trajectory) env_rewards = torch.sum(trajectory.reward).item() print(f"Environment with {policy_str} Cumulative reward: {env_rewards:.2f}") return env_rewards, trajectory def train_mpo( mpo: MBMPO, initial_distribution, optimizer, num_iter, num_trajectories, num_simulation_steps, num_gradient_steps, batch_size, num_subsample, ): value_losses = [] policy_losses = [] returns = [] kl_div = [] entropy = [] for i in tqdm(range(num_iter)): state_batches = _simulate_model( mpo, initial_distribution, num_trajectories, num_simulation_steps, batch_size, num_subsample, returns, entropy, ) policy_episode_loss, value_episode_loss, episode_kl_div = _optimize_policy( mpo, state_batches, optimizer, num_gradient_steps ) value_losses.append(value_episode_loss / len(state_batches)) policy_losses.append(policy_episode_loss / len(state_batches)) kl_div.append(episode_kl_div) return value_losses, policy_losses, kl_div, returns, entropy def _simulate_model( mpo, initial_distribution, num_trajectories, num_simulation_steps, batch_size, num_subsample, returns, entropy, ): with torch.no_grad(): test_states = torch.tensor([np.pi, 0]).repeat(num_trajectories // 2, 1) initial_states = initial_distribution.sample((num_trajectories // 2,)) initial_states = torch.cat((initial_states, test_states), dim=0) trajectory = rollout_model( mpo.dynamical_model, reward_model=mpo.reward_model, policy=mpo.policy, initial_state=initial_states, max_steps=num_simulation_steps, ) trajectory = stack_list_of_tuples(trajectory) returns.append(trajectory.reward.sum(dim=0).mean().item()) entropy.append(trajectory.entropy.mean()) states = trajectory.state.reshape(-1, trajectory.state.shape[-1]) np.random.shuffle(states.numpy()) state_batches = torch.split(states, batch_size)[::num_subsample] return state_batches def _optimize_policy(mpo, state_batches, optimizer, num_gradient_steps): policy_episode_loss = 0.0 value_episode_loss = 0.0 episode_kl_div = 0.0 mpo.reset() for _ in range(num_gradient_steps): idx = np.random.choice(len(state_batches)) states = state_batches[idx] optimizer.zero_grad() losses = mpo(states) losses.loss.backward() optimizer.step() value_episode_loss += losses.critic_loss.item() policy_episode_loss += losses.policy_loss.item() mpo.update() return policy_episode_loss, value_episode_loss, episode_kl_div
MIT License
pythonistaguild/wavelink
wavelink/client.py
Client.shard_count
python
def shard_count(self) -> int:
    return self.bot.shard_count or 1
Return the bots Shard Count as an int. Returns --------- int: An int of the bots shard count.
https://github.com/pythonistaguild/wavelink/blob/3e11c16516dd89791c1247032045385979736554/wavelink/client.py#L78-L86
import aiohttp import asyncio import logging from discord.ext import commands from functools import partial from json import dumps from typing import Optional, Union from .errors import * from .player import Player from .node import Node __log__ = logging.getLogger(__name__) class Client: def __new__(cls, *args, **kwargs): cls.__qualname__ = 'wavelink.Client' try: bot = kwargs['bot'] except KeyError: msg = 'wavelink.Client: bot is a required keyword only argument which is missing.' raise WavelinkException(msg) if not isinstance(bot, (commands.Bot, commands.AutoShardedBot)): msg = f'wavelink.Client expected type <commands.Bot or commands.AutoShardedBot> not {type(bot)}' raise TypeError(msg) try: update_handlers = bot.extra_events['on_socket_response'] except KeyError: return super().__new__(cls) for handler in update_handlers: if handler.__self__.__class__.__qualname__ == 'wavelink.Client': bot.remove_listener(handler, 'on_socket_response') return super().__new__(cls) def __init__(self, bot: Union[commands.Bot, commands.AutoShardedBot], *, session: aiohttp.ClientSession = None): self.bot = bot self.loop = bot.loop or asyncio.get_event_loop() self.session = session or aiohttp.ClientSession() self.nodes = {} self._dumps = dumps bot.add_listener(self.update_handler, 'on_socket_response') @property
MIT License
sinhrks/daskperiment
daskperiment/core/trial/local.py
LocalTrialManager.trial_id
python
def trial_id(self):
    if self.is_locked():
        msg = ('Unable to use TrialManager.trial_id during trial. '
               'Use .current_trial_id for safety.')
        raise LockedTrialError(msg)
    return self._trial_id
Return latest trial ID.
https://github.com/sinhrks/daskperiment/blob/63f5a18a0a0dc447698fb90947653b86a3c6160c/daskperiment/core/trial/local.py#L22-L30
from daskperiment.core.errors import LockedTrialError
from daskperiment.core.trial.base import _TrialManager
from daskperiment.util.log import get_logger

logger = get_logger(__name__)


class LocalTrialManager(_TrialManager):

    def __init__(self, backend):
        super().__init__(backend)

        self._trial_id = 0
        self._parameters_history = {}
        self._result_history = {}
        self._hashes = {}

    @property
BSD 3-Clause New or Revised License
vmware/vsphere-automation-sdk-python
samples/vsphere/common/vapiconnect.py
login
python
def login(stub_config, user, pwd):
    user_password_security_context = create_user_password_security_context(user, pwd)
    stub_config.connector.set_security_context(user_password_security_context)

    session_svc = Session(stub_config)
    session_id = session_svc.create()

    session_security_context = create_session_security_context(session_id)
    stub_config.connector.set_security_context(session_security_context)
    return stub_config
Create an authenticated session with vCenter. Returns a stub_config that stores the session identifier that can be used to issue authenticated requests against vCenter.
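A hedged sketch using the connect() helper from the context below, which builds the stub configuration and then delegates to login(); the host and credentials are placeholders.

stub_config = connect('vcenter.example.com',
                      'administrator@vsphere.local',
                      'secret',
                      skip_verification=True)
# stub_config now carries the session security context for authenticated requests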
https://github.com/vmware/vsphere-automation-sdk-python/blob/73624d9e20083002af770cf8763683f3c4681a16/samples/vsphere/common/vapiconnect.py#L55-L77
__author__ = 'VMware, Inc.' __copyright__ = 'Copyright 2016 VMware, Inc. All rights reserved.' import requests from com.vmware.cis_client import Session from vmware.vapi.lib.connect import get_requests_connector from vmware.vapi.security.session import create_session_security_context from vmware.vapi.security.user_password import create_user_password_security_context from vmware.vapi.stdlib.client.factories import StubConfigurationFactory def get_jsonrpc_endpoint_url(host): return "https://{}/api".format(host) def connect(host, user, pwd, skip_verification=False, cert_path=None, suppress_warning=True): host_url = get_jsonrpc_endpoint_url(host) session = requests.Session() if skip_verification: session = create_unverified_session(session, suppress_warning) elif cert_path: session.verify = cert_path connector = get_requests_connector(session=session, url=host_url) stub_config = StubConfigurationFactory.new_std_configuration(connector) return login(stub_config, user, pwd)
MIT License
pkgcore/pkgcore
src/pkgcore/ebuild/eclass.py
_rst_header
python
def _rst_header(char, text, leading=False, newline=False):
    sep = char * len(text)
    data = [text, sep]
    if leading:
        data = [sep] + data
    if newline:
        data.append('')
    return data
Create rST header data from a given character and header text.
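The behaviour is easiest to see from a direct call; the helper is module-private, so this sketch assumes it is invoked from within pkgcore.ebuild.eclass.

_rst_header('=', 'EXAMPLE ECLASS', leading=True, newline=True)
# -> ['==============', 'EXAMPLE ECLASS', '==============', '']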
https://github.com/pkgcore/pkgcore/blob/6c57606c15101590f4eed81636ae583f3f900d6a/src/pkgcore/ebuild/eclass.py#L37-L45
import os import re import shlex import subprocess from datetime import datetime from functools import partial from snakeoil import klass from snakeoil.mappings import ImmutableDict, OrderedSet from snakeoil.strings import pluralism from snakeoil.version import get_version from .. import __title__ from ..log import logger from . import conditionals from .eapi import EAPI class AttrDict(ImmutableDict): def __getattr__(self, name): try: object.__getattribute__(self, name) except AttributeError as e: try: return object.__getattribute__(self, '_dict')[name] except KeyError: raise e def __dir__(self): return sorted(dir(self._dict) + list(self._dict))
BSD 3-Clause New or Revised License
mcleonard/sampyl
sampyl/distributions.py
fails_constraints
python
def fails_constraints(*conditions):
    for each in conditions:
        if not np.all(each):
            return True
    else:
        return False
Utility function for catching out of bound parameters.

Returns True if any of the conditions aren't met. Typically you'll use this at the beginning of defining the log P(X) functions.

Example ::

    def logp(x, y):
        # Bound x and y to be greater than 0
        if fails_constraints(x > 0, y > 0):
            return -np.inf
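A hedged, self-contained version of the docstring example; the quadratic log-density is a placeholder, and plain numpy stands in for sampyl.core.np.

import numpy as np
from sampyl.distributions import fails_constraints

def logp(x, y):
    if fails_constraints(x > 0, y > 0):
        return -np.inf
    return -0.5 * (x ** 2 + y ** 2)   # placeholder log-density

logp(1.0, 2.0)    # finite value
logp(-1.0, 2.0)   # -inf, because the x > 0 condition fails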
https://github.com/mcleonard/sampyl/blob/3849fa688d477b4ed723e08b931c73043f8471c5/sampyl/distributions.py#L17-L33
import numbers

from sampyl.core import np
from scipy.special import gamma
MIT License
gnocchixyz/gnocchi
gnocchi/utils.py
StopWatch.__enter__
python
def __enter__(self):
    self.start()
    return self
Starts the watch.
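A hedged sketch of the context-manager form, assuming the class also defines the matching __exit__; do_work() is a placeholder for the timed section.

from gnocchi.utils import StopWatch

with StopWatch() as sw:
    do_work()              # placeholder for the code being timed
print(sw.elapsed())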
https://github.com/gnocchixyz/gnocchi/blob/1207c4ad2d82ebd5a1d463a761deed9be76a8369/gnocchi/utils.py#L264-L267
import datetime import distutils.util import errno import itertools import multiprocessing import os import uuid from concurrent import futures import daiquiri import iso8601 import monotonic import numpy import pytimeparse import six from stevedore import driver import tenacity LOG = daiquiri.getLogger(__name__) RESOURCE_ID_NAMESPACE = uuid.UUID('0a7a15ff-aa13-4ac2-897c-9bdf30ce175b') def ResourceUUID(value, creator): if isinstance(value, uuid.UUID): return value if '/' in value: raise ValueError("'/' is not supported in resource id") try: return uuid.UUID(value) except ValueError: if len(value) <= 255: if creator is None: creator = "\x00" if six.PY2: value = value.encode('utf-8') creator = creator.encode('utf-8') return uuid.uuid5(RESOURCE_ID_NAMESPACE, value + "\x00" + creator) raise ValueError( 'transformable resource id >255 max allowed characters') def UUID(value): try: return uuid.UUID(value) except Exception as e: raise ValueError(e) unix_universal_start64 = numpy.datetime64("1970") def to_timestamps(values): try: if len(values) == 0: return [] if isinstance(values[0], (numpy.datetime64, datetime.datetime)): times = numpy.array(values) else: try: float(values[0]) except ValueError: try: numpy.datetime64(values[0]) except ValueError: times = numpy.fromiter( numpy.add(numpy.datetime64(utcnow()), [to_timespan(v, True) for v in values]), dtype='datetime64[ns]', count=len(values)) else: times = numpy.array(values, dtype='datetime64[ns]') else: times = numpy.array(values, dtype='float') * 10e8 except ValueError: raise ValueError("Unable to convert timestamps") times = times.astype('datetime64[ns]') if (times < unix_universal_start64).any(): raise ValueError('Timestamp must be after Epoch') return times def to_timestamp(value): return to_timestamps([value])[0] def to_datetime(value): return timestamp_to_datetime(to_timestamp(value)) def timestamp_to_datetime(v): return datetime.datetime.utcfromtimestamp( v.astype(float) / 10e8).replace(tzinfo=iso8601.iso8601.UTC) def to_timespan(value, allow_le_zero=False): if value is None: raise ValueError("Invalid timespan") try: seconds = float(value) except Exception: seconds = pytimeparse.parse(value) if seconds is None: raise ValueError("Unable to parse timespan") seconds = numpy.timedelta64(int(seconds * 10e8), 'ns') if not allow_le_zero and seconds <= numpy.timedelta64(0, 'ns'): raise ValueError("Timespan must be positive") return seconds _ONE_SECOND = numpy.timedelta64(1, 's') def timespan_total_seconds(td): return td / _ONE_SECOND def utcnow(): return datetime.datetime.now(tz=iso8601.iso8601.UTC) def normalize_time(timestamp): offset = timestamp.utcoffset() if offset is None: return timestamp return timestamp.replace(tzinfo=None) - offset def datetime_utc(*args): return datetime.datetime(*args, tzinfo=iso8601.iso8601.UTC) unix_universal_start = datetime_utc(1970, 1, 1) def datetime_to_unix(timestamp): return (timestamp - unix_universal_start).total_seconds() def dt_in_unix_ns(timestamp): return int(datetime_to_unix(timestamp) * int(10e8)) def get_default_workers(): try: default_workers = multiprocessing.cpu_count() or 1 except NotImplementedError: default_workers = 1 return default_workers def grouper(iterable, n): it = iter(iterable) while True: chunk = tuple(itertools.islice(it, n)) if not chunk: return yield chunk def ensure_paths(paths): for p in paths: try: os.makedirs(p) except OSError as e: if e.errno != errno.EEXIST: raise def strtobool(v): if isinstance(v, bool): return v return bool(distutils.util.strtobool(v)) class 
StopWatch(object): _STARTED = object() _STOPPED = object() def __init__(self): self._started_at = None self._stopped_at = None self._state = None def start(self): if self._state == self._STARTED: return self self._started_at = monotonic.monotonic() self._state = self._STARTED return self @staticmethod def _delta_seconds(earlier, later): return max(0.0, later - earlier) def elapsed(self): if self._state not in (self._STARTED, self._STOPPED): raise RuntimeError("Can not get the elapsed time of a stopwatch" " if it has not been started/stopped") if self._state == self._STOPPED: elapsed = self._delta_seconds(self._started_at, self._stopped_at) else: elapsed = self._delta_seconds( self._started_at, monotonic.monotonic()) return elapsed
Apache License 2.0
jeroenzegers/nabu-msss
nabu/neuralnetworks/trainers/task_trainer.py
TaskTrainer.gather_grads
python
def gather_grads(self, optimizer): with tf.variable_scope(self.task_name): self.batch_loss = tf.get_variable( name='batch_loss', shape=[], dtype=tf.float32, initializer=tf.constant_initializer(0), trainable=False) self.batch_loss_norm = tf.get_variable( name='batch_loss_norm', shape=[], dtype=tf.float32, initializer=tf.constant_initializer(0), trainable=False) with tf.variable_scope('normalize_loss'): self.normalized_loss = self.batch_loss / self.batch_loss_norm self.process_minibatch = [] for set_ind, linkedset in enumerate(self.linkedsets): inputs = dict() seq_lengths = dict() targets = dict() data, seq_length = input_pipeline.input_pipeline( data_queue=self.data_queue[linkedset], batch_size=self.batch_size, numbuckets=int(self.trainerconf['numbuckets']), dataconfs=self.input_dataconfs[linkedset] + self.target_dataconfs[linkedset] ) for ind, input_name in enumerate(self.input_names): inputs[input_name] = data[ind] seq_lengths[input_name] = seq_length[ind] for ind, target_name in enumerate(self.target_names): targets[target_name] = data[len(self.input_names) + ind] logits, used_models = run_multi_model.run_multi_model( models=self.models, model_nodes=self.model_nodes, model_links=self.model_links, inputs=inputs, inputs_links=self.inputs_links, nodes_output_names=self.nodes_output_names, output_names=self.output_names, seq_lengths=seq_lengths, is_training=True) task_minibatch_loss, task_minibatch_loss_norm = self.loss_computer(targets, logits, seq_lengths) task_minibatch_loss *= self.linkedset_weighting[linkedset] task_minibatch_loss_norm *= self.linkedset_weighting[linkedset] used_variables = run_multi_model.get_variables(used_models) task_minibatch_grads_and_vars = optimizer.compute_gradients(task_minibatch_loss, var_list=used_variables) (task_minibatch_grads, task_vars) = zip(*task_minibatch_grads_and_vars) if set_ind == 0: self.params = task_vars self.grads = [tf.get_variable( param.op.name, param.get_shape().as_list(), initializer=tf.constant_initializer(0), trainable=False) for param in self.params] with tf.variable_scope('update_gradients_%s' % linkedset): update_gradients = [ grad.assign_add(batchgrad) for batchgrad, grad in zip(task_minibatch_grads, self.grads) if batchgrad is not None] acc_loss = self.batch_loss.assign_add(task_minibatch_loss) acc_loss_norm = self.batch_loss_norm.assign_add(task_minibatch_loss_norm) self.process_minibatch.append(tf.group( *(update_gradients + [acc_loss] + [acc_loss_norm]), name='update_grads_loss_norm_%s' % linkedset)) reset_batch_loss = self.batch_loss.assign(0.0) reset_batch_loss_norm = self.batch_loss_norm.assign(0.0) reset_grad = tf.variables_initializer(self.grads) with tf.variable_scope('normalize_gradients'): if self.trainerconf['normalize_gradients'] == 'True': self.normalize_gradients = [ grad.assign(tf.divide(grad, self.batch_loss_norm)) for grad in self.grads] else: self.normalize_gradients = [grad.assign(grad) for grad in self.grads] self.reset_grad_loss_norm = tf.group(*( [reset_grad, reset_batch_loss, reset_batch_loss_norm]), name='reset_grad_loss_norm') batch_grads_and_vars = zip(self.grads, task_vars) with tf.variable_scope('clip'): clip_value = float(self.trainerconf['clip_grad_value']) batch_grads_and_vars = [ (tf.clip_by_value(grad, -clip_value, clip_value), var) for grad, var in batch_grads_and_vars] return batch_grads_and_vars
Gather gradients for this task
https://github.com/jeroenzegers/nabu-msss/blob/5e862cbf846d45b8a317f87588533f3fde9f0726/nabu/neuralnetworks/trainers/task_trainer.py#L151-L269
import tensorflow as tf from nabu.neuralnetworks.loss_computers import loss_computer_factory from nabu.neuralnetworks.evaluators import evaluator_factory from nabu.processing import input_pipeline from nabu.neuralnetworks.models import run_multi_model import numpy as np class TaskTrainer(object): def __init__(self, task_name, trainerconf, taskconf, models, modelconf, dataconf, evaluatorconf, lossconf, batch_size): self.task_name = task_name self.trainerconf = trainerconf self.taskconf = taskconf self.models = models self.modelconf = modelconf self.evaluatorconf = evaluatorconf self.batch_size = batch_size self.output_names = taskconf['outputs'].split(' ') self.input_names = taskconf['inputs'].split(' ') self.target_names = taskconf['targets'].split(' ') if self.target_names == ['']: self.target_names = [] self.model_nodes = taskconf['nodes'].split(' ') if 'linkedsets' in taskconf: set_names = taskconf['linkedsets'].split(' ') self.linkedsets = dict() for set_name in set_names: set_input_names = ['%s_%s' % (set_name, in_name) for in_name in self.input_names] set_target_names = ['%s_%s' % (set_name, tar_name) for tar_name in self.target_names] self.linkedsets[set_name] = {'inputs': set_input_names, 'targets': set_target_names} if 'linkedset_weighting' in taskconf: linkedset_weighting = np.array(map(float, taskconf['linkedset_weighting'].split(' '))) linkedset_weighting /= linkedset_weighting[0] else: linkedset_weighting = np.array([1.0] * len(self.linkedsets)) self.linkedset_weighting = {set_name: weight for set_name, weight in zip(set_names, linkedset_weighting)} else: self.linkedsets = {'set0': {'inputs': self.input_names, 'targets': self.target_names}} self.linkedset_weighting = {'set0': 1.0} self.input_dataconfs = dict() self.target_dataconfs = dict() for linkedset in self.linkedsets: self.input_dataconfs[linkedset] = [] for input_name in self.linkedsets[linkedset]['inputs']: dataconfs_for_input = [] sections = taskconf[input_name].split(' ') for section in sections: dataconfs_for_input.append(dict(dataconf.items(section))) self.input_dataconfs[linkedset].append(dataconfs_for_input) self.target_dataconfs[linkedset] = [] for target_name in self.linkedsets[linkedset]['targets']: dataconfs_for_target = [] sections = taskconf[target_name].split(' ') for section in sections: dataconfs_for_target.append(dict(dataconf.items(section))) self.target_dataconfs[linkedset].append(dataconfs_for_target) self.model_links = dict() self.inputs_links = dict() self.nodes_output_names = dict() for node in self.model_nodes: self.model_links[node] = taskconf['%s_model' % node] self.inputs_links[node] = taskconf['%s_inputs' % node].split(' ') if '%s_output_names' % node in taskconf: self.nodes_output_names[node] = taskconf['%s_output_names' % node].split(' ') else: self.nodes_output_names[node] = node if lossconf: loss_type = lossconf['loss_type'] else: loss_type = taskconf['loss_type'] self.loss_computer = loss_computer_factory.factory(loss_type)(lossconf, self.batch_size) evaltype = evaluatorconf.get('evaluator', 'evaluator') if evaltype != 'None': self.evaluator = evaluator_factory.factory(evaltype)( conf=evaluatorconf, dataconf=dataconf, lossconf=lossconf, models=self.models, task=task_name) def set_dataqueues(self): self.data_queue = dict() for linkedset in self.linkedsets: data_queue_name = 'data_queue_%s_%s' % (self.task_name, linkedset) data_queue_elements, _ = input_pipeline.get_filenames( self.input_dataconfs[linkedset] + self.target_dataconfs[linkedset]) number_of_elements = len(data_queue_elements) 
if 'trainset_frac' in self.taskconf: number_of_elements = int(float(number_of_elements) * float(self.taskconf['trainset_frac'])) print '%d utterances will be used for training' % number_of_elements data_queue_elements = data_queue_elements[:number_of_elements] self.data_queue[linkedset] = tf.train.string_input_producer( string_tensor=data_queue_elements, shuffle=False, seed=None, capacity=self.batch_size*2, shared_name=data_queue_name) if int(self.trainerconf['numbatches_to_aggregate']) == 0: num_steps = int(self.trainerconf['num_epochs']) * len(data_queue_elements) / self.batch_size else: num_steps = int(self.trainerconf['num_epochs']) * len(data_queue_elements) / (self.batch_size * int(self.trainerconf['numbatches_to_aggregate'])) done_ops = [tf.no_op()] return num_steps, done_ops
MIT License
seetaresearch/dragon
torch/core/nn/functional.py
conv2d
python
def conv2d(
    input,
    weight,
    bias=None,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
):
    return _conv('Conv', utils._pair, **locals())
Apply the 2d convolution to input.

Parameters
----------
input : dragon.vm.torch.Tensor
    The input tensor.
weight : dragon.vm.torch.Tensor
    The weight tensor.
bias : dragon.vm.torch.Tensor, optional
    The bias tensor.
stride : Union[int, Sequence[int]], optional, default=1
    The stride of convolution window.
padding : Union[int, Sequence[int]], optional, default=0
    The zero padding size.
dilation : Union[int, Sequence[int]], optional, default=1
    The rate of dilated kernel.
groups : int, optional, default=1
    The number of groups to split input channels.

Returns
-------
dragon.vm.torch.Tensor
    The output tensor.

See Also
--------
`torch.nn.Conv2d(...)`_
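The call shape is the familiar torch-style one; as a runnable stand-in, the same call works with stock PyTorch's torch.nn.functional.conv2d (shown here instead of the dragon.vm.torch wrapper itself).

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)      # NCHW input
w = torch.randn(16, 3, 3, 3)     # out_channels x in_channels x kH x kW
y = F.conv2d(x, w, stride=1, padding=1)
print(y.shape)                   # torch.Size([1, 16, 8, 8])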
https://github.com/seetaresearch/dragon/blob/3dfb6ea55d90d2fb2da9b1b471f5e1e7d7667810/torch/core/nn/functional.py#L422-L460
from __future__ import absolute_import from __future__ import division from __future__ import print_function from dragon.core.util import nest from dragon.vm.torch.core.autograd.function_impl import FunctionLib from dragon.vm.torch.core.nn import _reduction from dragon.vm.torch.core.nn.modules import utils def adaptive_avg_pool1d(input, output_size): args = utils._get_adaptive_pool_args( input.size()[-1:], utils._single(output_size)) return _pool('AVG', utils._single, input, **args) def adaptive_avg_pool2d(input, output_size): args = utils._get_adaptive_pool_args( input.size()[-2:], utils._pair(output_size)) return _pool('AVG', utils._pair, input, **args) def adaptive_avg_pool3d(input, output_size): args = utils._get_adaptive_pool_args( input.size()[-3:], utils._triple(output_size)) return _pool('AVG', utils._triple, input, **args) def adaptive_max_pool1d(input, output_size): args = utils._get_adaptive_pool_args( input.size()[-1:], utils._single(output_size)) return _pool('MAX', utils._single, input, **args) def adaptive_max_pool2d(input, output_size): args = utils._get_adaptive_pool_args( input.size()[-2:], utils._pair(output_size)) return _pool('MAX', utils._pair, input, **args) def adaptive_max_pool3d(input, output_size): args = utils._get_adaptive_pool_args( input.size()[-3:], utils._triple(output_size)) return _pool('MAX', utils._triple, input, **args) def avg_pool1d(input, kernel_size, stride=1, padding=0, ceil_mode=False): return _pool('AVG', utils._single, **locals()) def avg_pool2d(input, kernel_size, stride=1, padding=0, ceil_mode=False): return _pool('AVG', utils._pair, **locals()) def avg_pool3d(input, kernel_size, stride=1, padding=0, ceil_mode=False): return _pool('AVG', utils._triple, **locals()) def batch_norm( input, running_mean, running_var, weight, bias, training=False, momentum=0.1, eps=1e-5, ): return FunctionLib.apply( 'BatchNorm', input.device, [input, weight, bias, running_mean, running_var], axis=1, epsilon=eps, use_stats=int(not training), momentum=1.0 - momentum) def binary_cross_entropy_with_logits( input, target, weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None, ): if size_average is not None or reduce is not None: reduction = _reduction.legacy_get_string(size_average, reduce) else: reduction = reduction return FunctionLib.apply( 'SigmoidCrossEntropyLoss', input.device, [input, target], reduction=reduction.upper()) def channel_shuffle(input, groups): return FunctionLib.apply( 'ChannelShuffle', input.device, [input], axis=1, group=groups) def conv1d( input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, ): return _conv('Conv', utils._single, **locals())
BSD 2-Clause Simplified License
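The wrapper above follows the interface of the PyTorch functional it emulates (its See Also points at torch.nn.Conv2d). As a shape illustration, here is a sketch using stock PyTorch; swapping in dragon.vm.torch is assumed to work the same way but is not tested here:

import torch
import torch.nn.functional as F

# NCHW input and OIHW weight: batch of 8, 3 input channels, 16 filters of size 3x3.
x = torch.randn(8, 3, 32, 32)
w = torch.randn(16, 3, 3, 3)
b = torch.zeros(16)

# Same keyword set as the wrapper above; stride/padding/dilation accept ints or pairs.
y = F.conv2d(x, w, bias=b, stride=1, padding=1, dilation=1, groups=1)
print(y.shape)  # torch.Size([8, 16, 32, 32])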
wangsr126/rdsnet
mmdet/core/anchor/guided_anchor_target.py
images_to_levels
python
def images_to_levels(target, num_level_anchors):
    target = torch.stack(target, 0)
    level_targets = []
    start = 0
    for n in num_level_anchors:
        end = start + n
        level_targets.append(target[:, start:end].squeeze(0))
        start = end
    return level_targets
Convert targets by image to targets by feature level.

[target_img0, target_img1] -> [target_level0, target_level1, ...]
https://github.com/wangsr126/rdsnet/blob/0fd3abbcedde4f2d3a379d93675f54ec1e7d9684/mmdet/core/anchor/guided_anchor_target.py#L205-L217
import torch from ..bbox import PseudoSampler, build_assigner, build_sampler from ..utils import multi_apply, unmap def calc_region(bbox, ratio, featmap_size=None): x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long() y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long() x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long() y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long() if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1] - 1) y1 = y1.clamp(min=0, max=featmap_size[0] - 1) x2 = x2.clamp(min=0, max=featmap_size[1] - 1) y2 = y2.clamp(min=0, max=featmap_size[0] - 1) return (x1, y1, x2, y2) def ga_loc_target(gt_bboxes_list, featmap_sizes, anchor_scale, anchor_strides, center_ratio=0.2, ignore_ratio=0.5): img_per_gpu = len(gt_bboxes_list) num_lvls = len(featmap_sizes) r1 = (1 - center_ratio) / 2 r2 = (1 - ignore_ratio) / 2 all_loc_targets = [] all_loc_weights = [] all_ignore_map = [] for lvl_id in range(num_lvls): h, w = featmap_sizes[lvl_id] loc_targets = torch.zeros( img_per_gpu, 1, h, w, device=gt_bboxes_list[0].device, dtype=torch.float32) loc_weights = torch.full_like(loc_targets, -1) ignore_map = torch.zeros_like(loc_targets) all_loc_targets.append(loc_targets) all_loc_weights.append(loc_weights) all_ignore_map.append(ignore_map) for img_id in range(img_per_gpu): gt_bboxes = gt_bboxes_list[img_id] scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() for gt_id in range(gt_bboxes.size(0)): lvl = target_lvls[gt_id].item() gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[lvl]) ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( gt_, r1, featmap_sizes[lvl]) all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0 all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 if lvl > 0: d_lvl = lvl - 1 gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[d_lvl]) all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 if lvl < num_lvls - 1: u_lvl = lvl + 1 gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[u_lvl]) all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 for lvl_id in range(num_lvls): all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0 all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 loc_avg_factor = sum( [t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200 return all_loc_targets, all_loc_weights, loc_avg_factor def ga_shape_target(approx_list, inside_flag_list, square_list, gt_bboxes_list, img_metas, approxs_per_octave, cfg, gt_bboxes_ignore_list=None, sampling=True, unmap_outputs=True): num_imgs = len(img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs num_level_squares = [squares.size(0) for squares in square_list[0]] inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert 
len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( ga_shape_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, gt_bboxes_list, gt_bboxes_ignore_list, img_metas, approxs_per_octave=approxs_per_octave, cfg=cfg, sampling=sampling, unmap_outputs=unmap_outputs) if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]): return None num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares) bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares) return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos, num_total_neg)
Apache License 2.0
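A toy run of the helper above, showing how per-image anchor targets are regrouped into per-level tensors (the sizes are invented for illustration):

import torch

def images_to_levels(target, num_level_anchors):
    # Copied from the record above.
    target = torch.stack(target, 0)
    level_targets = []
    start = 0
    for n in num_level_anchors:
        end = start + n
        level_targets.append(target[:, start:end].squeeze(0))
        start = end
    return level_targets

# Two images, 6 anchors each (4 on level 0, 2 on level 1), 4 bbox regression targets.
per_image = [torch.zeros(6, 4), torch.ones(6, 4)]
levels = images_to_levels(per_image, [4, 2])
print([t.shape for t in levels])  # [torch.Size([2, 4, 4]), torch.Size([2, 2, 4])]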
bitshares/uptick
uptick/account.py
setproxy
python
def setproxy(ctx, proxy_account, account):
    print_tx(ctx.bitshares.set_proxy(proxy_account, account=account))
Set the proxy account for an account
https://github.com/bitshares/uptick/blob/5035ff93745dddfbbd2878f818d9c302c8aa4244/uptick/account.py#L318-L321
import json import click from tqdm import tqdm from prettytable import PrettyTable from bitshares.block import Block, BlockHeader from bitshares.account import Account from .decorators import onlineChain, unlockWallet, unlock from .ui import print_permissions, pprintOperation, print_table, print_tx from .main import main, config @main.command() @click.pass_context @onlineChain @click.argument("foreign_account", required=False, type=str) @click.option( "--account", default=config["default_account"], type=str, help="Account to be modified", ) @click.option( "--permission", default="active", type=str, help="Permission/Role to be modified" ) @click.option("--threshold", type=int, help="Threshold for the Role") @click.option("--weight", type=int, help="Weight of the new key/account") @unlockWallet def allow(ctx, foreign_account, permission, weight, threshold, account): if not foreign_account: from bitsharesbase.account import PasswordKey pwd = click.prompt( "Password for Key Derivation", hide_input=True, confirmation_prompt=True ) foreign_account = format( PasswordKey(account, pwd, permission).get_public(), "BTS" ) print_tx( ctx.bitshares.allow( foreign_account, weight=weight, account=account, permission=permission, threshold=threshold, ) ) @main.command() @click.pass_context @onlineChain @click.argument("foreign_account", type=str) @click.option( "--account", default=config["default_account"], help="Account to be modified", type=str, ) @click.option( "--permission", default="active", help="Permission/Role to be modified", type=str ) @click.option("--threshold", help="Threshold for the Role", type=int) @unlockWallet def disallow(ctx, foreign_account, permission, threshold, account): print_tx( ctx.bitshares.disallow( foreign_account, account=account, permission=permission, threshold=threshold ) ) @main.command() @click.pass_context @onlineChain @click.argument("account", nargs=-1) @click.option("--csv/--table", help="Show output as csv or table", default=False) @click.option( "--type", type=str, help="Only show operations of this type", multiple=True ) @click.option("--exclude", type=str, help="Exclude certain types", multiple=True) @click.option("--limit", type=int, help="Limit number of elements", default=15) @click.option("--raw/--no-raw", default=False) @click.option("--memo/--no-memo", default=False) def history(ctx, account, limit, type, csv, exclude, raw, memo): from bitsharesbase.operations import getOperationNameForId if memo: pwd = click.prompt("Current Wallet Passphrase", hide_input=True) ctx.bitshares.wallet.unlock(pwd) t = [["#", "time (block)", "operation", "details"]] for a in account: account = Account(a, bitshares_instance=ctx.bitshares) for b in tqdm(account.history(limit=limit, only_ops=type, exclude_ops=exclude)): block = BlockHeader(b["block_num"]) row = [ b["id"], "%s (%s)" % (block.time(), b["block_num"]), "{} ({})".format(getOperationNameForId(b["op"][0]), b["op"][0]), pprintOperation(b, memo, ctx) if not raw else json.dumps(b, indent=4), ] t.append(row) print_table(t) @main.command() @click.pass_context @onlineChain @click.argument("to", nargs=1, type=str) @click.argument("amount", nargs=1, type=float) @click.argument("asset", nargs=1, type=str) @click.argument("memo", required=False, type=str, default=None) @click.option( "--account", default=config["default_account"], help="Account to send from" ) @unlockWallet def transfer(ctx, to, amount, asset, memo, account): print_tx(ctx.bitshares.transfer(to, amount, asset, memo=memo, account=account)) @main.command() 
@click.pass_context @onlineChain @click.argument("accounts", nargs=-1) def balance(ctx, accounts): t = [["Account", "Amount"]] for a in accounts: account = Account(a, bitshares_instance=ctx.bitshares) for b in account.balances: t.append([str(a), str(b)]) print_table(t) @main.command() @click.pass_context @onlineChain @click.argument("account", nargs=1) def permissions(ctx, account): print_permissions(Account(account)) @main.command() @click.pass_context @onlineChain @click.argument("accountname", nargs=1, type=str) @click.option( "--account", default=config["default_account"], help="Account to pay the registration fee", ) @click.option( "--password", prompt="Account Password", hide_input=True, confirmation_prompt=True, help="Account Password", ) @unlockWallet def newaccount(ctx, accountname, account, password): print_tx( ctx.bitshares.create_account(accountname, registrar=account, password=password) ) @main.command() @click.pass_context @onlineChain @click.argument("account", nargs=1, default=config["default_account"], type=str) @unlockWallet def upgrade(ctx, account): print_tx(ctx.bitshares.upgrade_account(account)) @main.command() @click.pass_context @onlineChain @click.option( "--feepayer", nargs=1, default=None, help="Account to pay the fee from", type=str ) @click.option( "--account", nargs=1, default=config["default_account"], help="Account to clone", type=str, ) @click.argument("account_name", nargs=1, type=str) @unlockWallet def cloneaccount(ctx, account_name, account, feepayer): from bitsharesbase import transactions, operations account = Account(account) if feepayer is None: feepayer = account else: feepayer = Account(feepayer) op = { "fee": {"amount": 0, "asset_id": "1.3.0"}, "registrar": feepayer["id"], "referrer": feepayer["id"], "referrer_percent": 100, "name": account_name, "owner": account["owner"], "active": account["active"], "options": account["options"], "extensions": {}, "prefix": ctx.bitshares.rpc.chain_params["prefix"], } op = operations.Account_create(**op) print_tx(ctx.bitshares.finalizeOp(op, account, "active")) @main.command() @click.pass_context @onlineChain @click.option("--key", prompt="Memo Key", type=str) @click.option( "--account", default=config["default_account"], type=str, help="Account to be modified", ) @unlockWallet def changememokey(ctx, key, account): print_tx(ctx.bitshares.update_memo_key(key, account=account)) @main.command() @click.pass_context @onlineChain @click.argument("whitelist_account", type=str) @click.option( "--account", default=config["default_account"], type=str, help="Account to be modified", ) @unlockWallet def whitelist(ctx, whitelist_account, account): account = Account(account, blockchain_instance=ctx.blockchain) print_tx(account.whitelist(whitelist_account)) @main.command() @click.pass_context @onlineChain @click.argument("blacklist_account", type=str) @click.option( "--account", default=config["default_account"], type=str, help="Account to be modified", ) @unlockWallet def blacklist(ctx, blacklist_account, account): account = Account(account, blockchain_instance=ctx.blockchain) print_tx(account.blacklist(blacklist_account)) @main.command() @click.pass_context @onlineChain @click.argument("unlist_account", type=str) @click.option( "--account", default=config["default_account"], type=str, help="Account to be modified", ) @unlockWallet def unlist(ctx, unlist_account, account): account = Account(account, blockchain_instance=ctx.blockchain) print_tx(account.nolist(unlist_account)) @main.command() @click.pass_context @onlineChain 
@click.argument("proxy_account", type=str) @click.option( "--account", default=config["default_account"], help="Account to be modified", type=str, ) @unlockWallet
MIT License
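The command above is a thin click wrapper around python-bitshares, so the same call can be issued directly from Python. A sketch with placeholder node and account names, assuming the account's active key is already in the local wallet:

from bitshares import BitShares

# Placeholders; uptick normally takes these from its own config and wallet.
chain = BitShares(node="wss://node.example.com", nobroadcast=True)
chain.wallet.unlock("wallet-passphrase")

# The same call the CLI issues via print_tx(ctx.bitshares.set_proxy(...)).
tx = chain.set_proxy("proxy-account-name", account="my-account-name")
print(tx)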
codyberenson/pgma-modernized
Fagalicious.bundle/Contents/Libraries/Shared/twodict.py
TwoWayOrderedDict._iterate
python
def _iterate(self, reverse=False):
    index = self._PREV if reverse else self._NEXT
    curr = self._items[index]
    while curr is not self._items:
        yield curr[self._KEY]
        curr = curr[index]
Generator that iterates over the dictionary keys.
https://github.com/codyberenson/pgma-modernized/blob/f8d8771809d6b619f9e356e55bfcbfa7368c4abd/Fagalicious.bundle/Contents/Libraries/Shared/twodict.py#L180-L187
import sys import collections __all__ = ["TwoWayOrderedDict"] __version__ = "1.2" __license__ = "Unlicense" _DEFAULT_OBJECT = object() class DictKeysView(collections.KeysView): def __init__(self, data): super(DictKeysView, self).__init__(data) self.__data = data def __repr__(self): return "dict_keys({data})".format(data=list(self)) def __contains__(self, key): return key in [key for key in self.__data] class DictValuesView(collections.ValuesView): def __init__(self, data): super(DictValuesView, self).__init__(data) self.__data = data def __repr__(self): return "dict_values({data})".format(data=list(self)) def __contains__(self, value): return value in [self.__data[key] for key in self.__data] class DictItemsView(collections.ItemsView): def __init__(self, data): super(DictItemsView, self).__init__(data) self.__data = data def __repr__(self): return "dict_items({data})".format(data=list(self)) def __contains__(self, item): return item in [(key, self.__data[key]) for key in self.__data] class TwoWayOrderedDict(dict): _PREV = 0 _KEY = 1 _NEXT = 2 def __init__(self, *args, **kwargs): super(TwoWayOrderedDict, self).__init__() self.clear() self.update(*args, **kwargs) def __setitem__(self, key, value): if key in self: if key != self[key]: self._remove_mapped_key(self[key]) dict.__delitem__(self, self[key]) if value in self: if key != value: self._remove_mapped_key(value) self._remove_mapped_key(self[value]) if self[value] in self: dict.__delitem__(self, self[value]) if key not in self._items_map: last = self._items[self._PREV] last[self._NEXT] = self._items[self._PREV] = self._items_map[key] = [last, key, self._items] dict.__setitem__(self, key, value) dict.__setitem__(self, value, key) def __delitem__(self, key): self._remove_mapped_key(self[key]) self._remove_mapped_key(key) dict.__delitem__(self, self[key]) if key in self: dict.__delitem__(self, key) def __len__(self): return len(self._items_map) def __iter__(self): return self._iterate() def __reversed__(self): return self._iterate(reverse=True) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, list(self.items())) def __eq__(self, other): if not isinstance(other, self.__class__): return False return self.items() == other.items() def __ne__(self, other): return not self == other def _remove_mapped_key(self, key): if key in self._items_map: prev_item, _, next_item = self._items_map.pop(key) prev_item[self._NEXT] = next_item next_item[self._PREV] = prev_item
MIT License
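A quick demonstration of the generator above through the public iteration protocol, assuming the standalone twodict package (which ships this class) is installed:

from twodict import TwoWayOrderedDict

d = TwoWayOrderedDict()
d['a'] = 1
d['b'] = 2
d['c'] = 3

# __iter__ and __reversed__ delegate to _iterate(), which walks the linked item list.
print(list(d))            # ['a', 'b', 'c']  (insertion order)
print(list(reversed(d)))  # ['c', 'b', 'a']
print(d[1])               # 'a'  (values map back to keys, hence "two way")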
google/objax
objax/zoo/wide_resnet.py
WideResNetGeneral.__init__
python
def __init__(self,
             nin: int,
             nclass: int,
             blocks_per_group: List[int],
             width: int,
             bn: Callable = functools.partial(objax.nn.BatchNorm2D, momentum=BN_MOM, eps=BN_EPS)):
    widths = [int(v * width) for v in [16 * (2 ** i) for i in range(len(blocks_per_group))]]
    n = 16
    ops = [objax.nn.Conv2D(nin, n, 3, **conv_args(3, n))]
    for i, (block, width) in enumerate(zip(blocks_per_group, widths)):
        stride = 2 if i > 0 else 1
        ops.append(WRNBlock(n, width, stride, bn))
        for b in range(1, block):
            ops.append(WRNBlock(width, width, 1, bn))
        n = width
    ops += [bn(n),
            objax.functional.relu,
            self.mean_reduce,
            objax.nn.Linear(n, nclass, w_init=objax.nn.init.xavier_truncated_normal)
            ]
    super().__init__(ops)
Creates WideResNetGeneral instance.

Args:
    nin: number of channels in the input image.
    nclass: number of output classes.
    blocks_per_group: number of blocks in each block group.
    width: multiplier to the number of convolution filters.
    bn: module which is used as batch norm function.
https://github.com/google/objax/blob/647d28bfd2fa47daa416f0f77f5f09c1bcda3c6f/objax/zoo/wide_resnet.py#L89-L119
__all__ = ['WRNBlock', 'WideResNetGeneral', 'WideResNet'] import functools from typing import Callable, List import objax from objax.typing import JaxArray BN_MOM = 0.9 BN_EPS = 1e-5 def conv_args(kernel_size: int, nout: int): stddev = objax.functional.rsqrt(0.5 * kernel_size * kernel_size * nout) return dict(w_init=functools.partial(objax.random.normal, stddev=stddev), use_bias=False, padding=objax.constants.ConvPadding.SAME) class WRNBlock(objax.Module): def __init__(self, nin: int, nout: int, stride: int = 1, bn: Callable = functools.partial(objax.nn.BatchNorm2D, momentum=BN_MOM, eps=BN_EPS)): if nin != nout or stride > 1: self.proj_conv = objax.nn.Conv2D(nin, nout, 1, strides=stride, **conv_args(1, nout)) else: self.proj_conv = None self.norm_1 = bn(nin) self.conv_1 = objax.nn.Conv2D(nin, nout, 3, strides=stride, **conv_args(3, nout)) self.norm_2 = bn(nout) self.conv_2 = objax.nn.Conv2D(nout, nout, 3, strides=1, **conv_args(3, nout)) def __call__(self, x: JaxArray, training: bool) -> JaxArray: o1 = objax.functional.relu(self.norm_1(x, training)) y = self.conv_1(o1) o2 = objax.functional.relu(self.norm_2(y, training)) z = self.conv_2(o2) return z + self.proj_conv(o1) if self.proj_conv else z + x class WideResNetGeneral(objax.nn.Sequential): @staticmethod def mean_reduce(x: JaxArray) -> JaxArray: return x.mean((2, 3))
Apache License 2.0
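A small instantiation sketch for the constructor above. The depth choice is illustrative (a WRN-28 uses 4 blocks per group, since 28 = 6 * 4 + 4), and the forward call follows the usual Objax convention of passing a training flag:

import objax
from objax.zoo.wide_resnet import WideResNetGeneral

# CIFAR-like setup: 3 input channels, 10 classes, 3 groups of 4 blocks, width x2.
model = WideResNetGeneral(nin=3, nclass=10, blocks_per_group=[4, 4, 4], width=2)

x = objax.random.normal((8, 3, 32, 32))  # NCHW batch
logits = model(x, training=False)        # BatchNorm layers use running statistics
print(logits.shape)                      # (8, 10)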
whr94621/njunmt-pytorch
src/models/base.py
NMTModel.encode
python
def encode(self, src_seq):
    raise NotImplementedError
Encode the source side
https://github.com/whr94621/njunmt-pytorch/blob/443918980231541ec36a1c893301258467f22c3b/src/models/base.py#L52-L56
import torch.nn as nn


class NMTModel(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, src_seq, tgt_seq, **kwargs):
        raise NotImplementedError

    def init_decoder(self, enc_outputs, expand_size=1):
        raise NotImplementedError
MIT License
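encode is an abstract hook on the base class above; a minimal subclass sketch showing how a concrete model might fill it in (the embedding/GRU encoder is purely illustrative and assumes the repository root is on PYTHONPATH):

import torch
import torch.nn as nn
from src.models.base import NMTModel  # module path from the record above

class ToyNMT(NMTModel):
    def __init__(self, vocab_size=1000, d_model=64):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, d_model)
        self.rnn = nn.GRU(d_model, d_model, batch_first=True)

    def encode(self, src_seq):
        # src_seq: (batch, src_len) token ids -> (batch, src_len, d_model) encoder states
        out, _ = self.rnn(self.emb(src_seq))
        return out

enc = ToyNMT().encode(torch.randint(0, 1000, (2, 7)))
print(enc.shape)  # torch.Size([2, 7, 64])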
openkinome/kinoml
kinoml/docking/OEDocking.py
pose_molecules
python
def pose_molecules(
    receptor: oechem.OEMolBase,
    molecules: List[oechem.OEMolBase],
    pKa_norm: bool = True,
) -> Union[List[oechem.OEGraphMol], None]:
    from openeye import oedocking
    from ..modeling.OEModeling import generate_reasonable_conformations

    def probability(molecule: oechem.OEGraphMol):
        value = oechem.OEGetSDData(molecule, "POSIT::Probability")
        return float(value)

    options = oedocking.OEPositOptions()
    options.SetIgnoreNitrogenStereo(True)
    options.SetPoseRelaxMode(oedocking.OEPoseRelaxMode_ALL)
    poser = oedocking.OEPosit()
    poser.Initialize(receptor)
    posed_molecules = list()
    for molecule in molecules:
        conformations_ensemble = generate_reasonable_conformations(
            molecule, dense=True, pKa_norm=pKa_norm
        )
        posed_conformations = list()
        for conformations in conformations_ensemble:
            result = oedocking.OESinglePoseResult()
            return_code = poser.Dock(result, conformations)
            if return_code != oedocking.OEDockingReturnCode_Success:
                print(
                    f"Posing failed for molecule with title {conformations.GetTitle()} with error code "
                    f"{oedocking.OEDockingReturnCodeGetName(return_code)}."
                )
                continue
            else:
                posed_conformation = result.GetPose()
                oechem.OESetSDData(
                    posed_conformation, "POSIT::Probability", str(result.GetProbability())
                )
                posed_conformations.append(oechem.OEGraphMol(posed_conformation))
        posed_conformations.sort(key=probability, reverse=True)
        posed_molecules.append(posed_conformations[0])
    if len(posed_molecules) == 0:
        return None
    return posed_molecules
Generate a binding pose of molecules in a prepared receptor with OpenEye's Posit method.

Parameters
----------
receptor: oechem.OEMolBase
    An OpenEye molecule holding the prepared receptor.
molecules: list of oechem.OEMolBase
    A list of OpenEye molecules holding prepared molecules for docking.
pKa_norm: bool, default=True
    Assign the predominant ionization state at pH ~7.4.

Returns
-------
posed_molecules: list of oechem.OEGraphMol or None
    A list of OpenEye molecules holding the docked molecules.
https://github.com/openkinome/kinoml/blob/afe169e6eefc6fddbaaf81cf720a4e224c82696d/kinoml/docking/OEDocking.py#L139-L213
from typing import List, Tuple, Union from openeye import oechem def create_hybrid_receptor( protein: oechem.OEMolBase, ligand: oechem.OEMolBase ) -> oechem.OEGraphMol: from openeye import oedocking receptor = oechem.OEGraphMol() oedocking.OEMakeReceptor(receptor, protein, ligand) return receptor def create_hint_receptor( protein: oechem.OEMolBase, hintx: Union[float, int], hinty: Union[float, int], hintz: Union[float, int], ) -> oechem.OEGraphMol: from openeye import oedocking receptor = oechem.OEGraphMol() oedocking.OEMakeReceptor(receptor, protein, hintx, hinty, hintz) return receptor def resids_to_box( protein: oechem.OEMolBase, resids: List[int] ) -> Tuple[float, float, float, float, float, float]: coordinates = oechem.OEFloatArray(protein.NumAtoms() * 3) oechem.OEGetPackedCoords(protein, coordinates) x_coordinates = [] y_coordinates = [] z_coordinates = [] for i, atom in enumerate(protein.GetAtoms()): if oechem.OEAtomGetResidue(atom).GetResidueNumber() in resids: x_coordinates.append(coordinates[i * 3]) y_coordinates.append(coordinates[(i * 3) + 1]) z_coordinates.append(coordinates[(i * 3) + 2]) box_dimensions = ( max(x_coordinates), max(y_coordinates), max(z_coordinates), min(x_coordinates), min(y_coordinates), min(z_coordinates), ) return box_dimensions def create_box_receptor( protein: oechem.OEMolBase, box_dimensions: Tuple[float, float, float, float, float, float], ) -> oechem.OEGraphMol: from openeye import oedocking box = oedocking.OEBox(*box_dimensions) receptor = oechem.OEGraphMol() oedocking.OEMakeReceptor(receptor, protein, box) return receptor
MIT License
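A usage sketch for the Posit wrapper above. It assumes a valid OpenEye license, a receptor that was already prepared and saved to disk (the file name is a placeholder), and an arbitrary example SMILES:

from openeye import oechem
from kinoml.docking.OEDocking import pose_molecules  # module path from the record above

# Load a previously prepared receptor (placeholder file name).
receptor = oechem.OEGraphMol()
ifs = oechem.oemolistream("prepared_receptor.oeb.gz")
oechem.OEReadMolecule(ifs, receptor)

# Placeholder ligand built from SMILES.
ligand = oechem.OEGraphMol()
oechem.OESmilesToMol(ligand, "c1ccccc1C(=O)N")

poses = pose_molecules(receptor, [ligand], pKa_norm=True)
if poses is not None:
    print(oechem.OEGetSDData(poses[0], "POSIT::Probability"))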
googlecloudplatform/healthcare-api-dicomweb-cli
tests/test_delete.py
DeleteResponse.request_callback
python
def request_callback(self, request, uri, response_headers):
    self.requested = True
    return [200, response_headers, self.data]
Returns body and sets flag
https://github.com/googlecloudplatform/healthcare-api-dicomweb-cli/blob/0098960dc08a9be57649d9445f22ca439f853b6f/tests/test_delete.py#L96-L99
import json import random import httpretty from dcmweb import dcmweb @httpretty.activate def test_delete(): httpretty.register_uri( httpretty.GET, "https://dicom.com/v1/dicomWeb/studies?limit=1", match_querystring=True ) dcmweb_cli = dcmweb.Dcmweb("https://dicom.com/v1/dicomWeb/", False, None) empty_response = DeleteResponse('{}') httpretty.register_uri( httpretty.DELETE, "https://dicom.com/v1/dicomWeb/studies/1", status=200, match_querystring=True, body=empty_response.request_callback ) dcmweb_cli.delete("studies/1") assert empty_response.requested operation_response = DeleteResponse('{"name":"/operation/1"}') operation_progress = OperationProgress() httpretty.register_uri( httpretty.DELETE, "https://dicom.com/v1/dicomWeb/studies/2", status=200, match_querystring=True, body=operation_response.request_callback ) httpretty.register_uri( httpretty.GET, "https://dicom.com/v1/operation/1", status=200, match_querystring=True, body=operation_progress.request_callback ) dcmweb_cli.delete("studies/2") assert operation_progress.requests < 1 assert operation_response.requested operation_response = DeleteResponse('{"name":"/operation/2"}') httpretty.register_uri( httpretty.DELETE, "https://dicom.com/v1/dicomWeb/studies/3", status=200, match_querystring=True, body=operation_response.request_callback ) httpretty.register_uri( httpretty.GET, "https://dicom.com/v1/operation/2", status=404, match_querystring=True, ) assert dcmweb_cli.delete("studies/3") == "/operation/2" class OperationProgress: def __init__(self): self.requests = random.randint(1, 5) def request_callback(self, request, uri, response_headers): self.requests -= 1 resp_body = json.dumps({}) if self.requests < 1: resp_body = json.dumps({"done": True}) return [200, response_headers, resp_body] class DeleteResponse: def __init__(self, data): self.data = data self.requested = False
Apache License 2.0
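The callback in the record above is httpretty's standard dynamic-response hook (a callable returning [status, headers, body]). A self-contained sketch outside the dcmweb test harness, with a made-up URL:

import httpretty
import requests

class DeleteResponse:
    # Returns a canned body and records that the endpoint was hit.
    def __init__(self, data):
        self.data = data
        self.requested = False

    def request_callback(self, request, uri, response_headers):
        self.requested = True
        return [200, response_headers, self.data]

@httpretty.activate
def run():
    resp = DeleteResponse('{}')
    httpretty.register_uri(httpretty.DELETE, "https://dicom.example.com/studies/1",
                           body=resp.request_callback)
    requests.delete("https://dicom.example.com/studies/1")
    assert resp.requested

run()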
pdbpp/pdbpp
src/pdbpp.py
PdbMeta.__call__
python
def __call__(cls, *args, **kwargs):
    if getattr(local, "_pdbpp_in_init", False):

        class OrigPdb(pdb.Pdb, object):
            def set_trace(self, frame=None):
                print("pdb++: using pdb.Pdb for recursive set_trace.")
                if frame is None:
                    frame = sys._getframe().f_back
                super(OrigPdb, self).set_trace(frame)

        orig_pdb = OrigPdb.__new__(OrigPdb)
        kwargs.pop("Config", None)
        orig_pdb.__init__(*args, **kwargs)
        local._pdbpp_in_init = False
        return orig_pdb
    local._pdbpp_in_init = True

    global_pdb = getattr(local, "GLOBAL_PDB", None)
    if global_pdb:
        use_global_pdb = kwargs.pop(
            "use_global_pdb",
            (
                not global_pdb._in_interaction
                and os.environ.get("PDBPP_REUSE_GLOBAL_PDB", "1") == "1"
            ),
        )
    else:
        use_global_pdb = kwargs.pop("use_global_pdb", True)

    frame = sys._getframe().f_back
    called_for_set_trace = PdbMeta.called_for_set_trace(frame)
    if (
        use_global_pdb
        and global_pdb
        and called_for_set_trace
        and (
            hasattr(global_pdb, "_force_use_as_global_pdb")
            or cls.use_global_pdb_for_class(global_pdb, cls)
        )
    ):
        if hasattr(global_pdb, "botframe"):
            sys.settrace(None)
            global_pdb.set_continue()
            global_pdb._set_trace_use_next = True

        stdout = kwargs.get("stdout", sys.stdout)
        global_pdb._setup_streams(stdout=stdout)

        local._pdbpp_in_init = False
        return global_pdb

    obj = cls.__new__(cls)
    if called_for_set_trace:
        kwargs.setdefault("start_filename", called_for_set_trace.f_code.co_filename)
        kwargs.setdefault("start_lineno", called_for_set_trace.f_lineno)

    if "set_global_pdb" in kwargs:
        set_global_pdb = kwargs.pop("set_global_pdb", use_global_pdb)
        if set_global_pdb:
            obj._force_use_as_global_pdb = True
    else:
        set_global_pdb = use_global_pdb

    obj.__init__(*args, **kwargs)
    if set_global_pdb:
        obj._env = {"HOME": os.environ.get("HOME")}
        local.GLOBAL_PDB = obj
    local._pdbpp_in_init = False
    return obj
Reuse an existing instance with ``pdb.set_trace()``.
https://github.com/pdbpp/pdbpp/blob/07f043d39563419ccdd5c8413684362b63776ada/src/pdbpp.py#L239-L312
from __future__ import print_function import sys import os.path import inspect import code import codecs import contextlib import types import traceback import subprocess import threading import pprint import re import signal from collections import OrderedDict import fancycompleter import six from fancycompleter import Color, Completer, ConfigurableClass __author__ = 'Antonio Cuni <anto.cuni@gmail.com>' __url__ = 'http://github.com/antocuni/pdb' __version__ = fancycompleter.LazyVersion('pdbpp') try: from inspect import signature except ImportError: try: from funcsigs import signature except ImportError: def signature(obj): return ' [pip install funcsigs to show the signature]' try: from functools import lru_cache except ImportError: from functools import wraps def lru_cache(maxsize): def dec(fn, *args): cache = {} @wraps(fn) def wrapper(*args): key = args try: ret = cache[key] except KeyError: ret = cache[key] = fn(*args) return ret return wrapper return dec side_effects_free = re.compile(r'^ *[_0-9a-zA-Z\[\].]* *$') RE_COLOR_ESCAPES = re.compile("(\x1b[^m]+m)+") RE_REMOVE_FANCYCOMPLETER_ESCAPE_SEQS = re.compile(r"\x1b\[[\d;]+m") if sys.version_info < (3, ): from io import BytesIO as StringIO else: from io import StringIO local = threading.local() local.GLOBAL_PDB = None local._pdbpp_completing = False local._pdbpp_in_init = False def __getattr__(name): if name == "GLOBAL_PDB": return local.GLOBAL_PDB raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name)) def import_from_stdlib(name): import code result = types.ModuleType(name) stdlibdir, _ = os.path.split(code.__file__) pyfile = os.path.join(stdlibdir, name + '.py') with open(pyfile) as f: src = f.read() co_module = compile(src, pyfile, 'exec', dont_inherit=True) exec(co_module, result.__dict__) return result pdb = import_from_stdlib('pdb') def _newfunc(func, newglobals): newfunc = types.FunctionType(func.__code__, newglobals, func.__name__, func.__defaults__, func.__closure__) if sys.version_info >= (3, ): newfunc.__annotations__ = func.__annotations__ newfunc.__kwdefaults__ = func.__kwdefaults__ return newfunc def rebind_globals(func, newglobals): if hasattr(func, "__code__"): return _newfunc(func, newglobals) import functools if isinstance(func, functools.partial): return functools.partial( _newfunc(func.func, newglobals), *func.args, **func.keywords ) raise ValueError("cannot handle func {!r}".format(func)) class DefaultConfig(object): prompt = '(Pdb++) ' highlight = True sticky_by_default = False use_pygments = None pygments_formatter_class = None pygments_formatter_kwargs = {} bg = 'dark' colorscheme = None editor = None stdin_paste = None truncate_long_lines = True exec_if_unfocused = None disable_pytest_capturing = False encodings = ('utf-8', 'latin-1') enable_hidden_frames = True show_hidden_frames_count = True line_number_color = Color.turquoise filename_color = Color.yellow current_line_color = "39;49;7" show_traceback_on_error = True show_traceback_on_error_limit = None default_pdb_kwargs = { } def setup(self, pdb): pass def before_interaction_hook(self, pdb): pass def setbgcolor(line, color): import re setbg = '\x1b[%sm' % color regexbg = '\\1;%sm' % color result = setbg + re.sub('(\x1b\\[.*?)m', regexbg, line) + '\x1b[00m' if os.environ.get('TERM') == 'eterm-color': result = result.replace(setbg, '\x1b[37;%dm' % color) result = result.replace('\x1b[00;%dm' % color, '\x1b[37;%dm' % color) result = result.replace('\x1b[39;49;00;', '\x1b[37;') return result CLEARSCREEN = '\033[2J\033[1;1H' def 
lasti2lineno(code, lasti): import dis linestarts = list(dis.findlinestarts(code)) linestarts.reverse() for i, lineno in linestarts: if lasti >= i: return lineno return 0 class Undefined: def __repr__(self): return '<undefined>' undefined = Undefined() class ArgWithCount(str): def __new__(cls, value, count, **kwargs): obj = super(ArgWithCount, cls).__new__(cls, value) obj.cmd_count = count return obj def __repr__(self): return "<{} cmd_count={!r} value={}>".format( self.__class__.__name__, self.cmd_count, super(ArgWithCount, self).__repr__(), ) class PdbMeta(type):
BSD 3-Clause New or Revised License
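The metaclass above implements "reuse one global debugger across set_trace() calls". Stripped of the pdb specifics, the underlying pattern is a metaclass whose __call__ returns a cached, thread-local instance; a generic sketch of that idea (not pdb++'s actual logic):

import threading

local = threading.local()

class ReuseGlobalMeta(type):
    # Return the thread-local cached instance when one exists, otherwise build and cache it.
    def __call__(cls, *args, **kwargs):
        use_global = kwargs.pop("use_global", True)
        cached = getattr(local, "GLOBAL_INSTANCE", None)
        if use_global and isinstance(cached, cls):
            return cached
        obj = super().__call__(*args, **kwargs)  # normal __new__ + __init__
        local.GLOBAL_INSTANCE = obj
        return obj

class Debugger(metaclass=ReuseGlobalMeta):
    pass

a, b = Debugger(), Debugger()
c = Debugger(use_global=False)
assert a is b       # the second call reuses the cached instance
assert c is not a   # opting out builds a fresh one, like use_global_pdb=False above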
berkeley-cocosci/wallace
wallace/models.py
Node.__json__
python
def __json__(self):
    return {
        "id": self.id,
        "type": self.type,
        "network_id": self.network_id,
        "creation_time": self.creation_time,
        "time_of_death": self.time_of_death,
        "failed": self.failed,
        "participant_id": self.participant_id,
        "property1": self.property1,
        "property2": self.property2,
        "property3": self.property3,
        "property4": self.property4,
        "property5": self.property5
    }
The json of a node.
https://github.com/berkeley-cocosci/wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L634-L649
from datetime import datetime from .db import Base from sqlalchemy import ForeignKey, or_, and_ from sqlalchemy import (Column, String, Text, Enum, Integer, Boolean, DateTime, Float) from sqlalchemy.orm import relationship, validates import inspect DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f" def timenow(): return datetime.now() class SharedMixin(object): id = Column(Integer, primary_key=True, index=True) creation_time = Column(DateTime, nullable=False, default=timenow) property1 = Column(String(256), nullable=True, default=None) property2 = Column(String(256), nullable=True, default=None) property3 = Column(String(256), nullable=True, default=None) property4 = Column(String(256), nullable=True, default=None) property5 = Column(String(256), nullable=True, default=None) failed = Column(Boolean, nullable=False, default=False, index=True) time_of_death = Column(DateTime, default=None) class Participant(Base, SharedMixin): __tablename__ = "participant" type = Column(String(50)) __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 'participant' } worker_id = Column(String(50), nullable=False) assignment_id = Column(String(50), nullable=False, index=True) unique_id = Column(String(50), nullable=False, index=True) hit_id = Column(String(50), nullable=False) mode = Column(String(50), nullable=False) end_time = Column(DateTime) base_pay = Column(Float) bonus = Column(Float) status = Column(Enum("working", "submitted", "approved", "rejected", "returned", "abandoned", "did_not_attend", "bad_data", "missing_notification", name="participant_status"), nullable=False, default="working", index=True) def __init__(self, worker_id, assignment_id, hit_id, mode): self.worker_id = worker_id self.assignment_id = assignment_id self.hit_id = hit_id self.unique_id = worker_id + ":" + assignment_id self.mode = mode def __json__(self): return { "id": self.id, "type": self.type, "worker_id": self.worker_id, "assignment_id": self.assignment_id, "unique_id": self.unique_id, "hit_id": self.hit_id, "mode": self.mode, "end_time": self.end_time, "base_pay": self.base_pay, "bonus": self.bonus, "status": self.status, "creation_time": self.creation_time, "failed": self.failed, "time_of_death": self.time_of_death, "property1": self.property1, "property2": self.property2, "property3": self.property3, "property4": self.property4, "property5": self.property5 } def nodes(self, type=None, failed=False): if type is None: type = Node if not issubclass(type, Node): raise(TypeError("{} is not a valid node type.".format(type))) if failed not in ["all", False, True]: raise ValueError("{} is not a valid node failed".format(failed)) if failed == "all": return type .query .filter_by(participant_id=self.id) .all() else: return type .query .filter_by(failed=failed, participant_id=self.id) .all() def questions(self, type=None): if type is None: type = Question if not issubclass(type, Question): raise(TypeError("{} is not a valid question type.".format(type))) return type .query .filter_by(participant_id=self.id) .all() def infos(self, type=None, failed=False): nodes = self.nodes(failed="all") infos = [] for n in nodes: infos.extend(n.infos(type=type, failed=failed)) return infos def fail(self): if self.failed is True: raise AttributeError( "Cannot fail {} - it has already failed.".format(self)) else: self.failed = True self.time_of_death = timenow() for n in self.nodes(): n.fail() class Question(Base, SharedMixin): __tablename__ = "question" type = Column(String(50)) __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 
'question' } participant_id = Column(Integer, ForeignKey('participant.id')) participant = relationship(Participant, backref='all_questions') number = Column(Integer, nullable=False) question = Column(String(250), nullable=False) response = Column(String(1000), nullable=False) def __init__(self, participant, question, response, number): if participant.failed: raise ValueError("{} cannot create a question as it has failed" .format(participant)) self.participant = participant self.participant_id = participant.id self.number = number self.question = question self.response = response def fail(self): if self.failed is True: raise AttributeError( "Cannot fail {} - it has already failed.".format(self)) else: self.failed = True self.time_of_death = timenow() def __json__(self): return { "id": self.id, "number": self.number, "type": self.type, "participant_id": self.participant_id, "question": self.question, "response": self.response, "failed": self.failed, "time_of_death": self.time_of_death, "creation_time": self.creation_time, "property1": self.property1, "property2": self.property2, "property3": self.property3, "property4": self.property4, "property5": self.property5 } class Network(Base, SharedMixin): __tablename__ = "network" type = Column(String(50)) __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 'network' } max_size = Column(Integer, nullable=False, default=1e6) full = Column(Boolean, nullable=False, default=False, index=True) role = Column(String(26), nullable=False, default="default", index=True) def __repr__(self): return ("<Network-{}-{} with {} nodes, {} vectors, {} infos, " "{} transmissions and {} transformations>").format( self.id, self.type, len(self.nodes()), len(self.vectors()), len(self.infos()), len(self.transmissions()), len(self.transformations())) def __json__(self): return { "id": self.id, "type": self.type, "max_size": self.max_size, "full": self.full, "role": self.role, "creation_time": self.creation_time, "failed": self.failed, "time_of_death": self.time_of_death, "property1": self.property1, "property2": self.property2, "property3": self.property3, "property4": self.property4, "property5": self.property5 } """ ################################### Methods that get things about a Network ################################### """ def nodes(self, type=None, failed=False, participant_id=None): if type is None: type = Node if not issubclass(type, Node): raise(TypeError("{} is not a valid node type.".format(type))) if failed not in ["all", False, True]: raise ValueError("{} is not a valid node failed".format(failed)) if participant_id is not None: if failed == "all": return type .query .filter_by(network_id=self.id, participant_id=participant_id) .all() else: return type .query .filter_by(network_id=self.id, participant_id=participant_id, failed=failed) .all() else: if failed == "all": return type .query .filter_by(network_id=self.id) .all() else: return type .query .filter_by(failed=failed, network_id=self.id) .all() def size(self, type=None, failed=False): return len(self.nodes(type=type, failed=failed)) def infos(self, type=None, failed=False): if type is None: type = Info if failed not in ["all", False, True]: raise ValueError("{} is not a valid failed".format(failed)) if failed == "all": return type.query .filter_by(network_id=self.id) .all() else: return type.query.filter_by( network_id=self.id, failed=failed).all() def transmissions(self, status="all", failed=False): if status not in ["all", "pending", "received"]: raise(ValueError("You cannot get 
transmission of status {}." .format(status) + "Status can only be pending, received or all")) if failed not in ["all", False, True]: raise ValueError("{} is not a valid failed".format(failed)) if status == "all": if failed == "all": return Transmission.query .filter_by(network_id=self.id) .all() else: return Transmission.query .filter_by(network_id=self.id, failed=failed) .all() else: if failed == "all": return Transmission.query .filter_by(network_id=self.id, status=status) .all() else: return Transmission.query .filter_by( network_id=self.id, status=status, failed=failed) .all() def transformations(self, type=None, failed=False): if type is None: type = Transformation if failed not in ["all", True, False]: raise ValueError("{} is not a valid failed".format(failed)) if failed == "all": return type.query .filter_by(network_id=self.id) .all() else: return type.query .filter_by(network_id=self.id, failed=failed) .all() def latest_transmission_recipient(self): from operator import attrgetter ts = Transmission.query .filter_by(status="received", network_id=self.id, failed=False) .all() if ts: t = max(ts, key=attrgetter('receive_time')) return t.destination else: return None def vectors(self, failed=False): if failed not in ["all", False, True]: raise ValueError("{} is not a valid vector failed".format(failed)) if failed == "all": return Vector.query .filter_by(network_id=self.id) .all() else: return Vector.query .filter_by(network_id=self.id, failed=failed) .all() """ ################################### Methods that make Networks do things ################################### """ def add_node(self, node): raise NotImplementedError def fail(self): if self.failed is True: raise AttributeError( "Cannot fail {} - it has already failed.".format(self)) else: self.failed = True self.time_of_death = timenow() for n in self.nodes(): n.fail() def calculate_full(self): self.full = len(self.nodes()) >= self.max_size def print_verbose(self): print "Nodes: " for a in (self.nodes(failed="all")): print a print "\nVectors: " for v in (self.vectors(failed="all")): print v print "\nInfos: " for i in (self.infos(failed="all")): print i print "\nTransmissions: " for t in (self.transmissions(failed="all")): print t print "\nTransformations: " for t in (self.transformations(failed="all")): print t class Node(Base, SharedMixin): __tablename__ = "node" type = Column(String(50)) __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 'node' } network_id = Column(Integer, ForeignKey('network.id'), index=True) network = relationship(Network, backref="all_nodes") participant_id = Column(Integer, ForeignKey('participant.id'), index=True) participant = relationship(Participant, backref='all_nodes') def __init__(self, network, participant=None): if network.failed: raise ValueError("Cannot create node in {} as it has failed" .format(network)) if participant is not None and participant.failed: raise ValueError("{} cannot create a node as it has failed" .format(participant)) if participant is not None and participant.status != "working": raise ValueError("{} cannot create a node as they are not working" .format(participant)) self.network = network self.network_id = network.id network.calculate_full() if participant is not None: self.participant = participant self.participant_id = participant.id def __repr__(self): return "Node-{}-{}".format(self.id, self.type)
MIT License
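The dict returned by __json__ above still holds datetime objects (creation_time, time_of_death), so dumping it to JSON needs a serializer; a sketch reusing the DATETIME_FMT string defined in the module context:

import json
from datetime import datetime

DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"  # same format string as wallace/models.py

def default(value):
    # Fallback for the datetime fields returned by Node.__json__()
    if isinstance(value, datetime):
        return value.strftime(DATETIME_FMT)
    raise TypeError("not JSON serializable: %r" % (value,))

node_json = {"id": 1, "type": "node", "creation_time": datetime.now(),
             "time_of_death": None, "failed": False}  # trimmed example payload
print(json.dumps(node_json, default=default))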
wavii/listy-django-cache
listy/list_cache.py
ListCache.add
python
def add(self, **kwargs):
    o = self._set_db(**kwargs)
    if not o:
        log.debug("Not setting add because it already exists in the database")
        return
    kwargs['pk'] = self.backing_store.pk_for_object(o)
    for cache in self.caches():
        filtered = [t for t in kwargs.items() if t[0] in cache]
        key = self.key(cache, filtered)
        self.prepend_or_set_memcache(key, o, self.replacement_function(cache, filtered))
        self._update_counters(o, dict(filtered), 1)
    return o
Sets the value, taking into account enabled and deleted flags, and updates the various caches.

1. If it exists and deleted is set to True, get the existing object and set deleted to False
2. If it doesn't exist, create an object
3. For each cached list, attempt to prepend the new object onto the list
4. If the list doesn't exist get all of the objects from the db and set it on the cache
https://github.com/wavii/listy-django-cache/blob/080152fdf18b89387d9f6fb9ce5e9fc1c7ed8a21/listy/list_cache.py#L261-L283
from datetime import date, datetime, timedelta from dateutil.rrule import rrule, DAILY, WEEKLY, MONTHLY, YEARLY from dateutil.relativedelta import * import time import random import urllib import logging import memcache import itertools import cPickle as pickle try: from collections import Counter except ImportError: from listy.counter import Counter from listy.utils import dump_hex, dict_merge from listy.on_demand_pickle import OnDemandPickle MAX_KEY_DATA = 1500 log = logging.getLogger(__name__) list_caches = [] def aggregator(freq, dt): dt = dt.date() if isinstance(dt, datetime) else dt if freq == DAILY: return dt elif freq == MONTHLY: return date(dt.year, dt.month, 1) elif freq == YEARLY: return date(dt.year, 1, 1) elif freq == WEEKLY: return dt - timedelta(days=dt.weekday()) def to_datetime(day): return datetime(day.year, day.month, day.day) RELATIVEDELTA = { DAILY: relativedelta(days=+1), MONTHLY: relativedelta(months=+1), YEARLY: relativedelta(years=+1), WEEKLY: relativedelta(weeks=+1), } class NotCachableException(Exception): pass class ListCache(object): def __init__(self, backing_store, caches, soft_delete_field=None, deleted_timestamp_field=None, enabled_field=None, timestamp_field=None, disable_cache=False, address='127.0.0.1:11211', filter_out_soft_deletes=True): self.backing_store = backing_store self.name = self.backing_store.name() self.pickler = self.backing_store.pickler() self.soft_delete_field = soft_delete_field self.deleted_timestamp_field = deleted_timestamp_field self.filter_out_soft_deletes = filter_out_soft_deletes self.enabled_field = enabled_field self.timestamp_field = timestamp_field self.disable_cache = disable_cache self.address = ('%s:11211' % address()) if callable(address) else address self.mc = memcache.Client([self.address], pickleProtocol=pickle.HIGHEST_PROTOCOL, pickler=self.pickler, unpickler=self.pickler) self.caches_mc = memcache.Client([self.address], pickleProtocol=pickle.HIGHEST_PROTOCOL) self.generation = 1 self.configured_caches = set([tuple(sorted(c)) for c in caches]) self.caches_key = 'caches:%s' % self.name self.caches() self.reset_local_stats() list_caches.append(self) def caches(self, update_if_missing=True): if self.disable_cache: return [] installed_cache = self.caches_mc.gets(self.caches_key) or set() if installed_cache >= self.configured_caches: return installed_cache for i in range(0, 10): c = installed_cache | self.configured_caches if self.caches_mc.cas(self.caches_key, c): return c if not installed_cache: self.caches_mc.cas_ids = {} c = self.caches_mc.gets(self.caches_key) or set() t = 0.001 * random.randint(1, 5) * i log.warn("Failed to update 'caches' cache for %r, trying again (%s) in %.3f seconds", self.caches_key, self.address, t) time.sleep(t) raise RuntimeError("Failed to update 'caches' cache for %r, giving up (%s)", self.caches_key, self.address) def count(self, **kwargs): key = self.count_key(kwargs) count = self.mc.get(key) if count is None: if self.soft_delete_field: kwargs = dict_merge(kwargs, { self.soft_delete_field: False }) count = self.backing_store.count(kwargs) self.mc.set(key, count) return count def daily_count(self, **kwargs): return self._count(DAILY, kwargs) def daily_counts(self, **kwargs): return self._counts(DAILY, kwargs) def weekly_count(self, **kwargs): return self._count(WEEKLY, kwargs) def weekly_counts(self, **kwargs): return self._counts(WEEKLY, kwargs) def monthly_count(self, **kwargs): return self._count(MONTHLY, kwargs) def monthly_counts(self, **kwargs): return self._counts(MONTHLY, kwargs) 
def yearly_count(self, **kwargs): return self._count(YEARLY, kwargs) def yearly_counts(self, **kwargs): return self._counts(YEARLY, kwargs) def get_one(self, pk): objects = list(self.get(pk=pk)) assert len(objects) <= 1 return objects[0] if objects else None def get_first(self, **kwargs): try: return iter(self.get(**kwargs)).next() except StopIteration: return None def get(self, **kwargs): return self.get_multi([kwargs], lambda filter_args: 'results')['results'] def get_multi(self, filters, key_func): start = time.time() key_map = self.generate_key_map(filters, key_func) result_set = {} if not self.disable_cache: for key_batch in self.batch_keys(key_map.keys()): result_set.update(self.mc.get_multi(key_batch)) missing_keys = set(key_map.keys()) - set(result_set.keys()) for missing_key in missing_keys: filter_args, cache, user_key = key_map[missing_key] results = self.replacement_function(cache, filter_args.items())() log.debug("Key %r not in cache, updating with database's results? %r", missing_key, not self.disable_cache) if not self.disable_cache: if not self.mc.set(missing_key, results): log.info("Set for memcache_key=%s failed, someone must have set it before me.", missing_key) result_set[missing_key] = results self.hits += (len(key_map) - len(missing_keys)) self.misses += len(missing_keys) self.time += (time.time() - start) return self.map_results(key_map, result_set) def get_multi_list(self, filters): return list(itertools.chain(*self.get_multi(filters, lambda f: repr(f.items())).values()))
MIT License
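The module context above also defines aggregator, the helper that buckets timestamps for the daily/weekly/monthly counters that add keeps in sync. A worked example of how it snaps dates to period starts (the datetime-input handling is trimmed):

from datetime import date, timedelta
from dateutil.rrule import DAILY, WEEKLY, MONTHLY, YEARLY

def aggregator(freq, dt):
    # Same logic as the helper shown in the listy context above.
    if freq == DAILY:
        return dt
    if freq == WEEKLY:
        return dt - timedelta(days=dt.weekday())
    if freq == MONTHLY:
        return date(dt.year, dt.month, 1)
    if freq == YEARLY:
        return date(dt.year, 1, 1)

d = date(2021, 6, 17)            # a Thursday
print(aggregator(DAILY, d))      # 2021-06-17
print(aggregator(WEEKLY, d))     # 2021-06-14 (the Monday of that week)
print(aggregator(MONTHLY, d))    # 2021-06-01
print(aggregator(YEARLY, d))     # 2021-01-01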
kappa-dev/regraph
regraph/backends/networkx/hierarchies.py
NXHierarchy.add_graph
python
def add_graph(self, graph_id, graph, attrs=None):
    if graph_id in self.nodes():
        raise HierarchyError(
            "Node '{}' already exists in the hierarchy!".format(graph_id))
    self.add_node(graph_id)
    if attrs is not None:
        normalize_attrs(attrs)
    else:
        attrs = dict()
    self.update_node_attrs(
        graph_id, {
            "graph": graph,
            "attrs": attrs
        }, normalize=False)
    return
Add a new graph to the hierarchy.

Parameters
----------
graph_id : hashable
    Id of a new node in the hierarchy
graph : regraph.Graph
    Graph object corresponding to the new node of the hierarchy
graph_attrs : dict, optional
    Dictionary containing attributes of the new node
https://github.com/kappa-dev/regraph/blob/bb148a7cbd94e87f622443263e04c3fae2d4d00b/regraph/backends/networkx/hierarchies.py#L197-L224
import copy import networkx as nx import warnings from regraph.exceptions import (HierarchyError, ReGraphError, InvalidHomomorphism, RewritingError, ReGraphWarning) from regraph.hierarchies import Hierarchy from regraph.backends.networkx.graphs import NXGraph from regraph.category_utils import (compose, pushout, get_unique_map_to_pullback, check_homomorphism, right_relation_dict, pullback_complement) from regraph.utils import (normalize_attrs, normalize_relation, keys_by_value,) class NXHierarchy(Hierarchy, NXGraph): rel_dict_factory = dict def graphs(self, data=False): if data: return [ (n, n_data["attrs"]) for n, n_data in self.nodes(True) if "graph" in n_data] else: return [n for n, n_data in self.nodes(True) if "graph" in n_data] def typings(self, data=False): if data: return [ (s, t, e_data["attrs"]) for s, t, e_data in self.edges(True) if "mapping" in e_data] else: return [ (s, t) for s, t, e_data in self.edges(True) if "mapping" in e_data] def relations(self, data=False): if data: return [ (l, r, attrs) for (l, r), attrs in self.relation_edges.items() ] else: return list(set(self.relation_edges.keys())) def successors(self, node_id): return self._graph.successors(node_id) def predecessors(self, node_id): return self._graph.predecessors(node_id) def get_graph(self, graph_id): if graph_id not in self.nodes(): raise HierarchyError( "Hierarchy node '{}' does not exist!".format(graph_id)) if not self.is_graph(graph_id): raise HierarchyError( "Hierarchy node '{}' is a rule!".format(graph_id)) return self.get_node(graph_id)["graph"] def get_typing(self, source, target): if (source, target) in self.edges(): if self.is_graph(source): return self.get_edge(source, target)["mapping"] else: edge = self.get_edge(source, target) return (edge["lhs_mapping"], edge["rhs_mapping"]) else: try: path = nx.shortest_path(self._graph, source, target) except: raise HierarchyError( "No path from '{}' to '{}' in the hierarchy".format( source, target)) return self.compose_path_typing(path) def get_relation(self, left, right): return self.relation_edges[(left, right)]["rel"] def get_graph_attrs(self, graph_id): return self.get_node(graph_id)["attrs"] def set_graph_attrs(self, node_id, new_attrs): normalize_attrs(new_attrs) attrs = self.get_node(node_id)["attrs"] for k, v in new_attrs.items(): attrs[k] = v nx.set_node_attributes(self._graph, {node_id: {"attrs": attrs}}) def get_typing_attrs(self, source, target): return self.get_edge(source, target)["attrs"] def set_typing_attrs(self, source, target, attrs): return self.set_edge_attrs(source, target, {"attrs": attrs}) def get_relation_attrs(self, left, right): return self.relation_edges[(left, right)]["attrs"] def set_relation_attrs(self, left, right, attrs): normalize_attrs(attrs) for k, v in attrs.items(): self.relation_edges[(left, right)]["attrs"][k] = v self.relation_edges[(right, left)]["attrs"][k] = v def set_node_relation(self, left_graph, right_graph, left_node, right_node): if left_node in self.relation_edges[ left_graph, right_graph]["rel"].keys(): self.relation_edges[left_graph, right_graph]["rel"][left_node].add( right_node) else: self.relation_edges[left_graph, right_graph]["rel"][left_node] = { right_node} if right_node in self.relation_edges[ right_graph, left_graph]["rel"].keys(): self.relation_edges[ right_graph, left_graph]["rel"][right_node].add(left_node) else: self.relation_edges[right_graph, left_graph]["rel"][right_node] = { left_node}
MIT License
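A short usage sketch for add_graph. It assumes NXGraph exposes the usual add_nodes_from/add_edges_from helpers of ReGraph's graph API (module paths are taken from the record, not re-verified here):

from regraph.backends.networkx.graphs import NXGraph
from regraph.backends.networkx.hierarchies import NXHierarchy

# Build a small graph to hang in the hierarchy.
g = NXGraph()
g.add_nodes_from(["agent", "region"])
g.add_edges_from([("region", "agent")])

hierarchy = NXHierarchy()
hierarchy.add_graph("shapes", g, attrs={"kind": {"toy"}})

print(hierarchy.graphs())                     # ['shapes']
print(hierarchy.get_graph("shapes").nodes())  # ['agent', 'region']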
haoheliu/voicefixer
voicefixer/vocoder/model/modules.py
FiLMConv1d.forward
python
def forward(self, x, c):
    if self.ins_norm:
        c = self.norm(c)
    for i in range(self.loop):
        x = self.mlps[i](x)
        x = F.relu(x)
        x = self.films[i](x, c)
    return x
x: (B, input_dim, seq)
c: (B, attribute_dim, seq)
https://github.com/haoheliu/voicefixer/blob/8bbe7aff7a088cd4001eec319c61d852ed905ca2/voicefixer/vocoder/model/modules.py#L808-L820
import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from voicefixer.vocoder.config import Config class SineGen(torch.nn.Module): def __init__(self, samp_rate=24000, harmonic_num=0, sine_amp=0.1, noise_std=0.003, voiced_threshold=0, flag_for_pulse=False): super(SineGen, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse def _f02uv(self, f0): uv = torch.ones_like(f0) uv = uv * (f0 > self.voiced_threshold) return uv def _f02sine(self, f0_values): rad_values = (f0_values / self.sampling_rate) % 1 rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], device=f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini if not self.flag_for_pulse: tmp_over_one = torch.cumsum(rad_values, 1) % 1 tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi) else: uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) tmp_cumsum = torch.cumsum(rad_values, dim=1) for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, f0): with torch.no_grad(): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) f0_buf[:, :, 0] = f0[:, :, 0] for idx in np.arange(self.harmonic_num): f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) sine_waves = self._f02sine(f0_buf) * self.sine_amp uv = self._f02uv(f0) noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) sine_waves = sine_waves * uv + noise return sine_waves, uv, noise class LowpassBlur(nn.Module): def __init__(self, channels=128, filt_size=3, pad_type='reflect', pad_off=0): super(LowpassBlur, self).__init__() self.filt_size = filt_size self.pad_off = pad_off self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. 
* (filt_size - 1) / 2))] self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes] self.off = 0 self.channels = channels if (self.filt_size == 1): a = np.array([1., ]) elif (self.filt_size == 2): a = np.array([1., 1.]) elif (self.filt_size == 3): a = np.array([1., 2., 1.]) elif (self.filt_size == 4): a = np.array([1., 3., 3., 1.]) elif (self.filt_size == 5): a = np.array([1., 4., 6., 4., 1.]) elif (self.filt_size == 6): a = np.array([1., 5., 10., 10., 5., 1.]) elif (self.filt_size == 7): a = np.array([1., 6., 15., 20., 15., 6., 1.]) filt = torch.Tensor(a) filt = filt / torch.sum(filt) self.register_buffer('filt', filt[None, None, :].repeat((self.channels, 1, 1))) self.pad = get_pad_layer_1d(pad_type)(self.pad_sizes) def forward(self, inp): if self.filt_size == 1: return inp return F.conv1d(self.pad(inp), self.filt, groups=inp.shape[1]) def get_pad_layer_1d(pad_type): if (pad_type in ['refl', 'reflect']): PadLayer = nn.ReflectionPad1d elif (pad_type in ['repl', 'replicate']): PadLayer = nn.ReplicationPad1d elif (pad_type == 'zero'): PadLayer = nn.ZeroPad1d else: print('Pad type [%s] not recognized' % pad_type) return PadLayer class MovingAverageSmooth(torch.nn.Conv1d): def __init__(self, channels, window_len=3): super(MovingAverageSmooth, self).__init__(in_channels=channels, out_channels=channels, kernel_size=1, groups=channels, bias=False) torch.nn.init.constant_(self.weight, 1.0 / window_len) for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverageSmooth, self).forward(data) class Conv1d(torch.nn.Conv1d): def __init__(self, *args, **kwargs): super(Conv1d, self).__init__(*args, **kwargs) def reset_parameters(self): torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu") if self.bias is not None: torch.nn.init.constant_(self.bias, 0.0) class Stretch2d(torch.nn.Module): def __init__(self, x_scale, y_scale, mode="nearest"): super(Stretch2d, self).__init__() self.x_scale = x_scale self.y_scale = y_scale self.mode = mode def forward(self, x): return F.interpolate( x, scale_factor=(self.y_scale, self.x_scale), mode=self.mode) class Conv2d(torch.nn.Conv2d): def __init__(self, *args, **kwargs): super(Conv2d, self).__init__(*args, **kwargs) def reset_parameters(self): self.weight.data.fill_(1. / np.prod(self.kernel_size)) if self.bias is not None: torch.nn.init.constant_(self.bias, 0.0) class UpsampleNetwork(torch.nn.Module): def __init__(self, upsample_scales, nonlinear_activation=None, nonlinear_activation_params={}, interpolate_mode="nearest", freq_axis_kernel_size=1, use_causal_conv=False, ): super(UpsampleNetwork, self).__init__() self.use_causal_conv = use_causal_conv self.up_layers = torch.nn.ModuleList() for scale in upsample_scales: stretch = Stretch2d(scale, 1, interpolate_mode) self.up_layers += [stretch] assert (freq_axis_kernel_size - 1) % 2 == 0, "Not support even number freq axis kernel size." 
freq_axis_padding = (freq_axis_kernel_size - 1) // 2 kernel_size = (freq_axis_kernel_size, scale * 2 + 1) if use_causal_conv: padding = (freq_axis_padding, scale * 2) else: padding = (freq_axis_padding, scale) conv = Conv2d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) self.up_layers += [conv] if nonlinear_activation is not None: nonlinear = getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params) self.up_layers += [nonlinear] def forward(self, c): c = c.unsqueeze(1) for f in self.up_layers: if self.use_causal_conv and isinstance(f, Conv2d): c = f(c)[..., :c.size(-1)] else: c = f(c) return c.squeeze(1) class ConvInUpsampleNetwork(torch.nn.Module): def __init__(self, upsample_scales=[3, 4, 5, 5], nonlinear_activation="ReLU", nonlinear_activation_params={}, interpolate_mode="nearest", freq_axis_kernel_size=1, aux_channels=80, aux_context_window=0, use_causal_conv=False ): super(ConvInUpsampleNetwork, self).__init__() self.aux_context_window = aux_context_window self.use_causal_conv = use_causal_conv and aux_context_window > 0 kernel_size = aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1 self.conv_in = Conv1d(aux_channels, aux_channels, kernel_size=kernel_size, bias=False) self.upsample = UpsampleNetwork( upsample_scales=upsample_scales, nonlinear_activation=nonlinear_activation, nonlinear_activation_params=nonlinear_activation_params, interpolate_mode=interpolate_mode, freq_axis_kernel_size=freq_axis_kernel_size, use_causal_conv=use_causal_conv, ) def forward(self, c): c_ = self.conv_in(c) c = c_[:, :, :-self.aux_context_window] if self.use_causal_conv else c_ return self.upsample(c) class DownsampleNet(nn.Module): def __init__(self, input_size, output_size, upsample_factor, hp=None, index=0): super(DownsampleNet, self).__init__() self.input_size = input_size self.output_size = output_size self.upsample_factor = upsample_factor self.skip_conv = nn.Conv1d(input_size, output_size, kernel_size=1) self.index = index layer = nn.Conv1d(input_size, output_size, kernel_size=upsample_factor * 2, stride=upsample_factor, padding=upsample_factor // 2 + upsample_factor % 2) self.layer = nn.utils.weight_norm(layer) def forward(self, inputs): B, C, T = inputs.size() res = inputs[:, :, ::self.upsample_factor] skip = self.skip_conv(res) outputs = self.layer(inputs) outputs = outputs + skip return outputs class UpsampleNet(nn.Module): def __init__(self, input_size, output_size, upsample_factor, hp=None, index=0): super(UpsampleNet, self).__init__() self.up_type = Config.up_type self.use_smooth = Config.use_smooth self.use_drop = Config.use_drop self.input_size = input_size self.output_size = output_size self.upsample_factor = upsample_factor self.skip_conv = nn.Conv1d(input_size, output_size, kernel_size=1) self.index = index if self.use_smooth: window_lens = [5, 5, 4, 3] self.window_len = window_lens[index] if self.up_type != "pn" or self.index < 3: layer = nn.ConvTranspose1d(input_size, output_size, upsample_factor * 2, upsample_factor, padding=upsample_factor // 2 + upsample_factor % 2, output_padding=upsample_factor % 2) self.layer = nn.utils.weight_norm(layer) else: self.layer = nn.Sequential( nn.ReflectionPad1d(1), nn.utils.weight_norm(nn.Conv1d(input_size, output_size * upsample_factor, kernel_size=3)), nn.LeakyReLU(), nn.ReflectionPad1d(1), nn.utils.weight_norm( nn.Conv1d(output_size * upsample_factor, output_size * upsample_factor, kernel_size=3)), nn.LeakyReLU(), nn.ReflectionPad1d(1), nn.utils.weight_norm( nn.Conv1d(output_size * 
upsample_factor, output_size * upsample_factor, kernel_size=3)), nn.LeakyReLU(), ) if hp is not None: self.org = Config.up_org self.no_skip = Config.no_skip else: self.org = False self.no_skip = True if self.use_smooth: self.mas = nn.Sequential( MovingAverageSmooth(output_size, self.window_len), ) def forward(self, inputs): if not self.org: inputs = inputs + torch.sin(inputs) B, C, T = inputs.size() res = inputs.repeat(1, self.upsample_factor, 1).view(B, C, -1) skip = self.skip_conv(res) if self.up_type == "repeat": return skip outputs = self.layer(inputs) if self.up_type == "pn" and self.index > 2: B, c, l = outputs.size() outputs = outputs.view(B, -1, l * self.upsample_factor) if self.no_skip: return outputs if not self.org: outputs = outputs + skip if self.use_smooth: outputs = self.mas(outputs) if self.use_drop: outputs = F.dropout(outputs, p=0.05) return outputs class ResStack(nn.Module): def __init__(self, channel, kernel_size=3, resstack_depth=4, hp=None): super(ResStack, self).__init__() self.use_wn = Config.use_wn self.use_shift_scale = Config.use_shift_scale self.channel = channel def get_padding(kernel_size, dilation=1): return int((kernel_size * dilation - dilation) / 2) if self.use_shift_scale: self.scale_conv = nn.utils.weight_norm( nn.Conv1d(channel, 2 * channel, kernel_size=kernel_size, dilation=1, padding=1)) if not self.use_wn: self.layers = nn.ModuleList([ nn.Sequential( nn.LeakyReLU(), nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=kernel_size, dilation=3 ** (i % 10), padding=get_padding(kernel_size, 3 ** (i % 10)))), nn.LeakyReLU(), nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=kernel_size, dilation=1, padding=get_padding(kernel_size, 1))), ) for i in range(resstack_depth) ]) else: self.wn = WaveNet( in_channels=channel, out_channels=channel, cin_channels=-1, num_layers=resstack_depth, residual_channels=channel, gate_channels=channel, skip_channels=channel, causal=False, use_downup=False, ) def forward(self, x): if not self.use_wn: for layer in self.layers: x = x + layer(x) else: x = self.wn(x) if self.use_shift_scale: m_s = self.scale_conv(x) m_s = m_s[:, :, :-1] m, s = torch.split(m_s, self.channel, dim=1) s = F.softplus(s) x = m + s * x[:, :, 1:] x = F.pad(x, pad=(1, 0), mode='constant', value=0) return x class WaveNet(nn.Module): def __init__(self, in_channels=1, out_channels=1, num_layers=10, residual_channels=64, gate_channels=64, skip_channels=64, kernel_size=3, dilation_rate=2, cin_channels=80, hp=None, causal=False, use_downup=False, ): super(WaveNet, self).__init__() self.in_channels = in_channels self.causal = causal self.num_layers = num_layers self.out_channels = out_channels self.gate_channels = gate_channels self.residual_channels = residual_channels self.skip_channels = skip_channels self.cin_channels = cin_channels self.kernel_size = kernel_size self.use_downup = use_downup self.front_conv = nn.Sequential( nn.Conv1d(in_channels=self.in_channels, out_channels=self.residual_channels, kernel_size=3, padding=1), nn.ReLU() ) if self.use_downup: self.downup_conv = nn.Sequential( nn.Conv1d(in_channels=self.residual_channels, out_channels=self.residual_channels, kernel_size=3, stride=2, padding=1), nn.ReLU(), nn.Conv1d(in_channels=self.residual_channels, out_channels=self.residual_channels, kernel_size=3, stride=2, padding=1), nn.ReLU(), UpsampleNet(self.residual_channels, self.residual_channels, 4, hp), ) self.res_blocks = nn.ModuleList() for n in range(self.num_layers): self.res_blocks.append(ResBlock(self.residual_channels, 
self.gate_channels, self.skip_channels, self.kernel_size, dilation=dilation_rate ** n, cin_channels=self.cin_channels, local_conditioning=(self.cin_channels > 0), causal=self.causal, mode='SAME')) self.final_conv = nn.Sequential( nn.ReLU(), Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal), nn.ReLU(), Conv(self.skip_channels, self.out_channels, 1, causal=self.causal), ) def forward(self, x, c=None): return self.wavenet(x, c) def wavenet(self, tensor, c=None): h = self.front_conv(tensor) if self.use_downup: h = self.downup_conv(h) skip = 0 for i, f in enumerate(self.res_blocks): h, s = f(h, c) skip += s out = self.final_conv(skip) return out def receptive_field_size(self): num_dir = 1 if self.causal else 2 dilations = [2 ** (i % self.num_layers) for i in range(self.num_layers)] return num_dir * (self.kernel_size - 1) * sum(dilations) + 1 + (self.front_channels - 1) def remove_weight_norm(self): for f in self.res_blocks: f.remove_weight_norm() class Conv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, causal=False, mode='SAME'): super(Conv, self).__init__() self.causal = causal self.mode = mode if self.causal and self.mode == 'SAME': self.padding = dilation * (kernel_size - 1) elif self.mode == 'SAME': self.padding = dilation * (kernel_size - 1) // 2 else: self.padding = 0 self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, dilation=dilation, padding=self.padding) self.conv = nn.utils.weight_norm(self.conv) nn.init.kaiming_normal_(self.conv.weight) def forward(self, tensor): out = self.conv(tensor) if self.causal and self.padding is not 0: out = out[:, :, :-self.padding] return out def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv) class ResBlock(nn.Module): def __init__(self, in_channels, out_channels, skip_channels, kernel_size, dilation, cin_channels=None, local_conditioning=True, causal=False, mode='SAME'): super(ResBlock, self).__init__() self.causal = causal self.local_conditioning = local_conditioning self.cin_channels = cin_channels self.mode = mode self.filter_conv = Conv(in_channels, out_channels, kernel_size, dilation, causal, mode) self.gate_conv = Conv(in_channels, out_channels, kernel_size, dilation, causal, mode) self.res_conv = nn.Conv1d(out_channels, in_channels, kernel_size=1) self.skip_conv = nn.Conv1d(out_channels, skip_channels, kernel_size=1) self.res_conv = nn.utils.weight_norm(self.res_conv) self.skip_conv = nn.utils.weight_norm(self.skip_conv) if self.local_conditioning: self.filter_conv_c = nn.Conv1d(cin_channels, out_channels, kernel_size=1) self.gate_conv_c = nn.Conv1d(cin_channels, out_channels, kernel_size=1) self.filter_conv_c = nn.utils.weight_norm(self.filter_conv_c) self.gate_conv_c = nn.utils.weight_norm(self.gate_conv_c) def forward(self, tensor, c=None): h_filter = self.filter_conv(tensor) h_gate = self.gate_conv(tensor) if self.local_conditioning: h_filter += self.filter_conv_c(c) h_gate += self.gate_conv_c(c) out = torch.tanh(h_filter) * torch.sigmoid(h_gate) res = self.res_conv(out) skip = self.skip_conv(out) if self.mode == 'SAME': return (tensor + res) * math.sqrt(0.5), skip else: return (tensor[:, :, 1:] + res) * math.sqrt(0.5), skip def remove_weight_norm(self): self.filter_conv.remove_weight_norm() self.gate_conv.remove_weight_norm() nn.utils.remove_weight_norm(self.res_conv) nn.utils.remove_weight_norm(self.skip_conv) nn.utils.remove_weight_norm(self.filter_conv_c) nn.utils.remove_weight_norm(self.gate_conv_c) @torch.jit.script def 
fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): n_channels_int = n_channels[0] in_act = input_a + input_b t_act = torch.tanh(in_act[:, :n_channels_int]) s_act = torch.sigmoid(in_act[:, n_channels_int:]) acts = t_act * s_act return acts @torch.jit.script def fused_res_skip(tensor, res_skip, n_channels): n_channels_int = n_channels[0] res = res_skip[:, :n_channels_int] skip = res_skip[:, n_channels_int:] return (tensor + res), skip class ResStack2D(nn.Module): def __init__(self, channels=16, kernel_size=3, resstack_depth=4, hp=None): super(ResStack2D, self).__init__() channels = 16 kernel_size = 3 resstack_depth = 2 self.channels = channels def get_padding(kernel_size, dilation=1): return int((kernel_size * dilation - dilation) / 2) self.layers = nn.ModuleList([ nn.Sequential( nn.LeakyReLU(), nn.utils.weight_norm(nn.Conv2d(1, self.channels, kernel_size, dilation=(1, 3 ** (i)), padding=(1, get_padding(kernel_size, 3 ** (i))))), nn.LeakyReLU(), nn.utils.weight_norm(nn.Conv2d(self.channels, self.channels, kernel_size, dilation=(1, 3 ** (i)), padding=(1, get_padding(kernel_size, 3 ** (i))))), nn.LeakyReLU(), nn.utils.weight_norm(nn.Conv2d(self.channels, 1, kernel_size=1))) for i in range(resstack_depth)]) def forward(self, tensor): x = tensor.unsqueeze(1) for layer in self.layers: x = x + layer(x) x = x.squeeze(1) return x class FiLM(nn.Module): def __init__(self, input_dim, attribute_dim): super().__init__() self.input_dim = input_dim self.generator = nn.Conv1d(attribute_dim, input_dim * 2, kernel_size=3, padding=1) def forward(self, x, c): c = self.generator(c) m, s = torch.split(c, self.input_dim, dim=1) return x * s + m class FiLMConv1d(nn.Module): def __init__(self, in_size, out_size, attribute_dim, ins_norm=True, loop=1): super().__init__() self.loop = loop self.mlps = nn.ModuleList( [nn.Conv1d(in_size, out_size, kernel_size=3, padding=1)] + [nn.Conv1d(out_size, out_size, kernel_size=3, padding=1) for i in range(loop - 1)]) self.films = nn.ModuleList([FiLM(out_size, attribute_dim) for i in range(loop)]) self.ins_norm = ins_norm if self.ins_norm: self.norm = nn.InstanceNorm1d(attribute_dim)
MIT License
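For illustration, a minimal shape check of the FiLMConv1d.forward entry above, assuming the voicefixer package is installed; the batch size, channel counts and sequence length are arbitrary example values, not taken from the source.

import torch
from voicefixer.vocoder.model.modules import FiLMConv1d

# Two stacked conv + FiLM stages: x is modulated by the attribute sequence c at each stage.
block = FiLMConv1d(in_size=64, out_size=128, attribute_dim=80, ins_norm=True, loop=2)
x = torch.randn(4, 64, 100)   # (B, input_dim, seq)
c = torch.randn(4, 80, 100)   # (B, attribute_dim, seq)
y = block(x, c)
print(y.shape)                # torch.Size([4, 128, 100]) -- the sequence length is preserved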
elsander/goodenoughalgs
Visualizations.py
LongMC3
python
def LongMC3(fname=None):
    if fname:
        run = scipy.genfromtxt(fname)
    else:
        bestSol, run = tsp.TSP(200, 'MC3', 20000, 10, seed=None,
                               coordfile='tmp.txt')
        fname = 'ExampleOutput/MC3-Long.txt'
        run = scipy.array(run)
        scipy.savetxt(fname, run)
    Xs = range(0, run.shape[0] * 1000, 1000)
    pl.plot(Xs, run)
    pl.show()
Plot a single long MC3 run to demonstrate high performance but slow convergence.
https://github.com/elsander/goodenoughalgs/blob/0c78435e02bcc8a13129ad5867c2c1f5aec34dd8/Visualizations.py#L28-L43
import TravelingSalesperson as tsp
import scipy
import pylab as pl


def PlotMultipleRuns(Alg, nruns=20, fname=None):
    if fname:
        runs = scipy.genfromtxt(fname)
    else:
        runs = []
        for i in range(nruns):
            bestSol, fitHistory = tsp.TSP(200, Alg, 3000, 30, seed=None,
                                          coordfile='tmp.txt')
            runs.append(fitHistory)
        fname = 'MultRuns-' + str(Alg) + '.txt'
        runs = scipy.array(runs)
        scipy.savetxt(fname, runs)
    Xs = scipy.linspace(0, runs.shape[1] * 1000, runs.shape[1])
    for i in range(runs.shape[0]):
        pl.plot(Xs, runs[i, :])
    pl.show()
MIT License
photosynthesis-team/piq
piq/dss.py
_dct_matrix
python
def _dct_matrix(size: int) -> torch.Tensor:
    p = torch.arange(1, size).reshape((size - 1, 1))
    q = torch.arange(1, 2 * size, 2)
    return torch.cat((
        math.sqrt(1 / size) * torch.ones((1, size)),
        math.sqrt(2 / size) * torch.cos(math.pi / (2 * size) * p * q)), 0)
r"""Computes the matrix coefficients for DCT transform using the following formula:
https://fr.mathworks.com/help/images/discrete-cosine-transform.html

Args:
    size : size of DCT matrix to create. (`size`, `size`)
https://github.com/photosynthesis-team/piq/blob/c35a60f8c38159516154f8a344f06c4059f2f3b2/piq/dss.py#L162-L173
import math import functools import torch import torch.nn.functional as F from typing import Union from torch.nn.modules.loss import _Loss from piq.utils import _validate_input, _reduce from piq.functional import gaussian_filter, rgb2yiq def dss(x: torch.Tensor, y: torch.Tensor, reduction: str = 'mean', data_range: Union[int, float] = 1.0, dct_size: int = 8, sigma_weight: float = 1.55, kernel_size: int = 3, sigma_similarity: float = 1.5, percentile: float = 0.05) -> torch.Tensor: if sigma_weight == 0 or sigma_similarity == 0: raise ValueError('Gaussian sigmas must not be 0.') if percentile <= 0 or percentile > 1: raise ValueError('Percentile must be in [0,1]') _validate_input(tensors=[x, y], dim_range=(4, 4)) for size in (dct_size, kernel_size): if size <= 0 or size > min(x.size(-1), x.size(-2)): raise ValueError('DCT and kernels sizes must be included in [0, input size)') x = (x / float(data_range)) * 255 y = (y / float(data_range)) * 255 num_channels = x.size(1) if num_channels == 3: x_lum = rgb2yiq(x)[:, :1] y_lum = rgb2yiq(y)[:, :1] else: x_lum = x y_lum = y rows, cols = x_lum.size()[-2:] rows = dct_size * (rows // dct_size) cols = dct_size * (cols // dct_size) x_lum = x_lum[:, :, 0:rows, 0:cols] y_lum = y_lum[:, :, 0:rows, 0:cols] dct_x = _dct_decomp(x_lum, dct_size) dct_y = _dct_decomp(y_lum, dct_size) coords = torch.arange(1, dct_size + 1).to(device=x.device, dtype=torch.float32) weight = (coords - 0.5) ** 2 weight = (- (weight.unsqueeze(0) + weight.unsqueeze(1)) / (2 * sigma_weight ** 2)).exp() subband_sim_matrix = torch.zeros((x.size(0), dct_size, dct_size), device=x.device) threshold = 1e-2 for m in range(dct_size): for n in range(dct_size): first_term = (m == 0 and n == 0) if weight[m, n] < threshold: weight[m, n] = 0 continue subband_sim_matrix[:, m, n] = _subband_similarity( dct_x[:, :, m::dct_size, n::dct_size], dct_y[:, :, m::dct_size, n::dct_size], first_term, kernel_size, sigma_similarity, percentile) eps = torch.finfo(weight.dtype).eps similarity_scores = torch.sum(subband_sim_matrix * (weight / (torch.sum(weight)) + eps), dim=[1, 2]) dss_val = _reduce(similarity_scores, reduction) return dss_val def _subband_similarity(x: torch.Tensor, y: torch.Tensor, first_term: bool, kernel_size: int = 3, sigma: float = 1.5, percentile: float = 0.05) -> torch.Tensor: dc_coeff, ac_coeff = (1000, 300) c = dc_coeff if first_term else ac_coeff kernel = gaussian_filter(kernel_size=kernel_size, sigma=sigma) kernel = kernel.view(1, 1, kernel_size, kernel_size).to(x) mu_x = F.conv2d(x, kernel, padding=kernel_size // 2) mu_y = F.conv2d(y, kernel, padding=kernel_size // 2) sigma_xx = F.conv2d(x * x, kernel, padding=kernel_size // 2) - mu_x ** 2 sigma_yy = F.conv2d(y * y, kernel, padding=kernel_size // 2) - mu_y ** 2 sigma_xx[sigma_xx < 0] = 0 sigma_yy[sigma_yy < 0] = 0 left_term = (2 * torch.sqrt(sigma_xx * sigma_yy) + c) / (sigma_xx + sigma_yy + c) percentile_index = round(percentile * (left_term.size(-2) * left_term.size(-1))) sorted_left = torch.sort(left_term.flatten(start_dim=1)).values similarity = torch.mean(sorted_left[:, :percentile_index], dim=1) if first_term: sigma_xy = F.conv2d(x * y, kernel, padding=kernel_size // 2) - mu_x * mu_y right_term = ((sigma_xy + c) / (torch.sqrt(sigma_xx * sigma_yy) + c)) sorted_right = torch.sort(right_term.flatten(start_dim=1)).values similarity *= torch.mean(sorted_right[:, :percentile_index], dim=1) return similarity
Apache License 2.0
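As a quick check of the formula in the _dct_matrix entry above, the sketch below reproduces it standalone (so no piq import is needed) and verifies that the resulting matrix is orthonormal; size 8 matches the default dct_size used by dss.

import math
import torch

def dct_matrix(size: int) -> torch.Tensor:
    # Same formula as _dct_matrix above, copied here so the check is self-contained.
    p = torch.arange(1, size).reshape((size - 1, 1))
    q = torch.arange(1, 2 * size, 2)
    return torch.cat((
        math.sqrt(1 / size) * torch.ones((1, size)),
        math.sqrt(2 / size) * torch.cos(math.pi / (2 * size) * p * q)), 0)

d = dct_matrix(8)
print(d.shape)                                              # torch.Size([8, 8])
print(torch.allclose(d @ d.t(), torch.eye(8), atol=1e-5))   # True: the rows are orthonormal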
williamsjj/shiji
shiji/stats/vendor/txstatsd/stats/exponentiallydecayingsample.py
ExponentiallyDecayingSample.rescale
python
def rescale(self, now, next):
    self.next_scale_time = (now + self.RESCALE_THRESHOLD)
    old_start_time = self.start_time
    self.start_time = now
    new_values = []
    for k, v in self._values:
        nk = k * exp(-self.alpha * (self.start_time - old_start_time))
        insort(new_values, (nk, v))
    self._values = new_values
A common feature of the above techniques - indeed, the key technique that allows us to track the decayed weights efficiently - is that they maintain counts and other quantities based on g(ti - L), and only scale by g(t - L) at query time. But while g(ti - L)/g(t-L) is guaranteed to lie between zero and one, the intermediate values of g(ti - L) could become very large. For polynomial functions, these values should not grow too large, and should be effectively represented in practice by floating point values without loss of precision. For exponential functions, these values could grow quite large as new values of (ti - L) become large, and potentially exceed the capacity of common floating point types. However, since the values stored by the algorithms are linear combinations of g values (scaled sums), they can be rescaled relative to a new landmark. That is, by the analysis of exponential decay in Section III-A, the choice of L does not affect the final result. We can therefore multiply each value based on L by a factor of exp(-alpha(L' - L)), and obtain the correct value as if we had instead computed relative to a new landmark L' (and then use this new L' at query time). This can be done with a linear pass over whatever data structure is being used.
https://github.com/williamsjj/shiji/blob/d823cbba4d31f3c81d5bb76b05fc4015a7a059c9/shiji/stats/vendor/txstatsd/stats/exponentiallydecayingsample.py#L100-L131
from time import time
from random import random
from math import exp
from bisect import insort


class ExponentiallyDecayingSample(object):

    RESCALE_THRESHOLD = 60 * 10

    def __init__(self, reservoir_size, alpha, wall_time=None):
        self.alpha = alpha
        self.reservoir_size = reservoir_size
        if wall_time is None:
            wall_time = time
        self.tick = wall_time
        self.clear()

    def clear(self):
        self._values = []
        self.count = 0
        self.start_time = self.tick()
        self.next_scale_time = (
            self.tick() + self.RESCALE_THRESHOLD)

    def size(self):
        return min(self.reservoir_size, self.count)

    def update(self, value, timestamp=None):
        if timestamp is None:
            timestamp = self.tick()
        now = timestamp
        if now >= self.next_scale_time:
            self.rescale(now, self.next_scale_time)
        priority = exp(self.alpha * (timestamp - self.start_time)) / random()
        values = self._values
        if self.count < self.reservoir_size:
            self.count += 1
            insort(values, (priority, value))
        else:
            if values[0][0] < priority:
                insort(values, (priority, value))
                values.pop(0)

    def get_values(self):
        return [v for (k, v) in self._values]
MIT License
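A tiny numeric sketch of the identity the rescale docstring above relies on: a weight stored relative to landmark L, multiplied by exp(-alpha * (L' - L)), equals the weight computed directly against the new landmark L'. The numbers below are arbitrary.

from math import exp, isclose

alpha = 0.015              # decay rate
L, L_new = 100.0, 160.0    # old and new landmark times
t = 130.0                  # timestamp of a stored sample

w_old = exp(alpha * (t - L))                    # weight as stored against L
w_rescaled = w_old * exp(-alpha * (L_new - L))  # what rescale() effectively applies
w_direct = exp(alpha * (t - L_new))             # weight computed against L' directly
print(isclose(w_rescaled, w_direct))            # True: relative priorities are preserved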
asdf-format/asdf
asdf/util.py
iter_subclasses
python
def iter_subclasses(cls):
    for x in cls.__subclasses__():
        yield x
        for y in iter_subclasses(x):
            yield y
Returns all subclasses of a class.
https://github.com/asdf-format/asdf/blob/ee34d21e2d0e8834128716cc72fd47f31856d00e/asdf/util.py#L91-L98
import enum
import inspect
import math
import struct
import types
import importlib.util
import re

from functools import lru_cache
from urllib.request import pathname2url

import numpy as np

from . import constants

urllib_parse_spec = importlib.util.find_spec('urllib.parse')
patched_urllib_parse = importlib.util.module_from_spec(urllib_parse_spec)
urllib_parse_spec.loader.exec_module(patched_urllib_parse)
del urllib_parse_spec

patched_urllib_parse.uses_relative.append('asdf')
patched_urllib_parse.uses_netloc.append('asdf')

__all__ = ['human_list', 'get_array_base', 'get_base_uri', 'filepath_to_url',
           'iter_subclasses', 'calculate_padding', 'resolve_name', 'NotSet',
           'is_primitive', 'uri_match', 'get_class_name']


def human_list(l, separator="and"):
    if len(l) == 1:
        return l[0]
    else:
        return ', '.join(l[:-1]) + ' ' + separator + ' ' + l[-1]


def get_array_base(arr):
    base = arr
    while isinstance(base.base, np.ndarray):
        base = base.base
    return base


def get_base_uri(uri):
    parts = patched_urllib_parse.urlparse(uri)
    return patched_urllib_parse.urlunparse(list(parts[:5]) + [''])


def filepath_to_url(path):
    return patched_urllib_parse.urljoin('file:', pathname2url(path))
BSD 3-Clause New or Revised License
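A small usage sketch for the iter_subclasses entry above; it assumes the asdf package is importable, and the toy classes are made up for the example.

from asdf.util import iter_subclasses

class Base:
    pass

class Child(Base):
    pass

class GrandChild(Child):
    pass

# Depth-first walk over the whole subclass tree, not just the direct subclasses.
print([cls.__name__ for cls in iter_subclasses(Base)])   # ['Child', 'GrandChild']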
microsoft/azure-devops-python-api
azure-devops/azure/devops/v6_0/work/work_client.py
WorkClient.get_board_columns
python
def get_board_columns(self, team_context, board):
    project = None
    team = None
    if team_context is not None:
        if team_context.project_id:
            project = team_context.project_id
        else:
            project = team_context.project
        if team_context.team_id:
            team = team_context.team_id
        else:
            team = team_context.team

    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'string')
    if team is not None:
        route_values['team'] = self._serialize.url('team', team, 'string')
    if board is not None:
        route_values['board'] = self._serialize.url('board', board, 'str')
    response = self._send(http_method='GET',
                          location_id='c555d7ff-84e1-47df-9923-a3fe0cd8751b',
                          version='6.0-preview.1',
                          route_values=route_values)
    return self._deserialize('[BoardColumn]', self._unwrap_collection(response))
GetBoardColumns.
[Preview API] Get columns on a board
:param :class:`<TeamContext> <azure.devops.v6_0.work.models.TeamContext>` team_context: The team context for the operation
:param str board: Name or ID of the specific board
:rtype: [BoardColumn]
https://github.com/microsoft/azure-devops-python-api/blob/451cade4c475482792cbe9e522c1fee32393139e/azure-devops/azure/devops/v6_0/work/work_client.py#L820-L850
 from msrest import Serializer, Deserializer from ...client import Client from . import models class WorkClient(Client): def __init__(self, base_url=None, creds=None): super(WorkClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = '1d4f49f9-02b9-4e26-b826-2cdb6195f2a9' def get_backlog_configurations(self, team_context): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') response = self._send(http_method='GET', location_id='7799f497-3cb5-4f16-ad4f-5cd06012db64', version='6.0-preview.1', route_values=route_values) return self._deserialize('BacklogConfiguration', response) def get_backlog_level_work_items(self, team_context, backlog_id): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if backlog_id is not None: route_values['backlogId'] = self._serialize.url('backlog_id', backlog_id, 'str') response = self._send(http_method='GET', location_id='7c468d96-ab1d-4294-a360-92f07e9ccd98', version='6.0-preview.1', route_values=route_values) return self._deserialize('BacklogLevelWorkItems', response) def get_backlog(self, team_context, id): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if id is not None: route_values['id'] = self._serialize.url('id', id, 'str') response = self._send(http_method='GET', location_id='a93726f9-7867-4e38-b4f2-0bfafc2f6a94', version='6.0-preview.1', route_values=route_values) return self._deserialize('BacklogLevelConfiguration', response) def get_backlogs(self, team_context): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') response = self._send(http_method='GET', location_id='a93726f9-7867-4e38-b4f2-0bfafc2f6a94', version='6.0-preview.1', route_values=route_values) return self._deserialize('[BacklogLevelConfiguration]', self._unwrap_collection(response)) def get_column_suggested_values(self, project=None): route_values = {} if 
project is not None: route_values['project'] = self._serialize.url('project', project, 'str') response = self._send(http_method='GET', location_id='eb7ec5a3-1ba3-4fd1-b834-49a5a387e57d', version='6.0-preview.1', route_values=route_values) return self._deserialize('[BoardSuggestedValue]', self._unwrap_collection(response)) def get_board_mapping_parent_items(self, team_context, child_backlog_context_category_ref_name, workitem_ids): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') query_parameters = {} if child_backlog_context_category_ref_name is not None: query_parameters['childBacklogContextCategoryRefName'] = self._serialize.query('child_backlog_context_category_ref_name', child_backlog_context_category_ref_name, 'str') if workitem_ids is not None: workitem_ids = ",".join(map(str, workitem_ids)) query_parameters['workitemIds'] = self._serialize.query('workitem_ids', workitem_ids, 'str') response = self._send(http_method='GET', location_id='186abea3-5c35-432f-9e28-7a15b4312a0e', version='6.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ParentChildWIMap]', self._unwrap_collection(response)) def get_row_suggested_values(self, project=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') response = self._send(http_method='GET', location_id='bb494cc6-a0f5-4c6c-8dca-ea6912e79eb9', version='6.0-preview.1', route_values=route_values) return self._deserialize('[BoardSuggestedValue]', self._unwrap_collection(response)) def get_board(self, team_context, id): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if id is not None: route_values['id'] = self._serialize.url('id', id, 'str') response = self._send(http_method='GET', location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40', version='6.0-preview.1', route_values=route_values) return self._deserialize('Board', response) def get_boards(self, team_context): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') response = self._send(http_method='GET', location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40', version='6.0-preview.1', route_values=route_values) return self._deserialize('[BoardReference]', self._unwrap_collection(response)) def set_board_options(self, options, team_context, id): project = None team = None if team_context is not None: if team_context.project_id: 
project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if id is not None: route_values['id'] = self._serialize.url('id', id, 'str') content = self._serialize.body(options, '{str}') response = self._send(http_method='PUT', location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40', version='6.0-preview.1', route_values=route_values, content=content) return self._deserialize('{str}', self._unwrap_collection(response)) def get_board_user_settings(self, team_context, board): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') response = self._send(http_method='GET', location_id='b30d9f58-1891-4b0a-b168-c46408f919b0', version='6.0-preview.1', route_values=route_values) return self._deserialize('BoardUserSettings', response) def update_board_user_settings(self, board_user_settings, team_context, board): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') content = self._serialize.body(board_user_settings, '{str}') response = self._send(http_method='PATCH', location_id='b30d9f58-1891-4b0a-b168-c46408f919b0', version='6.0-preview.1', route_values=route_values, content=content) return self._deserialize('BoardUserSettings', response) def get_capacities_with_identity_ref(self, team_context, iteration_id): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if iteration_id is not None: route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str') response = self._send(http_method='GET', location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57', version='6.0-preview.2', route_values=route_values) return self._deserialize('[TeamMemberCapacityIdentityRef]', self._unwrap_collection(response)) def get_capacity_with_identity_ref(self, team_context, iteration_id, team_member_id): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = 
team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if iteration_id is not None: route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str') if team_member_id is not None: route_values['teamMemberId'] = self._serialize.url('team_member_id', team_member_id, 'str') response = self._send(http_method='GET', location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57', version='6.0-preview.2', route_values=route_values) return self._deserialize('TeamMemberCapacityIdentityRef', response) def replace_capacities_with_identity_ref(self, capacities, team_context, iteration_id): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if iteration_id is not None: route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str') content = self._serialize.body(capacities, '[TeamMemberCapacityIdentityRef]') response = self._send(http_method='PUT', location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57', version='6.0-preview.2', route_values=route_values, content=content) return self._deserialize('[TeamMemberCapacityIdentityRef]', self._unwrap_collection(response)) def update_capacity_with_identity_ref(self, patch, team_context, iteration_id, team_member_id): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if iteration_id is not None: route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str') if team_member_id is not None: route_values['teamMemberId'] = self._serialize.url('team_member_id', team_member_id, 'str') content = self._serialize.body(patch, 'CapacityPatch') response = self._send(http_method='PATCH', location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57', version='6.0-preview.2', route_values=route_values, content=content) return self._deserialize('TeamMemberCapacityIdentityRef', response) def get_board_card_rule_settings(self, team_context, board): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') response = self._send(http_method='GET', location_id='b044a3d9-02ea-49c7-91a1-b730949cc896', version='6.0-preview.2', route_values=route_values) return self._deserialize('BoardCardRuleSettings', 
response) def update_board_card_rule_settings(self, board_card_rule_settings, team_context, board): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') content = self._serialize.body(board_card_rule_settings, 'BoardCardRuleSettings') response = self._send(http_method='PATCH', location_id='b044a3d9-02ea-49c7-91a1-b730949cc896', version='6.0-preview.2', route_values=route_values, content=content) return self._deserialize('BoardCardRuleSettings', response) def update_taskboard_card_rule_settings(self, board_card_rule_settings, team_context): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') content = self._serialize.body(board_card_rule_settings, 'BoardCardRuleSettings') self._send(http_method='PATCH', location_id='3f84a8d1-1aab-423e-a94b-6dcbdcca511f', version='6.0-preview.2', route_values=route_values, content=content) def get_board_card_settings(self, team_context, board): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') response = self._send(http_method='GET', location_id='07c3b467-bc60-4f05-8e34-599ce288fafc', version='6.0-preview.2', route_values=route_values) return self._deserialize('BoardCardSettings', response) def update_board_card_settings(self, board_card_settings_to_save, team_context, board): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') content = self._serialize.body(board_card_settings_to_save, 'BoardCardSettings') response = self._send(http_method='PUT', location_id='07c3b467-bc60-4f05-8e34-599ce288fafc', version='6.0-preview.2', route_values=route_values, content=content) return self._deserialize('BoardCardSettings', response) def update_taskboard_card_settings(self, board_card_settings_to_save, team_context): project = None team = None if team_context is not 
None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') content = self._serialize.body(board_card_settings_to_save, 'BoardCardSettings') self._send(http_method='PUT', location_id='0d63745f-31f3-4cf3-9056-2a064e567637', version='6.0-preview.2', route_values=route_values, content=content) def get_board_chart(self, team_context, board, name): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') response = self._send(http_method='GET', location_id='45fe888c-239e-49fd-958c-df1a1ab21d97', version='6.0-preview.1', route_values=route_values) return self._deserialize('BoardChart', response) def get_board_charts(self, team_context, board): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') response = self._send(http_method='GET', location_id='45fe888c-239e-49fd-958c-df1a1ab21d97', version='6.0-preview.1', route_values=route_values) return self._deserialize('[BoardChartReference]', self._unwrap_collection(response)) def update_board_chart(self, chart, team_context, board, name): project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') if board is not None: route_values['board'] = self._serialize.url('board', board, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') content = self._serialize.body(chart, 'BoardChart') response = self._send(http_method='PATCH', location_id='45fe888c-239e-49fd-958c-df1a1ab21d97', version='6.0-preview.1', route_values=route_values, content=content) return self._deserialize('BoardChart', response)
MIT License
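A rough, untested sketch of how the get_board_columns entry above might be called through the azure-devops client library. The organization URL, token, project, team and board names are placeholders, and the clients_v6_0 factory is assumed to exist in the installed package version; adapt the wiring to your environment.

from azure.devops.connection import Connection
from azure.devops.v6_0.work.models import TeamContext
from msrest.authentication import BasicAuthentication

# Placeholders -- replace with your own organization URL and personal access token.
connection = Connection(
    base_url='https://dev.azure.com/your-organization',
    creds=BasicAuthentication('', 'your-personal-access-token'))
work_client = connection.clients_v6_0.get_work_client()   # assumed v6.0 client factory

team_context = TeamContext(project='MyProject', team='MyProject Team')
for column in work_client.get_board_columns(team_context, board='Stories'):
    print(column.name, column.column_type)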
mirantis/disk_perf_test_tool
wally/suits/job.py
JobParams.summary
python
def summary(self) -> str:
    pass
Test short summary, used mostly for file names and short image description
https://github.com/mirantis/disk_perf_test_tool/blob/aaad8b81218d0907e9aa1425e18b1a044f06960d/wally/suits/job.py#L22-L24
import abc
from typing import Dict, Any, Tuple, cast, Union, NamedTuple
from collections import OrderedDict

from cephlib.istorage import Storable

Var = NamedTuple('Var', [('name', str)])


class JobParams(metaclass=abc.ABCMeta):

    def __init__(self, **params: Dict[str, Any]) -> None:
        self.params = params

    @property
    @abc.abstractmethod
Apache License 2.0
bguillouet/traj-dist
traj_dist/distance.py
pdist
python
def pdist(traj_list, metric="sspd", type_d="euclidean", converted=None, precision=None, eps=None, g=None,
          verbose=False):
    list_dim = [x.shape[1] if len(x.shape) > 1 else 1 for x in traj_list]
    nb_traj = len(traj_list)
    if not (len(set(list_dim)) == 1):
        raise ValueError("All trajectories must have same dimesion !")
    dim = list_dim[0]

    if not (metric in ["sspd", "dtw", "lcss", "hausdorff", "frechet", "discret_frechet", "sowd_grid",
                       "erp", "edr"]):
        raise ValueError("The metric argument should be 'sspd', 'dtw', 'lcss','erp','edr' 'hausdorff', 'frechet',"
                         "'discret_frechet' or 'sowd_grid' \nmetric given is : " + metric)

    if not (type_d in ["spherical", "euclidean"]):
        raise ValueError("The type_d argument should be 'euclidean' or 'spherical'\ntype_d given is : " + type_d)

    if type_d == "spherical" and (metric in ["frechet", "discret_frechet"]):
        raise ValueError("spherical implementation for distance " + metric + " is not "
                         "disponible")

    if type_d == "euclidean" and (metric in ["sowd", "sowd_grid"]):
        if not (converted):
            raise ValueError("Euclidean implementation for distance " + metric +
                             " is not disponible if your data is not already converted in cell format")

    if verbose:
        print(("Computing " + type_d + " distance " + metric + " for %d trajectories" % nb_traj))

    M = np.zeros(sum(range(nb_traj)))
    dist = METRIC_DIC[type_d][metric]
    if metric.startswith("sowd_grid"):
        if converted is None:
            warnings.warn("converted parameter should be specified for metric sowd_grid. Default "
                          "is False")
            converted = False
        if converted:
            cells_list = traj_list
        else:
            if precision is None:
                warnings.warn("precision parameter should be specified for metric sowd_grid if converted "
                              "is False. Default is 7")
                precision = 7
            if verbose:
                print("Cells conversion start")
            cells_list_, _, _, _, _ = trajectory_set_grid(traj_list, precision)
            cells_list = [np.array(x)[:, :2] for x in cells_list_]
            if verbose:
                print("Cells conversion ok")
        im = 0
        for i in range(nb_traj):
            cells_list_i = cells_list[i]
            for j in range(i + 1, nb_traj):
                cells_list_j = cells_list[j]
                M[im] = dist(cells_list_i, cells_list_j)
                im += 1
    elif metric == "erp":
        if g is None:
            g = np.zeros(dim, dtype=float)
            warnings.warn("g parameter should be specified for metric erp. Default is ")
            if verbose:
                print(g)
        else:
            if g.shape[0] != dim:
                raise ValueError("g and trajectories in list should have same dimension")
        im = 0
        for i in range(nb_traj):
            traj_list_i = traj_list[i]
            for j in range(i + 1, nb_traj):
                traj_list_j = traj_list[j]
                M[im] = dist(traj_list_i, traj_list_j, g)
                im += 1
    elif metric == "lcss" or metric == "edr":
        if eps is None:
            warnings.warn("eps parameter should be specified for metric 'lcss' and 'edr', default is 100 ")
            eps = 100
        im = 0
        for i in range(nb_traj):
            traj_list_i = traj_list[i]
            for j in range(i + 1, nb_traj):
                traj_list_j = traj_list[j]
                M[im] = dist(traj_list_i, traj_list_j, eps)
                im += 1
    else:
        im = 0
        for i in range(nb_traj):
            traj_list_i = traj_list[i]
            for j in range(i + 1, nb_traj):
                traj_list_j = traj_list[j]
                M[im] = dist(traj_list_i, traj_list_j)
                im += 1
    return M
Usage
-----
Pairwise distances between trajectory in traj_list.

metrics available are :

1. 'sspd'
    Computes the distances using the Symmetrized Segment Path distance.
2. 'dtw'
    Computes the distances using the Dynamic Path Warping distance.
3. 'lcss'
    Computes the distances using the Longuest Common SubSequence distance
4. 'hausdorf'
    Computes the distances using the Hausdorff distance.
5. 'frechet'
    Computes the distances using the Frechet distance.
6. 'discret_frechet'
    Computes the distances using the Discrete Frechet distance.
7. 'sowd_grid'
    Computes the distances using the Symmetrized One Way Distance.
8. 'erp'
    Computes the distances using the Edit Distance with real Penalty.
9. 'edr'
    Computes the distances using the Edit Distance on Real sequence.

type_d available are "euclidean" or "spherical". Some distance can be computing according to spherical space
instead of euclidean. If so, traj_0 and traj_1 have to be 2-dimensional. First column is longitude, second one
is latitude.

'sowd_grid' computes distance between trajectory in grid representation. If the coordinate are spherical, this
conversion can be made according to the geohash encoding. If so, the geohash 'precision' is needed.

'edr' and 'lcss' require 'eps' parameter. These distance assume that two locations are similar, or not,
according to a given threshold, eps.

'erp' require g parameter. This distance require a gap parameter. Which must have same dimension that the
trajectory.

Parameters
----------
param traj_list: a list of nT numpy array trajectory
param metric : string, distance used
param type_d : string, distance type_d used (spherical or euclidean)
param converted : boolean, specified if the data are converted in cell format (sowd_grid metric)
param precision : int, precision of geohash (sowd_grid )
param eps : float, threshold distance (edr and lcss)
param g : numpy arrays, gaps (erp distance)

Returns
-------
M : a nT x nT numpy array. Where the i,j entry is the distance between traj_list[i] and traj_list[j]
https://github.com/bguillouet/traj-dist/blob/05893c52078fde5bd6ae9efd5e52178e5bd9a85c/traj_dist/distance.py#L419-L584
from .pydist.linecell import trajectory_set_grid from .cydist.sspd import c_e_sspd, c_g_sspd from .cydist.dtw import c_e_dtw, c_g_dtw from .cydist.erp import c_e_erp, c_g_erp from .cydist.edr import c_e_edr, c_g_edr from .cydist.lcss import c_e_lcss, c_g_lcss from .cydist.hausdorff import c_e_hausdorff, c_g_hausdorff from .cydist.discret_frechet import c_discret_frechet from .cydist.frechet import c_frechet from .cydist.sowd import c_sowd_grid import numpy as np import warnings __all__ = ["pdist", "cdist", "sspd", "sowd_grid", "frechet", "discret_frechet", "hausdorff", "dtw", "lcss", "edr", "erp"] METRIC_DIC = {"spherical": {"sspd": c_g_sspd, "dtw": c_g_dtw, "lcss": c_g_lcss, "hausdorff": c_g_hausdorff, "sowd_grid": c_sowd_grid, "erp": c_g_erp, "edr": c_g_edr}, "euclidean": {"sspd": c_e_sspd, "dtw": c_e_dtw, "lcss": c_e_lcss, "hausdorff": c_e_hausdorff, "discret_frechet": c_discret_frechet, "frechet": c_frechet, "sowd_grid": c_sowd_grid, "erp": c_e_erp, "edr": c_e_edr}} def sspd(traj_1, traj_2, type_d="euclidean"): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. t1 is %dD and t2 is %d given" % (dim_1, dim_2)) if not (type_d in ["spherical", "euclidean"]): raise ValueError("The type_d argument should be 'euclidean' or 'spherical'\ntype_d given is : " + type_d) dist_func = METRIC_DIC[type_d]["sspd"] dist = dist_func(traj_1, traj_2) return dist def sowd_grid(traj_1, traj_2, type_d="euclidean", converted=None, precision=None): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. t1 is %dD and t2 is %d given" % (dim_1, dim_2)) if not (type_d in ["spherical", "euclidean"]): raise ValueError("The type_d argument should be 'euclidean' or 'spherical'\ntype_d given is : " + type_d) if type_d == "euclidean" and not converted: raise Warning("Euclidean implementation for distance sowd_grid is not " "disponible if your data is not already converted in cell format") if converted is None: warnings.warn("converted parameter should be specified for metric sowd_grid. Default " "is True") converted = True if converted: cells_list = [traj_1, traj_2] else: if precision is None: warnings.warn("precision parameter should be specified for metric sowd_grid if converted " "is False. Default is 7") precision = 7 cells_list_, _, _, _, _ = trajectory_set_grid([traj_1, traj_2], precision) cells_list = [np.array(cells_list_[0])[:, :2], np.array(cells_list_[1])[:, :2]] dist_func = METRIC_DIC[type_d]["sowd_grid"] dist = dist_func(cells_list[0], cells_list[1]) return dist def frechet(traj_1, traj_2, type_d="euclidean"): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. t1 is %dD and t2 is %d given" % (dim_1, dim_2)) if type_d != "euclidean": raise ValueError("The type_d argument should be 'euclidean'\ntype_d given is : " + type_d) dist_func = METRIC_DIC[type_d]["frechet"] dist = dist_func(traj_1, traj_2) return dist def discret_frechet(traj_1, traj_2, type_d="euclidean"): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. 
t1 is %dD and t2 is %d given" % (dim_1, dim_2)) if type_d != "euclidean": raise ValueError("The type_d argument should be 'euclidean'\ntype_d given is : " + type_d) dist_func = METRIC_DIC[type_d]["discret_frechet"] dist = dist_func(traj_1, traj_2) return dist def hausdorff(traj_1, traj_2, type_d="euclidean"): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. t1 is %dD and t2 is %d given" % (dim_1, dim_2)) if not (type_d in ["spherical", "euclidean"]): raise ValueError("The type_d argument should be 'euclidean' or 'spherical'\ntype_d given is : " + type_d) dist_func = METRIC_DIC[type_d]["hausdorff"] dist = dist_func(traj_1, traj_2) return dist def dtw(traj_1, traj_2, type_d="euclidean"): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. t1 is %dD and t2 is %d given" % (dim_1, dim_2)) if not (type_d in ["spherical", "euclidean"]): raise ValueError("The type_d argument should be 'euclidean' or 'spherical'\ntype_d given is : " + type_d) dist_func = METRIC_DIC[type_d]["dtw"] dist = dist_func(traj_1, traj_2) return dist def lcss(traj_1, traj_2, type_d="euclidean", eps=200): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. t1 is %dD and t2 is %d given" % (dim_1, dim_2)) if not (type_d in ["spherical", "euclidean"]): raise ValueError("The type_d argument should be 'euclidean' or 'spherical'\ntype_d given is : " + type_d) dist_func = METRIC_DIC[type_d]["lcss"] dist = dist_func(traj_1, traj_2, eps) return dist def edr(traj_1, traj_2, type_d="euclidean", eps=200): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. t1 is %dD and t2 is %d given" % (dim_1, dim_2)) if not (type_d in ["spherical", "euclidean"]): raise ValueError("The type_d argument should be 'euclidean' or 'spherical'\ntype_d given is : " + type_d) dist_func = METRIC_DIC[type_d]["edr"] dist = dist_func(traj_1, traj_2, eps) return dist def erp(traj_1, traj_2, type_d="euclidean", g=None): dim_1 = traj_1.shape[1] dim_2 = traj_2.shape[1] if dim_1 != 2 or dim_2 != 2: raise ValueError("Trajectories should be in 2D. t1 is %dD and t2 is %d given" % (dim_1, dim_2)) dim = dim_1 if g is None: g = np.zeros(dim, dtype=float) warnings.warn("g parameter should be specified for metric erp. Default is ") print(g) else: if g.shape[0] != dim: raise ValueError("g and trajectories in list should have same dimension") if not (type_d in ["spherical", "euclidean"]): raise ValueError("The type_d argument should be 'euclidean' or 'spherical'\ntype_d given is : " + type_d) dist_func = METRIC_DIC[type_d]["erp"] dist = dist_func(traj_1, traj_2, g) return dist
MIT License
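
Editor's note: the following usage sketch is not part of the dataset record above. It illustrates how the trajectory-distance functions defined in that record's context could be called. The import path traj_dist.distance and the toy coordinates are assumptions for illustration, not taken from the record.

# Illustrative sketch only (not from the source record).
# Assumes the module shown above is importable as `traj_dist.distance`.
import numpy as np
import traj_dist.distance as tdist  # assumed import path

# Two toy 2D trajectories of shape (n_points, 2), as the dimension checks above require.
traj_a = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 1.5]])
traj_b = np.array([[0.0, 0.5], [1.0, 1.2], [2.0, 2.0]])

print(tdist.sspd(traj_a, traj_b, type_d="euclidean"))            # symmetric segment-path distance
print(tdist.dtw(traj_a, traj_b, type_d="euclidean"))             # dynamic time warping
print(tdist.lcss(traj_a, traj_b, type_d="euclidean", eps=0.5))   # LCSS with matching tolerance eps
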
qdata/textattack
textattack/constraints/semantics/word_embedding_distance.py
WordEmbeddingDistance.extra_repr_keys
python
def extra_repr_keys(self):
        if self.min_cos_sim is None:
            metric = "max_mse_dist"
        else:
            metric = "min_cos_sim"
        return [
            "embedding",
            metric,
            "cased",
            "include_unknown_words",
        ] + super().extra_repr_keys()
Set the extra representation of the constraint using these keys. To print customized extra information, you should reimplement this method in your own constraint. Both single-line and multi-line strings are acceptable.
https://github.com/qdata/textattack/blob/3f0d5290bebc8436a60869576bede9138ea34cda/textattack/constraints/semantics/word_embedding_distance.py#L111-L127
from textattack.constraints import Constraint
from textattack.shared import AbstractWordEmbedding, WordEmbedding
from textattack.shared.validators import transformation_consists_of_word_swaps


class WordEmbeddingDistance(Constraint):

    def __init__(
        self,
        embedding=WordEmbedding.counterfitted_GLOVE_embedding(),
        include_unknown_words=True,
        min_cos_sim=None,
        max_mse_dist=None,
        cased=False,
        compare_against_original=True,
    ):
        super().__init__(compare_against_original)
        self.include_unknown_words = include_unknown_words
        self.cased = cased

        if bool(min_cos_sim) == bool(max_mse_dist):
            raise ValueError("You must choose either `min_cos_sim` or `max_mse_dist`.")
        self.min_cos_sim = min_cos_sim
        self.max_mse_dist = max_mse_dist

        if not isinstance(embedding, AbstractWordEmbedding):
            raise ValueError(
                "`embedding` object must be of type `textattack.shared.AbstractWordEmbedding`."
            )
        self.embedding = embedding

    def get_cos_sim(self, a, b):
        return self.embedding.get_cos_sim(a, b)

    def get_mse_dist(self, a, b):
        return self.embedding.get_mse_dist(a, b)

    def _check_constraint(self, transformed_text, reference_text):
        try:
            indices = transformed_text.attack_attrs["newly_modified_indices"]
        except KeyError:
            raise KeyError(
                "Cannot apply part-of-speech constraint without `newly_modified_indices`"
            )

        if any(
            i >= len(reference_text.words) or i >= len(transformed_text.words)
            for i in indices
        ):
            return False

        for i in indices:
            ref_word = reference_text.words[i]
            transformed_word = transformed_text.words[i]

            if not self.cased:
                ref_word = ref_word.lower()
                transformed_word = transformed_word.lower()

            try:
                ref_id = self.embedding.word2index(ref_word)
                transformed_id = self.embedding.word2index(transformed_word)
            except KeyError:
                if self.include_unknown_words:
                    continue
                return False

            if self.min_cos_sim:
                cos_sim = self.get_cos_sim(ref_id, transformed_id)
                if cos_sim < self.min_cos_sim:
                    return False
            if self.max_mse_dist:
                mse_dist = self.get_mse_dist(ref_id, transformed_id)
                if mse_dist > self.max_mse_dist:
                    return False

        return True

    def check_compatibility(self, transformation):
        return transformation_consists_of_word_swaps(transformation)
MIT License
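
Editor's note: a minimal usage sketch, not part of the dataset record above. The import path is inferred from the record's function_path and should be treated as an assumption; only calls and behaviors shown in the record's code are used.

# Illustrative sketch only (not from the source record).
from textattack.constraints.semantics import WordEmbeddingDistance  # assumed import path

# Exactly one of `min_cos_sim` / `max_mse_dist` must be set: the constructor above
# raises ValueError when both or neither are given.
constraint = WordEmbeddingDistance(min_cos_sim=0.8)

# extra_repr_keys() reports the active metric alongside the other configured fields,
# which feeds the constraint's printed representation.
print(constraint.extra_repr_keys())
# e.g. ['embedding', 'min_cos_sim', 'cased', 'include_unknown_words', ...]
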
athena-home-loans/keydra
src/keydra/clients/splunk.py
SplunkClient._wait_for_splunkcloud_task
python
def _wait_for_splunkcloud_task(self, id, timeout=240):
        attempt = 1

        while attempt < timeout:
            try:
                statusresp = self._service.get(
                    '/services/dmc/tasks/{}'.format(id),
                    output_mode='json'
                )['body'].read()
            except HTTPError:
                raise Exception('Could not fetch status for task {}'.format(id))

            status = json.loads(statusresp)['entry'][0]

            LOGGER.debug("Task {} is currently '{}' after {} seconds.".format(
                    id,
                    status['content']['state'],
                    attempt
                )
            )

            if status['content']['state'] == 'completed':
                LOGGER.info("Deployment task {} completed in {} seconds.".format(id, attempt))
                return

            time.sleep(1)
            attempt += 1

        raise Exception(
            'Deployment task did not complete within {} seconds! Aborting.'.format(timeout)
        )
Wait for a Splunk Cloud (Classic) deployment task to complete

:param id: The task Id to wait for
:type id: :class:`string`
:param timeout: How many seconds to wait for before giving up
:type timeout: :class:`int`
https://github.com/athena-home-loans/keydra/blob/3b1423f629cd826d270d4d9e29023e22694fa24e/src/keydra/clients/splunk.py#L417-L454
import json
import requests
import time

import splunklib.client as splunkclient
import urllib.parse as urlparse

from splunklib.binding import HTTPError

from keydra.logging import get_logger
from keydra.providers.base import exponential_backoff_retry

LOGGER = get_logger()

ADMIN_API = 'https://admin.splunk.com'


class AppNotInstalledException(Exception):
    pass


class TaskAlreadyInProgressException(Exception):
    pass


class SplunkClient(object):
    def __init__(self, username, password, host, verify, port=8089):
        self.host = host
        self.port = port

        if password.startswith('eyJ') and len(password) > 32:
            self.tokenauth = True
            self._auth_headers = {
                'Content-Type': 'application/json',
                'Authorization': f'Bearer {password}'
            }
        else:
            self.tokenauth = False
            self._service = splunkclient.Service(
                host=host,
                port=port,
                username=username,
                password=password,
                verify=verify
            )
            self._service.login()

        self._instance = host.split('.')[0]

    def _get(self, url, params={}):
        params['output_mode'] = 'json'
        if not params:
            params = self.params

        resp = requests.get(url, headers=self._auth_headers, params=params)
        resp.raise_for_status()

        try:
            return resp.json()
        except ValueError:
            return resp.text

    def _post(self, url, data, params={}):
        params['output_mode'] = 'json'

        resp = requests.post(url, headers=self._auth_headers, params=params, data=data)
        resp.raise_for_status()

        try:
            return resp.json()
        except ValueError:
            return resp.text

    def _delete(self, url, data, params={}):
        params['output_mode'] = 'json'

        resp = requests.delete(url, headers=self._auth_headers, params=params, data=data)
        resp.raise_for_status()

        try:
            return resp.json()
        except ValueError:
            return resp.text

    def update_app_config(self, app, path, obj, data):
        if self.app_exists(app) is not True:
            raise AppNotInstalledException(
                'App {} not installed on Splunk '
                'host {}'.format(
                    app, self._service.host
                )
            )

        post_data = dict(**data)

        try:
            attempt = self._service.post(
                '/servicesNS/nobody/{app}/'
                '{path}/{object}'.format(
                    app=app,
                    path=path,
                    object=obj
                ),
                **post_data
            )
        except HTTPError as error:
            if 'does not exist' in str(error.body):
                post_data['name'] = obj
                attempt = self._service.post(
                    '/servicesNS/nobody/{app}/'
                    '{path}/{object}'.format(
                        app=app,
                        path=path,
                        object=obj
                    ),
                    **post_data
                )
            else:
                raise Exception(
                    'Error updating Splunk app {} on '
                    'host {}: {}'.format(
                        app, self._service.host, error
                    )
                )

        return attempt.status

    def update_app_storepass(self, app, username, password, realm=None):
        if not realm:
            realm = ''

        if self.app_exists(app) is not True:
            raise AppNotInstalledException(
                'App {} not installed on Splunk '
                'host {}'.format(
                    app, self._service.host
                )
            )

        post_data = {
            'name': username,
            'password': password,
            'realm': realm
        }

        try:
            self._service.get(
                '/servicesNS/nobody/{}/'
                'storage/passwords/{}'.format(app, username)
            )
            url = '/servicesNS/nobody/{}/storage/passwords/{}'.format(
                app, username
            )
            post_data.pop('name', None)
            post_data.pop('realm', None)

        except HTTPError:
            url = '/servicesNS/nobody/{}/storage/passwords'.format(app)

        try:
            attempt = self._service.post(url, **post_data)

        except HTTPError as error:
            raise Exception(
                'Error updating Splunk app {} on '
                'host {}: {}'.format(
                    app, self._service.host, error
                )
            )

        return attempt.status

    def app_exists(self, appname):
        matching_apps = len(
            self._service.apps.list(search='name={}'.format(appname))
        )
        return matching_apps > 0

    def delete_token(self, username, token):
        url = 'https://{}:{}/services/authorization/tokens/{}'.format(
            self.host,
            self.port,
            username
        )
        postdata = {
            'id': token
        }
        self._delete(url, postdata)

    def list_tokens_by_user(self, username):
        url = 'https://{}:{}/services/authorization/tokens'.format(
            self.host,
            self.port,
        )
        params = {
            'username': username
        }
        resp = self._get(url, params)

        LOGGER.debug(f'List token response: {resp}')

        tokens = []
        for entry in resp['entry']:
            tokens.append(entry['name'])

        return tokens

    def rotate_token(self, username, lifetime=None):
        url = 'https://{}:{}/services/authorization/tokens'.format(
            self.host,
            self.port,
        )
        postdata = {
            'name': username,
            'audience': 'Managed by Keydra'
        }
        if lifetime is not None:
            postdata['expires_on'] = f'+{lifetime}'

        resp = self._post(url, postdata)

        newtoken = resp['entry'][0]['content']['token']
        newtokenid = resp['entry'][0]['content']['id']

        if newtoken.startswith('eyJ'):
            self._auth_headers['Authorization'] = f'Bearer {newtoken}'

            for token in self.list_tokens_by_user(username):
                if token != newtokenid:
                    LOGGER.debug(f'Attempting to delete token {token}')
                    self.delete_token(username, token)

            return newtoken

        else:
            raise Exception(f'Error rotating token for user {username}. New token is invalid!')

    def change_passwd(self, username, oldpasswd, newpasswd):
        attempt = self._service.post(
            "/services/authentication/users/{}".format(username),
            password=newpasswd,
            oldpassword=oldpasswd
        )

        if attempt.status != 200:
            raise Exception(
                'Error rotating user {} on Splunk host '
                '{}'.format(username, self._service.host)
            )

        return True

    def rotate_hectoken(self, inputname):
        response = self._service.get(
            '/services/data/inputs/http',
            output_mode='json'
        )
        inputs = json.loads(response['body'].read())

        for entry in inputs['entry']:
            if entry['name'] == 'http://'+inputname:
                rotresp = self._service.post(
                    urlparse.unquote(entry['links']['edit']+'/rotate'),
                    output_mode='json'
                )
                newconfig = json.loads(rotresp['body'].read())['entry'][0]
                return newconfig['content']['token']

        raise Exception(
            'Error rotating HEC token {} on Splunk host '
            '{}. Input was not found! Input list: {}'.format(
                inputname, self._service.host, inputs
            )
        )

    def _get_splunkcloud_httpinput(self, inputname):
        getresp = self._service.get(
            '/services/dmc/config/inputs/__indexers/http/{}'.format(inputname),
            output_mode='json'
        )['body'].read()

        return json.loads(getresp)['entry']

    def _get_last_splunkcloud_deploytask(self):
        taskresp = self._service.get(
            '/services/dmc/deploy',
            output_mode='json'
        )['body'].read()

        tasks = json.loads(taskresp)['entry']

        if tasks[0]['name'] == 'lastDeploy':
            return tasks[0]['content']['taskId']
        else:
            raise Exception(
                "Could not fetch last task Id! Task with name 'lastDeploy' was not "
                "found in the Splunk response. Unexpected response from server."
            )
MIT License
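
Editor's note: a minimal usage sketch, not part of the dataset record above. The import path is inferred from the record's function_path (src/keydra/clients/splunk.py); the host, credentials, and polling flow are placeholders for illustration, and the polled methods are private helpers of the class shown.

# Illustrative sketch only (not from the source record).
from keydra.clients.splunk import SplunkClient  # assumed import path

client = SplunkClient(
    username='admin',
    password='example-password',        # a JWT starting with 'eyJ' switches the client to token auth
    host='example.splunkcloud.com',     # placeholder host
    verify=True
)

# Pattern suggested by the record: look up the most recent deployment task,
# then poll until it reports 'completed' or the timeout elapses.
task_id = client._get_last_splunkcloud_deploytask()
client._wait_for_splunkcloud_task(task_id, timeout=240)
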