Dataset columns:
  repository_name      string, lengths 7 to 107
  function_path        string, lengths 4 to 190
  function_identifier  string, lengths 1 to 236
  language             class, 1 value
  function             string, lengths 9 to 647k
  docstring            string, lengths 5 to 488k
  function_url         string, lengths 71 to 285
  context              string, lengths 0 to 2.51M
  license              class, 5 values
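A minimal sketch of reading rows with this schema using the Hugging Face datasets library; the dataset path below is a placeholder assumption, not the dataset's real identifier.

from datasets import load_dataset

ds = load_dataset("org/code-functions-dataset", split="train")  # placeholder path
row = ds[0]
print(row["repository_name"], row["function_identifier"])  # where the function lives
print(row["function"])   # source of the target function
print(row["docstring"])  # its documentation string
print(row["context"])    # surrounding module code
print(row["license"])    # one of the five license classes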
mhallsmoore/qstrader
qstrader/portcon/pcm.py
PortfolioConstructionModel._obtain_current_portfolio
python
def _obtain_current_portfolio(self):
    return self.broker.get_portfolio_as_dict(self.broker_portfolio_id)
Query the broker for the current account asset quantities and
return as a portfolio dictionary.

Returns
-------
`dict{str: dict}`
    Current broker account asset quantities in integral units.
https://github.com/mhallsmoore/qstrader/blob/7d1df112a0c7a941a44a1c155bb4208c1f61e1ca/qstrader/portcon/pcm.py#L142-L152
from qstrader import settings from qstrader.execution.order import Order class PortfolioConstructionModel(object): def __init__( self, broker, broker_portfolio_id, universe, order_sizer, optimiser, alpha_model=None, risk_model=None, cost_model=None, data_handler=None, ): self.broker = broker self.broker_portfolio_id = broker_portfolio_id self.universe = universe self.order_sizer = order_sizer self.optimiser = optimiser self.alpha_model = alpha_model self.risk_model = risk_model self.cost_model = cost_model self.data_handler = data_handler def _obtain_full_asset_list(self, dt): broker_portfolio = self.broker.get_portfolio_as_dict( self.broker_portfolio_id ) broker_assets = list(broker_portfolio.keys()) universe_assets = self.universe.get_assets(dt) return sorted( list( set(broker_assets).union(set(universe_assets)) ) ) def _create_zero_target_weight_vector(self, full_assets): return {asset: 0.0 for asset in full_assets} def _create_full_asset_weight_vector(self, zero_weights, optimised_weights): return {**zero_weights, **optimised_weights} def _generate_target_portfolio(self, dt, weights): return self.order_sizer(dt, weights)
MIT License
ccextractor/sample-platform
mod_deploy/controllers.py
is_valid_signature
python
def is_valid_signature(x_hub_signature, data, private_key):
    hash_algorithm, github_signature = x_hub_signature.split('=', 1)
    algorithm = hashlib.__dict__.get(hash_algorithm)
    encoded_key = bytes(private_key, 'latin-1')
    mac = hmac.new(encoded_key, msg=data, digestmod=algorithm)
    return hmac.compare_digest(mac.hexdigest(), github_signature)
Check whether the GitHub hook request has a valid signature.

:param x_hub_signature: Signature to check
:type x_hub_signature: str
:param data: Signature's data
:type data: bytearray
:param private_key: Signature's token
:type private_key: str
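A minimal usage sketch for is_valid_signature, assuming the function above is importable; the secret and payload are made up for illustration and mirror how GitHub formats the X-Hub-Signature header.

import hashlib
import hmac

secret = "my-webhook-secret"               # hypothetical webhook secret
payload = b'{"ref": "refs/heads/master"}'  # hypothetical request body

# GitHub sends "<algorithm>=<hexdigest>"; build a matching header the same way.
digest = hmac.new(secret.encode("latin-1"), msg=payload,
                  digestmod=hashlib.sha1).hexdigest()
x_hub_signature = f"sha1={digest}"

assert is_valid_signature(x_hub_signature, payload, secret)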
https://github.com/ccextractor/sample-platform/blob/54dfc5c5badf7e1381af727347dcaa289fb5683a/mod_deploy/controllers.py#L107-L122
import hashlib import hmac import json import subprocess from datetime import datetime, timedelta from functools import wraps from ipaddress import IPv4Address, IPv6Address, ip_address, ip_network from os import path from shutil import copyfile from typing import Callable, List, Union import requests from flask import Blueprint, abort, g, request from git import InvalidGitRepositoryError, Repo mod_deploy = Blueprint('deploy', __name__) IPAddress = Union[IPv4Address, IPv6Address] cached_web_hook_blocks: List[str] = [] cached_load_time: datetime = datetime(1970, 1, 1) def cache_has_expired() -> bool: global cached_load_time return cached_load_time + timedelta(hours=1) < datetime.now() def get_cached_web_hook_blocks() -> List[str]: global cached_web_hook_blocks from run import config if len(cached_web_hook_blocks) == 0 or cache_has_expired(): client_id = config.get('GITHUB_CLIENT_ID', '') client_secret = config.get('GITHUB_CLIENT_SECRET', '') meta_json = requests.get( f'https://api.github.com/meta?client_id={client_id}&client_secret={client_secret}').json() try: cached_web_hook_blocks = meta_json['hooks'] except KeyError: g.log.critical(f"Failed to retrieve hook IP's from GitHub! API returned {meta_json}") return cached_web_hook_blocks def is_github_web_hook_ip(request_ip: IPAddress) -> bool: for block in get_cached_web_hook_blocks(): if request_ip in ip_network(block): return True return False def request_from_github(abort_code: int = 418) -> Callable: def decorator(f): @wraps(f) def decorated_function(*args, **kwargs): if request.method != 'POST': return 'OK' request_ip = ip_address(f"{request.remote_addr}") if not is_github_web_hook_ip(request_ip): g.log.warning(f"Unauthorized attempt to deploy by IP {request_ip}") abort(abort_code) for header in ['X-GitHub-Event', 'X-GitHub-Delivery', 'X-Hub-Signature', 'User-Agent']: if header not in request.headers: g.log.critical(f"{header} not in headers!") abort(abort_code) ua = request.headers.get('User-Agent') if not ua.startswith('GitHub-Hookshot/'): g.log.critical("User-Agent does not begin with GitHub-Hookshot/!") abort(abort_code) if not request.is_json: g.log.critical("Request is not JSON!") abort(abort_code) return f(*args, **kwargs) return decorated_function return decorator
ISC License
digitalglobe/gbdxtools
gbdxtools/images/meta.py
GeoDaskImage.aoi
python
def aoi(self, **kwargs):
    g = self._parse_geoms(**kwargs)
    if g is None:
        return self
    else:
        return self[g]
Subsets the Image by the given bounds.

Args:
    bbox (list): optional. A bounding box array [minx, miny, maxx, maxy]
    wkt (str): optional. A WKT geometry string
    geojson (str): optional. A GeoJSON geometry dictionary

Returns:
    image: an image instance of the same type
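A short usage sketch, assuming `img` is a GeoDaskImage instance covering the requested area; the coordinates are illustrative only.

# Subset by bounding box ([minx, miny, maxx, maxy]).
chip = img.aoi(bbox=[-104.99, 39.73, -104.95, 39.76])

# Or subset by a WKT polygon string.
chip = img.aoi(wkt="POLYGON((-104.99 39.73, -104.95 39.73, "
                   "-104.95 39.76, -104.99 39.76, -104.99 39.73))")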
https://github.com/digitalglobe/gbdxtools/blob/8ddef9f8822a49126e059b56e465da7447e33244/gbdxtools/images/meta.py#L234-L249
import os import random from functools import partial from itertools import product from collections import namedtuple from collections.abc import Container from gbdxtools.rda.io import to_geotiff from gbdxtools.rda.util import RatPolyTransform, AffineTransform, pad_safe_positive, pad_safe_negative, RDA_TO_DTYPE, get_proj from gbdxtools.images.mixins import PlotMixin, BandMethodsTemplate, Deprecations from shapely import ops, wkt from shapely.geometry import box, shape, mapping, asShape from shapely.geometry.base import BaseGeometry import pyproj import dask from dask import optimization from dask.delayed import delayed import dask.array as da import numpy as np from affine import Affine threads = int(os.environ.get('GBDX_THREADS', 8)) threaded_get = partial(dask.threaded.get, num_workers=threads) class DaskMeta(namedtuple("DaskMeta", ["dask", "name", "chunks", "dtype", "shape"])): __slots__ = () @classmethod def from_darray(cls, darr, new=tuple.__new__, len=len): dsk, _ = optimization.cull(darr.dask, darr.__dask_keys__()) itr = [dsk, darr.name, darr.chunks, darr.dtype, darr.shape] return cls._make(itr) @property def values(self): return self._asdict().values() class DaskImage(da.Array): def __new__(cls, dm, **kwargs): if isinstance(dm, da.Array): dm = DaskMeta.from_darray(dm) elif isinstance(dm, dict): dm = DaskMeta(**dm) elif isinstance(dm, DaskMeta): pass elif dm.__class__.__name__ in ("Op", "GraphMeta", "TmsMeta", "TemplateMeta"): itr = [dm.dask, dm.name, dm.chunks, dm.dtype, dm.shape] dm = DaskMeta._make(itr) else: raise ValueError("{} must be initialized with a DaskMeta, a dask array, or a dict with DaskMeta fields".format(cls.__name__)) self = da.Array.__new__(cls, dm.dask, dm.name, dm.chunks, dtype=dm.dtype, shape=dm.shape) if "__geo_transform__" in kwargs: self.__geo_transform__ = kwargs["__geo_transform__"] if "__geo_interface__" in kwargs: self.__geo_interface__ = kwargs["__geo_interface__"] return self @property def __daskmeta__(self): return DaskMeta(self) def read(self, bands=None, **kwargs): arr = self if bands is not None: arr = self[bands, ...] 
return arr.compute(scheduler=threaded_get) def randwindow(self, window_shape): row = random.randrange(window_shape[0], self.shape[1]) col = random.randrange(window_shape[1], self.shape[2]) return self[:, row-window_shape[0]:row, col-window_shape[1]:col] def iterwindows(self, count=64, window_shape=(256, 256)): if count is None: while True: yield self.randwindow(window_shape) else: for i in range(count): yield self.randwindow(window_shape) def window_at(self, geom, window_shape): y_size, x_size = window_shape[0], window_shape[1] bounds = box(*geom.bounds) px = ops.transform(self.__geo_transform__.rev, bounds).centroid miny, maxy = int(px.y - y_size/2), int(px.y + y_size/2) minx, maxx = int(px.x - x_size/2), int(px.x + x_size/2) _, y_max, x_max = self.shape if minx < 0 or miny < 0 or maxx > x_max or maxy > y_max: raise ValueError("Input geometry resulted in a window outside of the image") return self[:, miny:maxy, minx:maxx] def window_cover(self, window_shape, pad=True): size_y, size_x = window_shape[0], window_shape[1] _ndepth, _nheight, _nwidth = self.shape nheight, _m = divmod(_nheight, size_y) nwidth, _n = divmod(_nwidth, size_x) img = self if pad is True: new_height, new_width = _nheight, _nwidth if _m != 0: new_height = (nheight + 1) * size_y if _n != 0: new_width = (nwidth + 1) * size_x if (new_height, new_width) != (_nheight, _nwidth): bounds = box(0, 0, new_width, new_height) geom = ops.transform(self.__geo_transform__.fwd, bounds) img = self[geom] row_lims = range(0, img.shape[1], size_y) col_lims = range(0, img.shape[2], size_x) for maxy, maxx in product(row_lims, col_lims): reg = img[:, maxy:(maxy + size_y), maxx:(maxx + size_x)] if pad is False: if reg.shape[1:] == window_shape: yield reg else: yield reg class GeoDaskImage(DaskImage, Container, PlotMixin, BandMethodsTemplate, Deprecations): _default_proj = "EPSG:4326" def map_blocks(self, *args, **kwargs): darr = super(GeoDaskImage, self).map_blocks(*args, **kwargs) return GeoDaskImage(darr, __geo_interface__ = self.__geo_interface__, __geo_transform__ = self.__geo_transform__) def rechunk(self, *args, **kwargs): darr = super(GeoDaskImage, self).rechunk(*args, **kwargs) return GeoDaskImage(darr, __geo_interface__ = self.__geo_interface__, __geo_transform__ = self.__geo_transform__) def asShape(self): return asShape(self) @property def affine(self): return self.__geo_transform__._affine @property def bounds(self): return shape(self).bounds @property def proj(self): return self.__geo_transform__.proj.replace('epsg', 'EPSG')
MIT License
hyperion-project/hyperion.kodi
resources/lib/settings.py
Settings.grabbing
python
def grabbing(self):
    return self.enable and self.__player.isPlayingVideo() and (self.enableScreensaver or not self.screensaver)
Check whether grabbing is requested based on the current state and settings.
https://github.com/hyperion-project/hyperion.kodi/blob/b78eee87e0d1cd95e85ce672226d3b1dd4015b31/resources/lib/settings.py#L97-L101
import xbmc import xbmcaddon from misc import log class MyMonitor (xbmc.Monitor): def __init__(self, settings): xbmc.Monitor.__init__(self) self.__settings = settings self.__settings.screensaver = xbmc.getCondVisibility("System.ScreenSaverActive") self.__settings.abort = xbmc.abortRequested def onAbortRequested(self): self.__settings.abort = False def onSettingsChanged(self): self.__settings.readSettings() def onScreensaverDeactivated(self): self.__settings.screensaver = False def onScreensaverActivated(self): self.__settings.screensaver = True class Settings: def __init__(self): self.rev = 0 self.__monitor = MyMonitor(self) self.__player = xbmc.Player() self.readSettings() def __del__(self): del self.__monitor del self.__player def readSettings(self): log("Reading settings") addon = xbmcaddon.Addon() self.enable = addon.getSetting('hyperion_enable').lower() == 'true' self.enableScreensaver = addon.getSetting('screensaver_enable').lower() == 'true' self.address = addon.getSetting("hyperion_ip") self.port = int(addon.getSetting("hyperion_port")) self.priority = int(addon.getSetting("hyperion_priority")) self.timeout = int(addon.getSetting("reconnect_timeout")) self.capture_width = int(addon.getSetting("capture_width")) self.capture_height = int(addon.getSetting("capture_height")) self.useDefaultDelay = addon.getSetting('use_default_delay').lower() == 'true' self.delay = int(addon.getSetting("delay")) self.delay24 = int(addon.getSetting("delay24")) self.delay25 = int(addon.getSetting("delay25")) self.delay50 = int(addon.getSetting("delay50")) self.delay59 = int(addon.getSetting("delay59")) self.delay60 = int(addon.getSetting("delay60")) self.showErrorMessage = True self.rev += 1
MIT License
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_object_reference.py
V1ObjectReference.resource_version
python
def resource_version(self):
    return self._resource_version
Gets the resource_version of this V1ObjectReference.  # noqa: E501

Specific resourceVersion to which this reference is made, if any. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency  # noqa: E501

:return: The resource_version of this V1ObjectReference.  # noqa: E501
:rtype: str
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_object_reference.py#L201-L209
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1ObjectReference(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'field_path': 'str', 'kind': 'str', 'name': 'str', 'namespace': 'str', 'resource_version': 'str', 'uid': 'str' } attribute_map = { 'api_version': 'apiVersion', 'field_path': 'fieldPath', 'kind': 'kind', 'name': 'name', 'namespace': 'namespace', 'resource_version': 'resourceVersion', 'uid': 'uid' } def __init__(self, api_version=None, field_path=None, kind=None, name=None, namespace=None, resource_version=None, uid=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._field_path = None self._kind = None self._name = None self._namespace = None self._resource_version = None self._uid = None self.discriminator = None if api_version is not None: self.api_version = api_version if field_path is not None: self.field_path = field_path if kind is not None: self.kind = kind if name is not None: self.name = name if namespace is not None: self.namespace = namespace if resource_version is not None: self.resource_version = resource_version if uid is not None: self.uid = uid @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def field_path(self): return self._field_path @field_path.setter def field_path(self, field_path): self._field_path = field_path @property def kind(self): return self._kind @kind.setter def kind(self, kind): self._kind = kind @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def namespace(self): return self._namespace @namespace.setter def namespace(self, namespace): self._namespace = namespace @property
Apache License 2.0
artyomovs/netbox-plugin-config-officer
config_officer/config_manager.py
merge_configs
python
def merge_configs(config1, config2):
    output = []
    if config1:
        for line in config1:
            output.append(line)
            if line in config2:
                if is_section(config1, line):
                    for conf_2_line in get_lines_in_section(config2, line):
                        output.append(conf_2_line)
        output.append('!')
    if config2:
        for line in config2:
            if line != "##_##":
                output.append(line)
        output.append('!')
    return output
Merge two configs with the same sections.
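A small illustrative call, assuming merge_configs and its helpers (is_section and get_lines_in_section from the module below) are importable; the config lines are made-up Cisco-style snippets.

running = [
    "interface GigabitEthernet0/1",
    " description uplink",
]
intended = [
    "interface GigabitEthernet0/1",
    " ip address 10.0.0.1 255.255.255.0",
    "ntp server 10.0.0.254",
]

merged = merge_configs(running, intended)
# Lines under the shared "interface ..." section are combined into one block,
# and the remaining unmatched lines from the second config are appended after it.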
https://github.com/artyomovs/netbox-plugin-config-officer/blob/6534d97d529ef12fc2ed68bb4a9ce2d25c4f0f8e/config_officer/config_manager.py#L44-L62
import diffios
import re


def get_lines_in_section(config, section):
    output = []
    if section in config:
        if config.index(section) == len(config):
            return []
        for i in range(config.index(section) + 1, len(config)):
            line = config[i]
            if re.search(r'^ ', line):
                output.append(line)
                if section in config:
                    config[config.index(section)] = "##_##"
                config[i] = "##_##"
            else:
                break
        return output
    else:
        return []


def is_section(config, line):
    if config.index(line) + 1 == len(config):
        return False
    if re.match(r"^ ", line):
        return False
    else:
        if re.match(r"^ ", config[config.index(line) + 1]):
            return True
        else:
            return False
Apache License 2.0
cisco-en-programmability/dnacentersdk
dnacentersdk/restsession.py
RestSession.refresh_token
python
def refresh_token(self):
    self._access_token = self._get_access_token()
    self.update_headers({'X-Auth-Token': self.access_token})
Call the get_access_token method and update the session's auth header with the new token.
https://github.com/cisco-en-programmability/dnacentersdk/blob/ef2adde6113e7a6acd28a287007eb470fa39d31f/dnacentersdk/restsession.py#L228-L233
from __future__ import ( absolute_import, division, print_function, unicode_literals, ) from future import standard_library standard_library.install_aliases() import os import re import time import urllib.parse import warnings from builtins import * import requests from past.builtins import basestring from .config import ( DEFAULT_SINGLE_REQUEST_TIMEOUT, DEFAULT_WAIT_ON_RATE_LIMIT, DEFAULT_VERIFY ) from .exceptions import ( dnacentersdkException, RateLimitError, RateLimitWarning, ApiError, DownloadFailure, ) from .response_codes import EXPECTED_RESPONSE_CODE from .utils import ( check_response_code, check_type, extract_and_parse_json, validate_base_url, pprint_request_info, pprint_response_info, ) from requests_toolbelt.multipart import encoder import socket import errno import logging from requests.packages.urllib3.response import HTTPResponse logger = logging.getLogger(__name__) class RestSession(object): def __init__(self, get_access_token, access_token, base_url, single_request_timeout=DEFAULT_SINGLE_REQUEST_TIMEOUT, wait_on_rate_limit=DEFAULT_WAIT_ON_RATE_LIMIT, verify=DEFAULT_VERIFY, version=None, debug=False): check_type(access_token, basestring, may_be_none=False) check_type(base_url, basestring, may_be_none=False) check_type(single_request_timeout, int) check_type(wait_on_rate_limit, bool, may_be_none=False) check_type(verify, (bool, basestring), may_be_none=False) check_type(version, basestring, may_be_none=False) check_type(debug, (bool), may_be_none=False) super(RestSession, self).__init__() self._base_url = str(validate_base_url(base_url)) self._get_access_token = get_access_token self._access_token = str(access_token) self._single_request_timeout = single_request_timeout self._wait_on_rate_limit = wait_on_rate_limit self._verify = verify self._version = version self._debug = debug if debug: logger.setLevel(logging.DEBUG) logger.propagate = True else: logger.setLevel(logging.INFO) if verify is False: requests.packages.urllib3.disable_warnings() self._req_session = requests.session() self.update_headers({'X-Auth-Token': access_token, 'Content-type': 'application/json;charset=utf-8'}) @property def version(self): return self._version @property def verify(self): return self._verify @verify.setter def verify(self, value): check_type(value, (bool, basestring), may_be_none=False) self._verify = value @property def base_url(self): return self._base_url @base_url.setter def base_url(self, value): check_type(value, basestring, may_be_none=False) self._base_url = str(validate_base_url(value)) @property def access_token(self): return self._access_token @property def single_request_timeout(self): return self._single_request_timeout @single_request_timeout.setter def single_request_timeout(self, value): check_type(value, int) assert value is None or value > 0 self._single_request_timeout = value @property def wait_on_rate_limit(self): return self._wait_on_rate_limit @wait_on_rate_limit.setter def wait_on_rate_limit(self, value): check_type(value, bool, may_be_none=False) self._wait_on_rate_limit = value @property def headers(self): return self._req_session.headers.copy() @property def debug(self): return self._debug def update_headers(self, headers): check_type(headers, dict, may_be_none=False) self._req_session.headers.update(headers)
MIT License
catalystneuro/nwb-conversion-tools
nwb_conversion_tools/datainterfaces/ecephys/neuroscope/neuroscope_utils.py
get_xml_file_path
python
def get_xml_file_path(data_file_path: str):
    session_path = Path(data_file_path).parent
    return str(session_path / f"{session_path.stem}.xml")
Infer the xml_file_path from the data_file_path (.dat or .eeg). Assumes the two are in the same folder and follow the session_id naming convention.
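A quick usage sketch with a hypothetical session path that follows the naming convention described above.

xml_path = get_xml_file_path("/data/Session1/Session1.dat")
print(xml_path)  # "/data/Session1/Session1.xml"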
https://github.com/catalystneuro/nwb-conversion-tools/blob/dce20066488319feeab29f27705c31bdc7ca8827/nwb_conversion_tools/datainterfaces/ecephys/neuroscope/neuroscope_utils.py#L7-L14
from pathlib import Path

from lxml import etree as et
BSD 3-Clause New or Revised License
beetbox/beets
beetsplug/fetchart.py
RequestMixin.request
python
def request(self, *args, **kwargs):
    return _logged_get(self._log, *args, **kwargs)
Like `requests.get`, but uses the logger `self._log`. See also `_logged_get`.
https://github.com/beetbox/beets/blob/41fc611290323ce927a8cb990f89fc5dcae57063/beetsplug/fetchart.py#L211-L216
from contextlib import closing import os import re from tempfile import NamedTemporaryFile from collections import OrderedDict import requests from beets import plugins from beets import importer from beets import ui from beets import util from beets import config from mediafile import image_mime_type from beets.util.artresizer import ArtResizer from beets.util import sorted_walk from beets.util import syspath, bytestring_path, py3_path import confuse CONTENT_TYPES = { 'image/jpeg': [b'jpg', b'jpeg'], 'image/png': [b'png'] } IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts] class Candidate: CANDIDATE_BAD = 0 CANDIDATE_EXACT = 1 CANDIDATE_DOWNSCALE = 2 CANDIDATE_DOWNSIZE = 3 MATCH_EXACT = 0 MATCH_FALLBACK = 1 def __init__(self, log, path=None, url=None, source='', match=None, size=None): self._log = log self.path = path self.url = url self.source = source self.check = None self.match = match self.size = size def _validate(self, plugin): if not self.path: return self.CANDIDATE_BAD if (not (plugin.enforce_ratio or plugin.minwidth or plugin.maxwidth or plugin.max_filesize)): return self.CANDIDATE_EXACT if not self.size: self.size = ArtResizer.shared.get_size(self.path) self._log.debug('image size: {}', self.size) if not self.size: self._log.warning('Could not get size of image (please see ' 'documentation for dependencies). ' 'The configuration options `minwidth`, ' '`enforce_ratio` and `max_filesize` ' 'may be violated.') return self.CANDIDATE_EXACT short_edge = min(self.size) long_edge = max(self.size) if plugin.minwidth and self.size[0] < plugin.minwidth: self._log.debug('image too small ({} < {})', self.size[0], plugin.minwidth) return self.CANDIDATE_BAD edge_diff = long_edge - short_edge if plugin.enforce_ratio: if plugin.margin_px: if edge_diff > plugin.margin_px: self._log.debug('image is not close enough to being ' 'square, ({} - {} > {})', long_edge, short_edge, plugin.margin_px) return self.CANDIDATE_BAD elif plugin.margin_percent: margin_px = plugin.margin_percent * long_edge if edge_diff > margin_px: self._log.debug('image is not close enough to being ' 'square, ({} - {} > {})', long_edge, short_edge, margin_px) return self.CANDIDATE_BAD elif edge_diff: self._log.debug('image is not square ({} != {})', self.size[0], self.size[1]) return self.CANDIDATE_BAD downscale = False if plugin.maxwidth and self.size[0] > plugin.maxwidth: self._log.debug('image needs rescaling ({} > {})', self.size[0], plugin.maxwidth) downscale = True downsize = False if plugin.max_filesize: filesize = os.stat(syspath(self.path)).st_size if filesize > plugin.max_filesize: self._log.debug('image needs resizing ({}B > {}B)', filesize, plugin.max_filesize) downsize = True if downscale: return self.CANDIDATE_DOWNSCALE elif downsize: return self.CANDIDATE_DOWNSIZE else: return self.CANDIDATE_EXACT def validate(self, plugin): self.check = self._validate(plugin) return self.check def resize(self, plugin): if self.check == self.CANDIDATE_DOWNSCALE: self.path = ArtResizer.shared.resize(plugin.maxwidth, self.path, quality=plugin.quality, max_filesize=plugin.max_filesize) elif self.check == self.CANDIDATE_DOWNSIZE: self.path = ArtResizer.shared.resize(max(self.size), self.path, quality=plugin.quality, max_filesize=plugin.max_filesize) def _logged_get(log, *args, **kwargs): req_kwargs = kwargs send_kwargs = {} for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'): if arg in kwargs: send_kwargs[arg] = req_kwargs.pop(arg) if 'message' in kwargs: message = kwargs.pop('message') else: 
message = 'getting URL' req = requests.Request('GET', *args, **req_kwargs) with requests.Session() as s: s.headers = {'User-Agent': 'beets'} prepped = s.prepare_request(req) settings = s.merge_environment_settings( prepped.url, {}, None, None, None ) send_kwargs.update(settings) log.debug('{}: {}', message, prepped.url) return s.send(prepped, **send_kwargs) class RequestMixin:
MIT License
python-discord/sir-lancebot
bot/exts/fun/trivia_quiz.py
DynamicQuestionGen.base_units_convert
python
def base_units_convert(cls, q_format: str, a_format: str) -> QuizEntry:
    unit = random.choice(list(cls.UNITS_TO_BASE_UNITS))

    question = q_format.format(
        unit + " " + cls.UNITS_TO_BASE_UNITS[unit][0]
    )
    answer = a_format.format(
        cls.UNITS_TO_BASE_UNITS[unit][1]
    )

    return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
Generate an SI base units conversion question.
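A minimal call sketch; the format strings are placeholders (the real ones live in the cog's question bank), and the classmethod is assumed importable as shown in the context below.

q_format = "What is the SI base-unit expression for one {}?"  # placeholder
a_format = "{}"                                               # placeholder

entry = DynamicQuestionGen.base_units_convert(q_format, a_format)
print(entry.question)  # e.g. "What is the SI base-unit expression for one newton (unit of force)?"
print(entry.answers)   # e.g. ["m*kg*s^-2"]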
https://github.com/python-discord/sir-lancebot/blob/559e76ffbef7af85132d86f2e3ab8acf7e7f5eef/bot/exts/fun/trivia_quiz.py#L188-L199
import asyncio import json import logging import operator import random import re import string from collections import defaultdict from dataclasses import dataclass from datetime import datetime, timedelta from pathlib import Path from typing import Callable, Optional import discord from discord.ext import commands, tasks from rapidfuzz import fuzz from bot.bot import Bot from bot.constants import Client, Colours, NEGATIVE_REPLIES, Roles logger = logging.getLogger(__name__) DEFAULT_QUESTION_LIMIT = 7 STANDARD_VARIATION_TOLERANCE = 88 DYNAMICALLY_GEN_VARIATION_TOLERANCE = 97 MAX_ERROR_FETCH_TRIES = 3 WRONG_ANS_RESPONSE = [ "No one answered correctly!", "Better luck next time...", ] RULES = ( "No cheating and have fun!", "Points for each question reduces by 25 after 10s or after a hint. Total time is 30s per question" ) WIKI_FEED_API_URL = "https://en.wikipedia.org/api/rest_v1/feed/featured/{date}" TRIVIA_QUIZ_ICON = ( "https://raw.githubusercontent.com/python-discord/branding/main/icons/trivia_quiz/trivia-quiz-dist.png" ) @dataclass(frozen=True) class QuizEntry: question: str answers: list[str] var_tol: int class DynamicQuestionGen: N_PREFIX_STARTS_AT = 5 N_PREFIXES = [ "penta", "hexa", "hepta", "octa", "nona", "deca", "hendeca", "dodeca", "trideca", "tetradeca", ] PLANETS = [ ("1st", "Mercury"), ("2nd", "Venus"), ("3rd", "Earth"), ("4th", "Mars"), ("5th", "Jupiter"), ("6th", "Saturn"), ("7th", "Uranus"), ("8th", "Neptune"), ] TAXONOMIC_HIERARCHY = [ "species", "genus", "family", "order", "class", "phylum", "kingdom", "domain", ] UNITS_TO_BASE_UNITS = { "hertz": ("(unit of frequency)", "s^-1"), "newton": ("(unit of force)", "m*kg*s^-2"), "pascal": ("(unit of pressure & stress)", "m^-1*kg*s^-2"), "joule": ("(unit of energy & quantity of heat)", "m^2*kg*s^-2"), "watt": ("(unit of power)", "m^2*kg*s^-3"), "coulomb": ("(unit of electric charge & quantity of electricity)", "s*A"), "volt": ("(unit of voltage & electromotive force)", "m^2*kg*s^-3*A^-1"), "farad": ("(unit of capacitance)", "m^-2*kg^-1*s^4*A^2"), "ohm": ("(unit of electric resistance)", "m^2*kg*s^-3*A^-2"), "weber": ("(unit of magnetic flux)", "m^2*kg*s^-2*A^-1"), "tesla": ("(unit of magnetic flux density)", "kg*s^-2*A^-1"), } @classmethod def linear_system(cls, q_format: str, a_format: str) -> QuizEntry: x, y = random.randint(2, 5), random.randint(2, 5) answer = a_format.format(x, y) coeffs = random.sample(range(1, 6), 4) question = q_format.format( coeffs[0], coeffs[1], coeffs[0] * x + coeffs[1] * y, coeffs[2], coeffs[3], coeffs[2] * x + coeffs[3] * y, ) return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE) @classmethod def mod_arith(cls, q_format: str, a_format: str) -> QuizEntry: quotient, m, b = random.randint(30, 40), random.randint(10, 20), random.randint(200, 350) ans = random.randint(0, 9) a = quotient * m + ans - b question = q_format.format(a, b, m) answer = a_format.format(ans) return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE) @classmethod def ngonal_prism(cls, q_format: str, a_format: str) -> QuizEntry: n = random.randint(0, len(cls.N_PREFIXES) - 1) question = q_format.format(cls.N_PREFIXES[n]) answer = a_format.format((n + cls.N_PREFIX_STARTS_AT) * 2) return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE) @classmethod def imag_sqrt(cls, q_format: str, a_format: str) -> QuizEntry: ans_coeff = random.randint(3, 10) question = q_format.format(ans_coeff ** 2) answer = a_format.format(ans_coeff) return QuizEntry(question, [answer], 
DYNAMICALLY_GEN_VARIATION_TOLERANCE) @classmethod def binary_calc(cls, q_format: str, a_format: str) -> QuizEntry: a = random.randint(15, 20) b = random.randint(10, a) oper = random.choice( ( ("+", operator.add), ("-", operator.sub), ("*", operator.mul), ) ) if oper[0] == "*": a -= 5 b -= 5 question = q_format.format(a, oper[0], b) answer = a_format.format(oper[1](a, b)) return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE) @classmethod def solar_system(cls, q_format: str, a_format: str) -> QuizEntry: planet = random.choice(cls.PLANETS) question = q_format.format(planet[0]) answer = a_format.format(planet[1]) return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE) @classmethod def taxonomic_rank(cls, q_format: str, a_format: str) -> QuizEntry: level = random.randint(0, len(cls.TAXONOMIC_HIERARCHY) - 2) question = q_format.format(cls.TAXONOMIC_HIERARCHY[level]) answer = a_format.format(cls.TAXONOMIC_HIERARCHY[level + 1]) return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE) @classmethod
MIT License
trustyjaid/trusty-cogs
hockey/goal.py
Goal.post_team_goal
python
async def post_team_goal(self, bot: Red, game_data: Game) -> List[Tuple[int, int, int]]:
    post_state = ["all", game_data.home_team, game_data.away_team]
    msg_list = []
    if "Edmonton Oilers" in self.team_name and "missed" not in self.event.lower():
        try:
            hue = Oilers(bot)
            self.tasks.append(hue.goal_lights())
        except Exception:
            pass
    goal_embed = await self.goal_post_embed(game_data)
    goal_text = await self.goal_post_text(game_data)
    tasks = []
    all_channels = await bot.get_cog("Hockey").config.all_channels()
    post_data = []
    async for channel_id, data in AsyncIter(all_channels.items(), steps=100):
        channel = await get_channel_obj(bot, channel_id, data)
        if not channel:
            continue
        should_post = await check_to_post(bot, channel, data, post_state, "Goal")
        if should_post:
            post_data.append(
                await self.actually_post_goal(bot, channel, goal_embed, goal_text)
            )
    for channel in post_data:
        if channel is None:
            continue
        else:
            msg_list.append(channel)
    return msg_list
Creates an embed and sends a message if a team has scored a goal.
https://github.com/trustyjaid/trusty-cogs/blob/41978fa07fc2964f0dec8c0bbfc5d112802df321/hockey/goal.py#L165-L199
from __future__ import annotations import asyncio import logging from datetime import datetime, timezone from typing import TYPE_CHECKING, List, Optional, Tuple import discord from redbot import VersionInfo, version_info from redbot.core.bot import Red from redbot.core.i18n import Translator from redbot.core.utils import AsyncIter, bounded_gather from .constants import HEADSHOT_URL, TEAMS from .helper import check_to_post, get_channel_obj, get_team if TYPE_CHECKING: from .game import Game try: from .oilers import Oilers except ImportError: pass _ = Translator("Hockey", __file__) log = logging.getLogger("red.trusty-cogs.Hockey") class Goal: goal_id: str team_name: str scorer_id: int jersey_no: str description: str period: int period_ord: str time_remaining: str time: datetime home_score: int away_score: int strength: str empty_net: bool event: str link: Optional[str] def __init__(self, **kwargs): super().__init__() self.goal_id = kwargs.get("goal_id") self.team_name = kwargs.get("team_name") self.scorer_id = kwargs.get("scorer_id") self.headshot = HEADSHOT_URL.format(kwargs.get("scorer_id", "")) self.jersey_no = kwargs.get("jersey_no") self.description = kwargs.get("description") self.period = kwargs.get("period") self.period_ord = kwargs.get("period_ord") self.time_remaining = kwargs.get("time_remaining") time = kwargs.get("time") time = kwargs.get("time", "") time = datetime.strptime(time, "%Y-%m-%dT%H:%M:%SZ") self.time = time.replace(tzinfo=timezone.utc) self.home_score = kwargs.get("home_score") self.away_score = kwargs.get("away_score") self.strength = kwargs.get("strength") self.empty_net = kwargs.get("empty_net") self.event = kwargs.get("event") self.link = kwargs.get("link", None) self.tasks: List[asyncio.Task] = [] self.home_shots: int = kwargs.get("home_shots", 0) self.away_shots: int = kwargs.get("away_shots", 0) def __repr__(self): return "<Hockey Goal team={0.team_name} id={0.goal_id} >".format(self) def to_json(self) -> dict: return { "goal_id": self.goal_id, "team_name": self.team_name, "scorer_id": self.scorer_id, "jersey_no": self.jersey_no, "description": self.description, "period": self.period, "period_ord": self.period_ord, "time_remaining": self.time_remaining, "time": self.time.strftime("%Y-%m-%dT%H:%M:%SZ"), "home_score": self.home_score, "away_score": self.away_score, "strength": self.strength, "empty_net": self.empty_net, "event": self.event, "link": self.link, "home_shots": self.home_shots, "away_shots": self.away_shots, } @classmethod async def from_json( cls, data: dict, players: dict, media_content: Optional[dict] = None ) -> Goal: scorer_id = [] if "players" in data: scorer_id = [ p["player"]["id"] for p in data["players"] if p["playerType"] in ["Scorer", "Shooter"] ] if "strength" in data["result"]: str_dat = data["result"]["strength"]["name"] strength = "Even Strength" if str_dat == "Even" else str_dat if data["about"]["ordinalNum"] == "SO": strength = "Shoot Out" else: strength = " " empty_net = data["result"]["emptyNet"] if "emptyNet" in data["result"] else False player_id = f"ID{scorer_id[0]}" if scorer_id != [] else None if player_id in players: jersey_no = players[player_id]["jerseyNumber"] else: jersey_no = "" link = None if media_content: event_id = data["about"]["eventId"] try: for highlight in media_content["media"]["milestones"]["items"]: if highlight["statsEventId"] == str(event_id): for playback in highlight["highlight"]["playbacks"]: if playback["name"] == "FLASH_1800K_896x504": link = playback["url"] except KeyError: pass return cls( 
goal_id=data["result"]["eventCode"], team_name=data["team"]["name"], scorer_id=scorer_id[0] if scorer_id != [] else None, jersey_no=jersey_no, description=data["result"]["description"], period=data["about"]["period"], period_ord=data["about"]["ordinalNum"], time_remaining=data["about"]["periodTimeRemaining"], time=data["about"]["dateTime"], home_score=data["about"]["goals"]["home"], away_score=data["about"]["goals"]["away"], strength=strength, empty_net=empty_net, event=data["result"]["event"], link=link, home_shots=data.get("home_shots", 0), away_shots=data.get("away_shots", 0), ) @property def timestamp(self) -> int: return int(self.time.timestamp())
MIT License
matthiasvalvekens/pyhanko
pyhanko/stamp.py
QRPosition.from_config
python
def from_config(cls, config_str) -> 'QRPosition':
    try:
        return {
            'left': QRPosition.LEFT_OF_TEXT,
            'right': QRPosition.RIGHT_OF_TEXT,
            'top': QRPosition.ABOVE_TEXT,
            'bottom': QRPosition.BELOW_TEXT
        }[config_str.lower()]
    except KeyError:
        raise ConfigurationError(
            f"'{config_str}' is not a valid QR position setting; valid "
            f"values are 'left', 'right', 'top', 'bottom'"
        )
Convert from a configuration string.

:param config_str: A string: 'left', 'right', 'top', 'bottom'
:return: An :class:`.QRPosition` value.
:raise ConfigurationError: on unexpected string inputs.
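A short usage sketch, assuming QRPosition and ConfigurationError are importable from pyhanko as in the context below.

QRPosition.from_config("Left")    # -> QRPosition.LEFT_OF_TEXT (lookup is case-insensitive)
QRPosition.from_config("middle")  # raises ConfigurationError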
https://github.com/matthiasvalvekens/pyhanko/blob/5a4de16b7ed1a7ce52fe9e26064b12f0fc487432/pyhanko/stamp.py#L285-L306
import enum import uuid from binascii import hexlify from dataclasses import dataclass from datetime import datetime from typing import Optional import qrcode import tzlocal from pyhanko.pdf_utils import content, generic, layout from pyhanko.pdf_utils.config_utils import ConfigurableMixin, ConfigurationError from pyhanko.pdf_utils.generic import pdf_name, pdf_string from pyhanko.pdf_utils.incremental_writer import IncrementalPdfFileWriter from pyhanko.pdf_utils.misc import rd from pyhanko.pdf_utils.qr import PdfStreamQRImage from pyhanko.pdf_utils.text import DEFAULT_BOX_LAYOUT, TextBox, TextBoxStyle from pyhanko.pdf_utils.writer import BasePdfFileWriter, init_xobject_dictionary __all__ = [ "AnnotAppearances", "BaseStampStyle", "TextStampStyle", "QRStampStyle", "StaticStampStyle", "QRPosition", "BaseStamp", "TextStamp", "QRStamp", "StaticContentStamp", "text_stamp_file", "qr_stamp_file", "STAMP_ART_CONTENT", ] class AnnotAppearances: def __init__(self, normal: generic.IndirectObject, rollover: Optional[generic.IndirectObject] = None, down: Optional[generic.IndirectObject] = None): self.normal = normal self.rollover = rollover self.down = down def as_pdf_object(self) -> generic.DictionaryObject: res = generic.DictionaryObject({pdf_name('/N'): self.normal}) if self.rollover is not None: res[pdf_name('/R')] = self.rollover if self.down is not None: res[pdf_name('/D')] = self.down return res def _get_background_content(bg_spec) -> content.PdfContent: if not isinstance(bg_spec, str): raise ConfigurationError( "Background specification must be a string" ) if bg_spec == '__stamp__': return STAMP_ART_CONTENT elif bg_spec.endswith('.pdf'): return content.ImportedPdfPage(bg_spec) else: from PIL import Image from pyhanko.pdf_utils.images import PdfImage img = Image.open(bg_spec) return PdfImage(img, writer=None) @dataclass(frozen=True) class BaseStampStyle(ConfigurableMixin): border_width: int = 3 background: content.PdfContent = None background_layout: layout.SimpleBoxLayoutRule = layout.SimpleBoxLayoutRule( x_align=layout.AxisAlignment.ALIGN_MID, y_align=layout.AxisAlignment.ALIGN_MID, margins=layout.Margins.uniform(5) ) background_opacity: float = 0.6 @classmethod def process_entries(cls, config_dict): super().process_entries(config_dict) bg_spec = None try: bg_spec = config_dict['background'] except KeyError: pass if bg_spec is not None: config_dict['background'] = _get_background_content(bg_spec) def create_stamp(self, writer: BasePdfFileWriter, box: layout.BoxConstraints, text_params: dict) -> 'BaseStamp': raise NotImplementedError @dataclass(frozen=True) class StaticStampStyle(BaseStampStyle): background_opacity: float = 1.0 @classmethod def from_pdf_file(cls, file_name, page_ix=0, **kwargs) -> 'StaticStampStyle': return StaticStampStyle( background=content.ImportedPdfPage(file_name, page_ix=page_ix), **kwargs ) def create_stamp(self, writer: BasePdfFileWriter, box: layout.BoxConstraints, text_params: dict) -> 'StaticContentStamp': return StaticContentStamp(writer=writer, style=self, box=box) @dataclass(frozen=True) class TextStampStyle(BaseStampStyle): text_box_style: TextBoxStyle = TextBoxStyle() inner_content_layout: layout.SimpleBoxLayoutRule = None stamp_text: str = '%(ts)s' timestamp_format: str = '%Y-%m-%d %H:%M:%S %Z' def create_stamp(self, writer: BasePdfFileWriter, box: layout.BoxConstraints, text_params: dict) -> 'TextStamp': return TextStamp( writer=writer, style=self, box=box, text_params=text_params ) class QRPosition(enum.Enum): LEFT_OF_TEXT = layout.SimpleBoxLayoutRule( 
x_align=layout.AxisAlignment.ALIGN_MIN, y_align=layout.AxisAlignment.ALIGN_MID, ) RIGHT_OF_TEXT = layout.SimpleBoxLayoutRule( x_align=layout.AxisAlignment.ALIGN_MAX, y_align=layout.AxisAlignment.ALIGN_MID, ) ABOVE_TEXT = layout.SimpleBoxLayoutRule( y_align=layout.AxisAlignment.ALIGN_MAX, x_align=layout.AxisAlignment.ALIGN_MID, ) BELOW_TEXT = layout.SimpleBoxLayoutRule( y_align=layout.AxisAlignment.ALIGN_MIN, x_align=layout.AxisAlignment.ALIGN_MID, ) @property def horizontal_flow(self): return self in (QRPosition.LEFT_OF_TEXT, QRPosition.RIGHT_OF_TEXT) @classmethod
MIT License
dismalpy/dismalpy
dismalpy/ssm/mlemodel.py
MLEResultsMixin.kalman_gain
python
def kalman_gain(self):
    return self._kalman_gain
Kalman gain matrices
https://github.com/dismalpy/dismalpy/blob/93bb317ca64971fd4d2a62e3da8dae6c2b8947fc/dismalpy/ssm/mlemodel.py#L342-L346
from __future__ import division, absolute_import, print_function import numpy as np from .simulation_smoother import SimulationSmoother, SimulationSmoothResults try: from statsmodels.tsa.statespace import mlemodel, varmax from statsmodels.tsa.statespace.mlemodel import PredictionResultsWrapper except ImportError: from .compat import mlemodel from .compat.mlemodel import PredictionResultsWrapper import statsmodels.base.wrapper as wrap class MLEMixin(object): def initialize_statespace(self, **kwargs): endog = self.endog.T self.ssm = SimulationSmoother(endog.shape[0], self.k_states, **kwargs) self.ssm.bind(endog) self.k_endog = self.ssm.k_endog def fit(self, *args, **kwargs): return_params = kwargs.get('return_params', False) kwargs['return_params'] = False results = super(MLEMixin, self).fit(*args, **kwargs) if return_params: results = results.params else: result_kwargs = {} if 'cov_type' in kwargs: result_kwargs['cov_type'] = kwargs['cov_type'] if 'cov_kwds' in kwargs: result_kwargs['cov_kwds'] = kwargs['cov_kwds'] mlefit = results.mlefit results = self.smooth(results.params, **result_kwargs) results.mlefit = mlefit results.mle_retvals = mlefit.mle_retvals results.mle_settings = mlefit.mle_settings return results def filter(self, params, transformed=True, cov_type=None, cov_kwds=None, return_ssm=False, **kwargs): params = np.array(params, ndmin=1) if not transformed: params = self.transform_params(params) transformed = True results = super(MLEMixin, self).filter(params, transformed, return_ssm=True, **kwargs) if not return_ssm: result_kwargs = {} if cov_type is not None: result_kwargs['cov_type'] = cov_type if cov_kwds is not None: result_kwargs['cov_kwds'] = cov_kwds results = MLEResultsWrapper( MLEResults(self, params, results, **result_kwargs) ) return results def smooth(self, params, transformed=True, cov_type=None, cov_kwds=None, return_ssm=False, **kwargs): params = np.array(params, ndmin=1) if not transformed: params = self.transform_params(params) self.update(params, transformed=True) self.data.param_names = self.param_names results = self.ssm.smooth(**kwargs) if not return_ssm: result_kwargs = {} if cov_type is not None: result_kwargs['cov_type'] = cov_type if cov_kwds is not None: result_kwargs['cov_kwds'] = cov_kwds results = MLEResultsWrapper( MLEResults(self, params, results, **result_kwargs) ) return results def simulation_smoother(self, **kwargs): return self.ssm.simulation_smoother(**kwargs) class MLEModel(MLEMixin, mlemodel.MLEModel): pass class MLEResultsMixin(object): def __init__(self, model, params, smoother_results, cov_type='opg', cov_kwds=None, **kwargs): super(MLEResultsMixin, self).__init__( model, params, smoother_results, cov_type=cov_type, cov_kwds=cov_kwds, **kwargs ) self.smoother_results = smoother_results @property
BSD 2-Clause Simplified License
nguy/artview
artview/components/manual_unfold.py
ManualUnfold.__init__
python
def __init__(self, Vradar=None, Vpoints=None, name=" ManualUnfold", parent=None):
    super(ManualUnfold, self).__init__(name=name, parent=parent)
    self.lockNyquist = False
    if Vradar is None:
        self.Vradar = Variable(None)
    else:
        self.Vradar = Vradar
    if Vpoints is None:
        self.Vpoints = Variable(None)
    else:
        self.Vpoints = Vpoints
    self.sharedVariables = {"Vradar": self.NewRadar, "Vpoints": None}
    self.connectAllVariables()

    self.central_widget = QtWidgets.QWidget()
    self.setCentralWidget(self.central_widget)
    self.layout = QtWidgets.QGridLayout(self.central_widget)

    self.velField = QtWidgets.QLineEdit(
        pyart.config.get_field_name('velocity'))
    self.layout.addWidget(QtWidgets.QLabel("vel_field:"), 0, 0)
    self.layout.addWidget(self.velField, 1, 0)

    self.corrVelField = QtWidgets.QLineEdit(
        pyart.config.get_field_name('corrected_velocity'))
    self.layout.addWidget(QtWidgets.QLabel("corr_vel_field:"), 2, 0)
    self.layout.addWidget(self.corrVelField, 3, 0)

    self.nyquistVelocity = QtWidgets.QDoubleSpinBox()
    self.nyquistVelocity.setRange(-1, 1000)
    self.nyquistVelocity.setValue(-1)
    self.layout.addWidget(QtWidgets.QLabel("nyquist_velocity:"), 4, 0)
    self.layout.addWidget(self.nyquistVelocity, 5, 0)

    self.positiveButton = QtWidgets.QPushButton("Unfold Positive Values")
    self.positiveButton.clicked.connect(self.positiveUnfold)
    self.layout.addWidget(self.positiveButton, 6, 0)

    self.negativeButton = QtWidgets.QPushButton("Unfold Negative Values")
    self.negativeButton.clicked.connect(self.negativeUnfold)
    self.layout.addWidget(self.negativeButton, 7, 0)

    self.unfoldList = collections.deque(maxlen=30)
    self.foldButton = QtWidgets.QPushButton("Fold Back")
    self.foldButton.clicked.connect(self.foldBack)
    self.layout.addWidget(self.foldButton, 8, 0)

    self.buttonHelp = QtWidgets.QPushButton("Help")
    self.buttonHelp.setToolTip("About using Manual Unfold")
    self.buttonHelp.clicked.connect(self._displayHelp)
    self.layout.addWidget(self.buttonHelp, 9, 0)

    self.layout.addItem(QtWidgets.QSpacerItem(
        0, 0, QtWidgets.QSizePolicy.Expanding,
        QtWidgets.QSizePolicy.Expanding), 10, 0)

    self.NewRadar(None, True)
    self.show()
Initialize the class to create the interface.

Parameters
----------
[Optional]
Vradar : :py:class:`~artview.core.core.Variable` instance
    Radar signal variable. A value of None will instantiate an empty variable.
Vpoints : :py:class:`~artview.core.core.Variable` instance
    Points signal variable. A value of None will instantiate an empty variable.
name : string
    Field Radiobutton window name.
parent : PyQt instance
    Parent instance to associate to this class.
    If None, then Qt owns, otherwise associated with parent PyQt instance.
https://github.com/nguy/artview/blob/9b522f61054b51979b24150f7f668a05741e92dd/artview/components/manual_unfold.py#L40-L123
import code
import pyart

import sys
import os
import numpy as np
import collections

path = os.path.dirname(sys.modules[__name__].__file__)
path = os.path.join(path, '...')
sys.path.insert(0, path)

import artview

from ..core import (Component, Variable, common, QtCore, QtGui, QtWidgets,
                    componentsList)


class ManualUnfold(Component):

    Vradar = None
    Vpoints = None

    @classmethod
    def guiStart(self, parent=None):
        kwargs, independent = common._SimplePluginStart("ManualUnfold").startDisplay()
        kwargs['parent'] = parent
        return self(**kwargs), independent
BSD 3-Clause New or Revised License
onshape-public/onshape-clients
python/onshape_client/oas/models/bt_geometry_filter130_all_of.py
BTGeometryFilter130AllOf.__init__
python
def __init__(
    self,
    _check_type=True,
    _from_server=False,
    _path_to_item=(),
    _configuration=None,
    **kwargs
):
    self._data_store = {}
    self._check_type = _check_type
    self._from_server = _from_server
    self._path_to_item = _path_to_item
    self._configuration = _configuration

    for var_name, var_value in six.iteritems(kwargs):
        if (
            var_name not in self.attribute_map
            and self._configuration is not None
            and self._configuration.discard_unknown_keys
            and self.additional_properties_type is None
        ):
            continue
        setattr(self, var_name, var_value)
bt_geometry_filter130_all_of.BTGeometryFilter130AllOf - a model defined in OpenAPI

Keyword Args:
    _check_type (bool): if True, values for parameters in openapi_types
        will be type checked and a TypeError will be raised if the wrong
        type is input. Defaults to True
    _path_to_item (tuple/list): This is a list of keys or values to drill
        down to the model in received_data when deserializing a response
    _from_server (bool): True if the data is from the server, False if
        the data is from the client (default)
    _configuration (Configuration): the instance to use when deserializing
        a file_type parameter. If passed, type conversion is attempted.
        If omitted no type conversion is done.
    bt_type (str): [optional]  # noqa: E501
    geometry_type (str): [optional]  # noqa: E501
https://github.com/onshape-public/onshape-clients/blob/20843a00c628e516e7219e17a23ec4ef2bf9f16f/python/onshape_client/oas/models/bt_geometry_filter130_all_of.py#L121-L164
from __future__ import absolute_import import re import sys import six import nulltype from onshape_client.oas.model_utils import ( ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) class BTGeometryFilter130AllOf(ModelNormal): allowed_values = { ("geometry_type",): { "LINE": "LINE", "CIRCLE": "CIRCLE", "ARC": "ARC", "PLANE": "PLANE", "CYLINDER": "CYLINDER", "CONE": "CONE", "SPHERE": "SPHERE", "TORUS": "TORUS", "SPLINE": "SPLINE", "ELLIPSE": "ELLIPSE", "MESH": "MESH", "CONIC": "CONIC", "REVOLVED": "REVOLVED", "EXTRUDED": "EXTRUDED", "UNKNOWN": "UNKNOWN", }, } validations = {} additional_properties_type = None @staticmethod def openapi_types(): return { "bt_type": (str,), "geometry_type": (str,), } @staticmethod def discriminator(): return None attribute_map = { "bt_type": "btType", "geometry_type": "geometryType", } @staticmethod def _composed_schemas(): return None required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", ] )
MIT License
tmarenko/mff_auto
lib/game/missions/danger_room.py
DangerRoom.open_danger_room
python
def open_danger_room(self):
    self.game.select_mode(self.mode_name)
    self._close_rewards_notifications()
    return wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.DANGER_ROOM_LABEL)
Opens Danger Room missions lobby.
https://github.com/tmarenko/mff_auto/blob/e5d150c4a76d13f05652bbde811c4c5cd0f2246e/lib/game/missions/danger_room.py#L67-L71
import re import lib.logger as logging from lib.functions import wait_until, r_sleep from lib.game import ui from lib.game.battle_bot import ManualBattleBot from lib.game.missions.missions import Missions logger = logging.get_logger(__name__) character_popularity_regexp = re.compile(r"([0-9][0-9]?\.?[0-9]?[0-9])? ?%?") class DangerRoom(Missions): class MODE: NORMAL = "NORMAL" EXTREME = "EXTREME" def __init__(self, game): super().__init__(game, mode_name='DANGER ROOM') @property def battle_over_conditions(self): def maintain_team(): return self.emulator.is_ui_element_on_screen(ui.DANGER_ROOM_MAINTAIN_CURRENT_TEAM) return [maintain_team] @property def disconnect_conditions(self): def game_canceled(): if self.emulator.is_ui_element_on_screen(ui.DANGER_ROOM_GAME_CANCELED): self.emulator.click_button(ui.DANGER_ROOM_GAME_CANCELED) return True return super().disconnect_conditions + [game_canceled] def _close_rewards_notifications(self, timeout=3): def close_notification(ui_element): if self.emulator.is_ui_element_on_screen(ui_element): self.emulator.click_button(ui_element) return True return False def close_notifications(): return close_notification(ui_element=ui.DANGER_ROOM_WEEKLY_RESULTS_CLOSE) or close_notification(ui_element=ui.DANGER_ROOM_WEEKLY_REWARDS_CLOSE) for _ in range(timeout): notifications_closed = wait_until(close_notifications, timeout=1) logger.debug(f"Danger Room: rewards notifications was closed: {notifications_closed}")
Apache License 2.0
lachhebo/pyclustertend
pyclustertend/metric.py
assess_tendency_by_metric
python
def assess_tendency_by_metric(
    dataset, metric="silhouette", n_cluster: int = 10, random_state: int = None
):
    result_kmeans = np.array([])

    for k_cluster in range(2, n_cluster + 1):
        labels = KMeans(n_clusters=k_cluster, random_state=random_state).fit_predict(
            dataset
        )

        if metric == "silhouette":
            result_kmeans = np.append(result_kmeans, silhouette_score(dataset, labels))
        elif metric == "calinski_harabasz":
            result_kmeans = np.append(
                result_kmeans, calinski_harabasz_score(dataset, labels)
            )
        elif metric == "davies_bouldin":
            result_kmeans = np.append(
                result_kmeans, davies_bouldin_score(dataset, labels)
            )

    if metric == "davies_bouldin":
        return np.argmin(result_kmeans) + 2, result_kmeans
    else:
        return np.argmax(result_kmeans) + 2, result_kmeans
Assess the clusterability of a dataset using the KMeans algorithm and a metric
score; the best cluster number is the number that scored best on the silhouette score.

Parameters
----------
dataset : numpy array, DataFrame
    The input dataset
metric : string
    The method to assess cluster quality ('silhouette', 'calinski_harabasz',
    'davies_bouldin'), defaults to 'silhouette'
n_cluster : int
    The maximum number of clusters to consider
random_state : int (default to None)

Returns
-------
(n_clusters, value) : n_clusters is the number of clusters that scored best
    on the silhouette score with KMeans. As for value, it is the silhouette
    score for each number of clusters on KMeans.

Examples
--------
>>> from sklearn import datasets
>>> from pyclustertend import assess_tendency_by_metric
>>> from sklearn.preprocessing import scale
>>> X = scale(datasets.load_boston().data)
>>> assess_tendency_by_metric(X, n_cluster=10)
(2, array([0.36011769, 0.25740335, 0.28098046, 0.28781574, 0.26746932,
       0.26975514, 0.27155699, 0.28883395, 0.29028124]))
https://github.com/lachhebo/pyclustertend/blob/11687499c470552b1d1b3cfcc712463678cae407/pyclustertend/metric.py#L13-L66
from typing import Union

import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import (
    silhouette_score,
    calinski_harabasz_score,
    davies_bouldin_score,
)
BSD 3-Clause New or Revised License
bloomreach/briefly
src/briefly/core.py
ExecutorService.build_dependency_graph
python
def build_dependency_graph(self, node):
    if node.executed or node in self.dag:
        return
    self.dag.add_node(node, self.order)
    self.order += 1
    for dep in node.deps:
        self.build_dependency_graph(dep)
        if not dep.executed:
            self.dag.add_edge(dep, node)
Create dependency map recursively.
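A self-contained sketch of the same recursive pattern using plain dicts instead of the briefly DAG class; all names here are illustrative.

def build(node, deps, dag, visited):
    """Recursively add `node` and its transitive deps to `dag` (an edge dict)."""
    if node in visited:
        return
    visited.append(node)
    for dep in deps.get(node, []):
        build(dep, deps, dag, visited)
        dag.setdefault(dep, set()).add(node)  # edge: dep must run before node

deps = {"report": ["transform"], "transform": ["extract"], "extract": []}
dag, visited = {}, []
build("report", deps, dag, visited)
print(visited)  # ['report', 'transform', 'extract']
print(dag)      # {'extract': {'transform'}, 'transform': {'report'}}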
https://github.com/bloomreach/briefly/blob/78e9b6682ce936b77e4ff3fef0344beabe4b582a/src/briefly/core.py#L119-L130
import sys import time import traceback import threading import optparse import Queue from properties import * from process import * from defaults import * from coreutils import * import dag BRIEFLY_VERSION = '1.0' class NodeExecutor(threading.Thread): def __init__(self, service, task_done_callback=None): super(NodeExecutor, self).__init__() self.service = service self.daemon = True self.task_done = task_done_callback self.start() def run(self): while True: try: node = self.service.get_next_job() if node is None: break self.execute_node(node) finally: if node is not None: self.service.complete_execute(node) def execute_node(self, node): node.reset_log() if node.prop.test_run: node.test_execute() return if any([dep.exe_error is not None for dep in node.deps]): log(' - %s : skipped (depencency error)', node.hash()) node.exe_error = Exception("Dependency error") return exec_exp = None for i in range(node.prop.num_retry): log(' - %s : executing', node.hash()) if i > 0: node.log('Try again #%d...', i) log(' - %s : try again #%d', node.hash(), i) try: node.check_execute() log(' - %s : done', node.hash()) break except Exception, e: log(' - %s : exception: %s', node.hash(), str(e)) if i == node.prop.num_retry - 1: log(' - %s : %s', node.hash(), traceback.format_exc()) exec_exp = e if self.task_done: self.task_done(node, exec_exp) class ExecutorService(object): def __init__(self, objs, task_done_callback=None): self.number_of_threads = objs.prop.run_threads self.dag = dag.DependencyGraph() self.executor_factory = NodeExecutor self.lock = threading.Lock() self.pending = Queue.PriorityQueue() self.task_done_callback = task_done_callback self.order = 1 def get_next_job(self): order, node = self.pending.get() return node
Apache License 2.0
popsim-consortium/demes-python
demes/hypothesis_strategies.py
pulses_lists
python
def pulses_lists(draw, graph, max_pulses=10):
    n_pulses = draw(st.integers(min_value=0, max_value=max_pulses))
    pulses = []
    ingress_proportions = collections.defaultdict(lambda: 0)
    for j, deme_j in enumerate(graph.demes[:-1]):
        for deme_k in graph.demes[j + 1 :]:
            time_lo = max(deme_j.end_time, deme_k.end_time)
            time_hi = min(deme_j.start_time, deme_k.start_time)
            if time_hi <= time_lo + FLOAT_EPS:
                continue
            n = draw(st.integers(min_value=0, max_value=n_pulses))
            for _ in range(n):
                source, dest = deme_j.name, deme_k.name
                if draw(st.booleans()):
                    source, dest = dest, source
                time = draw(
                    st.floats(
                        min_value=time_lo,
                        max_value=time_hi,
                        exclude_min=True,
                        exclude_max=True,
                        width=32,
                    )
                )
                max_proportion = 1 - ingress_proportions[(dest, time)]
                if math.isclose(max_proportion, 0):
                    continue
                proportion = draw(
                    st.floats(
                        min_value=0,
                        max_value=prec32(max_proportion),
                        exclude_min=True,
                        exclude_max=True,
                        width=32,
                    )
                )
                ingress_proportions[(dest, time)] += proportion
                pulse = dict(
                    sources=[source],
                    dest=dest,
                    time=time,
                    proportions=[proportion],
                )
                pulses.append(pulse)
                n_pulses -= 1
                if n_pulses == 0:
                    break
            if n_pulses == 0:
                break
    return pulses
A hypothesis strategy for creating a pulses list for a graph.
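As a hedged, generic illustration of how an @st.composite strategy like this is written and consumed (the bounded_proportions strategy below is invented for the example and is far simpler than pulses_lists):

import hypothesis.strategies as st
from hypothesis import given

@st.composite
def bounded_proportions(draw, n=3):
    # draw up to n proportions whose running total never exceeds 1.0,
    # mirroring how pulses_lists caps ingress proportions per (dest, time)
    remaining, out = 1.0, []
    for _ in range(n):
        p = draw(st.floats(min_value=0.0, max_value=remaining))
        out.append(p)
        remaining -= p
    return out

@given(bounded_proportions())
def test_total_bounded(props):
    assert sum(props) <= 1.0 + 1e-9

test_total_bounded()   # Hypothesis runs many generated examples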
https://github.com/popsim-consortium/demes-python/blob/29d7ae04a0ffb9de7bd99ff05d73d9897ee11daa/demes/hypothesis_strategies.py#L272-L329
import math import itertools import collections import struct import hypothesis as hyp import hypothesis.strategies as st import demes __all__ = ["graphs"] def __dir__(): return sorted(__all__) def prec32(x): return struct.unpack("f", struct.pack("f", x))[0] FLOAT_MAX = prec32(1e30) FLOAT_EPS = prec32(1e-6) @st.composite def deme_names(draw, max_length=20): name = draw(st.text(min_size=1, max_size=max_length)) hyp.assume(name.isidentifier()) return name @st.composite def yaml_strings(draw, min_size=1, max_size=100): return draw( st.text( alphabet=st.characters( blacklist_categories=( "Cc", "Cs", ), blacklist_characters=("\ufffe", "\uffff"), whitelist_characters=("\x09", "\x0a", "\x0d", "\x85"), ), min_size=min_size, max_size=max_size, ) ) @st.composite def epochs_lists( draw, start_time=math.inf, max_epochs=5, min_deme_size=FLOAT_EPS, max_deme_size=FLOAT_MAX, size_functions=None, ): if size_functions is None: size_functions = ["constant", "exponential", "linear"] assert max_epochs >= 2 times = draw( st.lists( st.floats( min_value=0, max_value=min(FLOAT_MAX, start_time), exclude_max=True, width=32, ), unique=True, min_size=1, max_size=max_epochs, ) ) times.sort(reverse=True) epochs = [] for i, end_time in enumerate(times): start_size = draw(st.floats(min_value=min_deme_size, max_value=max_deme_size)) if i == 0 and math.isinf(start_time): end_size = start_size size_function = "constant" else: size_function = draw(st.sampled_from(size_functions)) if size_function == "constant": end_size = start_size else: end_size = draw( st.floats(min_value=min_deme_size, max_value=max_deme_size) ) if end_size == start_size: size_function = "constant" cloning_rate = draw(st.floats(min_value=0, max_value=1)) selfing_rate = draw(st.floats(min_value=0, max_value=prec32(1 - cloning_rate))) epochs.append( dict( end_time=end_time, start_size=start_size, end_size=end_size, size_function=size_function, cloning_rate=cloning_rate, selfing_rate=selfing_rate, ) ) return epochs @st.composite def migration_matrices( draw, graph, max_migrations=10, max_additional_migration_intervals=5 ): n = len(graph.demes) assert n > 0 uniq_deme_times = set(deme.start_time for deme in graph.demes) uniq_deme_times.update(deme.end_time for deme in graph.demes) start_time, *end_times = sorted(uniq_deme_times, reverse=True) for end_time in end_times: if sum(1 for deme in graph.demes if deme.start_time <= start_time) > 1: break start_time = end_time if start_time == end_times[-1]: return [[[0] * n for _ in range(n)]], math.inf, [0] saved_start_time = start_time additional_times = draw( st.lists( st.floats( min_value=end_times[-1], max_value=start_time, exclude_max=True, width=32, ), unique=True, min_size=0, max_size=max_additional_migration_intervals, ) ) end_times = sorted(set(end_times + additional_times), reverse=True) mm_list = [[[0] * n for _ in range(n)] for _ in range(len(end_times))] n_migrations = draw(st.integers(min_value=0, max_value=max_migrations)) for migration_matrix, end_time in zip(mm_list, end_times): deme_indices = [ j for j, deme in enumerate(graph.demes) if ( deme.start_time >= start_time > deme.end_time and deme.start_time > end_time >= deme.end_time ) ] if len(deme_indices) < 2: continue pairs = list(itertools.permutations(deme_indices, 2)) pair_indices = draw( st.lists( st.integers(min_value=0, max_value=len(pairs) - 1), unique=True, min_size=0, max_size=min(len(pairs), n_migrations), ) ) for k in pair_indices: a, b = pairs[k] assert migration_matrix[a][b] == 0 max_rate = 1 - sum(migration_matrix[a]) if 
math.isclose(max_rate, 0): continue n_migrations -= 1 rate = draw( st.floats(min_value=0, max_value=prec32(max_rate), exclude_min=True) ) migration_matrix[a][b] = rate if n_migrations == 0: break start_time = end_time return mm_list, saved_start_time, end_times @st.composite def migrations_lists(draw, graph, max_migrations=10): mm_list, start_time, end_times = draw( migration_matrices(graph, max_migrations=max_migrations) ) assert len(mm_list) == len(end_times) migrations = [] for migration_matrix, end_time in zip(mm_list, end_times): for j, row in enumerate(migration_matrix): for k, rate in enumerate(row): if rate > 0: migration = demes.AsymmetricMigration( source=graph.demes[k].name, dest=graph.demes[j].name, start_time=start_time, end_time=end_time, rate=rate, ) migrations.append(migration) start_time = end_time return migrations @st.composite
ISC License
verejnedigital/verejne.digital
data/prod_generation/test.py
EntityResolutionTestHandlers.setUp
python
def setUp(self):
    self.surnames = entity_tools.get_surnames()
    self.titles_parser = entity_tools.get_academic_titles_parser()
Loads surnames and academic titles.
https://github.com/verejnedigital/verejne.digital/blob/848e64c2baf7d741decba8ef8b8f6df0a531ec8a/data/prod_generation/test.py#L23-L26
import unittest

import db.db as db_lib
import prod_generation.entity_tools as entity_tools
import prod_generation.post_process_neighbours as post_process_neighbours
import prod_generation.post_process_income_graph as post_process_income_graph


class EntityResolutionTestHandlers(unittest.TestCase):
Apache License 2.0
lisc-tools/lisc
lisc/requester/requester.py
Requester.request_url
python
def request_url(self, url):
    if not self.is_active:
        raise ValueError('Requester object is not active.')

    self.throttle()
    self._log_url(url)

    out = requests.get(url)
    self.time_last_req = time.time()
    self.n_requests += 1

    return out
Request a URL.

Parameters
----------
url : str
    Web address to request.

Returns
-------
out : requests.models.Response
    Object containing the requested web page.

Examples
--------
Use a ``Requester`` object to request the LISC Github repository url:

>>> requester = Requester()
>>> response = requester.request_url('https://github.com/lisc-tools/lisc')
https://github.com/lisc-tools/lisc/blob/2fd81d837468c8e2cbfc6b7666c827d2b1932cd3/lisc/requester/requester.py#L141-L177
import os import time from copy import deepcopy import requests from lisc.utils.db import check_directory from lisc.utils.io import check_ext class Requester(): def __init__(self, wait_time=0., logging=None, directory=None): self.is_active = bool() self.n_requests = int() self.wait_time = float() self.start_time = str() self.end_time = str() self.time_last_req = float() self.set_wait_time(wait_time) self.open() self.logging, self.log = self._set_up_logging(logging, directory) def __repr__(self): return str(self.__dict__) def as_dict(self): req_dict = deepcopy(self.__dict__) req_dict.pop('time_last_req') return req_dict def set_wait_time(self, wait_time): self.wait_time = wait_time def check(self): print('Requester object is active: \t', str(self.is_active)) print('Number of requests sent: \t', str(self.n_requests)) print('Requester opened: \t\t', str(self.start_time)) print('Requester closed: \t\t', str(self.end_time)) def throttle(self): time_since_req = time.time() - self.time_last_req if time_since_req < self.wait_time: self.wait(self.wait_time - time_since_req) @staticmethod def wait(wait_time): time.sleep(wait_time)
Apache License 2.0
stefan-korner/spacepylibrary
SCOS/MIB.py
PCFrecord.__init__
python
def __init__(self, fields):
    self.pcfName = fields[0]
    self.pcfDescr = fields[1]
    self.pcfPtc = int(fields[4])
    self.pcfPfc = int(fields[5])
    self.pcfParVal = fields[15]
initialise selected attributes from the record
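A hypothetical example of the field layout this constructor expects (the values below are made up; only indices 0, 1, 4, 5 and 15 follow the code above):

# a pcf.dat-style record with 16 columns; unused columns left empty
fields = ["TEMP01", "Battery temperature", "", "", "3", "14"] + [""] * 9 + ["25"]
name, descr = fields[0], fields[1]
ptc, pfc = int(fields[4]), int(fields[5])
par_val = fields[15]
print(name, descr, ptc, pfc, par_val)   # TEMP01 Battery temperature 3 14 25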
https://github.com/stefan-korner/spacepylibrary/blob/6a9f0827005c03cbc59557def78bbc035a97bbea/SCOS/MIB.py#L104-L110
import SCOS.ENV class PIDrecord: def __init__(self, fields): self.pidType = int(fields[0]) self.pidSType = int(fields[1]) self.pidAPID = int(fields[2]) self.pidPI1 = int(fields[3]) self.pidPI2 = int(fields[4]) self.pidSPID = int(fields[5]) self.pidDescr = fields[6] self.pidTPSD = int(fields[8]) self.pidDFHsize = int(fields[9]) self.pidCheck = bool(int((fields[13]+"0")[0])) def key(self): return self.pidSPID def picKey(self): return str([self.pidType, self.pidSType, self.pidAPID]) def picAlternateKey(self): return str([self.pidType, self.pidSType, -1]) class PICrecord: def __init__(self, fields): self.picType = int(fields[0]) self.picSType = int(fields[1]) self.picPI1off = int(fields[2]) self.picPI1wid = int(fields[3]) self.picPI2off = int(fields[4]) self.picPI2wid = int(fields[5]) if len(fields) >= 7: try: self.picAPID = int(fields[6]) except: self.picAPID = -1 else: self.picAPID = -1 def key(self): return str([self.picType, self.picSType, self.picAPID]) class TPCFrecord: def __init__(self, fields): self.tpcfSPID = int(fields[0]) self.tpcfName = fields[1] if len(fields) >= 3: try: self.tpcfSize = int(fields[2]) except: self.tpcfSize = 0 else: self.tpcfSize = 0 def key(self): return self.tpcfSPID class PCFrecord:
MIT License
pr-omethe-us/pyteck
pyteck/parse_files_XML.py
get_experiment_kind
python
def get_experiment_kind(root):
    if root.find('experimentType').text != 'Ignition delay measurement':
        raise KeywordError('experimentType not ignition delay measurement')
    try:
        kind = root.find('apparatus/kind').text
        if kind == 'shock tube':
            return 'ST'
        elif kind == 'rapid compression machine':
            return 'RCM'
        else:
            raise NotImplementedError(kind + ' experiment not supported')
    except:
        raise MissingElementError('apparatus/kind')
Read common properties from root of ReSpecTh XML file.

Parameters
----------
root : ``etree.Element``
    root of ReSpecTh XML file

Returns
-------
kind : str
    Type of experiment ('ST' or 'RCM')
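A small sketch of the kind of ReSpecTh fragment this function parses, using the standard-library ElementTree (the XML content itself is invented; element names follow the code):

import xml.etree.ElementTree as etree

xml_text = """<experiment>
  <experimentType>Ignition delay measurement</experimentType>
  <apparatus><kind>shock tube</kind></apparatus>
</experiment>"""
root = etree.fromstring(xml_text)
print(root.find('apparatus/kind').text)   # 'shock tube' -> get_experiment_kind returns 'ST'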
https://github.com/pr-omethe-us/pyteck/blob/bba7984deb39e0beec0144e061c539d2cb33e0b2/pyteck/parse_files_XML.py#L92-L117
from __future__ import print_function from __future__ import division import os from argparse import ArgumentParser import numpy try: import yaml except ImportError: print('Warning: YAML must be installed to read input file.') try: from lxml import etree except ImportError: try: import xml.etree.cElementTree as etree except ImportError: try: import xml.etree.ElementTree as etree except ImportError: print("Failed to import ElementTree from any known place") raise from .utils import units, SPEC_KEY, SPEC_KEY_REV, get_temp_unit from .exceptions import (KeywordError, UndefinedElementError, MissingElementError, MissingAttributeError, UndefinedKeywordError ) from .simulation import Simulation from . import validation def get_file_metadata(root): properties = {} properties['file-authors'] = [{'name': '', 'ORCID': ''}] try: properties['file-authors'][0]['name'] = root.find('fileAuthor').text except AttributeError: print('Warning: no fileAuthor given') properties['file-version'] = '(1, 0)' elem = root.find('fileVersion') if elem is None: print('Warning: no fileVersion element') try: version = (int(elem.find('major').text), int(elem.find('minor').text) ) except AttributeError: print('Warning: missing fileVersion major/minor') properties['file-version'] = str(version) properties['reference'] = {} elem = root.find('bibliographyLink') try: properties['reference']['citation'] = elem.attrib['preferredKey'] except KeyError: print('Warning: missing preferredKey attribute in bibliographyLink') try: properties['reference']['doi'] = elem.attrib['doi'] except KeyError: print('Warning: missing doi attribute in bibliographyLink') return properties
MIT License
aerospike/aerospike-admin
lib/health/parser.py
HealthParser.p_assert_desc_string
python
def p_assert_desc_string(self, p):
    p[0] = p[1]
assert_desc_string : NUMBER
                   | STRING
https://github.com/aerospike/aerospike-admin/blob/dde7b15e798bf199b1c59279f65e5f963d4e3789/lib/health/parser.py#L690-L695
import copy import re from .exceptions import SyntaxException from . import commands from . import constants from . import operation from . import util try: from ply import lex, yacc except Exception: pass HealthVars = {} class HealthLexer: SNAPSHOT_KEY_PATTERN = r"SNAPSHOT(\d+)$" assert_levels = { "CRITICAL": constants.AssertLevel.CRITICAL, "WARNING": constants.AssertLevel.WARNING, "INFO": constants.AssertLevel.INFO, } components = { "ALL": "ALL", "ASD_PROCESS": "ASD_PROCESS", "AVG-CPU": "AVG-CPU", "BIN": "BIN", "BUFFERS/CACHE": "BUFFERS/CACHE", "CONFIG": "CONFIG", "CPU_UTILIZATION": "CPU_UTILIZATION", "DEVICE_INTERRUPTS": "DEVICE_INTERRUPTS", "DEVICE_STAT": "DEVICE_STAT", "DF": "DF", "DMESG": "DMESG", "ENDPOINTS": "ENDPOINTS", "ENVIRONMENT": "ENVIRONMENT", "FREE": "FREE", "HDPARM": "HDPARM", "HEALTH": "HEALTH", "INTERRUPTS": "INTERRUPTS", "IOSTAT": "IOSTAT", "IPTABLES": "IPTABLES", "LIMITS": "LIMITS", "LSB": "LSB", "LSCPU": "LSCPU", "MEM": "MEM", "MEMINFO": "MEMINFO", "METADATA": "METADATA", "NETWORK": "NETWORK", "ORIGINAL_CONFIG": "ORIGINAL_CONFIG", "RAM": "RAM", "ROLES": "ROLES", "ROSTER": "ROSTER", "SYSTEM": "SYSTEM", "SECURITY": "SECURITY", "SERVICE": "SERVICE", "SERVICES": "SERVICES", "SCHEDULER": "SCHEDULER", "STATISTICS": "STATISTICS", "SWAP": "SWAP", "SYSCTLALL": "SYSCTLALL", "TASKS": "TASKS", "TOP": "TOP", "UDF": "UDF", "UPTIME": "UPTIME", "USERS": "USERS", "XDR": "XDR", "XDR_PROCESS": "XDR_PROCESS", } group_ids = { "BUCKET_END": "BUCKET_END", "BUCKET_START": "BUCKET_START", "CLUSTER": "CLUSTER", "DEVICE": "DEVICE", "FILENAME": "FILENAME", "FILE_SYSTEM": "FILE_SYSTEM", "INTERRUPT_DEVICE": "INTERRUPT_DEVICE", "INTERRUPT_ID": "INTERRUPT_ID", "INTERRUPT_TYPE": "INTERRUPT_TYPE", "KEY": "KEY", "NODE": "NODE", "OUTLIER": "OUTLIER", "SNAPSHOT": "SNAPSHOT", } component_and_group_id = { "DC": "DC", "HISTOGRAM": "HISTOGRAM", "NAMESPACE": "NAMESPACE", "RACKS": "RACKS", "SET": "SET", "SINDEX": "SINDEX", } agg_ops = { "AND": "AND", "AVG": "AVG", "COUNT": "COUNT", "COUNT_ALL": "COUNT_ALL", "EQUAL": "EQUAL", "MAX": "MAX", "MIN": "MIN", "OR": "OR", "FIRST": "FIRST", "SUM": "SUM", "VALUE_UNIFORM": "VALUE_UNIFORM", } complex_ops = {"DIFF": "DIFF", "SD_ANOMALY": "SD_ANOMALY", "NO_MATCH": "NO_MATCH"} apply_ops = {"APPLY_TO_ANY": "APPLY_TO_ANY", "APPLY_TO_ALL": "APPLY_TO_ALL"} simple_ops = {"SPLIT": "SPLIT", "UNIQUE": "UNIQUE"} complex_params = { "MAJORITY": constants.MAJORITY, } assert_ops = {"ASSERT": "ASSERT"} bool_vals = {"true": True, "false": False} reserved = { "as": "AS", "by": "BY", "common": "COMMON", "do": "DO", "from": "FROM", "group": "GROUP", "ignore": "IGNORE", "like": "LIKE", "on": "ON", "save": "SAVE", "select": "SELECT", } tokens = [ "NUMBER", "FLOAT", "BOOL_VAL", "VAR", "NEW_VAR", "COMPONENT", "GROUP_ID", "COMPONENT_AND_GROUP_ID", "AGG_OP", "COMPLEX_OP", "APPLY_OP", "SIMPLE_OP", "COMPLEX_PARAM", "ASSERT_OP", "ASSERT_LEVEL", "STRING", "COMMA", "DOT", "IN", "PLUS", "MINUS", "TIMES", "DIVIDE", "BINARY_AND", "BINARY_OR", "LPAREN", "RPAREN", "GT", "GE", "LT", "LE", "EQ", "NE", "ASSIGN", "PCT", ] + list(reserved.values()) def t_FLOAT(self, t): t.value = float(t.value) return t def t_NUMBER(self, t): t.value = int(t.value) return t def t_VAR(self, t): t.type = HealthLexer.reserved.get(t.value.lower(), "NEW_VAR") if not t.type == "NEW_VAR": return t elif t.value.lower() in HealthLexer.bool_vals.keys(): t.type = "BOOL_VAL" t.value = HealthLexer.bool_vals.get(t.value.lower()) elif re.match(HealthLexer.SNAPSHOT_KEY_PATTERN, t.value): t.value = util.create_snapshot_key( 
int(re.search(HealthLexer.SNAPSHOT_KEY_PATTERN, t.value).group(1)) ) t.type = "COMPONENT" elif t.value in HealthLexer.components.keys(): t.type = "COMPONENT" elif t.value in HealthLexer.group_ids.keys(): t.type = "GROUP_ID" elif t.value in HealthLexer.component_and_group_id: t.type = "COMPONENT_AND_GROUP_ID" elif t.value in HealthLexer.agg_ops.keys(): t.type = "AGG_OP" elif t.value in HealthLexer.complex_ops.keys(): t.type = "COMPLEX_OP" elif t.value in HealthLexer.apply_ops.keys(): t.type = "APPLY_OP" elif t.value in HealthLexer.simple_ops.keys(): t.type = "SIMPLE_OP" elif t.value == "IN": t.type = "IN" elif t.value in HealthLexer.complex_params.keys(): t.value = HealthLexer.complex_params[t.value] t.type = "COMPLEX_PARAM" elif t.value in HealthLexer.assert_ops.keys(): t.type = "ASSERT_OP" elif t.value in HealthLexer.assert_levels.keys(): t.value = HealthLexer.assert_levels[t.value] t.type = "ASSERT_LEVEL" elif t.value in HealthVars: t.type = "VAR" t.value = ( constants.HEALTH_PARSER_VAR, t.value, copy.deepcopy(HealthVars[t.value]), ) return t def t_STRING(self, t): if len(t.value) < 3: t.value = None else: t.value = t.value[1 : len(t.value) - 1] return t def t_newline(self, t): t.lexer.lineno += len(t.value) t_ignore = " \t" t_COMMA = r"\," t_DOT = r"\." t_PLUS = r"\+" t_MINUS = r"-" t_PCT = r"%%" t_TIMES = r"\*" t_DIVIDE = r"/" t_BINARY_OR = r"\|\|" t_BINARY_AND = r"&&" t_LPAREN = r"\(" t_RPAREN = r"\)" t_GT = r">" t_GE = r">=" t_LT = r"<" t_LE = r"<=" t_EQ = r"==" t_NE = r"!=" t_ASSIGN = r"=" def t_error(self, t): raise TypeError("Unknown text '%s'" % (t.value,)) def build(self, **kwargs): self.lexer = lex.lex(module=self, **kwargs) return self.lexer class HealthParser: tokens = HealthLexer.tokens health_input_data = {} precedence = ( ("left", "ASSIGN"), ("left", "BINARY_OR"), ("left", "BINARY_AND"), ("left", "EQ", "NE", "LT", "GT", "LE", "GE"), ("left", "PLUS", "MINUS"), ("left", "TIMES", "DIVIDE"), ("left", "PCT"), ) def p_statement(self, p): if len(p) > 2 and p[2] is not None: if isinstance(p[2], Exception): val = None elif util.is_health_parser_variable(p[2]): val = p[2][2] else: val = p[2] if util.is_health_parser_variable(p[1]): HealthVars[p[1][1]] = val else: HealthVars[p[1]] = val p[0] = val if isinstance(p[2], Exception): raise p[2] else: p[0] = p[1] def p_binary_operation(self, p): p[0] = (p[2], p[1], p[3], None, None, p[4]) def p_opt_on_clause(self, p): if len(p) == 1: p[0] = False else: p[0] = True def p_agg_operation(self, p): p[0] = (p[1], p[3], None, None, None, False) def p_complex_operation(self, p): p[0] = (p[1], p[3], None, p[5], p[7], False) def p_apply_operation(self, p): p[0] = (p[1], p[3], p[7], p[5], None, False) def p_simple_operation(self, p): p[0] = (p[1], p[3], p[4], None, None, False) def p_opt_simple_operation_param(self, p): if len(p) == 1: p[0] = None else: p[0] = util.create_health_internal_tuple(p[2], []) def p_apply_comparison_op(self, p): p[0] = p[1] def p_complex_comparison_operand(self, p): if util.is_health_parser_variable(p[1]): p[0] = p[1][2] elif not isinstance(p[1], tuple): p[0] = util.create_health_internal_tuple(p[1], []) else: p[0] = p[1] def p_operand(self, p): if util.is_health_parser_variable(p[1]): p[0] = p[1][2] else: p[0] = util.create_health_internal_tuple(p[1], []) def p_value(self, p): p[0] = p[1] def p_number(self, p): if len(p) == 2: p[0] = p[1] elif p[1] == "-": p[0] = p[2] * -1 else: p[0] = p[2] def p_op(self, p): p[0] = p[1] def p_comparison_op(self, p): p[0] = p[1] def p_group_by_clause(self, p): p[0] = p[3] def 
p_group_by_ids(self, p): if len(p) > 2: p[1].append(p[3]) p[0] = p[1] else: p[0] = [p[1]] def p_group_by_id(self, p): p[0] = p[1] def p_opt_group_by_clause(self, p): if len(p) == 1: p[0] = None else: p[0] = p[1] def p_group_by_statement(self, p): try: p[0] = operation.do_multiple_group_by(p[2][2], p[1]) except Exception as e: p[0] = e def p_opt_assign_statement(self, p): if len(p) > 1: p[0] = p[1] else: p[0] = None def p_assign_statement(self, p): p[0] = p[2] def p_cmd_statement(self, p): p[0] = p[1] def p_op_statement(self, p): try: p[0] = commands.do_operation( op=p[3][0], arg1=p[3][1], arg2=p[3][2], group_by=p[1], result_comp_op=p[3][3], result_comp_val=p[3][4], on_common_only=p[3][5], save_param=p[4], ) except Exception as e: p[0] = e def p_opt_save_clause(self, p): if len(p) == 3: if p[2] is None: p[0] = "" else: p[0] = p[2] else: p[0] = None def p_assert_statement(self, p): if len(p) < 14: p[0] = commands.do_assert( op=p[1], data=p[3], check_val=p[5], error=p[7], category=p[9], level=p[11], ) elif len(p) < 16: p[0] = commands.do_assert( op=p[1], data=p[3], check_val=p[5], error=p[7], category=p[9], level=p[11], description=p[13], ) elif len(p) < 18: p[0] = commands.do_assert( op=p[1], data=p[3], check_val=p[5], error=p[7], category=p[9], level=p[11], description=p[13], success_msg=p[15], ) else: skip_assert, assert_filter_arg = p[17] if skip_assert: p[0] = None else: if assert_filter_arg is not None: data = commands.do_operation(op="==", arg1=p[3], arg2=p[5]) try: new_data = commands.do_operation( op="||", arg1=data, arg2=assert_filter_arg, on_common_only=True, ) if new_data: data = new_data except Exception: pass p[0] = commands.do_assert( op=p[1], data=data, check_val=util.create_health_internal_tuple(True, []), error=p[7], category=p[9], level=p[11], description=p[13], success_msg=p[15], ) else: p[0] = commands.do_assert( op=p[1], data=p[3], check_val=p[5], error=p[7], category=p[9], level=p[11], description=p[13], success_msg=p[15], ) def p_assert_if_condition(self, p): skip_assert, assert_filter_arg = commands.do_assert_if_check( p[2][0], p[1], p[2][1] ) p[0] = (skip_assert, assert_filter_arg) def p_opt_assert_if_arg2(self, p): if len(p) > 1: p[0] = (p[1], p[2]) else: p[0] = (None, None) def p_assert_arg(self, p): p[0] = p[1] def p_assert_comparison_arg(self, p): p[0] = util.create_health_internal_tuple(p[1], []) def p_constant(self, p): p[0] = util.h_eval(p[1]) def p_assert_category(self, p): p[0] = p[1]
Apache License 2.0
qiskit/qiskit-aqua
qiskit/optimization/applications/ising/max_cut.py
get_operator
python
def get_operator(weight_matrix):
    num_nodes = weight_matrix.shape[0]
    pauli_list = []
    shift = 0
    for i in range(num_nodes):
        for j in range(i):
            if weight_matrix[i, j] != 0:
                x_p = np.zeros(num_nodes, dtype=bool)
                z_p = np.zeros(num_nodes, dtype=bool)
                z_p[i] = True
                z_p[j] = True
                pauli_list.append([0.5 * weight_matrix[i, j], Pauli((z_p, x_p))])
                shift -= 0.5 * weight_matrix[i, j]
    return WeightedPauliOperator(paulis=pauli_list), shift
Generate Hamiltonian for the max-cut problem of a graph.

Args:
    weight_matrix (numpy.ndarray) : adjacency matrix.

Returns:
    WeightedPauliOperator: operator for the Hamiltonian
    float: a constant shift for the obj function.
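For context, a minimal sketch of the classical objective this Hamiltonian encodes, on a made-up 3-node triangle graph (this only evaluates cut values with numpy; it does not touch the operator construction itself):

import numpy as np

w = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])

def cut_value(x, w):
    # x[i] in {0, 1} is the partition label of node i; the cut value
    # sums the weights of edges crossing the partition
    n = len(x)
    return sum(w[i, j] for i in range(n) for j in range(i) if x[i] != x[j])

print(cut_value([0, 1, 1], w))   # 2.0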
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/optimization/applications/ising/max_cut.py#L31-L54
import logging

import numpy as np

from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import WeightedPauliOperator

logger = logging.getLogger(__name__)
Apache License 2.0
alexpnt/default-credit-card-prediction
src/core/preprocessing.py
standardize
python
def standardize(X, axis=0):
    return preprocessing.scale(X, axis)
Scale data to zero mean and unit variance.

Keyword arguments:
X -- The feature vectors
axis -- Default is zero. If axis is 0, standardize each feature,
        otherwise standardize each input sample
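A quick check of the behaviour this wraps: preprocessing.scale with axis=0 gives each column zero mean and unit variance.

import numpy as np
from sklearn import preprocessing

X = np.array([[1., 10.], [2., 20.], [3., 30.]])
Xs = preprocessing.scale(X, axis=0)
print(Xs.mean(axis=0), Xs.std(axis=0))   # approximately [0. 0.] [1. 1.]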
https://github.com/alexpnt/default-credit-card-prediction/blob/e4ba7e191a25123e2be6eaed87389e5fa6c3204e/src/core/preprocessing.py#L10-L18
from sklearn import preprocessing
from unbalanced_dataset.under_sampling import UnderSampler, NearMiss, NeighbourhoodCleaningRule
from unbalanced_dataset.over_sampling import OverSampler, SMOTE

verbose = True
MIT License
karchinlab/2020plus
src/classify/python/vogelstein_classifier.py
VogelsteinClassifier.set_onco_threshold
python
def set_onco_threshold(self, threshold):
    if 0 < threshold < 1:
        self.onco_threshold = threshold
Setter for percentage threshold for recurrent missense mutations to call it an oncogene.
https://github.com/karchinlab/2020plus/blob/3a645e2dfedbb3857494e8e7f9cf30eb8f4e87cc/src/classify/python/vogelstein_classifier.py#L167-L171
from __future__ import division import numpy as np class VogelsteinClassifier(object): def __init__(self, onco_threshold=.2, tsg_threshold=.2, kind='vogelstein', min_count=0, tsg_min=7, onco_min=10, db_size=404863): if not 0 < onco_threshold < 1: raise ValueError("Oncogene threshold is invalid") if not 0 < tsg_threshold < 1: raise ValueError("TSG threshold is invalid") self.kind = kind self.db_size = db_size self.db_tsg_min = tsg_min self.db_onco_min = onco_min self.onco_threshold = onco_threshold self.tsg_threshold = tsg_threshold self.min_count = min_count self.tsg_min = tsg_min self.onco_min = onco_min self.onco_label = "oncogene" self.tsg_label = "tsg" self.other_label = "other" def _subsample_count(recur_ct, del_ct, total_ct, desired_ct): if total_ct <= desired_ct: return recur_ct, del_ct, total_ct else: prng = np.random.RandomState() ct_array = np.array([recur_ct, del_ct, total_ct - (recur_ct + del_ct)]) prob = ct_array.astype(float) / ct_array.sum() multinomial_sample = prng.multinomial(desired_ct, prob) return multinomial_sample def predict_list(self, input_list, kind='count', scale_type=None, subsample=None): all_cts = sum([x[-1] for x in input_list]) if scale_type: self.tsg_min = self.db_tsg_min * float(all_cts)/self.db_size self.onco_min = self.db_onco_min * float(all_cts)/self.db_size else: self.tsg_min = self.db_tsg_min self.onco_min = self.db_onco_min gene_class_list = [] if kind == 'count': for recur_ct, del_ct, total_ct in input_list: tmp_gene_class = self.predict_by_cts(recur_ct, del_ct, total_ct) gene_class_list.append(tmp_gene_class) else: for recur_pct, del_pct, total_cts in input_list: tmp_gene_class = self.predict_by_pct(recur_pct, del_pct, total_cts) gene_class_list.append(tmp_gene_class) return gene_class_list def predict_by_cts(self, recurrent, deleterious, total): if total < self.min_count: return self.other_label recur_perc = recurrent / float(total) del_perc = deleterious / float(total) gene_class = self.predict_by_pct(recur_perc, del_perc, total) return gene_class def predict_by_pct(self, recur_pct, del_pct, total): recur_ct = recur_pct * total del_ct = del_pct * total if self.kind == 'vogelstein': if recur_pct >= self.onco_threshold and recur_ct >= self.onco_min: if del_pct <= .05: return self.onco_label elif del_ct >= self.tsg_min: return self.tsg_label else: return self.other_label elif del_pct >= self.tsg_threshold and del_ct >= self.tsg_min: return self.tsg_label else: return self.other_label elif self.kind == 'min': if total < self.min_count: return self.other_label elif recur_pct >= self.onco_threshold: if recur_pct >= del_pct: return self.onco_label else: return self.tsg_label elif del_pct >= self.tsg_threshold: return self.tsg_label else: return self.other_label
Apache License 2.0
pypyr/pypyr
pypyr/pipelinerunner.py
run_pipeline
python
def run_pipeline(pipeline,
                 context,
                 pipeline_context_input=None,
                 parse_input=True,
                 groups=None,
                 success_group=None,
                 failure_group=None):
    logger.debug("starting")

    if not groups:
        groups = ['steps']

        if not success_group and not failure_group:
            success_group = 'on_success'
            failure_group = 'on_failure'

    steps_runner = StepsRunner(pipeline_definition=pipeline, context=context)

    try:
        if parse_input:
            logger.debug("executing context_parser")
            prepare_context(pipeline=pipeline,
                            context_in_args=pipeline_context_input,
                            context=context)
        else:
            logger.debug("skipping context_parser")
    except Exception:
        logger.error("Something went wrong. Will now try to run %s",
                     failure_group)

        try:
            steps_runner.run_failure_step_group(failure_group)
        except StopStepGroup:
            pass
        except Stop:
            raise

        logger.debug("Raising original exception to caller.")
        raise

    try:
        steps_runner.run_step_groups(groups=groups,
                                     success_group=success_group,
                                     failure_group=failure_group)
    except StopPipeline:
        logger.debug("StopPipeline: stopped %s", context.pipeline_name)
Run the specified pypyr pipeline.

This function runs the actual pipeline. If you are running another
pipeline from within a pipeline don't call main(). Do call main() or
main_with_context() instead for your 1st pipeline, if there are
subsequent pipelines calling pipelines use load_and_run_pipeline or
run_pipeline.

Pipeline and context should be already loaded. If pipeline not loaded
yet, you probably want to call load_and_run_pipeline instead.

If none of groups, success_group & failure_group specified, defaults to
['steps'], on_success, on_failure.

If any of groups, success_group or failure_group specified, will ONLY
run the specified (i.e if you specify groups you don't get
on_success/on_failure groups unless you specify these explicitly.)

Args:
    pipeline (dict): Dictionary representing the pipeline.
    context (pypyr.context.Context): Reusable context object.
    pipeline_context_input (list of str): Args used to initialize
        context. These go to the context_parser.
    parse_input (bool): run context_parser in pipeline.
    groups (list of str): step-group names to run in pipeline.
    success_group (str): step-group name to run on success completion.
    failure_group (str): step-group name to run on pipeline failure.

Returns:
    None
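A hedged sketch of driving run_pipeline directly with an in-memory pipeline dict. It assumes pypyr is installed; the built-in pypyr.steps.echo step and its echoMe input come from pypyr's step library, not from this module.

import pypyr.context
from pypyr.pipelinerunner import run_pipeline

pipeline = {
    'steps': [
        {'name': 'pypyr.steps.echo', 'in': {'echoMe': 'hello from run_pipeline'}},
    ],
}
context = pypyr.context.Context()
context.pipeline_name = 'inline-demo'   # run_pipeline expects an already-prepared context
run_pipeline(pipeline=pipeline, context=context, parse_input=False)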
https://github.com/pypyr/pypyr/blob/9de0476ebba114c26ff3bf38ae23ebb69bc3e087/pypyr/pipelinerunner.py#L376-L454
import logging import pypyr.context import pypyr.log.logger import pypyr.moduleloader from pypyr.cache.parsercache import contextparser_cache from pypyr.cache.pipelinecache import pipeline_cache from pypyr.errors import Stop, StopPipeline, StopStepGroup from pypyr.stepsrunner import StepsRunner import pypyr.yaml logger = logging.getLogger(__name__) def get_parsed_context(pipeline, context_in_args): logger.debug("starting") if 'context_parser' in pipeline: parser_module_name = pipeline['context_parser'] logger.debug("context parser specified: %s", parser_module_name) get_parsed_context = contextparser_cache.get_context_parser( parser_module_name) logger.debug("running parser %s", parser_module_name) result_context = get_parsed_context(context_in_args) logger.debug("context parse %s done", parser_module_name) if result_context is None: logger.debug( "%s returned None. Using empty context instead", parser_module_name ) return pypyr.context.Context() else: return pypyr.context.Context(result_context) else: logger.debug("pipeline does not have custom context parser. Using " "empty context.") logger.debug("done") return pypyr.context.Context() def main( pipeline_name, pipeline_context_input=None, working_dir=None, groups=None, success_group=None, failure_group=None, loader=None ): prepare_and_run(pipeline_name=pipeline_name, working_dir=working_dir, pipeline_context_input=pipeline_context_input, parse_input=True, loader=loader, groups=groups, success_group=success_group, failure_group=failure_group) def main_with_context( pipeline_name, dict_in=None, working_dir=None, groups=None, success_group=None, failure_group=None, loader=None ): if dict_in: context = pypyr.context.Context(dict_in) else: context = pypyr.context.Context() prepare_and_run(pipeline_name=pipeline_name, working_dir=working_dir, context=context, parse_input=False, loader=loader, groups=groups, success_group=success_group, failure_group=failure_group) return context def prepare_and_run( pipeline_name, working_dir=None, pipeline_context_input=None, context=None, parse_input=True, loader=None, groups=None, success_group=None, failure_group=None ): logger.debug("starting pypyr") pypyr.moduleloader.set_working_directory(working_dir) if context is not None: context.pipeline_name = pipeline_name context.working_dir = pypyr.moduleloader.get_working_directory() try: load_and_run_pipeline(pipeline_name=pipeline_name, pipeline_context_input=pipeline_context_input, context=context, parse_input=parse_input, loader=loader, groups=groups, success_group=success_group, failure_group=failure_group) except Stop: logger.debug("Stop: stopped pypyr") logger.debug("pypyr done") def prepare_context(pipeline, context_in_args, context): logger.debug("starting") parsed_context = get_parsed_context( pipeline=pipeline, context_in_args=context_in_args) context.update(parsed_context) logger.debug("done") def load_and_run_pipeline(pipeline_name, pipeline_context_input=None, context=None, parse_input=True, loader=None, groups=None, success_group=None, failure_group=None): logger.debug("you asked to run pipeline: %s", pipeline_name) logger.debug("you set the initial context arg to: %s", pipeline_context_input) if context is None: context = pypyr.context.Context() context.pipeline_name = pipeline_name context.working_dir = pypyr.moduleloader.get_working_directory() pipeline_definition = pipeline_cache.get_pipeline( pipeline_name=pipeline_name, loader=loader) run_pipeline( pipeline=pipeline_definition, pipeline_context_input=pipeline_context_input, context=context, 
parse_input=parse_input, groups=groups, success_group=success_group, failure_group=failure_group )
Apache License 2.0
henniggroup/gasp-python
gasp/energy_calculators.py
GulpEnergyCalculator.get_energy
python
def get_energy(self, gout):
    output_lines = gout.split('\n')
    for line in output_lines:
        if 'Final energy' in line:
            return float(line.split()[3])
Parses the final energy from the GULP output.

Args:
    gout: the GULP output, as a string
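A standalone illustration of the parsing rule above on a made-up GULP-style output fragment (real GULP output contains many more lines around this one):

gout = "\n".join([
    "  Components of energy :",
    "  Final energy =     -123.45678901 eV",
])
for line in gout.split('\n'):
    if 'Final energy' in line:
        print(float(line.split()[3]))   # -123.45678901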
https://github.com/henniggroup/gasp-python/blob/0c8d993c82e0e1c69a05b3c34bbb2fcbbdbb7f07/gasp/energy_calculators.py#L731-L742
from __future__ import division, unicode_literals, print_function from gasp.general import Cell from pymatgen.core.lattice import Lattice from pymatgen.core.periodic_table import Element from pymatgen.io.lammps.data import LammpsData, LammpsBox, ForceField, Topology import pymatgen.command_line.gulp_caller as gulp_caller import shutil import subprocess import os import collections class VaspEnergyCalculator(object): def __init__(self, incar_file, kpoints_file, potcar_files, geometry): self.name = 'vasp' self.incar_file = incar_file self.kpoints_file = kpoints_file self.potcar_files = potcar_files def do_energy_calculation(self, organism, dictionary, key, composition_space): job_dir_path = str(os.getcwd()) + '/temp/' + str(organism.id) os.mkdir(job_dir_path) shutil.copy(self.incar_file, job_dir_path) shutil.copy(self.kpoints_file, job_dir_path) organism.cell.sort() organism.cell.to(fmt='poscar', filename=job_dir_path + '/POSCAR') symbols = [] for site in organism.cell.sites: if site.specie.symbol not in symbols: symbols.append(site.specie.symbol) total_potcar_path = job_dir_path + '/POTCAR' with open(total_potcar_path, 'w') as total_potcar_file: for symbol in symbols: with open(self.potcar_files[symbol], 'r') as potcar_file: for line in potcar_file: total_potcar_file.write(line) print('Starting VASP calculation on organism {} '.format(organism.id)) devnull = open(os.devnull, 'w') try: subprocess.call(['callvasp', job_dir_path], stdout=devnull, stderr=devnull) except: print('Error running VASP on organism {} '.format(organism.id)) dictionary[key] = None return try: relaxed_cell = Cell.from_file(job_dir_path + '/CONTCAR') except: print('Error reading structure of organism {} from CONTCAR ' 'file '.format(organism.id)) dictionary[key] = None return converged = False with open(job_dir_path + '/OUTCAR') as f: for line in f: if 'reached' in line and 'required' in line and 'accuracy' in line: converged = True if not converged: print('VASP relaxation of organism {} did not converge '.format( organism.id)) dictionary[key] = None return pv = 0 with open(job_dir_path + '/OUTCAR') as f: for line in f: if 'energy(sigma->0)' in line: u = float(line.split()[-1]) elif 'enthalpy' in line: pv = float(line.split()[-1]) enthalpy = u + pv organism.cell = relaxed_cell organism.total_energy = enthalpy organism.epa = enthalpy/organism.cell.num_sites print('Setting energy of organism {} to {} ' 'eV/atom '.format(organism.id, organism.epa)) dictionary[key] = organism class LammpsEnergyCalculator(object): def __init__(self, input_script, geometry): self.name = 'lammps' self.input_script = input_script def do_energy_calculation(self, organism, dictionary, key, composition_space): job_dir_path = str(os.getcwd()) + '/temp/' + str(organism.id) os.mkdir(job_dir_path) shutil.copy(self.input_script, job_dir_path) script_name = os.path.basename(self.input_script) input_script_path = job_dir_path + '/' + str(script_name) self.conform_to_lammps(organism.cell) self.write_data_file(organism, job_dir_path, composition_space) organism.cell.to(fmt='poscar', filename=job_dir_path + '/POSCAR.' 
+ str(organism.id) + '_unrelaxed') print('Starting LAMMPS calculation on organism {} '.format( organism.id)) try: lammps_output = subprocess.check_output( ['calllammps', input_script_path], stderr=subprocess.STDOUT) lammps_output = lammps_output.decode('utf-8') except subprocess.CalledProcessError as e: with open(job_dir_path + '/log.lammps', 'w') as log_file: log_file.write(e.output.decode('utf-8')) print('Error running LAMMPS on organism {} '.format(organism.id)) dictionary[key] = None return with open(job_dir_path + '/log.lammps', 'w') as log_file: log_file.write(lammps_output) symbols = [] all_elements = composition_space.get_all_elements() for element in all_elements: symbols.append(element.symbol) try: relaxed_cell = self.get_relaxed_cell( job_dir_path + '/dump.atom', job_dir_path + '/in.data', symbols) except: print('Error reading structure of organism {} from LAMMPS ' 'output '.format(organism.id)) dictionary[key] = None return try: total_energy = self.get_energy(job_dir_path + '/log.lammps') except: print('Error reading energy of organism {} from LAMMPS ' 'output '.format(organism.id)) dictionary[key] = None return epa = total_energy/organism.cell.num_sites if epa < -50: print('Discarding organism {} due to unphysically large energy: ' '{} eV/atom.'.format(organism.id, str(epa))) dictionary[key] = None return organism.cell = relaxed_cell organism.total_energy = total_energy organism.epa = epa print('Setting energy of organism {} to {} eV/atom '.format( organism.id, organism.epa)) dictionary[key] = organism def conform_to_lammps(self, cell): cell.rotate_to_principal_directions() lattice_coords = cell.lattice.matrix ax = lattice_coords[0][0] bx = lattice_coords[1][0] cx = lattice_coords[2][0] by = lattice_coords[1][1] cy = lattice_coords[2][1] if ax < bx or ax < cx: cell.make_supercell([2, 1, 1]) self.conform_to_lammps(cell) elif by < cy: cell.make_supercell([1, 2, 1]) self.conform_to_lammps(cell) def write_data_file(self, organism, job_dir_path, composition_space): lattice_coords = organism.cell.lattice.matrix xhi = lattice_coords[0][0] yhi = lattice_coords[1][1] zhi = lattice_coords[2][2] box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]] xy = lattice_coords[1][0] xz = lattice_coords[2][0] yz = lattice_coords[2][1] box_tilts = [xy, xz, yz] lammps_box = LammpsBox(box_bounds, tilt=box_tilts) elements_dict = collections.OrderedDict() num_elements = len(composition_space.get_all_elements()) is_single_element = (num_elements == 1) if is_single_element: single_element = composition_space.get_all_elements() elements_dict[single_element[0].symbol] = single_element[0] with open(self.input_script, 'r') as f: lines = f.readlines() for line in lines: if 'atom_style' in line: atom_style_in_script = line.split()[1] elif not is_single_element and 'pair_coeff' in line: element_symbols = line.split()[-1*num_elements:] if not is_single_element: for symbol in element_symbols: elements_dict[symbol] = Element(symbol) force_field = ForceField(elements_dict.items()) topology = Topology(organism.cell.sites) lammps_data = LammpsData.from_ff_and_topologies( lammps_box, force_field, [topology], atom_style=atom_style_in_script) lammps_data.write_file(job_dir_path + '/in.data') def get_relaxed_cell(self, atom_dump_path, data_in_path, element_symbols): with open(atom_dump_path, 'r') as atom_dump: lines = atom_dump.readlines() a_data = lines[5].split() b_data = lines[6].split() c_data = lines[7].split() xy = float(a_data[2]) xz = float(b_data[2]) yz = float(c_data[2]) xlo_bound = float(a_data[0]) xhi_bound = 
float(a_data[1]) ylo_bound = float(b_data[0]) yhi_bound = float(b_data[1]) zlo_bound = float(c_data[0]) zhi_bound = float(c_data[1]) xlo = xlo_bound - min([0.0, xy, xz, xy + xz]) xhi = xhi_bound - max([0.0, xy, xz, xy + xz]) ylo = ylo_bound - min(0.0, yz) yhi = yhi_bound - max([0.0, yz]) zlo = zlo_bound zhi = zhi_bound a = [xhi - xlo, 0.0, 0.0] b = [xy, yhi - ylo, 0.0] c = [xz, yz, zhi - zlo] relaxed_lattice = Lattice([a, b, c]) num_atoms = int(lines[3]) types = [] relaxed_cart_coords = [] for i in range(num_atoms): atom_info = lines[9 + i].split() types.append(int(atom_info[1])) relaxed_cart_coords.append([float(atom_info[2]) - xlo, float(atom_info[3]) - ylo, float(atom_info[4]) - zlo]) with open(data_in_path, 'r') as data_in: lines = data_in.readlines() types_masses = {} for i in range(len(lines)): if 'Masses' in lines[i]: for j in range(len(element_symbols)): types_masses[int(lines[i + j + 2].split()[0])] = float( lines[i + j + 2].split()[1]) types_symbols = {} for symbol in element_symbols: for atom_type in types_masses: if format(float(Element(symbol).atomic_mass), '.1f') == format( types_masses[atom_type], '.1f'): types_symbols[atom_type] = symbol relaxed_symbols = [] for atom_type in types: relaxed_symbols.append(types_symbols[atom_type]) return Cell(relaxed_lattice, relaxed_symbols, relaxed_cart_coords, coords_are_cartesian=True) def get_energy(self, lammps_log_path): with open(lammps_log_path, 'r') as f: lines = f.readlines() match_strings = ['Step', 'Temp', 'E_pair', 'E_mol', 'TotEng'] for i in range(len(lines)): if all(match in lines[i] for match in match_strings): energy = float(lines[i + 2].split()[4]) return energy class GulpEnergyCalculator(object): def __init__(self, header_file, potential_file, geometry): self.name = 'gulp' self.header_path = header_file self.potential_path = potential_file with open(header_file, 'r') as gulp_header_file: self.header = gulp_header_file.readlines() with open(potential_file, 'r') as gulp_potential_file: self.potential = gulp_potential_file.readlines() self.gulp_io = gulp_caller.GulpIO() self.anions_shell, self.cations_shell = self.get_shells() if geometry.shape == 'bulk': self.lattice_flags = None elif geometry.shape == 'sheet': self.lattice_flags = '1 1 0 0 0 1' elif geometry.shape == 'wire': self.lattice_flags = '0 0 1 0 0 0' elif geometry.shape == 'cluster': self.lattice_flags = '0 0 0 0 0 0' def get_shells(self): shells = [] for line in self.potential: if 'shel' in line: line_parts = line.split() shells.append(str(line_parts[line_parts.index('shel') - 1])) shells = list(set(shells)) anions_shell = False cations_shell = False for symbol in shells: element = Element(symbol) if element in gulp_caller._anions: anions_shell = True elif element in gulp_caller._cations: cations_shell = True return anions_shell, cations_shell def do_energy_calculation(self, organism, dictionary, key, composition_space): job_dir_path = str(os.getcwd()) + '/temp/' + str(organism.id) os.mkdir(job_dir_path) gin_path = job_dir_path + '/' + str(organism.id) + '.gin' self.write_input_file(organism, gin_path) print('Starting GULP calculation on organism {} '.format(organism.id)) try: gulp_output = subprocess.check_output(['callgulp', gin_path], stderr=subprocess.STDOUT) gulp_output = gulp_output.decode('utf-8') except subprocess.CalledProcessError as e: with open(job_dir_path + '/' + str(organism.id) + '.gout', 'w') as gout_file: gout_file.write(e.output.decode('utf-8')) print('Error running GULP on organism {} '.format(organism.id)) dictionary[key] = None return with 
open(job_dir_path + '/' + str(organism.id) + '.gout', 'w') as gout_file: gout_file.write(gulp_output) conv_err_string = 'Conditions for a minimum have not been satisfied' gradient_norm = self.get_grad_norm(gulp_output) if conv_err_string in gulp_output and gradient_norm > 0.1: print('The GULP calculation on organism {} did not ' 'converge '.format(organism.id)) dictionary[key] = None return try: relaxed_cell = self.get_relaxed_cell(gulp_output) except: print('Error reading structure of organism {} from GULP ' 'output '.format(organism.id)) dictionary[key] = None return try: total_energy = self.get_energy(gulp_output) except: print('Error reading energy of organism {} from GULP ' 'output '.format(organism.id)) dictionary[key] = None return num_atoms = self.get_num_atoms(gulp_output) organism.cell = relaxed_cell organism.epa = total_energy/num_atoms organism.total_energy = organism.epa*organism.cell.num_sites print('Setting energy of organism {} to {} eV/atom '.format( organism.id, organism.epa)) dictionary[key] = organism def write_input_file(self, organism, gin_path): structure_lines = self.gulp_io.structure_lines( organism.cell, anion_shell_flg=self.anions_shell, cation_shell_flg=self.cations_shell, symm_flg=False) structure_lines = structure_lines.split('\n') del structure_lines[-1] lattice_parameters = structure_lines[1].split() for i in range(len(lattice_parameters)): lattice_parameters[i] = round(float(lattice_parameters[i]), 10) rounded_lattice_parameters = "" for lattice_parameter in lattice_parameters: rounded_lattice_parameters += str(lattice_parameter) rounded_lattice_parameters += " " structure_lines[1] = rounded_lattice_parameters if self.lattice_flags is not None: structure_lines[1] = structure_lines[1] + self.lattice_flags for i in range(3, len(structure_lines)): structure_lines[i] = structure_lines[i] + ' 1 1 1' for i in range(len(structure_lines)): structure_lines[i] = structure_lines[i] + '\n' gulp_input = self.header + structure_lines + self.potential with open(gin_path, 'w') as gin_file: for line in gulp_input: gin_file.write(line) def get_grad_norm(self, gout): output_lines = gout.split('\n') for line in output_lines: if 'Final Gnorm' in line: line_parts = line.split() return float(line_parts[3])
MIT License
tensorpack/tensorpack
examples/FasterRCNN/modeling/model_frcnn.py
fastrcnn_2fc_head
python
def fastrcnn_2fc_head(feature):
    dim = cfg.FPN.FRCNN_FC_HEAD_DIM
    init = tfv1.variance_scaling_initializer()
    hidden = FullyConnected('fc6', feature, dim,
                            kernel_initializer=init, activation=tf.nn.relu)
    hidden = FullyConnected('fc7', hidden, dim,
                            kernel_initializer=init, activation=tf.nn.relu)
    return hidden
Args:
    feature (any shape):

Returns:
    2D head feature
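A rough Keras equivalent of this two-FC head, for readers without tensorpack; dim=1024 is an assumption standing in for cfg.FPN.FRCNN_FC_HEAD_DIM, and the initializer/activation mirror the code above.

import tensorflow as tf

def fastrcnn_2fc_head_keras(feature, dim=1024):
    init = tf.keras.initializers.VarianceScaling()
    hidden = tf.keras.layers.Dense(dim, activation='relu',
                                   kernel_initializer=init, name='fc6')(feature)
    hidden = tf.keras.layers.Dense(dim, activation='relu',
                                   kernel_initializer=init, name='fc7')(hidden)
    return hidden

x = tf.keras.Input(shape=(7 * 7 * 256,))   # a flattened RoI feature; shape is assumed
print(fastrcnn_2fc_head_keras(x).shape)    # (None, 1024)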
https://github.com/tensorpack/tensorpack/blob/1a79d595f7eda9dc9dc8428f4461680ed2222ab6/examples/FasterRCNN/modeling/model_frcnn.py#L220-L232
import tensorflow as tf from tensorpack import tfv1 from tensorpack.models import Conv2D, FullyConnected, layer_register from tensorpack.tfutils.argscope import argscope from tensorpack.tfutils.scope_utils import under_name_scope from tensorpack.tfutils.summary import add_moving_summary from tensorpack.utils.argtools import memoized_method from config import config as cfg from utils.box_ops import pairwise_iou from .model_box import decode_bbox_target, encode_bbox_target from .backbone import GroupNorm @under_name_scope() def proposal_metrics(iou): best_iou = tf.reduce_max(iou, axis=0) mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt') summaries = [mean_best_iou] with tf.device('/cpu:0'): for th in [0.3, 0.5]: recall = tf.truediv( tfv1.count_nonzero(best_iou >= th), tf.size(best_iou, out_type=tf.int64), name='recall_iou{}'.format(th)) summaries.append(recall) add_moving_summary(*summaries) @under_name_scope() def sample_fast_rcnn_targets(boxes, gt_boxes, gt_labels): iou = pairwise_iou(boxes, gt_boxes) proposal_metrics(iou) boxes = tf.concat([boxes, gt_boxes], axis=0) iou = tf.concat([iou, tf.eye(tf.shape(gt_boxes)[0])], axis=0) def sample_fg_bg(iou): fg_mask = tf.cond(tf.shape(iou)[1] > 0, lambda: tf.reduce_max(iou, axis=1) >= cfg.FRCNN.FG_THRESH, lambda: tf.zeros(tf.shape(iou)[0], dtype=tf.bool)) fg_inds = tf.reshape(tf.where(fg_mask), [-1]) num_fg = tf.minimum(int( cfg.FRCNN.BATCH_PER_IM * cfg.FRCNN.FG_RATIO), tf.size(fg_inds), name='num_fg') fg_inds = tf.random.shuffle(fg_inds)[:num_fg] bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1]) num_bg = tf.minimum( cfg.FRCNN.BATCH_PER_IM - num_fg, tf.size(bg_inds), name='num_bg') bg_inds = tf.random.shuffle(bg_inds)[:num_bg] add_moving_summary(num_fg, num_bg) return fg_inds, bg_inds fg_inds, bg_inds = sample_fg_bg(iou) best_iou_ind = tf.cond(tf.shape(iou)[1] > 0, lambda: tf.argmax(iou, axis=1), lambda: tf.zeros(tf.shape(iou)[0], dtype=tf.int64)) fg_inds_wrt_gt = tf.gather(best_iou_ind, fg_inds) all_indices = tf.concat([fg_inds, bg_inds], axis=0) ret_boxes = tf.gather(boxes, all_indices) ret_labels = tf.concat( [tf.gather(gt_labels, fg_inds_wrt_gt), tf.zeros_like(bg_inds, dtype=tf.int64)], axis=0) return BoxProposals( tf.stop_gradient(ret_boxes, name='sampled_proposal_boxes'), tf.stop_gradient(ret_labels, name='sampled_labels'), tf.stop_gradient(fg_inds_wrt_gt)) @layer_register(log_shape=True) def fastrcnn_outputs(feature, num_categories, class_agnostic_regression=False): num_classes = num_categories + 1 classification = FullyConnected( 'class', feature, num_classes, kernel_initializer=tf.random_normal_initializer(stddev=0.01)) num_classes_for_box = 1 if class_agnostic_regression else num_classes box_regression = FullyConnected( 'box', feature, num_classes_for_box * 4, kernel_initializer=tf.random_normal_initializer(stddev=0.001)) box_regression = tf.reshape(box_regression, (-1, num_classes_for_box, 4), name='output_box') return classification, box_regression @under_name_scope() def fastrcnn_losses(labels, label_logits, fg_boxes, fg_box_logits): label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=label_logits) label_loss = tf.reduce_mean(label_loss, name='label_loss') fg_inds = tf.where(labels > 0)[:, 0] fg_labels = tf.gather(labels, fg_inds) num_fg = tf.size(fg_inds, out_type=tf.int64) empty_fg = tf.equal(num_fg, 0) if int(fg_box_logits.shape[1]) > 1: fg_labels = tf.expand_dims(fg_labels, axis=1) fg_box_logits = tf.gather(fg_box_logits, fg_labels, batch_dims=1) fg_box_logits = 
tf.reshape(fg_box_logits, [-1, 4]) with tf.name_scope('label_metrics'), tf.device('/cpu:0'): prediction = tf.argmax(label_logits, axis=1, name='label_prediction') correct = tf.cast(tf.equal(prediction, labels), tf.float32) accuracy = tf.reduce_mean(correct, name='accuracy') fg_label_pred = tf.argmax(tf.gather(label_logits, fg_inds), axis=1) num_zero = tf.reduce_sum(tf.cast(tf.equal(fg_label_pred, 0), tf.int64), name='num_zero') false_negative = tf.where( empty_fg, 0., tf.cast(tf.truediv(num_zero, num_fg), tf.float32), name='false_negative') fg_accuracy = tf.where( empty_fg, 0., tf.reduce_mean(tf.gather(correct, fg_inds)), name='fg_accuracy') box_loss = tf.reduce_sum(tf.abs(fg_boxes - fg_box_logits)) box_loss = tf.truediv( box_loss, tf.cast(tf.shape(labels)[0], tf.float32), name='box_loss') add_moving_summary(label_loss, box_loss, accuracy, fg_accuracy, false_negative, tf.cast(num_fg, tf.float32, name='num_fg_label')) return [label_loss, box_loss] @under_name_scope() def fastrcnn_predictions(boxes, scores): assert boxes.shape[1] == scores.shape[1] boxes = tf.transpose(boxes, [1, 0, 2])[1:, :, :] scores = tf.transpose(scores[:, 1:], [1, 0]) max_coord = tf.reduce_max(boxes) filtered_ids = tf.where(scores > cfg.TEST.RESULT_SCORE_THRESH) filtered_boxes = tf.gather_nd(boxes, filtered_ids) filtered_scores = tf.gather_nd(scores, filtered_ids) cls_per_box = tf.slice(filtered_ids, [0, 0], [-1, 1]) offsets = tf.cast(cls_per_box, tf.float32) * (max_coord + 1) nms_boxes = filtered_boxes + offsets selection = tf.image.non_max_suppression( nms_boxes, filtered_scores, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH) final_scores = tf.gather(filtered_scores, selection, name='scores') final_labels = tf.add(tf.gather(cls_per_box[:, 0], selection), 1, name='labels') final_boxes = tf.gather(filtered_boxes, selection, name='boxes') return final_boxes, final_scores, final_labels @layer_register(log_shape=True)
Apache License 2.0
noxrepo/pox
pox/openflow/spanning_forest.py
SpanningForest._compute_nx
python
def _compute_nx (self):
    import networkx as NX
    g = NX.Graph()
    for l in self.topo.iterlinks():
        if l.up:
            u,v = l.link.end[0][0],l.link.end[1][0]
            if g.has_edge(u,v): continue
            g.add_edge(u, v, data=l)
    tree = NX.minimum_spanning_tree(g)
    self.log.debug("Computed spanning forest: %s of %s links",
                   tree.size(), len(self.topo.links))
    self.topo.clear_tree()
    for u,v,d in tree.edges(data=True):
        self.topo.add_to_tree(d['data'])
    for sw in self.switches.values():
        sw._compute()
Computes a spanning tree using NetworkX
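A minimal NetworkX illustration of the same idea on a made-up three-switch triangle: keep only a minimum spanning tree of the up links, so the redundant link is dropped.

import networkx as nx

g = nx.Graph()
g.add_edge('s1', 's2')
g.add_edge('s2', 's3')
g.add_edge('s1', 's3')          # redundant link that would create a loop
tree = nx.minimum_spanning_tree(g)
print(sorted(tree.edges()))     # two of the three links survive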
https://github.com/noxrepo/pox/blob/5f82461e01f8822bd7336603b361bff4ffbd2380/pox/openflow/spanning_forest.py#L410-L431
from pox.core import core import pox.openflow.libopenflow_01 as of from pox.lib.util import dpid_to_str from pox.lib.recoco import Timer import time log = core.getLogger() def _now (): return time.time() class Port (object): def __init__ (self, no, up): assert no < of.OFPP_MAX self.no = no self.up = up self.reset_wait() self.never_block = False def reset_wait (self): self.ts = _now() @property def waiting (self): return self.age < core.openflow_discovery.send_cycle_time / 4 @property def age (self): return _now() - self.ts def is_down (p): if (p.config & of.OFPPC_PORT_DOWN): return True if (p.state & of.OFPPS_LINK_DOWN): return True return False def is_up (p): return not is_down(p) class Switch (object): def __init__ (self, master, dpid): self.dpid = dpid self.log = log.getChild(dpid_to_str(dpid)) self.ports = {} self._port_out_cache = None self._port_out = None self.master = master def get_port (self, p): assert p < of.OFPP_MAX if p not in self.ports: self.ports[p] = Port(p, False) return self.ports[p] def _handle_ConnectionUp (self, e): self._port_cache = None self._sync_port_data() def _handle_ConnectionDown (self, e): for p in self.ports.values(): p.up = False def _handle_PortStatus (self, e): if e.port >= of.OFPP_MAX: return if e.port not in self.ports: if e.deleted: self.ports[e.port] = Port(e.port, False) else: self.ports[e.port] = Port(e.port, is_up(e.ofp.desc)) else: if e.deleted: self.ports[e.port].up = False else: self.ports[e.port].reset_wait() def _handle_timer (self): self._sync_port_data() self._compute() def _sync_port_data (self): con = core.openflow.getConnection(self.dpid) old_ports = self.ports self.ports = {} if not con: return for p in con.ports.values(): if p.port_no >= of.OFPP_MAX: continue if p.port_no in old_ports: self.ports[p.port_no] = old_ports[p.port_no] self.ports[p.port_no].up = is_up(p) else: self.ports[p.port_no] = Port(p.port_no, is_up(p)) def _compute (self): con = core.openflow.getConnection(self.dpid) if not con: return self._sync_port_data() out = {} links = {l.port(self.dpid):l for l in self.master.topo.iterlinks(self.dpid)} for p in con.ports.values(): if p.port_no >= of.OFPP_MAX: continue p = self.ports[p.port_no] fld = False rcv = False if p.never_block: fld = True rcv = True elif p.waiting: pass else: rcv = True l = links.get(p.no) if l: if l.on_tree: fld = True else: fld = True out[p.no] = 0 if not fld: out[p.no] |= of.OFPPC_NO_FLOOD if not rcv: out[p.no] |= of.OFPPC_NO_FWD self._port_out = out self._realize() def _realize (self): if self._port_out == self._port_out_cache: return con = core.openflow.connections.get(self.dpid) if con is None: return data = [] for port_no,cfg in self._port_out.items(): if port_no not in self.ports: continue if port_no not in con.ports: continue p = con.ports[port_no] pm = of.ofp_port_mod(port_no=p.port_no, hw_addr=p.hw_addr, config = cfg, mask = of.OFPPC_NO_FLOOD|of.OFPPC_NO_FWD) data.append(pm.pack()) data.append(of.ofp_features_request().pack()) if self.send(b''.join(data)): self._port_out_cache = self._port_out self.log.info("Configured %s ports", len(data) - 1) def send (self, data): con = core.openflow.connections.get(self.dpid) if not con: self.log.info("Not connected -- didn't send %s bytes" % (len(data),)) return False con.send(data) return True class LinkData (object): def __init__ (self, link): self.link = link.uni self.uv_ts = 0.0 self.vu_ts = 0.0 assert self.link.end[0][0] != self.link.end[1][0] self.on_tree = False @property def up (self): return self.liveness == 1 @property def forward_up (self): 
return self.uv_ts > 0 @property def reverse_up (self): return self.vu_ts > 0 @property def liveness (self): uv = self.uv_ts > 0 vu = self.vu_ts > 0 if uv and vu: return 1 if uv or vu: return 0.5 return 0 def mark_alive (self, link): if link == self.link: self.uv_ts = _now() elif link.uni == self.link: self.vu_ts = _now() else: raise RuntimeError() def mark_dead (self, link = None): if link is None: self.uv_ts = 0.0 self.vu_ts = 0.0 return if link == self.link: self.uv_ts = 0.0 elif link.uni == self.link: self.vu_ts = 0.0 else: raise RuntimeError() def port (self, sw): if self.link.end[0][0] == sw: return self.link.end[0][1] elif self.link.end[1][0] == sw: return self.link.end[1][1] else: raise RuntimeError() def otherport (self, sw): return self.other(sw)[1] def pair (self, sw): return (sw, self.port(sw)) def otherpair (self, sw): if self.link.end[0][0] == sw: return self.link.end[1] elif self.link.end[1][0] == sw: return self.link.end[0] else: raise RuntimeError() def __hash__ (self): return hash(self.link) def __cmp__ (self, other): if isinstance(other, LinkData): return cmp(self.link, other.link) raise RuntimeError("Bad comparison") class Topo (object): def __init__ (self): self.links = {} self.ports = {} self.switches = {} self.tree_links = set() def clear_tree (self): for l in self.tree_links: l.on_tree = False self.tree_links.clear() def add_to_tree (self, l): l.on_tree = True self.tree_links.add(l) def get_link (self, link): if link.uni not in self.links: emsg = "Dynamic/hubbed/multi-access topology not supported" if link.end[0] in self.ports: if self.ports[link.end[0]].link != link.uni: raise RuntimeError(emsg) if link.end[1] in self.ports: if self.ports[link.end[1]].link != link.uni: raise RuntimeError(emsg) l = LinkData(link) self.links[link.uni] = l self.ports[link.end[0]] = l self.ports[link.end[1]] = l self._add_port(*link.end[0], link=l) self._add_port(*link.end[1], link=l) return self.links[link.uni] def _add_port (self, sw, port, link): if sw not in self.switches: self.switches[sw] = {} assert port not in self.switches[sw] self.switches[sw][port] = link def get_port (self, port): return self.ports.get(port) def iterlinks (self, sw=None): if sw is None: return iter(self.links.values()) if sw not in self.switches: return () return iter(self.switches[sw].values()) class SpanningForest (object): def __init__ (self, mode=None): if mode is None: mode = 'stable' self._mode_function = getattr(type(self), '_compute_' + mode) self.log = log self.topo = Topo() self.switches = {} self.t = None core.listen_to_dependencies(self) def _all_dependencies_met (self): self._handle_timer() def _handle_timer (self): self.t = Timer(1, self._handle_timer) for sw in self.switches.values(): sw._handle_timer() def _handle_openflow_PortStatus (self, e): self.switches[e.dpid]._handle_PortStatus(e) l = self.topo.get_port((e.dpid, e.port)) if l is not None: prev_liveness = l.liveness if is_down(e.ofp.desc): l.mark_dead() if l.liveness != prev_liveness: self._compute() def _handle_openflow_discovery_LinkEvent (self, e): link = self.topo.get_link(e.link) prev_liveness = link.liveness if e.added: link.mark_alive(e.link) else: link.mark_dead(e.link) if link.liveness != prev_liveness: self._compute() def _handle_openflow_ConnectionUp (self, event): if event.dpid not in self.switches: self.switches[event.dpid] = Switch(self, event.dpid) self.switches[event.dpid]._handle_ConnectionUp(event) self._compute() def _handle_openflow_ConnectionDown (self, event): if event.dpid in self.switches: 
self.switches[event.dpid]._handle_ConnectionDown(event) self._compute() def _compute (self): self._mode_function(self)
Apache License 2.0
kmill/textadv
textadv/gameworld/basicrules.py
rule_ParentEnterable_by_Location
python
def rule_ParentEnterable_by_Location(x, world) : loc = world[Location(x)] if not loc : return None while not world[IsA(loc, "room")] : if world[IsEnterable(loc)] : return loc loc = world[Location(loc)] if not loc : return None return loc
Gets either the next room or the next enterable object by repeatedly calling Location.
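For clarity, the same traversal can be restated as a plain-Python sketch over a hypothetical parent mapping; location_of, is_room and is_enterable are stand-ins for the world properties used above, not part of textadv:

    def walk_to_enterable(obj, location_of, is_room, is_enterable):
        # Climb the Location chain until we reach a room or an enterable container.
        loc = location_of.get(obj)
        while loc is not None and not is_room(loc):
            if is_enterable(loc):
                return loc
            loc = location_of.get(loc)
        return loc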
https://github.com/kmill/textadv/blob/b153b7f3f990f6f6f218a86c499382ad50982a8a/textadv/gameworld/basicrules.py#L570-L582
@world.define_property class Name(Property) : numargs = 1 @world.handler(Name(X) <= IsA(X, "kind")) def default_Name(x, world) : return str(x) @world.define_property class ProperNamed(Property) : numargs = 1 world[ProperNamed(X) <= IsA(X, "kind")] = False @world.define_property class PrintedName(Property) : numargs = 1 @world.handler(PrintedName(X) <= IsA(X, "kind")) def default_PrintedName(x, world) : return world[Name(x)] @world.define_property class DefiniteName(Property) : numargs = 1 @world.handler(DefiniteName(X) <= IsA(X, "kind")) def default_DefiniteName(x, world) : printed_name = world[PrintedName(x)] if world[ProperNamed(x)] : return printed_name else : return "the "+printed_name @world.define_property class IndefiniteName(Property) : numargs = 1 @world.handler(IndefiniteName(X) <= IsA(X, "kind")) def default_IndefiniteName(x, world) : printed_name = world[PrintedName(x)] if world[ProperNamed(x)] : return printed_name elif printed_name[0].lower() in "aeoiu" : return "an "+printed_name else : return "a "+printed_name @world.define_property class Description(Property) : numargs = 1 @world.define_property class Words(Property) : numargs = 1 @world.define_property class AddedWords(Property) : numargs = 1 world[AddedWords(X)] = [] @world.handler(Words(X)) def default_Words(x, world) : words = world[Name(x)].split() words[-1] = "@"+words[-1] words.extend(world[AddedWords(x)]) return words world[Description(X) <= IsA(X, "room")] = None world[ProperNamed(X) <= IsA(X, "room")] = True @world.define_property class DirectionDescription(Property) : numargs = 2 world[DirectionDescription(X, direction)] = None @world.define_property class Visited(Property) : numargs = 1 world[Visited(X) <= IsA(X, "room")] = False @world.define_property class Contents(Property) : numargs = 1 @world.handler(Contents(X) <= IsA(X, "room")) def contents_room(x, world) : return [o["y"] for o in world.query_relation(Contains(x, Y))]+world.activity.get_room_doors(x) @world.define_property class EffectiveContainer(Property) : numargs = 1 @world.handler(EffectiveContainer(X) <= IsA(X, "room")) def rule_EffectiveContainer_if_x_in_room(x, world) : return x @world.define_property class VisibleContainer(Property) : numargs = 1 @world.handler(VisibleContainer(X) <= IsA(X, "room")) def rule_VisibleContainer_if_x_in_room(x, world) : return x @world.define_property class MakesLight(Property) : numargs = 1 world[MakesLight(X) <= IsA(X, "room")] = True @world.define_property class ContributesLight(Property) : numargs = 1 @world.define_property class ContainsLight(Property) : numargs = 1 @world.handler(ContainsLight(X) <= IsA(X, "room")) def rule_ContainsLight_room_default_is_MakesLight(x, world) : return world[MakesLight(x)] @world.handler(ContainsLight(X) <= IsA(X, "room")) def rule_ContainsLight_contents_can_light_room(x, world) : if any(world[ContributesLight(o)] for o in world[Contents(x)]) : return True else : raise NotHandled() @world.define_property class NoGoMessage(Property) : numargs = 2 world[NoGoMessage(X, direction) <= IsA(X, "room")] = "{Bob|cap} can't go that way." 
@world.define_property class WhenGoMessage(Property) : numargs = 2 world[WhenGoMessage(X, direction) <= IsA(X, "room")] = None world[Description(X) <= IsA(X, "thing")] = None world[MakesLight(X) <= IsA(X, "thing")] = False world[ContributesLight(X) <= IsA(X, "thing")] = False @world.handler(ContributesLight(X) <= IsA(X, "thing")) def rule_ContributesLight_thing_default_is_MakesLight(x, world) : if world[MakesLight(x)] : return True else : raise NotHandled() world[ContainsLight(X) <= IsA(X, "thing")] = False @world.handler(ContainsLight(X) <= IsA(X, "thing")) def rule_ContributesLight_if_parts_contribute(x, world) : parts = world.query_relation(PartOf(Y, x), var=Y) if any(world[ContributesLight(o)] for o in parts) : return True else : raise NotHandled() @world.handler(EffectiveContainer(X) <= IsA(X, "thing")) def rule_EffectiveContainer_if_thing(x, world) : location = world[Location(x)] if location : return world[EffectiveContainer(location)] else : return x @world.handler(VisibleContainer(X) <= IsA(X, "thing")) def rule_VisibleContainer_if_thing(x, world) : return world[VisibleContainer(world[Location(x)])] @world.define_property class ContainingRoom(Property) : numargs = 1 @world.handler(ContainingRoom(X)) def rule_ContainingRoom_default(x, world) : loc = world[Location(x)] if not loc : return None while not world[IsA(loc, "room")] : loc = world[Location(loc)] if not loc : return None return loc @world.define_property class NotableDescription(Property) : numargs = 1 world[NotableDescription(X)] = False @world.define_property class Reported(Property) : numargs = 1 world[Reported(X) <= IsA(X, "thing")] = True @world.define_property class SubjectPronoun(Property) : numargs = 1 @world.define_property class ObjectPronoun(Property) : numargs = 1 @world.define_property class PossessivePronoun(Property) : numargs = 1 @world.define_property class ReflexivePronoun(Property) : numargs = 1 world[SubjectPronoun(X) <= IsA(X, "thing")] = "it" world[ObjectPronoun(X) <= IsA(X, "thing")] = "it" world[PossessivePronoun(X) <= IsA(X, "thing")] = "its" world[ReflexivePronoun(X) <= IsA(X, "thing")] = "itself" @world.define_property class FixedInPlace(Property) : numargs = 1 world[FixedInPlace(X) <= IsA(X, "thing")] = False @world.define_property class NoTakeMessage(Property) : numargs = 1 world[NoTakeMessage(X) <= FixedInPlace(X)] = "That's fixed in place." 
@world.define_property class VisibleTo(Property) : numargs = 2 world[VisibleTo(X, actor)] = False @world.handler(VisibleTo(X, actor)) def rule_VisibleTo_if_held(x, actor, world) : if world.query_relation(Has(actor, x)) : return True else : raise NotHandled() @world.handler(VisibleTo(X, actor)) def rule_VisibleTo_if_in_same_visible_container(x, actor, world) : actor_vis_cont = world[VisibleContainer(world[Location(actor)])] if x in world.activity.get_room_doors(actor_vis_cont) : return True if actor_vis_cont == x : x_vis_cont = x else : loc = world[Location(x)] if not loc : raise NotHandled() x_vis_cont = world[VisibleContainer(loc)] if actor_vis_cont == x_vis_cont and world[ContainsLight(actor_vis_cont)] : return True raise NotHandled() @world.handler(VisibleTo(X, actor)) def rule_VisibleTo_if_part_of(x, actor, world) : x_assembly = world.query_relation(PartOf(x, Y), var=Y) if x_assembly and world[VisibleTo(x_assembly[0], actor)] : return True raise NotHandled() @world.define_property class AccessibleTo(Property) : numargs = 2 world[AccessibleTo(X, actor)] = True @world.handler(AccessibleTo(X, actor)) def rule_not_AccessibleTo_if_different_effective_containers(x, actor, world) : actor_eff_cont = world[EffectiveContainer(world[Location(actor)])] if x in world.activity.get_room_doors(actor_eff_cont) : return True if actor_eff_cont != x : if actor_eff_cont != world[EffectiveContainer(world[Location(x)])] : return False raise NotHandled() @world.handler(AccessibleTo(X, actor)) def rule_not_AccessibleTo_if_not_visible(x, actor, world) : if not world[VisibleTo(x, actor)] : return False else : raise NotHandled() @world.define_property class IsOpaque(Property) : numargs = 1 world[IsOpaque(X) <= IsA(X, "thing")] = True @world.define_property class IsEnterable(Property) : numargs = 1 world[IsEnterable(X) <= IsA(X, "thing")] = False @world.define_property class NoEnterMessage(Property) : numargs = 1 world[NoEnterMessage(X) <= IsA(X, "thing")] = "{Bob|cap} can't enter that." @world.define_property class ParentEnterable(Property) : numargs = 1 @world.handler(ParentEnterable(X) <= IsA(X, "thing"))
MIT License
narke/gcc-cross-compiler
toolchain.py
unpack_tarball
python
def unpack_tarball(tarball): with tarfile.open(tarball) as tar: tar.extractall('.')
Extract the contents of a tarball into the current directory.
https://github.com/narke/gcc-cross-compiler/blob/ff4156887a42ea75cec26634d39b8d2a6cca420c/toolchain.py#L243-L246
import os import sys import ftplib import shutil import pathlib import tarfile import hashlib import tempfile import argparse import subprocess BINUTILS_VERSION = '2.36' GCC_VERSION = '11.1.0' GDB_VERSION = '10.2' BASEDIR = os.getcwd() BINUTILS_TARBALL = 'binutils-{}.tar.xz'.format(BINUTILS_VERSION) GCC_TARBALL = 'gcc-{}.tar.xz'.format(GCC_VERSION) GDB_TARBALL = 'gdb-{}.tar.xz'.format(GDB_VERSION) INSTALL_DIR = BASEDIR + '/PKG' BINUTILS_CHECKSUM = 'f6114b8c40096f9aa9f64fe1ab8ba087' GCC_CHECKSUM = '77f6252be0861ab918042acf42bc10ff' GDB_CHECKSUM = 'c044b7146903ec51c9d2337a29aee93b' GMP_MAIN = """ #define GCC_GMP_VERSION_NUM(a, b, c) \ (((a) << 16L) | ((b) << 8) | (c)) #define GCC_GMP_VERSION \ GCC_GMP_VERSION_NUM(__GNU_MP_VERSION, __GNU_MP_VERSION_MINOR, __GNU_MP_VERSION_PATCHLEVEL) #if GCC_GMP_VERSION < GCC_GMP_VERSION_NUM(4, 3, 2) choke me #endif """ MPFR_MAIN = """ #if MPFR_VERSION < MPFR_VERSION_NUM(2, 4, 2) choke me #endif """ MPC_MAIN = """ #if MPC_VERSION < MPC_VERSION_NUM(0, 8, 1) choke me #endif """ ISL_MAIN = """ isl_ctx_get_max_operations (isl_ctx_alloc ()); """ TARGETS = { 'aarch64': 'aarch64-linux-gnu', 'amd64': 'amd64-linux-gnu', 'arm32': 'arm-linux-gnueabi', 'ia32': 'i686-pc-linux-gnu', 'ia64': 'ia64-pc-linux-gnu', 'mips32': 'mipsel-linux-gnu', 'mips32eb': 'mips-linux-gnu', 'mips64': 'mips64el-linux-gnu', 'ppc32': 'ppc-linux-gnu', 'ppc64': 'ppc64-linux-gnu', 'sparc32': 'sparc-leon3-linux-gnu', 'sparc64': 'sparc64-linux-gnu', 'lm32': 'lm32-elf' } def check_header(dependency, header, body): code = """ #include %s int main() { %s return 0; } """ % (header, body) filename = tempfile.NamedTemporaryFile(suffix='.c') filename.write(code.encode()) try: subprocess.check_call(['cc', '-c', '-o', '{}.o'.format(filename.name[:-2]), '{}'.format(filename.name)]) os.unlink('{}.o'.format(filename.name[:-2])) except subprocess.CalledProcessError: print('{0} of {1} not found'.format(header, dependency)) sys.exit() def check_headers(): check_header('GMP', '<gmp.h>', GMP_MAIN) check_header('MPFR', '<mpfr.h>', MPFR_MAIN) check_header('MPC', '<mpc.h>', MPC_MAIN) check_header('isl', '<isl/ctx.h>', ISL_MAIN) def show_dependencies(): message = """IMPORTANT NOTICE: For a successful compilation and use of the cross-compiler toolchain you need at least the following dependencies. Please make sure that the dependencies are present in your system. Otherwise the compilation process might fail after a few seconds or minutes." 
- SED, AWK, Flex, Bison, gzip, bzip2, Bourne Shell - gettext, zlib, Texinfo, libelf, libgomp - GNU Make, Coreutils, Sharutils, tar - GNU Multiple Precision Library (GMP) - MPFR - MPC - integer point manipulation library (isl) - native C and C++ compiler, assembler and linker - native C and C++ standard library with headers""" print(message) def download(toolname, tarball): if toolname == 'gcc': path = '/gnu/gcc/gcc-{}/'.format(GCC_VERSION) else: path = '/gnu/{}/'.format(toolname) try: ftp = ftplib.FTP('ftp.gnu.org') ftp.login() ftp.cwd(path) with open('{}'.format(tarball), 'wb') as ftpfile: ftp.retrbinary('RETR {}'.format(tarball), ftpfile.write) ftp.quit() except ftplib.all_errors: print('Error: Downoad of {} failed'.format(tarball)) sys.exit() def check_integrity(archive, checksum): with open(archive, 'rb') as tarball: if hashlib.md5(tarball.read()).hexdigest() != checksum: print('Error: Wrong checksum for {}'.format(archive)) sys.exit() def prepare(): show_dependencies() tools = { 'binutils': { 'tarball': BINUTILS_TARBALL, 'checksum': BINUTILS_CHECKSUM }, 'gcc': { 'tarball': GCC_TARBALL, 'checksum': GCC_CHECKSUM }, 'gdb': { 'tarball': GDB_TARBALL, 'checksum': GDB_CHECKSUM } } for toolname in tools: if not os.path.isfile(tools[toolname]['tarball']): download(toolname, tools[toolname]['tarball']) check_integrity(tools[toolname]['tarball'], tools[toolname]['checksum']) def set_target_from_platform(platform): return TARGETS[platform] def cleanup_dir(path): if os.path.isdir(path): shutil.rmtree(path) def create_dir(path): if not os.path.isdir(path): print('>>> Creating directory: {}'.format(path)) pathlib.Path(path).mkdir(parents=True, exist_ok=True)
BSD 3-Clause New or Revised License
1044197988/tf.keras-commonly-used-models
常用分类模型/DenseNet.py
dense_block
python
def dense_block(x, nb_layers, nb_channels, growth_rate, dropout_rate=None, bottleneck=False, weight_decay=1e-4): x_list = [x] for i in range(nb_layers): cb = convolution_block(x, growth_rate, dropout_rate, bottleneck, weight_decay) x_list.append(cb) x = Concatenate(axis=-1)(x_list) nb_channels += growth_rate return x, nb_channels
Creates a dense block and concatenates the inputs.
https://github.com/1044197988/tf.keras-commonly-used-models/blob/b37276bcee454b2c39b8fcc60e87b72ec8a6a5d4/常用分类模型/DenseNet.py#L96-L107
from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Concatenate, BatchNormalization from tensorflow.keras.models import Model from tensorflow.keras.layers import Conv2D, GlobalAveragePooling2D, AveragePooling2D from tensorflow.keras.regularizers import l2 def DenseNet(input_shape=None, dense_blocks=3, dense_layers=-1, growth_rate=12, nb_classes=None, dropout_rate=None, bottleneck=False, compression=1.0, weight_decay=1e-4, depth=40): if nb_classes==None: raise Exception('Please define number of classes (e.g. num_classes=10). This is required for final softmax.') if compression <=0.0 or compression > 1.0: raise Exception('Compression have to be a value between 0.0 and 1.0. If you set compression to 1.0 it will be turn off.') if type(dense_layers) is list: if len(dense_layers) != dense_blocks: raise AssertionError('Number of dense blocks have to be same length to specified layers') elif dense_layers == -1: if bottleneck: dense_layers = (depth - (dense_blocks + 1))/dense_blocks // 2 else: dense_layers = (depth - (dense_blocks + 1))//dense_blocks dense_layers = [int(dense_layers) for _ in range(dense_blocks)] else: dense_layers = [int(dense_layers) for _ in range(dense_blocks)] img_input = Input(shape=input_shape) nb_channels = growth_rate * 2 print('Creating DenseNet') print('#############################################') print('Dense blocks: %s' % dense_blocks) print('Layers per dense block: %s' % dense_layers) print('#############################################') x = Conv2D(nb_channels, (3,3), padding='same',strides=(1,1), use_bias=False, kernel_regularizer=l2(weight_decay))(img_input) for block in range(dense_blocks): x, nb_channels = dense_block(x, dense_layers[block], nb_channels, growth_rate, dropout_rate, bottleneck, weight_decay) if block < dense_blocks - 1: x = transition_layer(x, nb_channels, dropout_rate, compression, weight_decay) nb_channels = int(nb_channels * compression) x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x) x = Activation('relu')(x) x = GlobalAveragePooling2D()(x) x = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x) model_name = None if growth_rate >= 36: model_name = 'widedense' else: model_name = 'dense' if bottleneck: model_name = model_name + 'b' if compression < 1.0: model_name = model_name + 'c' return Model(img_input, x, name=model_name), model_name
Apache License 2.0
lithops-cloud/lithops
lithops/storage/backends/aliyun_oss/aliyun_oss.py
AliyunObjectStorageServiceBackend.put_object
python
def put_object(self, bucket_name, key, data): if isinstance(data, str): data = data.encode() try: bucket = self._connect_bucket(bucket_name) bucket.put_object(key, data) except oss2.exceptions.NoSuchBucket: raise StorageNoSuchKeyError(bucket_name, '')
Put an object in OSS. Overwrite the object if the key already exists. Raises StorageNoSuchKeyError if the bucket does not exist. :param bucket_name: bucket name :param key: key of the object. :param data: data of the object :type data: str/bytes :return: None
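A minimal usage sketch of this backend method; the credentials, endpoints, bucket and key below are hypothetical placeholders, and an existing bucket plus the oss2 package are assumed:

    config = {
        'access_key_id': 'AKID-PLACEHOLDER',
        'access_key_secret': 'SECRET-PLACEHOLDER',
        'public_endpoint': 'oss-eu-central-1.aliyuncs.com',
        'internal_endpoint': 'oss-eu-central-1-internal.aliyuncs.com',
    }
    backend = AliyunObjectStorageServiceBackend(config)
    # str payloads are encoded to bytes before upload (see the method above)
    backend.put_object('my-bucket', 'runs/result.json', '{"status": "ok"}')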
https://github.com/lithops-cloud/lithops/blob/a274a0bc423e22b9a68834cac5d63130666a4ee8/lithops/storage/backends/aliyun_oss/aliyun_oss.py#L58-L75
import oss2 import logging from lithops.serverless.backends.aliyun_fc.config import CONNECTION_POOL_SIZE from lithops.storage.utils import StorageNoSuchKeyError from lithops.utils import is_lithops_worker from lithops.constants import STORAGE_CLI_MSG logger = logging.getLogger(__name__) class AliyunObjectStorageServiceBackend: def __init__(self, config): logger.debug("Creating Aliyun Object Storage Service client") self.config = config self.auth = oss2.Auth(self.config['access_key_id'], self.config['access_key_secret']) if is_lithops_worker(): self.endpoint = self.config['internal_endpoint'] else: self.endpoint = self.config['public_endpoint'] oss2.defaults.connection_pool_size = CONNECTION_POOL_SIZE msg = STORAGE_CLI_MSG.format('Aliyun Object Storage Service') logger.info("{} - Endpoint: {}".format(msg, self.endpoint)) def _connect_bucket(self, bucket_name): if hasattr(self, 'bucket') and self.bucket.bucket_name == bucket_name: bucket = self.bucket else: self.bucket = oss2.Bucket(self.auth, self.endpoint, bucket_name) bucket = self.bucket return bucket def get_client(self): return self
Apache License 2.0
single-cell-genetics/vireo
vireoSNP/utils/vireo_model.py
Vireo.theta_s1
python
def theta_s1(self): return self.beta_mu * self.beta_sum
Beta concentration1 parameter for the theta posterior.
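As a quick arithmetic check of the mean/concentration parameterisation used here (the numbers are arbitrary; only NumPy is needed):

    import numpy as np
    beta_mu, beta_sum = 0.3, 50.0        # posterior mean and total concentration
    alpha = beta_mu * beta_sum           # this is what theta_s1 returns
    beta = (1.0 - beta_mu) * beta_sum    # the matching concentration2 term (cf. theta_s2_prior above)
    assert np.isclose(alpha / (alpha + beta), beta_mu)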
https://github.com/single-cell-genetics/vireo/blob/12df6fc17dad1bd1825bdf7b597fc3e062452130/vireoSNP/utils/vireo_model.py#L140-L142
import itertools import numpy as np from scipy.stats import entropy from scipy.sparse import csc_matrix from scipy.special import logsumexp, digamma, betaln from .vireo_base import normalize, loglik_amplify, beta_entropy from .vireo_base import get_binom_coeff, logbincoeff __docformat__ = "restructuredtext en" class Vireo(): def __init__(self, n_cell, n_var, n_donor, n_GT=3, learn_GT=True, learn_theta=True, ASE_mode=False, fix_beta_sum=False, beta_mu_init=None, beta_sum_init=None, ID_prob_init=None, GT_prob_init=None): self.n_GT = n_GT self.n_var = n_var self.n_cell = n_cell self.n_donor = n_donor self.learn_GT = learn_GT self.ASE_mode = ASE_mode self.learn_theta = learn_theta self.fix_beta_sum = fix_beta_sum self.ELBO_ = np.zeros((0)) self.set_initial(beta_mu_init, beta_sum_init, ID_prob_init, GT_prob_init) self.set_prior() def set_initial(self, beta_mu_init=None, beta_sum_init=None, ID_prob_init=None, GT_prob_init=None): theta_len = self.n_var if self.ASE_mode else 1 if beta_mu_init is not None: self.beta_mu = beta_mu_init else: self.beta_mu = (np.ones((theta_len, self.n_GT)) * np.linspace(0.01, 0.99, self.n_GT).reshape(1, -1)) if beta_sum_init is not None: self.beta_sum = beta_sum_init else: self.beta_sum = np.ones((theta_len, self.n_GT)) * 50 if ID_prob_init is not None: self.ID_prob = normalize(ID_prob_init, axis=1) else: self.ID_prob = normalize(np.random.rand(self.n_cell, self.n_donor)) if GT_prob_init is not None: self.GT_prob = normalize(GT_prob_init) else: _GT_val = np.random.rand(self.n_var, self.n_donor, self.n_GT) self.GT_prob = normalize(_GT_val) def set_prior(self, GT_prior=None, ID_prior=None, beta_mu_prior=None, beta_sum_prior=None, min_GP=0.00001): if beta_mu_prior is None: beta_mu_prior = np.expand_dims( np.linspace(0.01, 0.99, self.beta_mu.shape[1]), axis=0) if beta_sum_prior is None: beta_sum_prior = np.ones(beta_mu_prior.shape) * 50.0 self.theta_s1_prior = beta_mu_prior * beta_sum_prior self.theta_s2_prior = (1 - beta_mu_prior) * beta_sum_prior if ID_prior is not None: if len(ID_prior.shape) == 1: ID_prior = np.expand_dims(ID_prior, axis=0) self.ID_prior = ID_prior else: self.ID_prior = normalize(np.ones(self.ID_prob.shape)) if GT_prior is not None: if len(GT_prior.shape) == 2: GT_prior = np.expand_dims(GT_prior, axis=0) GT_prior[GT_prior < min_GP] = min_GP GT_prior[GT_prior > 1 - min_GP] = 1 - min_GP GT_prior = normalize(GT_prior) self.GT_prior = GT_prior else: self.GT_prior = normalize(np.ones(self.GT_prob.shape)) @property
Apache License 2.0
simphony/osp-core
osp/core/neighbor_dict.py
NeighborDict.keys
python
def keys(self): return {k for k in self}
Get the set of keys.
https://github.com/simphony/osp-core/blob/19f233ebe6c40e92884aa07bf498304d772f7f27/osp/core/neighbor_dict.py#L79-L81
import uuid import rdflib from abc import ABC, abstractmethod from osp.core.ontology.relationship import OntologyRelationship from osp.core.ontology.oclass import OntologyClass from osp.core.ontology.namespace_registry import namespace_registry from osp.core.utils.general import iri_from_uid, uid_from_iri from osp.core.namespaces import from_iri class NeighborDict(ABC): def __init__(self, cuds_object, key_check, value_check): self.cuds_object = cuds_object self.key_check = key_check self.value_check = value_check def __iter__(self): if self.cuds_object.session: self.cuds_object.session._notify_read(self.cuds_object) return self._iter() def __getitem__(self, key): if not self.key_check(key): raise ValueError("Invalid key %s" % key) if self.cuds_object.session: self.cuds_object.session._notify_read(self.cuds_object) return self._getitem(key) def __setitem__(self, key, value): if not self.key_check(key): raise ValueError("Invalid key %s" % key) if not self.value_check(value): raise ValueError("Invalid value %s" % value) if self.cuds_object.session: self.cuds_object.session._notify_read(self.cuds_object) r = self._setitem(key, value) if self.cuds_object.session: self.cuds_object.session._notify_update(self.cuds_object) return r def __delitem__(self, key): if not self.key_check(key): raise ValueError("Invalid key %s" % key) if self.cuds_object.session: self.cuds_object.session._notify_read(self.cuds_object) r = self._delitem(key) if self.cuds_object.session: self.cuds_object.session._notify_update(self.cuds_object) return r def __eq__(self, E): return dict(self.items()) == E def update(self, E): for key, value in E.items(): self[key] = value def items(self): for k in self: yield k, self[k]
BSD 3-Clause New or Revised License
trusted-ai/adversarial-robustness-toolbox
art/attacks/evasion/lowprofool.py
LowProFool.__apply_clipping
python
def __apply_clipping(self, samples: np.ndarray, perturbations: np.ndarray) -> np.ndarray: if self.estimator.clip_values is None: return perturbations mins = self.estimator.clip_values[0] maxs = self.estimator.clip_values[1] np.clip(perturbations, mins - samples, maxs - samples, perturbations) return perturbations
Function for clipping perturbation vectors so that the resulting adversarial vectors do not go beyond the allowed ranges of values. :param samples: Base design matrix. :param perturbations: Perturbations of samples towards being adversarial. :return: Clipped perturbation array.
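A standalone NumPy sketch of the clipping rule described above (not the ART API itself; all values are made up):

    import numpy as np
    samples = np.array([[0.2, 0.9]])
    perturbations = np.array([[0.5, 0.5]])
    clip_min, clip_max = 0.0, 1.0        # stand-ins for estimator.clip_values
    np.clip(perturbations, clip_min - samples, clip_max - samples, out=perturbations)
    # samples + perturbations now stays inside [clip_min, clip_max]
    assert ((samples + perturbations >= clip_min) & (samples + perturbations <= clip_max)).all()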
https://github.com/trusted-ai/adversarial-robustness-toolbox/blob/564f46f99b3cb0406fe3570919b8e71a4c5bba9d/art/attacks/evasion/lowprofool.py#L190-L206
import logging from typing import Callable, Optional, Union, TYPE_CHECKING import numpy as np from scipy.stats import pearsonr from tqdm.auto import trange from art.attacks.attack import EvasionAttack from art.estimators.estimator import LossGradientsMixin from art.estimators.estimator import BaseEstimator from art.estimators.classification.classifier import ClassifierMixin if TYPE_CHECKING: from art.utils import CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE logger = logging.getLogger(__name__) class LowProFool(EvasionAttack): attack_params = EvasionAttack.attack_params + [ "n_steps", "threshold", "lambd", "eta", "eta_decay", "eta_min", "norm", "importance", "verbose", ] _estimator_requirements = (BaseEstimator, LossGradientsMixin, ClassifierMixin) def __init__( self, classifier: "CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE", n_steps: int = 100, threshold: Union[float, None] = 0.5, lambd: float = 1.5, eta: float = 0.2, eta_decay: float = 0.98, eta_min: float = 1e-7, norm: Union[int, float, str] = 2, importance: Union[Callable, str, np.ndarray] = "pearson", verbose: bool = False, ) -> None: super().__init__(estimator=classifier) self.n_steps = n_steps self.threshold = threshold self.lambd = lambd self.eta = eta self.eta_decay = eta_decay self.eta_min = eta_min self.norm = norm self.importance = importance self.verbose = verbose self._targeted = True self.n_classes = self.estimator.nb_classes self.n_features = self.estimator.input_shape[0] self.importance_vec = None if self.estimator.clip_values is None: logger.warning( "The `clip_values` attribute of the estimator is `None`, therefore this instance of LowProFool will by " "default generate adversarial perturbations without clipping them." ) self._check_params() if isinstance(self.importance, np.ndarray): self.importance_vec = self.importance if eta_decay < 1 and eta_min > 0: steps_before_min_eta_reached = np.ceil(np.log(eta_min / eta) / np.log(eta_decay)) if steps_before_min_eta_reached / self.n_steps < 0.8: logger.warning( "The given combination of 'n_steps', 'eta', 'eta_decay' and 'eta_min' effectively sets learning " "rate to its minimal value after about %d steps out of all %d.", steps_before_min_eta_reached, self.n_steps, ) def __weighted_lp_norm(self, perturbations: np.ndarray) -> np.ndarray: return self.lambd * np.linalg.norm( self.importance_vec * perturbations, axis=1, ord=(np.inf if self.norm == "inf" else self.norm) ).reshape(-1, 1) def __weighted_lp_norm_gradient(self, perturbations: np.ndarray) -> np.ndarray: norm = self.norm if isinstance(norm, (int, float)) and norm < np.inf and self.importance_vec is not None: numerator = ( self.importance_vec * self.importance_vec * perturbations * np.power(np.abs(perturbations), norm - 2) ) denominator = np.power(np.sum(np.power(self.importance_vec * perturbations, norm)), (norm - 1) / norm) numerator = np.where(denominator > 1e-10, numerator, np.zeros(numerator.shape[1])) denominator = np.where(denominator <= 1e-10, 1.0, denominator) return numerator / denominator numerator = np.array(self.importance_vec * perturbations) optimum = np.max(np.abs(numerator)) return np.where(abs(numerator) == optimum, np.sign(numerator), 0) def __get_gradients(self, samples: np.ndarray, perturbations: np.ndarray, targets: np.ndarray) -> np.ndarray: clf_loss_grad = self.estimator.loss_gradient( (samples + perturbations).astype(np.float32), targets.astype(np.float32) ) norm_grad = self.lambd * self.__weighted_lp_norm_gradient(perturbations) return clf_loss_grad + norm_grad
MIT License
wesabe/fixofx
3rdparty/wsgi_intercept/webunit_intercept/cookie.py
decodeCookies
python
def decodeCookies(url, server, headers, cookies): request_path = urlparse.urlparse(url)[2] if len(request_path) > 1 and request_path[-1] == '/': request_path = request_path[:-1] hdrcookies = Cookie.SimpleCookie("\n".join(map(lambda x: x.strip(), headers.getallmatchingheaders('set-cookie')))) for cookie in hdrcookies.values(): if cookie['domain']: domain = cookie['domain'] if '.' not in domain: raise Error, 'Cookie domain "%s" has no "."'%domain if domain[0] != '.': raise Error, 'Cookie domain "%s" doesn\'t start ' 'with "."'%domain if not server.endswith(domain): raise Error, 'Cookie domain "%s" doesn\'t match ' 'request host "%s"'%(domain, server) if re.search(r'[a-zA-Z]', server): H = server[:-len(domain)] if '.' in H: raise Error, 'Cookie domain "%s" too short ' 'for request host "%s"'%(domain, server) else: domain = server path = cookie['path'] or request_path if not (request_path.startswith(path) or (request_path == '' and cookie['path'] == '/')): raise Error, 'Cookie path "%s" doesn\'t match ' 'request url "%s"'%(path, request_path) bydom = cookies.setdefault(domain, {}) bypath = bydom.setdefault(path, {}) bypath[cookie.key] = cookie
Decode cookies into the supplied cookies dictionary (see http://www.ietf.org/rfc/rfc2109.txt).
https://github.com/wesabe/fixofx/blob/1792d94697af682ca1d4a75cfefe98465d95a288/3rdparty/wsgi_intercept/webunit_intercept/cookie.py#L40-L91
import re, urlparse, Cookie class Error: def __init__(self, message): self.message = str(message) def __str__(self): return 'COOKIE ERROR: %s'%self.message def parse_cookie(text, qparmre=re.compile( r'([\0- ]*([^\0- ;,=\"]+)="([^"]*)\"([\0- ]*[;,])?[\0- ]*)'), parmre=re.compile( r'([\0- ]*([^\0- ;,=\"]+)=([^\0- ;,\"]*)([\0- ]*[;,])?[\0- ]*)')): result = {} l = 0 while 1: if qparmre.match(text[l:]) >= 0: name=qparmre.group(2) value=qparmre.group(3) l=len(qparmre.group(1)) elif parmre.match(text[l:]) >= 0: name=parmre.group(2) value=parmre.group(3) l=len(parmre.group(1)) else: return result if not result.has_key(name): result[name]=value return result
Apache License 2.0
tuw-geo/ecmwf_models
src/ecmwf_models/era5/download.py
default_variables
python
def default_variables(product='era5'): lut = load_var_table(name=product) defaults = lut.loc[lut['default'] == 1]['dl_name'].values return defaults.tolist()
These variables are downloaded when none are passed by the user. Parameters ---------- product : str, optional (default: 'era5') Name of the ERA5 product to read the default variables for. Either 'era5' or 'era5-land'.
https://github.com/tuw-geo/ecmwf_models/blob/ad0591b14f5a26204289b913294cbcbaa0a8bc72/src/ecmwf_models/era5/download.py#L38-L50
from ecmwf_models.utils import * import argparse import sys import os from datetime import datetime, timedelta, time import shutil import cdsapi import calendar import multiprocessing
MIT License
jnez71/adaptive_control
2link/sim_2link.py
kinem_forward
python
def kinem_forward(q, L): return np.array([ L[0]*np.cos(q[0]) + L[1]*np.cos(q[0]+q[1]), L[0]*np.sin(q[0]) + L[1]*np.sin(q[0]+q[1]), -L[0]*np.sin(q[0])*q[2] - L[1]*np.sin(q[0]+q[1])*(q[2]+q[3]), L[0]*np.cos(q[0])*q[2] + L[1]*np.cos(q[0]+q[1])*(q[2]+q[3]) ])
Returns the state of the end effector (x = [px, py, vx, vy]). Takes the current joint state (q) and link lengths (L).
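A quick numeric check of the formula, assuming kinem_forward from above is in scope (the joint values are chosen by hand):

    import numpy as np
    q = np.array([np.pi / 2, 0.0, 0.0, 0.0])   # [theta1, theta2, dtheta1, dtheta2]
    L = [1.0, 0.5]                             # link lengths
    # px = cos(pi/2) + 0.5*cos(pi/2) = 0, py = sin(pi/2) + 0.5*sin(pi/2) = 1.5,
    # and both velocity terms vanish because the joint rates are zero
    assert np.allclose(kinem_forward(q, L), [0.0, 1.5, 0.0, 0.0])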
https://github.com/jnez71/adaptive_control/blob/433337245b8ba6644c427e630e3a3710aea4d67f/2link/sim_2link.py#L138-L149
from __future__ import division import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from control_2link import Controller T = 20 dt = 0.001 framerate = 60 animate_adapt = True outline_path = False q = np.deg2rad([-90, 0, 0, 0]) L = [1, 0.5] m = [5, 3] g = 9.81 d = [0.05, 0.05] b = [0.1, 0.1] c = [2, 2] umax = [250, 30] vibe_mean = [0, 0] vibe_stdv = [0, 0] sensor_mean = [0, 0, 0, 0] sensor_stdv = [0, 0, 0, 0] kp = [100, 100] kd = [100, 100] kg = 'LS' ku = 5*np.ones(5) kf = 0 window = np.inf history_size = 50 adapt0 = [0, 0, 0, 0, 0] path_type = 'random' target = np.deg2rad([70, 45]) vmax = [np.pi, np.pi] amax = [5, 1] controller = Controller(dt, q, target, path_type, kp, kd, kg, ku, kf, umax, vmax, amax, history_size, window, adapt0) def dynamics(q, u): global L, m, g, b, c, umax M = np.zeros((2, 2)) M[0, 0] = (m[0]+m[1])*L[0]**2 + m[1]*L[1]**2 + 2*m[1]*L[0]*L[1]*np.cos(q[1]) M[0, 1] = m[1]*L[1]**2 + m[1]*L[0]*L[1]*np.cos(q[1]) M[1, 0] = M[0, 1] M[1, 1] = m[1]*L[1]**2 V = np.array([ -m[1]*L[0]*L[1]*(2*q[2]*q[3]+q[3]**2)*np.sin(q[1]), m[1]*L[0]*L[1]*q[2]**2*np.sin(q[1]) ]) G = np.array([ g*(m[0]+m[1])*L[0]*np.cos(q[0]) + m[1]*g*L[1]*np.cos(q[0]+q[1]), m[1]*g*L[1]*np.cos(q[0]+q[1]) ]) D = np.array([ d[0]*q[2], d[1]*q[3] ]) F = np.array([ b[0]*np.tanh(c[0]*q[2]), b[1]*np.tanh(c[1]*q[3]) ]) global unstruct_history, i unstruct_history[i, :] = F + D f = vibe_mean + vibe_stdv*np.random.randn(2) for i, mag in enumerate(abs(u)): if mag > umax[i]: u[i] = umax[i] * np.sign(u[i]) return np.concatenate((q[2:], np.linalg.inv(M).dot(u + f - V - G - D - F)))
MIT License
punch-cyber/stoq-plugins-public
s3/s3/s3.py
S3Plugin.get
python
async def get(self, task: ArchiverResponse) -> Payload: if not self.client: self._get_client() meta = PayloadMeta( extra_data={'bucket': task.results['bucket'], 'path': task.results['path']} ) content = self.client.get_object( Bucket=task.results['bucket'], Key=task.results['path'] )['Body'] return Payload(content.read(), meta)
Retrieve archived payload from S3
https://github.com/punch-cyber/stoq-plugins-public/blob/8ba855206da6aeae6cd03fad9162160296a74bd7/s3/s3/s3.py#L71-L84
import boto3 import hashlib from io import BytesIO from typing import Optional, Dict from stoq.helpers import StoqConfigParser from stoq.plugins import ConnectorPlugin, ArchiverPlugin from stoq.data_classes import ( StoqResponse, Payload, ArchiverResponse, Request, PayloadMeta, ) class S3Plugin(ArchiverPlugin, ConnectorPlugin): def __init__(self, config: StoqConfigParser) -> None: super().__init__(config) self.client = None self.access_key = config.get('options', 'access_key', fallback=None) self.secret_key = config.get('options', 'secret_key', fallback=None) self.archive_bucket = config.get('options', 'archive_bucket', fallback=None) self.connector_bucket = config.get('options', 'connector_bucket', fallback=None) self.use_sha = config.getboolean('archiver', 'use_sha', fallback=True) async def save(self, response: StoqResponse) -> None: self._upload(str(response).encode(), response.scan_id, self.connector_bucket) async def archive(self, payload: Payload, request: Request) -> ArchiverResponse: if self.use_sha: filename = hashlib.sha1(payload.content).hexdigest() filename = f'{"/".join(list(filename[:5]))}/{filename}' else: filename = payload.results.payload_id self._upload(payload.content, filename, self.archive_bucket) return ArchiverResponse({'bucket': self.archive_bucket, 'path': filename})
Apache License 2.0
melloddy/sparsechem
sparsechem/utils.py
predict_sparse
python
def predict_sparse(net, loader, dev, progress=True, dropout=False): net.eval() if dropout: net.apply(enable_dropout) class_collector = SparseCollector("yc_ind") regr_collector = SparseCollector("yr_ind") with torch.no_grad(): for b in tqdm(loader, leave=False, disable=(progress == False)): X = torch.sparse_coo_tensor( b["x_ind"], b["x_data"], size = [b["batch_size"], loader.dataset.input_size]).to(dev) yc, yr = net(X) class_collector.append(b, yc) regr_collector.append(b, yr) yc_shape = loader.dataset.y_class.shape yr_shape = loader.dataset.y_regr.shape yc_hat = class_collector.tocsr(shape=yc_shape, sigmoid=True) yr_hat = regr_collector.tocsr(shape=yr_shape, sigmoid=False) return yc_hat, yr_hat
Makes predictions for the Y values in loader. Returns sparse matrices (classification and regression) with the shapes of loader.dataset.y_class and loader.dataset.y_regr.
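The returned matrices are assembled with the same SciPy CSR pattern as SparseCollector.tocsr below; a self-contained sketch with made-up indices:

    import numpy as np
    from scipy.sparse import csr_matrix
    vals = np.array([0.9, 0.1], dtype=np.float32)   # predicted values
    rows = np.array([0, 2])                         # compound (row) indices
    cols = np.array([3, 1])                         # task (column) indices
    y_hat = csr_matrix((vals, (rows, cols)), shape=(4, 5))
    assert y_hat[0, 3] == np.float32(0.9)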
https://github.com/melloddy/sparsechem/blob/2569d1bbf769d4bf8b590d4d7bca1804bbf54642/sparsechem/utils.py#L595-L622
import sklearn.metrics from tqdm import tqdm import pandas as pd import numpy as np import torch import scipy.sparse import scipy.io import scipy.special import types import json import warnings import torch.nn.functional as F from sparsechem import censored_mse_loss_numpy from collections import namedtuple from scipy.sparse import csr_matrix class Nothing(object): def __getattr__(self, name): return Nothing() def __call__(self, *args, **kwargs): return Nothing() def __repr__(self): return "Nothing" class Nothing(object): def __getattr__(self, name): return Nothing() def __call__(self, *args, **kwargs): return Nothing() def __repr__(self): return "Nothing" def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def calc_acc_kappa(recall, fpr, num_pos, num_neg): num_all = num_neg + num_pos tp = np.round(recall * num_pos).astype(np.int) fn = num_pos - tp fp = np.round(fpr * num_neg).astype(np.int) tn = num_neg - fp acc = (tp + tn) / num_all pexp = num_pos / num_all * (tp + fp) / num_all + num_neg / num_all * (tn + fn) / num_all kappa = (acc - pexp) / (1 - pexp) return acc, kappa def all_metrics(y_true, y_score): if len(y_true) <= 1 or (y_true[0] == y_true).all(): df = pd.DataFrame({"roc_auc_score": [np.nan], "auc_pr": [np.nan], "avg_prec_score": [np.nan], "f1_max": [np.nan], "p_f1_max": [np.nan], "kappa": [np.nan], "kappa_max": [np.nan], "p_kappa_max": [np.nan], "bceloss": [np.nan]}) return df fpr, tpr, tpr_thresholds = sklearn.metrics.roc_curve(y_true=y_true, y_score=y_score) roc_auc_score = sklearn.metrics.auc(x=fpr, y=tpr) precision, recall, pr_thresholds = sklearn.metrics.precision_recall_curve(y_true = y_true, probas_pred = y_score) bceloss = F.binary_cross_entropy_with_logits( input = torch.FloatTensor(y_score), target = torch.FloatTensor(y_true), reduction="none").mean().item() F1_score = np.zeros(len(precision)) mask = precision > 0 F1_score[mask] = 2 * (precision[mask] * recall[mask]) / (precision[mask] + recall[mask]) f1_max_idx = F1_score.argmax() f1_max = F1_score[f1_max_idx] p_f1_max = scipy.special.expit(pr_thresholds[f1_max_idx]) auc_pr = sklearn.metrics.auc(x = recall, y = precision) avg_prec_score = sklearn.metrics.average_precision_score( y_true = y_true, y_score = y_score) y_classes = np.where(y_score >= 0.0, 1, 0) acc, kappas = calc_acc_kappa(recall=tpr, fpr=fpr, num_pos=(y_true==1).sum(), num_neg=(y_true==0).sum()) kappa_max_idx = kappas.argmax() kappa_max = kappas[kappa_max_idx] p_kappa_max = scipy.special.expit(tpr_thresholds[kappa_max_idx]) kappa = sklearn.metrics.cohen_kappa_score(y_true, y_classes) df = pd.DataFrame({"roc_auc_score": [roc_auc_score], "auc_pr": [auc_pr], "avg_prec_score": [avg_prec_score], "f1_max": [f1_max], "p_f1_max": [p_f1_max], "kappa": [kappa], "kappa_max": [kappa_max], "p_kappa_max": p_kappa_max, "bceloss": bceloss}) return df def compute_corr(x, y): if len(y) <= 1: return np.nan ystd = y.std() xstd = x.std() if ystd == 0 or xstd == 0: return np.nan return np.dot((x - x.mean()), (y - y.mean())) / len(y) / y.std() / x.std() def all_metrics_regr(y_true, y_score, y_censor=None): if len(y_true) <= 1: df = pd.DataFrame({"rmse": [np.nan], "rmse_uncen": [np.nan], "rsquared": [np.nan], "corrcoef": [np.nan]}) return df censor0 = y_censor == 0 if y_censor is not None else slice(None) mse_cen = censored_mse_loss_numpy(target=y_true, input=y_score, censor=y_censor).mean() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) mse = ((y_true[censor0] - y_score[censor0])**2).mean() 
yvar = y_true[censor0].var() if yvar == 0 or np.isnan(yvar): rsquared = np.nan corr = np.nan else: rsquared = 1 - mse / yvar corr = compute_corr(y_true[censor0], y_score[censor0]) df = pd.DataFrame({ "rmse": [np.sqrt(mse_cen)], "rmse_uncen": [np.sqrt(mse)], "rsquared": [rsquared], "corrcoef": [corr], }) return df def compute_metrics(cols, y_true, y_score, num_tasks): if len(cols) < 1: return pd.DataFrame({ "roc_auc_score": np.nan, "auc_pr": np.nan, "avg_prec_score": np.nan, "f1_max": np.nan, "p_f1_max": np.nan, "kappa": np.nan, "kappa_max": np.nan, "p_kappa_max": np.nan, "bceloss": np.nan}, index=np.arange(num_tasks)) df = pd.DataFrame({"task": cols, "y_true": y_true, "y_score": y_score}) metrics = df.groupby("task", sort=True).apply(lambda g: all_metrics( y_true = g.y_true.values, y_score = g.y_score.values)) metrics.reset_index(level=-1, drop=True, inplace=True) return metrics.reindex(np.arange(num_tasks)) def compute_metrics_regr(cols, y_true, y_score, num_tasks, y_censor=None): if len(cols) < 1: return pd.DataFrame({ "rmse": np.nan, "rmse_uncen": np.nan, "rsquared": np.nan, "corrcoef": np.nan, }, index=np.arange(num_tasks)) df = pd.DataFrame({ "task": cols, "y_true": y_true, "y_score": y_score, "y_censor": y_censor, }) metrics = df.groupby("task", sort=True).apply(lambda g: all_metrics_regr( y_true = g.y_true.values, y_score = g.y_score.values, y_censor = g.y_censor.values if y_censor is not None else None)) metrics.reset_index(level=-1, drop=True, inplace=True) return metrics.reindex(np.arange(num_tasks)) def class_fold_counts(y_class, folding): folds = np.unique(folding) num_pos = [] num_neg = [] for fold in folds: yf = y_class[folding == fold] num_pos.append( np.array((yf == +1).sum(0)).flatten() ) num_neg.append( np.array((yf == -1).sum(0)).flatten() ) return np.row_stack(num_pos), np.row_stack(num_neg) def print_metrics(epoch, train_time, metrics_tr, metrics_va, header): if metrics_tr is None: if header: print("Epoch\tlogl_va | auc_va | aucpr_va | maxf1_va | tr_time") output_fstr = ( f"{epoch}.\t{metrics_va['logloss']:.5f}" f" | {metrics_va['roc_auc_score']:.5f}" f" | {metrics_va['auc_pr']:.5f}" f" | {metrics_va['f1_max']:.5f}" f" | {train_time:6.1f}" ) print(output_fstr) return if header: print("Epoch\tlogl_tr logl_va | auc_tr auc_va | aucpr_tr aucpr_va | maxf1_tr maxf1_va | tr_time") output_fstr = ( f"{epoch}.\t{metrics_tr['logloss']:.5f} {metrics_va['logloss']:.5f}" f" | {metrics_tr['roc_auc_score']:.5f} {metrics_va['roc_auc_score']:.5f}" f" | {metrics_tr['auc_pr']:.5f} {metrics_va['auc_pr']:.5f}" f" | {metrics_tr['f1_max']:.5f} {metrics_va['f1_max']:.5f}" f" | {train_time:6.1f}" ) print(output_fstr) def print_table(formats, data): for key, fmt in formats.items(): print(fmt.format(data[key]), end="") Column = namedtuple("Column", "key size dec title") columns_cr = [ Column("epoch", size=6, dec= 0, title="Epoch"), Column(None, size=1, dec=-1, title="|"), Column("logloss", size=8, dec= 5, title="logl"), Column("bceloss", size=8, dec= 5, title="bceloss"), Column("roc_auc_score", size=8, dec= 5, title="aucroc"), Column("auc_pr", size=8, dec= 5, title="aucpr"), Column("f1_max", size=8, dec= 5, title="f1_max"), Column(None, size=1, dec=-1, title="|"), Column("rmse", size=9, dec= 5, title="rmse"), Column("rsquared", size=9, dec= 5, title="rsquared"), Column("corrcoef", size=9, dec= 5, title="corrcoef"), Column(None, size=1, dec=-1, title="|"), Column("train_time", size=6, dec= 1, title="tr_time"), ] def print_cell(value, size, dec, left, end=" "): align = "<" if left else ">" if 
type(value) == str: print(("{:" + align + str(size) + "}").format(value), end=end) else: print(("{:" + align + str(size) + "." + str(dec) + "f}").format(value), end=end) def print_metrics_cr(epoch, train_time, results_tr, results_va, header): data = pd.concat([results_va["classification_agg"], results_va["regression_agg"]]) data["train_time"] = train_time data["epoch"] = epoch if header: for i, col in enumerate(columns_cr): print_cell(col.title, col.size, dec=0, left=(i==0)) print() for i, col in enumerate(columns_cr): print_cell(data.get(col.key, col.title), col.size, dec=col.dec, left=(i==0)) print() def evaluate_binary(net, loader, loss, dev, progress=True): net.eval() logloss_sum = 0.0 logloss_count = 0 y_ind_list = [] y_true_list = [] y_hat_list = [] num_tasks = loader.dataset.y.shape[1] with torch.no_grad(): for b in tqdm(loader, leave=False, disable=(progress == False)): X = torch.sparse_coo_tensor( b["x_ind"], b["x_data"], size = [b["batch_size"], loader.dataset.input_size]).to(dev) y_ind = b["y_ind"].to(dev) y_data = b["y_data"].to(dev) y_hat_all = net(X) y_hat = y_hat_all[y_ind[0], y_ind[1]] output = loss(y_hat, y_data).sum() logloss_sum += output logloss_count += y_data.shape[0] y_ind_list.append(b["y_ind"]) y_true_list.append(b["y_data"]) y_hat_list.append(y_hat.cpu()) if len(y_ind_list) == 0: return { "metrics": compute_metrics([], y_true=[], y_score=[], num_tasks=num_tasks), "logloss": np.nan, } y_ind = torch.cat(y_ind_list, dim=1).numpy() y_true = torch.cat(y_true_list, dim=0).numpy() y_hat = torch.cat(y_hat_list, dim=0).numpy() metrics = compute_metrics(y_ind[1], y_true=y_true, y_score=y_hat, num_tasks=num_tasks) return { 'metrics': metrics, 'logloss': logloss_sum.cpu().numpy() / logloss_count } def train_binary(net, optimizer, loader, loss, dev, task_weights, normalize_loss=None, num_int_batches=1, progress=True): net.train() logloss_sum = 0.0 logloss_count = 0 int_count = 0 for b in tqdm(loader, leave=False, disable=(progress == False)): if int_count == 0: optimizer.zero_grad() X = torch.sparse_coo_tensor( b["x_ind"], b["x_data"], size = [b["batch_size"], loader.dataset.input_size]).to(dev) y_ind = b["y_ind"].to(dev) y_w = task_weights[y_ind[1]] y_data = b["y_data"].to(dev) yhat_all = net(X) yhat = yhat_all[y_ind[0], y_ind[1]] norm = normalize_loss if norm is None: norm = b["batch_size"] * num_int_batches output = (loss(yhat, y_data) * y_w).sum() output_n = output / norm output_n.backward() int_count += 1 if int_count == num_int_batches: optimizer.step() int_count = 0 logloss_sum += output.detach() / y_data.shape[0] logloss_count += 1 if int_count > 0: optimizer.step() return logloss_sum / logloss_count def batch_forward(net, b, input_size, loss_class, loss_regr, weights_class, weights_regr, censored_weight=[], dev="cpu"): X = torch.sparse_coo_tensor( b["x_ind"], b["x_data"], size = [b["batch_size"], input_size]).to(dev, non_blocking=True) yc_hat_all, yr_hat_all = net(X) out = {} out["yc_hat_all"] = yc_hat_all out["yr_hat_all"] = yr_hat_all out["yc_loss"] = 0 out["yr_loss"] = 0 out["yc_weights"] = 0 out["yr_weights"] = 0 if net.class_output_size > 0: yc_ind = b["yc_ind"].to(dev, non_blocking=True) yc_w = weights_class[yc_ind[1]] yc_data = b["yc_data"].to(dev, non_blocking=True) yc_hat = yc_hat_all[yc_ind[0], yc_ind[1]] out["yc_ind"] = yc_ind out["yc_data"] = yc_data out["yc_hat"] = yc_hat out["yc_loss"] = (loss_class(yc_hat, yc_data) * yc_w).sum() out["yc_weights"] = yc_w.sum() if net.regr_output_size > 0: yr_ind = b["yr_ind"].to(dev, non_blocking=True) yr_w = 
weights_regr[yr_ind[1]] yr_data = b["yr_data"].to(dev, non_blocking=True) yr_hat = yr_hat_all[yr_ind[0], yr_ind[1]] out["ycen_data"] = b["ycen_data"] if out["ycen_data"] is not None: out["ycen_data"] = out["ycen_data"].to(dev, non_blocking=True) if len(censored_weight) > 0: yrcen_w = yr_w * censored_weight[yr_ind[1]] yr_w = torch.where(out["ycen_data"] == 0, yr_w, yrcen_w) out["yr_ind"] = yr_ind out["yr_data"] = yr_data out["yr_hat"] = yr_hat out["yr_loss"] = (loss_regr(input=yr_hat, target=yr_data, censor=out["ycen_data"]) * yr_w).sum() out["yr_weights"] = yr_w.sum() return out def train_class_regr(net, optimizer, loader, loss_class, loss_regr, dev, weights_class, weights_regr, censored_weight, normalize_loss=None, num_int_batches=1, progress=True): net.train() int_count = 0 for b in tqdm(loader, leave=False, disable=(progress == False)): if int_count == 0: optimizer.zero_grad() norm = normalize_loss if norm is None: norm = b["batch_size"] * num_int_batches fwd = batch_forward(net, b=b, input_size=loader.dataset.input_size, loss_class=loss_class, loss_regr=loss_regr, weights_class=weights_class, weights_regr=weights_regr, censored_weight=censored_weight, dev=dev) loss = fwd["yc_loss"] + fwd["yr_loss"] loss_norm = loss / norm loss_norm.backward() int_count += 1 if int_count == num_int_batches: optimizer.step() int_count = 0 if int_count > 0: optimizer.step() def aggregate_results(df, weights): wsum = weights.sum() if wsum == 0: return pd.Series(np.nan, index=df.columns) df2 = df.where(pd.isnull, 1) * weights[:,None] return (df2.multiply(1.0 / df2.sum(axis=0), axis=1) * df).sum(axis=0) def evaluate_class_regr(net, loader, loss_class, loss_regr, tasks_class, tasks_regr, dev, progress=True): class_w = tasks_class.aggregation_weight regr_w = tasks_regr.aggregation_weight net.eval() loss_class_sum = 0.0 loss_regr_sum = 0.0 loss_class_weights = 0.0 loss_regr_weights = 0.0 data = { "yc_ind": [], "yc_data": [], "yc_hat": [], "yr_ind": [], "yr_data": [], "yr_hat": [], "ycen_data": [], } num_class_tasks = loader.dataset.class_output_size num_regr_tasks = loader.dataset.regr_output_size with torch.no_grad(): for b in tqdm(loader, leave=False, disable=(progress == False)): fwd = batch_forward(net, b=b, input_size=loader.dataset.input_size, loss_class=loss_class, loss_regr=loss_regr, weights_class=tasks_class.training_weight, weights_regr=tasks_regr.training_weight, dev=dev) loss_class_sum += fwd["yc_loss"] loss_regr_sum += fwd["yr_loss"] loss_class_weights += fwd["yc_weights"] loss_regr_weights += fwd["yr_weights"] for key in data.keys(): if (key in fwd) and (fwd[key] is not None): data[key].append(fwd[key].cpu()) out = {} if len(data["yc_ind"]) == 0: out["classification"] = compute_metrics([], y_true=[], y_score=[], num_tasks=num_class_tasks) out["classification_agg"] = out["classification"].reindex(labels=[]).mean(0) out["classification_agg"]["logloss"] = np.nan else: yc_ind = torch.cat(data["yc_ind"], dim=1).numpy() yc_data = torch.cat(data["yc_data"], dim=0).numpy() yc_hat = torch.cat(data["yc_hat"], dim=0).numpy() out["classification"] = compute_metrics(yc_ind[1], y_true=yc_data, y_score=yc_hat, num_tasks=num_class_tasks) out["classification_agg"] = aggregate_results(out["classification"], weights=class_w) out["classification_agg"]["logloss"] = loss_class_sum.cpu().item() / loss_class_weights.cpu().item() if len(data["yr_ind"]) == 0: out["regression"] = compute_metrics_regr([], y_true=[], y_score=[], num_tasks=num_regr_tasks) out["regression_agg"] = out["regression"].reindex(labels=[]).mean(0) 
out["regression_agg"]["mseloss"] = np.nan else: yr_ind = torch.cat(data["yr_ind"], dim=1).numpy() yr_data = torch.cat(data["yr_data"], dim=0).numpy() yr_hat = torch.cat(data["yr_hat"], dim=0).numpy() if len(data["ycen_data"]) > 0: ycen_data = torch.cat(data["ycen_data"], dim=0).numpy() else: ycen_data = None out["regression"] = compute_metrics_regr(yr_ind[1], y_true=yr_data, y_score=yr_hat, y_censor=ycen_data, num_tasks=num_regr_tasks) out["regression"]["aggregation_weight"] = regr_w out["regression_agg"] = aggregate_results(out["regression"], weights=regr_w) out["regression_agg"]["mseloss"] = loss_regr_sum.cpu().item() / loss_regr_weights.cpu().item() out["classification_agg"]["num_tasks_total"] = loader.dataset.class_output_size out["classification_agg"]["num_tasks_agg"] = (tasks_class.aggregation_weight > 0).sum() out["regression_agg"]["num_tasks_total"] = loader.dataset.regr_output_size out["regression_agg"]["num_tasks_agg"] = (tasks_regr.aggregation_weight > 0).sum() return out def enable_dropout(m): if type(m) == torch.nn.Dropout: m.train() def predict(net, loader, dev, progress=True, dropout=False): net.eval() if dropout: net.apply(enable_dropout) y_class_list = [] y_regr_list = [] with torch.no_grad(): for b in tqdm(loader, leave=False, disable=(progress == False)): X = torch.sparse_coo_tensor( b["x_ind"], b["x_data"], size = [b["batch_size"], loader.dataset.input_size]).to(dev) y_class, y_regr = net(X) y_class_list.append(torch.sigmoid(y_class).cpu()) y_regr_list.append(y_regr.cpu()) y_class = torch.cat(y_class_list, dim=0) y_regr = torch.cat(y_regr_list, dim=0) return y_class.numpy(), y_regr.numpy() def predict_hidden(net, loader, dev, progress=True, dropout=False): net.eval() if dropout: net.apply(enable_dropout) out_list = [] with torch.no_grad(): for b in tqdm(loader, leave=False, disable=(progress == False)): X = torch.sparse_coo_tensor( b["x_ind"], b["x_data"], size = [b["batch_size"], loader.dataset.input_size]).to(dev) out_list.append( net(X, last_hidden=True).cpu() ) return torch.cat(out_list, dim=0) class SparseCollector(object): def __init__(self, label): self.y_hat_list = [] self.y_row_list = [] self.y_col_list = [] self.label = label self.row_count = 0 def append(self, batch, y_all): dev = y_all.device if self.label not in batch: return y_ind = batch[self.label].to(dev) y_hat = y_all[y_ind[0], y_ind[1]] self.y_hat_list.append(y_hat.cpu()) self.y_row_list.append(batch[self.label][0] + self.row_count) self.y_col_list.append(batch[self.label][1]) self.row_count += batch["batch_size"] def tocsr(self, shape, sigmoid): if len(self.y_row_list) == 0: return csr_matrix(shape, dtype=np.float32) y_hat = torch.cat(self.y_hat_list, dim=0) if sigmoid: y_hat = torch.sigmoid(y_hat) y_row = torch.cat(self.y_row_list, dim=0).numpy() y_col = torch.cat(self.y_col_list, dim=0).numpy() return csr_matrix((y_hat.numpy(), (y_row, y_col)), shape=shape)
MIT License
ucsbarchlab/pyrtl
pyrtl/compilesim.py
CompiledSimulation.inspect_mem
python
def inspect_mem(self, mem): return DllMemInspector(self, mem)
Get a view into the contents of a MemBlock.
https://github.com/ucsbarchlab/pyrtl/blob/8b42f566a3c2c23de21f1b534900232219a3b313/pyrtl/compilesim.py#L127-L129
from __future__ import print_function, unicode_literals import ctypes import subprocess import tempfile import shutil import collections from os import path import platform import sys import _ctypes from .core import working_block from .wire import Input, Output, Const, WireVector, Register from .memory import MemBlock, RomBlock from .pyrtlexceptions import PyrtlError, PyrtlInternalError from .simulation import SimulationTrace, _trace_sort_key __all__ = ['CompiledSimulation'] class DllMemInspector(collections.Mapping): def __init__(self, sim, mem): self._aw = mem.addrwidth self._limbs = sim._limbs(mem) self._vn = vn = sim.varname[mem] self._mem = ctypes.c_void_p.in_dll(sim._dll, vn) self._sim = sim def __getitem__(self, ind): arr = self._sim._mem_lookup(self._mem, ind) val = 0 for n in reversed(range(self._limbs)): val <<= 64 val |= arr[n] return val def __iter__(self): return iter(range(len(self))) def __len__(self): return 1 << self._aw def __eq__(self, other): if isinstance(other, DllMemInspector): if self._sim is other._sim and self._vn == other._vn: return True return all(self[x] == other.get(x, 0) for x in self) class CompiledSimulation(object): def __init__( self, tracer=True, register_value_map={}, memory_value_map={}, default_value=0, block=None): self._dll = self._dir = None self.block = working_block(block) self.block.sanity_check() if tracer is True: tracer = SimulationTrace() self.tracer = tracer self._remove_untraceable() self.default_value = default_value self._regmap = {} self._memmap = memory_value_map self._uid_counter = 0 self.varname = {} for r in self.block.wirevector_subset(Register): rval = register_value_map.get(r, r.reset_value) if rval is None: rval = self.default_value self._regmap[r] = rval self.tracer._set_initial_values(default_value, self._regmap, self._memmap) self._create_dll() self._initialize_mems()
BSD 3-Clause New or Revised License
romeodespres/reapy
reapy/core/project/region.py
Region.remove_rendered_track
python
def remove_rendered_track(self, track): RPR.SetRegionRenderMatrix(self.project_id, self.index, track.id, -1)
Remove a track from the render matrix for this region. Parameters ---------- track : Track Track to remove. See also -------- Region.add_rendered_tracks Region.remove_rendered_track Region.remove_rendered_tracks Efficiently remove several tracks from the render matrix.
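A hedged usage sketch; it assumes a running REAPER instance whose current project already has at least one region and one track, and the project/track lookups are assumptions about the surrounding reapy API rather than part of this method:

    import reapy
    project = reapy.Project()       # current project (assumed default constructor)
    region = project.regions[0]
    track = project.tracks[0]
    region.remove_rendered_track(track)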
https://github.com/romeodespres/reapy/blob/730627cee6f39fc26d6ebc8a3df0112e5921cd9f/reapy/core/project/region.py#L106-L122
import reapy from reapy import reascript_api as RPR from reapy.core import ReapyObject class Region(ReapyObject): _class_name = "Region" def __init__( self, parent_project=None, index=None, parent_project_id=None ): if parent_project_id is None: message = ( "One of `parent_project` or `parent_project_id` must be " "specified." ) assert parent_project is not None, message parent_project_id = parent_project.id self.project_id = parent_project_id self.index = index @reapy.inside_reaper() def _get_enum_index(self): return next( i for i, r in enumerate(reapy.Project(self.project_id).regions) if r.index == self.index ) @property def _kwargs(self): return { "index": self.index, "parent_project_id": self.project_id } def add_rendered_track(self, track): RPR.SetRegionRenderMatrix(self.project_id, self.index, track.id, 1) @reapy.inside_reaper() def add_rendered_tracks(self, tracks): for track in tracks: self.add_rendered_track(track) @reapy.inside_reaper() @property def end(self): index = self._get_enum_index() args = self.project_id, index, 0, 0, 0, 0, 0 return RPR.EnumProjectMarkers2(*args)[5] @end.setter def end(self, end): args = self.project_id, self.index, True, self.start, end, "" RPR.SetProjectMarker2(*args) def delete(self): RPR.DeleteProjectMarker(self.project_id, self.index, True)
MIT License
cupy/cupy
cupy/array_api/_creation_functions.py
eye
python
def eye( n_rows: int, n_cols: Optional[int] = None, /, *, k: int = 0, dtype: Optional[Dtype] = None, device: Optional[Device] = None, ) -> Array: from ._array_object import Array _check_valid_dtype(dtype) if device is not None and not isinstance(device, _Device): raise ValueError(f"Unsupported device {device!r}") if device is None: device = _Device() with device: return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype))
Array API compatible wrapper for :py:func:`np.eye <numpy.eye>`. See its docstring for more information.
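A short usage sketch of the wrapper; the cupy.array_api namespace was experimental and requires a CUDA device, so treat the import path as an assumption:

    from cupy import array_api as xp
    a = xp.eye(3, 4, k=1)   # 3x4 array with ones on the first superdiagonal
    print(a.shape)          # (3, 4)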
https://github.com/cupy/cupy/blob/a466b03ef0afd7c1ce1615e3f48da64ae38c1320/cupy/array_api/_creation_functions.py#L147-L169
from __future__ import annotations from typing import TYPE_CHECKING, List, Optional, Tuple, Union if TYPE_CHECKING: from ._typing import ( Array, Device, Dtype, NestedSequence, SupportsDLPack, SupportsBufferProtocol, ) from collections.abc import Sequence from ._dtypes import _all_dtypes import cupy as np from cupy.cuda import Device as _Device def _check_valid_dtype(dtype): for d in (None,) + _all_dtypes: if dtype is d: return raise ValueError("dtype must be one of the supported dtypes") def asarray( obj: Union[ Array, bool, int, float, NestedSequence[bool | int | float], SupportsDLPack, SupportsBufferProtocol, ], /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, copy: Optional[bool] = None, ) -> Array: from ._array_object import Array _check_valid_dtype(dtype) if device is not None and not isinstance(device, _Device): raise ValueError(f"Unsupported device {device!r}") if device is None: device = _Device() if copy is False: raise NotImplementedError("copy=False is not yet implemented") if isinstance(obj, Array) and (dtype is None or obj.dtype == dtype): if copy is True: return Array._new(np.array(obj._array, copy=True, dtype=dtype)) return obj if dtype is None and isinstance(obj, int) and (obj > 2 ** 64 or obj < -(2 ** 63)): raise OverflowError("Integer out of bounds for array dtypes") with device: res = np.asarray(obj, dtype=dtype) return Array._new(res) def arange( start: Union[int, float], /, stop: Optional[Union[int, float]] = None, step: Union[int, float] = 1, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, ) -> Array: from ._array_object import Array _check_valid_dtype(dtype) if device is not None and not isinstance(device, _Device): raise ValueError(f"Unsupported device {device!r}") if device is None: device = _Device() with device: return Array._new(np.arange(start, stop=stop, step=step, dtype=dtype)) def empty( shape: Union[int, Tuple[int, ...]], *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, ) -> Array: from ._array_object import Array _check_valid_dtype(dtype) if device is not None and not isinstance(device, _Device): raise ValueError(f"Unsupported device {device!r}") if device is None: device = _Device() with device: return Array._new(np.empty(shape, dtype=dtype)) def empty_like( x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None ) -> Array: from ._array_object import Array _check_valid_dtype(dtype) if device is not None and not isinstance(device, _Device): raise ValueError(f"Unsupported device {device!r}") if device is None: device = _Device() with device: return Array._new(np.empty_like(x._array, dtype=dtype))
MIT License
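A short usage sketch for the wrapper above; the `cupy.array_api` import path follows the file location in the URL, and a CUDA-capable CuPy installation is assumed.

import cupy.array_api as xp     # experimental namespace; may warn on import

identity = xp.eye(3)            # 3x3 identity on the current CUDA device
banded = xp.eye(3, 4, k=1)      # ones on the first superdiagonal of a 3x4 array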
google/macops
can_haz_image/can_haz_image.py
CanHazImage.WriteCatalog
python
def WriteCatalog(self):
    try:
      f = open(self.new_catalogfile, 'w')
      print 'Writing %s to disk...' % self.new_catalogfile
      for line in self.newcatalog:
        if line:
          f.write('%s\n' % line)
      f.close()
    except IOError, e:
      print ('Writing new catalog failed! Check file permissions in your '
             'build directory!')
      print 'Error: %s' % e
      raise SystemExit
Writes the catalog file to disk.
https://github.com/google/macops/blob/8442745359c0c941cd4e4e7d243e43bd16b40dec/can_haz_image/can_haz_image.py#L128-L141
import datetime import hashlib from optparse import OptionParser import os import re import shutil import subprocess import sys import urllib2 BUILD = 'build/can_haz_image' TMPDIR = '/tmp/pkgs' TMPINDEX = os.path.join(TMPDIR, 'tmp_index.html') FREE_SPACE = 30000 def RunProcess(cmd, stream_out=False): if stream_out: task = subprocess.Popen(cmd) else: task = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = task.communicate() return stdout, stderr, task.returncode class CanHazImage(object): def __init__(self, location, webserver, pkgsource): self.cwd = os.getcwd() self.missing = [] self.pkgsource = pkgsource self.webserver = webserver self.catalog = '' self.thirdparty = '' self.changes = False self.thirdparty_location = location self.image_creation_time = datetime.datetime.now().strftime('%Y%m%d%H%M') self.os_version = RunProcess(['sw_vers'])[0].split('\t')[2][:4] self.installer_choices = '%s_InstallerChoices.xml' % self.os_version self.newimagepath = '' def CreateCatalogNames(self, catalog_name): self.new_catalogfile = '%s%s_new.catalog' % (catalog_name, self.os_version) self.old_catalogfile = '%s%s_old.catalog' % (catalog_name, self.os_version) def NewCatalog(self, catalog_name): self.CreateCatalogNames(catalog_name) print ('Generating new %s catalog - this may take a while. (up to 10 mins)' % catalog_name) self.newcatalog = [] if catalog_name == 'base': pkgsrc = os.path.join(self.pkgsource, self.os_version, 'base') elif catalog_name == 'thirdparty': if self.thirdparty_location == '': pkgsrc = os.path.join(self.pkgsource, self.os_version) self.thirdparty_location = pkgsrc else: pkgsrc = self.thirdparty_location else: print 'Unknown catalog name: %s' % catalog_name raise SystemExit linegen = LineGenerator(pkgsrc) print 'Generating catalog for %s' % pkgsrc print 'Generating catalog checksums and lines...' linegen.GenAllLines() for line in linegen.catalog_lines: last_pkg = line.split()[1] old_pkg = '' old_line = '' pkgname = '' for line in linegen.catalog_lines: pkg = line.split()[1] try: date = re.findall('-([0-9]+).dmg', pkg) if date: if len(date[0]) == 8 or len(date[0]) == 12: pkgname = os.path.basename(line.split(date[0])[0]) if old_pkg != pkgname: self.newcatalog.append(old_line) if pkg == last_pkg: self.newcatalog.append(line) old_pkg = pkgname except AttributeError: pass if catalog_name == 'base' and pkg != last_pkg: self.newcatalog.append(line) old_line = line self.RenameCatalog(catalog_name) self.WriteCatalog() def RenameCatalog(self, catalog_name): new_catalog = '%s%s_new.catalog' % (catalog_name, self.os_version) old_catalog = '%s%s_old.catalog' % (catalog_name, self.os_version) if os.path.exists(new_catalog): try: os.rename(new_catalog, old_catalog) print '%s renamed' % new_catalog except OSError, e: print ('Could not rename catalog! Check file permissions in your ' 'build directory!') print 'Error: %s' % e raise SystemExit
Apache License 2.0
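A rough Python 2 sketch of driving WriteCatalog directly; the constructor arguments and catalog lines are placeholders, since in real use they come from scanning a package source tree on a Mac build host.

chi = CanHazImage('/tmp/thirdparty', 'http://pkgserver.example.com', '/tmp/pkgs')
chi.CreateCatalogNames('base')        # sets chi.new_catalogfile / chi.old_catalogfile
chi.newcatalog = ['<sha1>  base-pkg-20140101.dmg',
                  '<sha1>  other-pkg-20140102.dmg']
chi.WriteCatalog()                    # writes base<os_version>_new.catalog to disk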
aristoteleo/dynamo-release
dynamo/prediction/perturbation.py
rank_perturbation_cell_clusters
python
def rank_perturbation_cell_clusters(adata, pkey="j_delta_x_perturbation", prefix_store="rank", **kwargs):
    rdict = rank_cell_groups(adata, pkey, **kwargs)
    rdict_abs = rank_cell_groups(adata, pkey, abs=True, **kwargs)
    adata.uns[prefix_store + "_" + pkey + "_cell_groups"] = rdict
    adata.uns[prefix_store + "_abs_" + pkey + "_cells_groups"] = rdict_abs
    return adata
Rank cells based on their raw and absolute perturbation for each cell group. Parameters ---------- adata: :class:`~anndata.AnnData` AnnData object that contains the gene-wise velocities. pkey: str (default: 'perturbation_vector') The perturbation key. prefix_store: str (default: 'rank') The prefix added to the key for storing the returned in adata. kwargs: Keyword arguments passed to `vf.rank_cells`. Returns ------- adata: :class:`~anndata.AnnData` AnnData object which has the rank dictionary for perturbation effects in `.uns`.
https://github.com/aristoteleo/dynamo-release/blob/4e201df59e4d69ac642bc9de5a853cd038da891f/dynamo/prediction/perturbation.py#L443-L465
import numpy as np from scipy.sparse import csr_matrix import anndata from typing import Union, Callable from ..tools.cell_velocities import cell_velocities from .utils import ( expr_to_pca, pca_to_expr, z_score, z_score_inv, ) from ..vectorfield.vector_calculus import ( rank_genes, rank_cells, rank_cell_groups, vecfld_from_adata, jacobian, vector_transformation, ) from ..vectorfield.scVectorField import vector_field_function_knockout from ..vectorfield import SvcVectorField from ..dynamo_logger import LoggerManager def KO( adata: anndata.AnnData, KO_genes: Union[str, list], vecfld: Union[None, Callable] = None, vf_key: str = "VecFld", basis: str = "pca", emb_basis: str = "umap", velocity_ko_wt_difference: bool = False, add_ko_basis_key: Union[str, None] = None, add_embedding_key: Union[str, None] = None, store_vf_ko: bool = False, add_vf_ko_key: Union[str, None] = None, return_vector_field_class: bool = True, ): logger = LoggerManager.gen_logger("dynamo-KO") if basis != "pca": logger.error("Currently we can only perturb (KO) PCA space based vector field function.") raise ValueError() if vecfld is None: vf = SvcVectorField() vf.from_adata(adata, basis=basis, vf_key=vf_key) else: vf = vecfld logger.info(f"In silico knockout {KO_genes}") KO_genes = [KO_genes] if type(KO_genes) is str else KO_genes vf_ko = vector_field_function_knockout(adata, vf, KO_genes) if add_ko_basis_key is None: x_basis_key, v_basis_key = "X_" + basis + "_KO", "velocity_" + basis + "_KO" else: if not add_ko_basis_key.startswith("velocity_"): raise ValueError(f"add_ko_basis_key {add_ko_basis_key} must starts with `velocity_`") x_basis_key, v_basis_key = "X_" + add_ko_basis_key.split("velocity_")[1], add_ko_basis_key if add_embedding_key is None: x_emb_key, v_emb_key = "X_" + emb_basis + "_KO", "velocity_" + emb_basis + "_KO" else: if not add_embedding_key.startswith("velocity_"): raise ValueError(f"add_embedding_key {add_embedding_key} must starts with `velocity_`") x_emb_key, v_emb_key = "X_" + add_embedding_key.split("velocity_")[1], add_embedding_key logger.info_insert_adata(x_basis_key, "obsm") adata.obsm[x_basis_key] = adata.obsm["X_" + basis].copy() logger.info_insert_adata(v_basis_key, "obsm") adata.obsm[v_basis_key] = vf_ko.get_V() logger.info_insert_adata(x_emb_key, "obsm") adata.obsm[x_emb_key] = adata.obsm["X_" + emb_basis].copy() logger.info(f"Project the high dimensional vector field after KO to {emb_basis}.") cell_velocities( adata, X=adata.obsm["X_" + basis], V=adata.obsm["velocity_" + basis + "_KO"], basis=emb_basis + "_KO", enforce=True, add_velocity_key=v_emb_key, ) if velocity_ko_wt_difference: adata.obsm[v_emb_key] -= adata.obsm["velocity_" + emb_basis] if store_vf_ko: if add_vf_ko_key is None: add_vf_ko_key = "vf_KO" logger.info_insert_adata(add_vf_ko_key, "uns") adata.uns[add_vf_ko_key] = vf_ko if return_vector_field_class: return vf_ko def perturbation( adata: anndata.AnnData, genes: Union[str, list], expression: Union[float, list] = 10, perturb_mode: str = "raw", cells: Union[list, np.ndarray, None] = None, zero_perturb_genes_vel: bool = False, pca_key: Union[str, np.ndarray, None] = None, PCs_key: Union[str, np.ndarray, None] = None, pca_mean_key: Union[str, np.ndarray, None] = None, basis: str = "pca", emb_basis: str = "umap", jac_key: str = "jacobian_pca", X_pca: Union[np.ndarray, None] = None, delta_Y: Union[np.ndarray, None] = None, projection_method: str = "fp", pertubation_method: str = "j_delta_x", J_jv_delta_t: float = 1, delta_t: float = 1, add_delta_Y_key: str = None, 
add_transition_key: str = None, add_velocity_key: str = None, add_embedding_key: str = None, ): if pertubation_method.lower() not in ["j_delta_x", "j_x_prime", "j_jv", "f_x_prime", "f_x_prime_minus_f_x_0"]: raise ValueError( f"your method is set to be {pertubation_method.lower()} but must be one of `j_delta_x`, `j_x_prime`, " "`j_jv`,`f_x_prime`, `f_x_prime_minus_f_x_0`" ) logger = LoggerManager.get_main_logger() logger.info( "In silico perturbation of single-cells and prediction of cell fate after perturbation...", ) if type(genes) == str: genes = [genes] if type(expression) in [int, float]: expression = [expression] pca_genes = adata.var_names[adata.var.use_for_pca] valid_genes = pca_genes.intersection(genes) if len(valid_genes) == 0: raise ValueError("genes to perturb must be pca genes (genes used to perform the pca dimension reduction).") if len(expression) > 1: if len(expression) != len(valid_genes): raise ValueError( "if you want to set different values for different genes, you need to ensure those genes " "are included in the pca gene list and the length of those genes is the same as that of the" "expression." ) if X_pca is None: logger.info("Retrive X_pca, PCs, pca_mean...") pca_key = "X_pca" if pca_key is None else pca_key PCs_key = "PCs" if PCs_key is None else PCs_key pca_mean_key = "pca_mean" if pca_mean_key is None else pca_mean_key X_pca = adata.obsm[pca_key] if delta_Y is None: logger.info("Calculate perturbation effect matrix via \\delta Y = J \\dot \\delta X....") if type(PCs_key) == np.ndarray: PCs = PCs_key else: PCs = adata.uns[PCs_key] if type(pca_mean_key) == np.ndarray: means = pca_mean_key else: means = adata.uns[pca_mean_key] X = pca_to_expr(X_pca, PCs, means) gene_loc = [adata.var_names[adata.var.use_for_pca].get_loc(i) for i in valid_genes] X_perturb = X.copy() if cells is None: cells = np.arange(adata.n_obs) for i, gene in enumerate(gene_loc): if perturb_mode == "z_score": x = X_perturb[:, gene] _, m, s = z_score(x, 0) X_perturb[cells, gene] = z_score_inv(expression[i], m, s) elif perturb_mode == "raw": X_perturb[cells, gene] = expression[i] else: raise NotImplementedError(f"The perturbation mode {perturb_mode} is not supported.") X_perturb_pca = expr_to_pca(X_perturb, PCs, means) if jac_key not in adata.uns_keys(): jacobian(adata, regulators=valid_genes, effectors=valid_genes) Js = adata.uns[jac_key]["jacobian"] delta_Y = np.zeros_like(X_pca) if pertubation_method.lower() in ["j_delta_x", "j_x_prime", "j_jv"]: if pertubation_method.lower() == "j_delta_x": delta_X = X_perturb_pca - X_pca elif pertubation_method.lower() == "j_x_prime": delta_X = X_perturb_pca elif pertubation_method.lower() == "j_jv": tmp = X_perturb_pca - X_pca delta_X = np.zeros_like(X_pca) for i in np.arange(adata.n_obs): delta_X[i, :] = Js[:, :, i].dot(tmp[i] * J_jv_delta_t) for i in np.arange(adata.n_obs): delta_Y[i, :] = Js[:, :, i].dot(delta_X[i] * delta_t) if add_delta_Y_key is None: add_delta_Y_key = pertubation_method + "_perturbation" logger.info_insert_adata(add_delta_Y_key, "obsm", indent_level=1) if pertubation_method.lower() == "f_x_prime": _, func = vecfld_from_adata(adata, basis) vec_mat = func(X_perturb_pca) delta_Y = vec_mat elif pertubation_method.lower() == "f_x_prime_minus_f_x_0": _, func = vecfld_from_adata(adata, basis) vec_mat = func(X_perturb_pca) - func(X_pca) delta_Y = vec_mat adata.obsm[add_delta_Y_key] = delta_Y perturbation_csc = vector_transformation(delta_Y, PCs) adata.layers[add_delta_Y_key] = csr_matrix(adata.shape, dtype=np.float64) 
adata.layers[add_delta_Y_key][:, adata.var.use_for_pca] = perturbation_csc if zero_perturb_genes_vel: adata.layers[add_delta_Y_key][:, gene_loc] = 0 logger.info( "project the pca perturbation vector to low dimensional space....", ) if add_transition_key is None: transition_key = "perturbation_transition_matrix" else: transition_key = add_transition_key if add_velocity_key is None: velocity_key, embedding_key = "velocity_" + emb_basis + "_perturbation", "X_" + emb_basis + "_perturbation" else: velocity_key, embedding_key = add_velocity_key, add_embedding_key cell_velocities( adata, X=X_pca, V=delta_Y, basis=emb_basis, enforce=True, method=projection_method, add_transition_key=transition_key, add_velocity_key=velocity_key, ) logger.info_insert_adata("X_" + emb_basis + "_perturbation", "obsm", indent_level=1) logger.info( f"you can use dyn.pl.streamline_plot(adata, basis='{emb_basis}_perturbation') to visualize the " f"perturbation vector" ) adata.obsm[embedding_key] = adata.obsm["X_" + emb_basis].copy() def rank_perturbation_genes(adata, pkey="j_delta_x_perturbation", prefix_store="rank", **kwargs): rdict = rank_genes(adata, pkey, **kwargs) rdict_abs = rank_genes(adata, pkey, abs=True, **kwargs) adata.uns[prefix_store + "_" + pkey] = rdict adata.uns[prefix_store + "_abs_" + pkey] = rdict_abs return adata def rank_perturbation_cells(adata, pkey="j_delta_x_perturbation", prefix_store="rank", **kwargs): rdict = rank_cells(adata, pkey, **kwargs) rdict_abs = rank_cells(adata, pkey, abs=True, **kwargs) adata.uns[prefix_store + "_" + pkey + "_cells"] = rdict adata.uns[prefix_store + "_abs_" + pkey + "_cells"] = rdict_abs return adata
BSD 3-Clause New or Revised License
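A hedged usage sketch: it assumes `adata` already carries the perturbation layer written by the `perturbation()` function above under its default key, and that any extra keyword arguments are simply forwarded to `rank_cell_groups`. The gene name is illustrative.

perturbation(adata, genes='GATA1', expression=-100)   # writes the 'j_delta_x_perturbation' layer
adata = rank_perturbation_cell_clusters(adata)
adata.uns['rank_j_delta_x_perturbation_cell_groups']        # raw group ranking
adata.uns['rank_abs_j_delta_x_perturbation_cells_groups']   # absolute-value ranking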
ashwinrj/federated-learning-pytorch
src/sampling.py
mnist_iid
python
def mnist_iid(dataset, num_users):
    num_items = int(len(dataset)/num_users)
    dict_users, all_idxs = {}, [i for i in range(len(dataset))]
    for i in range(num_users):
        dict_users[i] = set(np.random.choice(all_idxs, num_items, replace=False))
        all_idxs = list(set(all_idxs) - dict_users[i])
    return dict_users
Sample I.I.D. client data from MNIST dataset :param dataset: :param num_users: :return: dict of image index
https://github.com/ashwinrj/federated-learning-pytorch/blob/26eaec40fa8beb56777feb89756f6401c28c4736/src/sampling.py#L10-L23
import numpy as np from torchvision import datasets, transforms
MIT License
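Minimal usage sketch with the standard torchvision MNIST training set: 60,000 images split evenly across 100 simulated clients.

from torchvision import datasets, transforms

train_set = datasets.MNIST('./data', train=True, download=True,
                           transform=transforms.ToTensor())
user_groups = mnist_iid(train_set, num_users=100)
len(user_groups[0])     # 600 image indices assigned to client 0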
cornell-zhang/dnn-quant-ocs
distiller/quantization/range_linear.py
RangeLinearQuantWrapper.post_quantized_forward
python
def post_quantized_forward(self, accumulator):
    raise NotImplementedError
Calculate re-quantization scale factor (for converting the intermediate integer accumulator to output range), and output scale factor. :param accumulator: Tensor with accumulator values :return: Tuple of (re-quantization scale factor, output scale factor)
https://github.com/cornell-zhang/dnn-quant-ocs/blob/ca3a413c73850b4e5d7aac558f5856b44060e39c/distiller/quantization/range_linear.py#L79-L87
import torch.nn as nn from .quantizer import Quantizer from .q_utils import * class RangeLinearQuantWrapper(nn.Module): def __init__(self, wrapped_module, num_bits_acts, num_bits_accum=32): super(RangeLinearQuantWrapper, self).__init__() self.wrapped_module = wrapped_module self.num_bits_acts = num_bits_acts self.acts_min_q_val, self.acts_max_q_val = get_quantized_range(num_bits_acts, signed=True) self.accum_min_q_val, self.accum_max_q_val = get_quantized_range(num_bits_accum, signed=True) def forward(self, *inputs): in_scales = self.pre_quantized_forward(*inputs) inputs_q = [] for idx, input in enumerate(inputs): input_q = linear_quantize_clamp(input.data, in_scales[idx], self.acts_min_q_val, self.acts_max_q_val, inplace=False) inputs_q.append(torch.autograd.Variable(input_q)) accum = self.wrapped_module.forward(*inputs_q) clamp(accum.data, self.accum_min_q_val, self.accum_max_q_val, inplace=True) requant_scale, out_scale = self.post_quantized_forward(accum) out_q = linear_quantize_clamp(accum.data, requant_scale, self.acts_min_q_val, self.acts_max_q_val, inplace=True) out_f = linear_dequantize(out_q, out_scale, inplace=True) return torch.autograd.Variable(out_f) def pre_quantized_forward(self, *inputs): raise NotImplementedError
Apache License 2.0
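An illustrative subclass only, showing the contract the two hooks must satisfy given the wrapper's forward(): pre_quantized_forward returns one activation scale per input, and post_quantized_forward returns the (re-quantization scale, output scale) pair. The fixed scale value is invented.

class FixedScaleQuantWrapper(RangeLinearQuantWrapper):
    SCALE = 128.0   # made-up activation scale

    def pre_quantized_forward(self, *inputs):
        return [self.SCALE for _ in inputs]        # one scale per input tensor

    def post_quantized_forward(self, accumulator):
        requant_scale = 1.0 / self.SCALE           # accumulator -> quantized output range
        out_scale = self.SCALE                     # used to dequantize back to floats
        return requant_scale, out_scale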
seetaresearch/dragon
tensorlayer/core/layers/core.py
Layer._get_weights
python
def _get_weights(
    self,
    name=None,
    shape=None,
    init=initializers.glorot_uniform(),
    trainable=True,
):
    name = name if name else 'weights'
    shape = shape if shape is not None else []
    weight = init(shape=shape, trainable=trainable)
    weight._name = context.get_name_scope() + name
    if trainable is True:
        if self._trainable_weights is None:
            self._trainable_weights = []
        self._trainable_weights.append(weight)
    else:
        if self._nontrainable_weights is None:
            self._nontrainable_weights = []
        self._nontrainable_weights.append(weight)
    return weight
Add a new weight into the layer.
https://github.com/seetaresearch/dragon/blob/3dfb6ea55d90d2fb2da9b1b471f5e1e7d7667810/tensorlayer/core/layers/core.py#L200-L220
from __future__ import absolute_import from __future__ import division from __future__ import print_function from dragon.core.framework import context from dragon.core.framework import workspace from dragon.core.util import nest from dragon.core.util import string from dragon.vm.tensorlayer.core import initializers class LayerMetaclass(object): def __init__(self, name=None): self._name = name self._all_weights = None self._trainable_weights = None self._nontrainable_weights = None self._nodes_fixed = False self._training = True @property def name(self): if self._name is None: self._init_set_name() return self._name @property def nontrainable_weights(self): return self._nontrainable_weights @property def training(self): return self._training @training.setter def training(self, value): self._training = value @property def trainable_weights(self): return self._trainable_weights def forward(self, inputs, **kwargs): pass def _init_set_name(self, name=None, zero_based=True): if name is None: self._name = workspace.get_workspace().unique_name( name=self.__class__.__name__.lower(), namespace='Object', zero_based=zero_based, ) else: self._name = name def _fix_nodes(self): self._nodes_fixed = True class Layer(LayerMetaclass): def __init__(self, name=None, act=None, *args, **kwargs): super(Layer, self).__init__(name=name) self._built = False self._nodes = [] self.act = act @staticmethod def _compute_shape(tensors): if isinstance(tensors, list): shape_mem = [t.shape for t in tensors] else: shape_mem = tensors.shape return shape_mem @property def all_weights(self): if self._all_weights is None: self._all_weights = [] if self._trainable_weights is not None: self._all_weights.extend(self._trainable_weights) if self._nontrainable_weights is not None: self._all_weights.extend(self._nontrainable_weights) return self._all_weights def build(self, inputs_shape): self._built = True def _add_node(self, inputs, outputs): inputs = nest.flatten(inputs) outputs = nest.flatten(outputs) input_info = [getattr(e, '_info', [None, None]) for e in inputs] self._nodes.append( LayerNode( self, node_index=len(self._nodes), in_nodes=[e[0] for e in input_info], in_tensor_idxes=[e[1] for e in input_info], in_tensors=inputs, out_tensors=outputs, ) ) for idx, tensor in enumerate(outputs): tensor._info = (self._nodes[-1], idx) def _release_memory(self): for node in self._nodes: node.in_tensors = None node.out_tensors = None
BSD 2-Clause Simplified License
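A sketch of a custom layer using the helper above; the layer name and shapes are illustrative rather than taken from the library.

class TinyDense(Layer):
    def __init__(self, n_in, n_units, name=None):
        super(TinyDense, self).__init__(name=name)
        self.W = self._get_weights('W', shape=[n_in, n_units])            # registered as trainable
        self.b = self._get_weights('b', shape=[n_units], trainable=False) # registered as non-trainable

layer = TinyDense(32, 8)
len(layer.trainable_weights), len(layer.nontrainable_weights)   # (1, 1)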
csparpa/pyowm
pyowm/utils/timestamps.py
yesterday
python
def yesterday(hour=None, minute=None):
    now = datetime.now(timezone.utc)
    if hour is None:
        hour = now.hour
    if minute is None:
        minute = now.minute
    yesterday_date = now.date() + timedelta(days=-1)
    return datetime(yesterday_date.year, yesterday_date.month, yesterday_date.day,
                    hour, minute, 0, 0, timezone.utc)
Gives the ``datetime.datetime`` object corresponding to yesterday. The default value for optional parameters is the current value of hour and minute. I.e: when called without specifying values for parameters, the resulting object will refer to the time = now - 24 hours; when called with only hour specified, the resulting object will refer to yesterday at the specified hour and at the current minute. :param hour: the hour for yesterday, in the format *0-23* (defaults to ``None``) :type hour: int :param minute: the minute for yesterday, in the format *0-59* (defaults to ``None``) :type minute: int :returns: a ``datetime.datetime`` object :raises: *ValueError* when hour or minute have bad values
https://github.com/csparpa/pyowm/blob/0474b61cc67fa3c95f9e572b96d3248031828fce/pyowm/utils/timestamps.py#L105-L130
from datetime import datetime, timedelta, timezone from pyowm.utils import formatting def now(timeformat='date'): return formatting.timeformat(datetime.now(timezone.utc), timeformat) def next_hour(date=None): return _timedelta_hours(1, date) def last_hour(date=None): return _timedelta_hours(-1, date) def next_three_hours(date=None): return _timedelta_hours(3, date) def last_three_hours(date=None): return _timedelta_hours(-3, date) def tomorrow(hour=None, minute=None): now = datetime.now(timezone.utc) if hour is None: hour = now.hour if minute is None: minute = now.minute tomorrow_date = now.date() + timedelta(days=1) return datetime(tomorrow_date.year, tomorrow_date.month, tomorrow_date.day, hour, minute, 0, 0, timezone.utc)
MIT License
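Usage sketch of the helper above from pyowm.utils.timestamps.

from pyowm.utils import timestamps

timestamps.yesterday()                    # 24 hours ago, at the current hour and minute (UTC)
timestamps.yesterday(hour=9, minute=30)   # yesterday at 09:30 UTC
timestamps.yesterday(hour=9)              # yesterday at 09:xx, keeping the current minute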
plaid/plaid-python
plaid/model/income_breakdown.py
IncomeBreakdown.__init__
python
def __init__(self, type, rate, hours, total, *args, **kwargs): _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.type = type self.rate = rate self.hours = hours self.total = total for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None: continue setattr(self, var_name, var_value)
IncomeBreakdown - a model defined in OpenAPI Args: type (str, none_type): The type of income. Possible values include: `\"regular\"`: regular income `\"overtime\"`: overtime income `\"bonus\"`: bonus income rate (float, none_type): The hourly rate at which the income is paid. hours (float, none_type): The number of hours logged for this income for this pay period. total (float, none_type): The total pay for this pay period. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)
https://github.com/plaid/plaid-python/blob/950d04d621a5f5b92a7705cc30d14d4004db8543/plaid/model/income_breakdown.py#L116-L192
import re import sys from plaid.model_utils import ( ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) class IncomeBreakdown(ModelNormal): allowed_values = { ('type',): { 'None': None, 'BONUS': "bonus", 'OVERTIME': "overtime", 'REGULAR': "regular", 'NULL': "null", }, } validations = { } @cached_property def additional_properties_type(): return (bool, date, datetime, dict, float, int, list, str, none_type,) _nullable = False @cached_property def openapi_types(): return { 'type': (str, none_type,), 'rate': (float, none_type,), 'hours': (float, none_type,), 'total': (float, none_type,), } @cached_property def discriminator(): return None attribute_map = { 'type': 'type', 'rate': 'rate', 'hours': 'hours', 'total': 'total', } _composed_schemas = {} required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args
MIT License
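A hedged construction example for the generated model; the numbers are made up, and the to_dict helper is assumed to come from the generated ModelNormal base class.

from plaid.model.income_breakdown import IncomeBreakdown

breakdown = IncomeBreakdown(type='regular', rate=25.0, hours=80.0, total=2000.0)
breakdown.to_dict()   # assumed helper: {'type': 'regular', 'rate': 25.0, 'hours': 80.0, 'total': 2000.0}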
catalyst-team/catalyst
catalyst/contrib/nn/criterion/lovasz.py
_lovasz_softmax
python
def _lovasz_softmax(probabilities, targets, classes="present", per_image=False, ignore=None):
    if per_image:
        loss = mean(
            _lovasz_softmax_flat(
                *_flatten_probabilities(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
                classes=classes
            )
            for prob, lab in zip(probabilities, targets)
        )
    else:
        loss = _lovasz_softmax_flat(
            *_flatten_probabilities(probabilities, targets, ignore), classes=classes
        )
    return loss
The multiclass Lovasz-Softmax loss. Args: probabilities: [B, C, H, W] class probabilities at each prediction (between 0 and 1). Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. targets: [B, H, W] ground truth targets (between 0 and C - 1) classes: "all" for all, "present" for classes present in targets, or a list of classes to average. per_image: compute the loss per image instead of per batch ignore: void class targets
https://github.com/catalyst-team/catalyst/blob/a6fc305eaddc499c17584824794fa8d006072842/catalyst/contrib/nn/criterion/lovasz.py#L173-L200
from itertools import filterfalse as ifilterfalse import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss def isnan(x): return x != x def mean(values, ignore_nan=False, empty=0): values = iter(values) if ignore_nan: values = ifilterfalse(isnan, values) try: n = 1 acc = next(values) except StopIteration: if empty == "raise": raise ValueError("Empty mean") return empty for n, v in enumerate(values, 2): acc += v if n == 1: return acc return acc / n def _lovasz_grad(gt_sorted): p = len(gt_sorted) gts = gt_sorted.sum() intersection = gts - gt_sorted.float().cumsum(0) union = gts + (1 - gt_sorted).float().cumsum(0) jaccard = 1.0 - intersection / union if p > 1: jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] return jaccard def _flatten_binary_scores(logits, targets, ignore=None): logits = logits.reshape(-1) targets = targets.reshape(-1) if ignore is None: return logits, targets valid = targets != ignore logits_ = logits[valid] targets_ = targets[valid] return logits_, targets_ def _lovasz_hinge_flat(logits, targets): if len(targets) == 0: return logits.sum() * 0.0 signs = 2.0 * targets.float() - 1.0 errors = 1.0 - logits * signs errors_sorted, perm = torch.sort(errors, dim=0, descending=True) perm = perm.data gt_sorted = targets[perm] grad = _lovasz_grad(gt_sorted) loss = torch.dot(F.relu(errors_sorted), grad) return loss def _lovasz_hinge(logits, targets, per_image=True, ignore=None): if per_image: loss = mean( _lovasz_hinge_flat( *_flatten_binary_scores(logit.unsqueeze(0), target.unsqueeze(0), ignore) ) for logit, target in zip(logits, targets) ) else: loss = _lovasz_hinge_flat(*_flatten_binary_scores(logits, targets, ignore)) return loss def _flatten_probabilities(probabilities, targets, ignore=None): if probabilities.dim() == 3: B, H, W = probabilities.size() probabilities = probabilities.view(B, 1, H, W) B, C, H, W = probabilities.size() probabilities = probabilities.permute(0, 2, 3, 1).contiguous().view(-1, C) targets = targets.view(-1) if ignore is None: return probabilities, targets valid = targets != ignore probabilities_ = probabilities[valid.nonzero().squeeze()] targets_ = targets[valid] return probabilities_, targets_ def _lovasz_softmax_flat(probabilities, targets, classes="present"): if probabilities.numel() == 0: return probabilities * 0.0 C = probabilities.size(1) losses = [] class_to_sum = list(range(C)) if classes in ["all", "present"] else classes for c in class_to_sum: fg = (targets == c).float() if classes == "present" and fg.sum() == 0: continue if C == 1: if len(class_to_sum) > 1: raise ValueError("Sigmoid output possible only with 1 class") class_pred = probabilities[:, 0] else: class_pred = probabilities[:, c] errors = (fg - class_pred).abs() errors_sorted, perm = torch.sort(errors, 0, descending=True) perm = perm.data fg_sorted = fg[perm] losses.append(torch.dot(errors_sorted, _lovasz_grad(fg_sorted))) return mean(losses)
Apache License 2.0
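A minimal self-contained sketch: random softmax probabilities for a 4-class segmentation batch, with per-pixel integer targets.

import torch
import torch.nn.functional as F

logits = torch.randn(2, 4, 8, 8)                  # [B, C, H, W]
probabilities = F.softmax(logits, dim=1)
targets = torch.randint(0, 4, (2, 8, 8))          # [B, H, W], values in 0..C-1
loss = _lovasz_softmax(probabilities, targets, classes='present', per_image=False)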
klieret/ankipandas
ankipandas/raw.py
close_db
python
def close_db(db: sqlite3.Connection) -> None:
    db.close()
Close the database. Args: db: Database (:class:`sqlite3.Connection`) Returns: None
https://github.com/klieret/ankipandas/blob/acf0fdb6e57dc19cf39894662d235b3684cf7d37/ankipandas/raw.py#L56-L65
from collections import defaultdict import sqlite3 import json import pathlib from functools import lru_cache from typing import Dict, List, Union import pandas as pd import numpy as np from ankipandas.util.log import log from ankipandas._columns import tables_ours2anki, anki_columns from ankipandas.util.misc import nested_dict, defaultdict2dict CACHE_SIZE = 32 def load_db(path: Union[str, pathlib.PurePath]) -> sqlite3.Connection: path = pathlib.Path(path) if not path.is_file(): raise FileNotFoundError(f"Not a file/file not found: {path}") return sqlite3.connect(str(path.resolve()))
MIT License
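Sketch of the intended open/use/close pairing; the collection path is a placeholder.

from ankipandas import raw

db = raw.load_db('/path/to/collection.anki2')
# ... read tables via the other helpers in ankipandas.raw ...
raw.close_db(db)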
bepasty/bepasty-server
src/bepasty/utils/permissions.py
lookup_permissions
python
def lookup_permissions(token):
    return current_app.config['PERMISSIONS'].get(token)
look up the permissions string for the secret <token> in the configuration. if no such secret is configured, return None
https://github.com/bepasty/bepasty-server/blob/8f4c23f6ff819bdd2e05ab443eb9b998fec259ce/src/bepasty/utils/permissions.py#L27-L32
from flask import request, session, current_app from flask import g as flaskg ADMIN = 'admin' LIST = 'list' CREATE = 'create' MODIFY = 'modify' READ = 'read' DELETE = 'delete' PERMISSIONS = 'permissions' LOGGEDIN = 'loggedin' permission_icons = { 'admin': 'user', 'list': 'list', 'create': 'plus', 'modify': 'edit', 'read': 'book', 'delete': 'trash' }
BSD 2-Clause Simplified License
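A sketch assuming `app` is an already-configured bepasty Flask app whose PERMISSIONS setting maps secret tokens to comma-separated permission strings.

app.config['PERMISSIONS'] = {'s3cr3t-token': 'admin,list,create,modify,read,delete'}

with app.app_context():
    lookup_permissions('s3cr3t-token')   # 'admin,list,create,modify,read,delete'
    lookup_permissions('wrong-token')    # None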
nasa/crisismappingtoolkit
bin/detect_flood_modis.py
evaluation_function
python
def evaluation_function(pair, alg):
    (precision, recall, evalRes, noTruth) = pair
    print '%s: (%4g, %4g, %4g)' % (get_algorithm_name(alg), precision, recall, noTruth)
Pretty print an algorithm and its statistics
https://github.com/nasa/crisismappingtoolkit/blob/0296487974d74cec6aa8be42eafbb5cd24dc6a51/bin/detect_flood_modis.py#L67-L70
import logging logging.basicConfig(level=logging.ERROR) try: import cmt.ee_authenticate except: import sys import os.path sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) import cmt.ee_authenticate import matplotlib import sys import os import ee import functools import threading import cmt.domain from cmt.modis.flood_algorithms import * from cmt.mapclient_qt import centerMap, addToMap import cmt.util.evaluation import cmt.util.gui_util ALGORITHMS = [DIFFERENCE, EVI, XIAO, ADABOOST]
Apache License 2.0
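A Python 2 sketch of the pretty-printer above; the precision/recall/no-truth numbers are invented, and the exact label depends on get_algorithm_name.

pair = (0.91, 0.85, 0.88, 0.02)     # (precision, recall, evalRes, noTruth)
evaluation_function(pair, ADABOOST)
# prints a line such as: "Adaboost: (0.91, 0.85, 0.02)"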
rouge8/20questions
web/session.py
Store.encode
python
def encode(self, session_dict):
    pickled = pickle.dumps(session_dict)
    return base64.encodestring(pickled)
encodes session dict as a string
https://github.com/rouge8/20questions/blob/8845184114109c7e34eecd69c2689d6ef6fc3084/web/session.py#L165-L168
import os, time, datetime, random, base64 try: import cPickle as pickle except ImportError: import pickle try: import hashlib sha1 = hashlib.sha1 except ImportError: import sha sha1 = sha.new import utils import webapi as web __all__ = [ 'Session', 'SessionExpired', 'Store', 'DiskStore', 'DBStore', ] web.config.session_parameters = utils.storage({ 'cookie_name': 'webpy_session_id', 'cookie_domain': None, 'timeout': 86400, 'ignore_expiry': True, 'ignore_change_ip': True, 'secret_key': 'fLjUfxqXtfNoIldA0A0J', 'expired_message': 'Session expired', }) class SessionExpired(web.HTTPError): def __init__(self, message): web.HTTPError.__init__(self, '200 OK', {}, data=message) class Session(utils.ThreadedDict): def __init__(self, app, store, initializer=None): self.__dict__['store'] = store self.__dict__['_initializer'] = initializer self.__dict__['_last_cleanup_time'] = 0 self.__dict__['_config'] = utils.storage(web.config.session_parameters) if app: app.add_processor(self._processor) def _processor(self, handler): self._cleanup() self._load() try: return handler() finally: self._save() def _load(self): cookie_name = self._config.cookie_name cookie_domain = self._config.cookie_domain self.session_id = web.cookies().get(cookie_name) if self.session_id and not self._valid_session_id(self.session_id): self.session_id = None self._check_expiry() if self.session_id: d = self.store[self.session_id] self.update(d) self._validate_ip() if not self.session_id: self.session_id = self._generate_session_id() if self._initializer: if isinstance(self._initializer, dict): self.update(self._initializer) elif hasattr(self._initializer, '__call__'): self._initializer() self.ip = web.ctx.ip def _check_expiry(self): if self.session_id and self.session_id not in self.store: if self._config.ignore_expiry: self.session_id = None else: return self.expired() def _validate_ip(self): if self.session_id and self.get('ip', None) != web.ctx.ip: if not self._config.ignore_change_ip: return self.expired() def _save(self): cookie_name = self._config.cookie_name cookie_domain = self._config.cookie_domain if not self.get('_killed'): web.setcookie(cookie_name, self.session_id, domain=cookie_domain) self.store[self.session_id] = dict(self) else: web.setcookie(cookie_name, self.session_id, expires=-1, domain=cookie_domain) def _generate_session_id(self): while True: rand = os.urandom(16) now = time.time() secret_key = self._config.secret_key session_id = sha1("%s%s%s%s" %(rand, now, utils.safestr(web.ctx.ip), secret_key)) session_id = session_id.hexdigest() if session_id not in self.store: break return session_id def _valid_session_id(self, session_id): rx = utils.re_compile('^[0-9a-fA-F]+$') return rx.match(session_id) def _cleanup(self): current_time = time.time() timeout = self._config.timeout if current_time - self._last_cleanup_time > timeout: self.store.cleanup(timeout) self.__dict__['_last_cleanup_time'] = current_time def expired(self): self._killed = True self._save() raise SessionExpired(self._config.expired_message) def kill(self): del self.store[self.session_id] self._killed = True class Store: def __contains__(self, key): raise NotImplementedError def __getitem__(self, key): raise NotImplementedError def __setitem__(self, key, value): raise NotImplementedError def cleanup(self, timeout): raise NotImplementedError
MIT License
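A Python 2 sketch pairing encode() with a matching decode() on a concrete store; DiskStore and the companion decode method are assumed from the module's __all__ list rather than shown in the excerpt, and the directory is a placeholder.

store = DiskStore('/tmp/sessions')
blob = store.encode({'user': 'alice', 'visits': 3})
store.decode(blob)    # {'user': 'alice', 'visits': 3}, assuming the companion decode()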
pelioniot/mbed-cloud-sdk-python
src/mbed_cloud/_backends/billing/models/subtenant_account_report.py
SubtenantAccountReport.service_package
python
def service_package(self, service_package):
    self._service_package = service_package
Sets the service_package of this SubtenantAccountReport. Report service package for subtenant account. :param service_package: The service_package of this SubtenantAccountReport. :type: SubtenantServicePackageReport
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/_backends/billing/models/subtenant_account_report.py#L117-L126
from pprint import pformat from six import iteritems import re class SubtenantAccountReport(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'account': 'SubtenantReportAccountContactInfo', 'billing_data': 'ReportBillingData', 'service_package': 'SubtenantServicePackageReport' } attribute_map = { 'account': 'account', 'billing_data': 'billing_data', 'service_package': 'service_package' } def __init__(self, account=None, billing_data=None, service_package=None): self._account = account self._billing_data = billing_data self._service_package = service_package self.discriminator = None @property def account(self): return self._account @account.setter def account(self, account): if account is None: raise ValueError("Invalid value for `account`, must not be `None`") self._account = account @property def billing_data(self): return self._billing_data @billing_data.setter def billing_data(self, billing_data): if billing_data is None: raise ValueError("Invalid value for `billing_data`, must not be `None`") self._billing_data = billing_data @property def service_package(self): return self._service_package @service_package.setter
Apache License 2.0
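A hedged sketch of the property in use; `account_info`, `billing` and `package_report` stand in for objects obtained from the billing API.

report = SubtenantAccountReport(account=account_info, billing_data=billing)
report.service_package = package_report     # a SubtenantServicePackageReport instance
report.service_package is package_report    # True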
svanoort/pyresttest
pyresttest/tests.py
Test.realize
python
def realize(self, context=None):
    if not self.is_dynamic() or context is None:
        return self
    else:
        selfcopy = self.ninja_copy()
        selfcopy.templates = None
        if isinstance(self._body, ContentHandler):
            selfcopy._body = self._body.get_content(context)
        selfcopy._url = self.get_url(context=context)
        selfcopy._headers = self.get_headers(context=context)
        return selfcopy
Return a fully-templated test object, for configuring curl Warning: this is a SHALLOW copy, mutation of fields will cause problems! Can accept a None context
https://github.com/svanoort/pyresttest/blob/f92acf8e838c4623ddd8e12e880f31046ff9317f/pyresttest/tests.py#L241-L254
import string import os import copy import json import pycurl import sys from . import contenthandling from .contenthandling import ContentHandler from . import validators from . import parsing from .parsing import * try: from cStringIO import StringIO as MyIO except: try: from StringIO import StringIO as MyIO except ImportError: from io import BytesIO as MyIO PYTHON_MAJOR_VERSION = sys.version_info[0] if PYTHON_MAJOR_VERSION > 2: import urllib.parse as urlparse from past.builtins import basestring else: import urlparse from . import six from .six import binary_type from .six import text_type from .six import iteritems from .six.moves import filter as ifilter BASECURL = pycurl.Curl() DEFAULT_TIMEOUT = 10 HTTP_METHODS = {u'GET': pycurl.HTTPGET, u'PUT': pycurl.UPLOAD, u'PATCH': pycurl.POSTFIELDS, u'POST': pycurl.POST, u'DELETE': 'DELETE'} def coerce_to_string(val): if isinstance(val, text_type): return val elif isinstance(val, int): return text_type(val) elif isinstance(val, binary_type): return val.decode('utf-8') else: raise TypeError("Input {0} is not a string or integer, and it needs to be!".format(val)) def coerce_string_to_ascii(val): if isinstance(val, text_type): return val.encode('ascii') elif isinstance(val, binary_type): return val else: raise TypeError("Input {0} is not a string, string expected".format(val)) def coerce_http_method(val): myval = val if not isinstance(myval, basestring) or len(val) == 0: raise TypeError("Invalid HTTP method name: input {0} is not a string or has 0 length".format(val)) if isinstance(myval, binary_type): myval = myval.decode('utf-8') return myval.upper() def coerce_list_of_ints(val): if isinstance(val, list): return [int(x) for x in val] else: return [int(val)] class Test(object): _url = None expected_status = [200] _body = None _headers = dict() method = u'GET' group = u'Default' name = u'Unnamed' validators = None stop_on_failure = False failures = None auth_username = None auth_password = None auth_type = pycurl.HTTPAUTH_BASIC delay = 0 curl_options = None templates = None variable_binds = None generator_binds = None extract_binds = None @staticmethod def has_contains(): return 'contains' in validators.VALIDATORS def ninja_copy(self): output = Test() myvars = vars(self) output.__dict__ = myvars.copy() return output def set_template(self, variable_name, template_string): if self.templates is None: self.templates = dict() self.templates[variable_name] = string.Template(template_string) def del_template(self, variable_name): if self.templates is not None and variable_name in self.templates: del self.templates[variable_name] def realize_template(self, variable_name, context): val = None if context is None or self.templates is None or variable_name not in self.templates: return None return self.templates[variable_name].safe_substitute(context.get_values()) def set_body(self, value): self._body = value def get_body(self, context=None): if self._body is None: return None elif isinstance(self._body, basestring): return self._body else: return self._body.get_content(context=context) body = property(get_body, set_body, None, 'Request body, if any (for POST/PUT methods)') NAME_URL = 'url' def set_url(self, value, isTemplate=False): if isTemplate: self.set_template(self.NAME_URL, value) else: self.del_template(self.NAME_URL) self._url = value def get_url(self, context=None): val = self.realize_template(self.NAME_URL, context) if val is None: val = self._url return val url = property(get_url, set_url, None, 'URL fragment for request') NAME_HEADERS = 'headers' 
def set_headers(self, value, isTemplate=False): if isTemplate: self.set_template(self.NAME_HEADERS, 'Dict_Templated') else: self.del_template(self.NAME_HEADERS) self._headers = value def get_headers(self, context=None): if not context or not self.templates or self.NAME_HEADERS not in self.templates: return self._headers vals = context.get_values() def template_tuple(tuple_input): return (string.Template(str(tuple_item)).safe_substitute(vals) for tuple_item in tuple_input) return dict(map(template_tuple, self._headers.items())) headers = property(get_headers, set_headers, None, 'Headers dictionary for request') def update_context_before(self, context): if self.variable_binds: context.bind_variables(self.variable_binds) if self.generator_binds: for key, value in self.generator_binds.items(): context.bind_generator_next(key, value) def update_context_after(self, response_body, headers, context): if self.extract_binds: for key, value in self.extract_binds.items(): result = value.extract( body=response_body, headers=headers, context=context) context.bind_variable(key, result) def is_context_modifier(self): return self.variable_binds or self.generator_binds or self.extract_binds def is_dynamic(self): if self.templates: return True elif isinstance(self._body, ContentHandler) and self._body.is_dynamic(): return True return False
Apache License 2.0
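A sketch of resolving a templated test against a context before configuring curl; the Context class is assumed to live in pyresttest.binding with a bind_variable method.

from pyresttest.binding import Context

test = Test()
test.set_url('/api/person/$id/', isTemplate=True)
context = Context()
context.bind_variable('id', 9)
resolved = test.realize(context)
resolved.url    # '/api/person/9/'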
talwalkarlab/paleo
paleo/profiler.py
Profiler.profile
python
def profile(self, device_name, options, executor=None): device_spec = device.DEVICES[device_name] logger.info('Profiling for device %s' % device_spec.name) results = [] for layer_spec in self.graph.topology_order: layer = layer_spec.layer_op if executor == 'tensorflow': options.use_cudnn_heuristics = False flops_profiler = profilers.FlopsProfiler(options, device_spec) flop_based_time = flops_profiler.profile(layer) logger.info('Layer: %s' % layer_spec.name) logger.info('- %s: %s %s' % (flops_profiler.name, flop_based_time, flops_profiler.message)) if device_spec.is_gpu: profiler = None if executor == 'cudnn': from profilers.cudnn_profiler import CudnnProfiler profiler = CudnnProfiler(options) elif executor == 'tensorflow': from profilers.tensorflow_profiler import ( TensorFlowProfiler) profiler = TensorFlowProfiler(options) if profiler: executor_time = profiler.profile(layer) logger.info('- %s: %s %s' % (profiler.name, executor_time, profiler.message)) results.append( (layer_spec.name, flop_based_time.total_time, executor_time.total_time, 0, flops_profiler.message, profiler.message)) return results
Profile the network with the given device spec. Returns: A dictionary contains the following keys: (layers, flops, executor, executor_std, flops_message, executor_msg)
https://github.com/talwalkarlab/paleo/blob/984bbf5d2942f28b8599db4374c0ad788efe0f6e/paleo/profiler.py#L64-L113
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import os import click import numpy as np from paleo import __version__ from paleo.graph import OperationGraph from paleo import device from paleo import profilers from paleo import simulation from paleo.utils import save_layer from paleo import comm FORMAT = "%(levelname)s %(pathname)s:%(lineno)d] %(message)s" logging.basicConfig(format=FORMAT) logger = logging.getLogger("paleo") logger.setLevel(logging.INFO) class Profiler(): def __init__(self, filename, separator='\t'): self._filename = filename self.graph = OperationGraph(filename) logger.debug('Net spec loaded from %s.' % filename) logger.debug('Dependencies: %s' % str(self.graph.nested_list)) self._separator = separator def print_static_summary(self): print('A summary of static characteristics of network.') print(' LAYER\tOUTPUTS') num_params = 0 weights_in_bytes = 0 num_activations = 0 for layer_spec in self.graph.topology_order: layer = layer_spec.layer_op print(' %s' % layer) num_params += layer.num_params weights_in_bytes += layer.weights_in_bytes num_activations += np.prod(layer.outputs) print('Number of params: {:,} ({:,} Bytes)'.format(num_params, weights_in_bytes)) print('Activation: {:,} Bytes'.format(num_activations * 4)) def save_conv_layers(self, save_dir): for layer_spec in self.graph.topology_order: if layer_spec['type'] != 'Convolution': continue layer = layer_spec.layer_op outfilename = os.path.join(save_dir, "%s.json" % layer_spec.name) save_layer.save_conv_layer(outfilename, layer)
Apache License 2.0
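A driving sketch: the net-spec path and device name are placeholders (the name must be a key of device.DEVICES), and `options` stands for a profiler options object constructed elsewhere in paleo.profilers.

profiler = Profiler('nets/alex_v2.json')
profiler.print_static_summary()
results = profiler.profile('K80', options)     # flop-based estimates per layer
for layer_name, flop_time, executor_time, _, _, _ in results:
    print(layer_name, flop_time, executor_time)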
morsecorp/snappiershot
snappiershot/compare.py
ObjectComparison.equal
python
def equal(self) -> bool:
    return bool(self)
Returns True if no differences were detected.
https://github.com/morsecorp/snappiershot/blob/acb6a8d01d4496abe0f2fe83c7e7af9cf77aac8e/snappiershot/compare.py#L32-L34
from math import isclose, isnan from operator import itemgetter from typing import Any, Callable, Dict, Iterable, List, Sequence, Set, Tuple from .config import Config class ObjectComparison: def __init__(self, value: Any, expected: Any, config: Config, exact: bool = False): self.value = value self.expected = expected self.config = config self.exact = exact self.differences = _Differences() self._compare(self.value, self.expected, operations=[]) def __bool__(self) -> bool: return not bool(self.differences.items) @property
Apache License 2.0
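Minimal sketch, assuming Config can be constructed with its defaults; the nested values are chosen so the outcome is unambiguous.

from snappiershot.config import Config

ObjectComparison({'a': [1, 2]}, {'a': [1, 2]}, Config()).equal   # True
ObjectComparison({'a': [1, 2]}, {'a': [1, 3]}, Config()).equal   # False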
scille/umongo
umongo/builder.py
BaseBuilder.build_from_template
python
def build_from_template(self, template): base_tmpl_cls = _get_base_template_cls(template) base_impl_cls = TEMPLATE_IMPLEMENTATION_MAPPING[base_tmpl_cls] is_child = _is_child(template, base_tmpl_cls) name = template.__name__ bases = self._convert_bases(template.__bases__) nmspc, schema_fields, schema_non_fields = _collect_schema_attrs(template) opts = self._build_document_opts(template, bases, is_child) nmspc['opts'] = opts schema_bases = tuple( base.Schema for base in bases if issubclass(base, Implementation) and hasattr(base, 'Schema') ) if not schema_bases: schema_bases = (BaseSchema, ) if base_tmpl_cls is DocumentTemplate: nmspc['pk_field'] = _on_need_add_id_field(schema_bases, schema_fields) if base_tmpl_cls is not MixinDocumentTemplate: if is_child: schema_fields['cls'] = fields.StringField( attribute='_cls', default=name, dump_only=True ) schema_cls = self._build_schema(template, schema_bases, schema_fields, schema_non_fields) nmspc['Schema'] = schema_cls schema = schema_cls() nmspc['schema'] = schema if base_tmpl_cls is not MixinDocumentTemplate: nmspc['DataProxy'] = data_proxy_factory(name, schema, strict=opts.strict) nmspc['_fields'] = set(schema.fields.keys()) implementation = type(name, bases, nmspc) self._templates_lookup[template] = implementation if base_tmpl_cls is not MixinDocumentTemplate: for base in bases: for parent in base.mro(): if issubclass(parent, base_impl_cls) and parent is not base_impl_cls: parent.opts.offspring.add(implementation) return implementation
Generate a :class:`umongo.document.DocumentImplementation` for this instance from the given :class:`umongo.document.DocumentTemplate`.
https://github.com/scille/umongo/blob/28168d67f1c0a5030b65da1717859bd7c3a0bba0/umongo/builder.py#L216-L264
import re from copy import copy import marshmallow as ma from .abstract import BaseSchema from .template import Template, Implementation from .data_proxy import data_proxy_factory from .document import DocumentTemplate, DocumentOpts, DocumentImplementation from .embedded_document import ( EmbeddedDocumentTemplate, EmbeddedDocumentOpts, EmbeddedDocumentImplementation) from .mixin import MixinDocumentTemplate, MixinDocumentOpts, MixinDocumentImplementation from .exceptions import DocumentDefinitionError, NotRegisteredDocumentError from . import fields TEMPLATE_IMPLEMENTATION_MAPPING = { DocumentTemplate: DocumentImplementation, EmbeddedDocumentTemplate: EmbeddedDocumentImplementation, MixinDocumentTemplate: MixinDocumentImplementation, } TEMPLATE_OPTIONS_MAPPING = { DocumentTemplate: DocumentOpts, EmbeddedDocumentTemplate: EmbeddedDocumentOpts, MixinDocumentTemplate: MixinDocumentOpts, } def _get_base_template_cls(template): if issubclass(template, DocumentTemplate): return DocumentTemplate if issubclass(template, EmbeddedDocumentTemplate): return EmbeddedDocumentTemplate if issubclass(template, MixinDocumentTemplate): return MixinDocumentTemplate assert False def camel_to_snake(name): tmp_str = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', tmp_str).lower() def _is_child(template, base_tmpl_cls): return any( b for b in template.__bases__ if issubclass(b, base_tmpl_cls) and b is not base_tmpl_cls and ('Meta' not in b.__dict__ or not getattr(b.Meta, 'abstract', False)) ) def _on_need_add_id_field(bases, fields_dict): def find_id_field(fields_dict): for name, field in fields_dict.items(): if not isinstance(field, ma.fields.Field): continue if (name == '_id' and not field.attribute) or field.attribute == '_id': return name return None for base in bases: schema = base() name = find_id_field(schema.fields) if name is not None: return name name = find_id_field(fields_dict) if name is not None: return name fields_dict['id'] = fields.ObjectIdField(attribute='_id', dump_only=True) return 'id' def _collect_schema_attrs(template): schema_fields = {} schema_non_fields = {} nmspc = {} for key, item in template.__dict__.items(): if hasattr(item, '__marshmallow_hook__'): schema_non_fields[key] = item elif isinstance(item, ma.fields.Field): schema_fields[key] = copy(item) else: nmspc[key] = item return nmspc, schema_fields, schema_non_fields class BaseBuilder: BASE_DOCUMENT_CLS = None def __init__(self, instance): assert self.BASE_DOCUMENT_CLS self.instance = instance self._templates_lookup = { DocumentTemplate: self.BASE_DOCUMENT_CLS, EmbeddedDocumentTemplate: EmbeddedDocumentImplementation, MixinDocumentTemplate: MixinDocumentImplementation, } def _convert_bases(self, bases): converted_bases = [] for base in bases: assert not issubclass(base, Implementation), 'Document cannot inherit of implementations' if issubclass(base, Template): if base not in self._templates_lookup: raise NotRegisteredDocumentError('Unknown document `%r`' % base) converted_bases.append(self._templates_lookup[base]) else: converted_bases.append(base) return tuple(converted_bases) def _patch_field(self, field): field.instance = self.instance if isinstance(field, fields.ListField): self._patch_field(field.inner) elif isinstance(field, fields.DictField): if field.key_field: self._patch_field(field.key_field) if field.value_field: self._patch_field(field.value_field) def _build_schema(self, template, schema_bases, schema_fields, schema_non_fields): for field in schema_fields.values(): 
self._patch_field(field) schema_nmspc = {} schema_nmspc.update(schema_fields) schema_nmspc.update(schema_non_fields) schema_nmspc['MA_BASE_SCHEMA_CLS'] = template.MA_BASE_SCHEMA_CLS return type('%sSchema' % template.__name__, schema_bases, schema_nmspc) def _build_document_opts(self, template, bases, is_child): base_tmpl_cls = _get_base_template_cls(template) base_impl_cls = TEMPLATE_IMPLEMENTATION_MAPPING[base_tmpl_cls] base_opts_cls = TEMPLATE_OPTIONS_MAPPING[base_tmpl_cls] kwargs = {} kwargs['instance'] = self.instance kwargs['template'] = template if base_tmpl_cls in (DocumentTemplate, EmbeddedDocumentTemplate): meta = template.__dict__.get('Meta') kwargs['abstract'] = getattr(meta, 'abstract', False) kwargs['is_child'] = is_child kwargs['strict'] = getattr(meta, 'strict', True) if base_tmpl_cls is DocumentTemplate: collection_name = getattr(meta, 'collection_name', None) for base in bases: if not issubclass(base, base_impl_cls): continue popts = base.opts if kwargs['abstract'] and not popts.abstract: raise DocumentDefinitionError( "Abstract document should have all its parents abstract") if base_tmpl_cls is DocumentTemplate: if popts.collection_name: if collection_name: raise DocumentDefinitionError( "Cannot redefine collection_name in a child, use abstract instead") collection_name = popts.collection_name if base_tmpl_cls is DocumentTemplate: if collection_name: if kwargs['abstract']: raise DocumentDefinitionError( 'Abstract document cannot define collection_name') elif not kwargs['abstract']: collection_name = camel_to_snake(template.__name__) kwargs['collection_name'] = collection_name return base_opts_cls(**kwargs)
MIT License
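In normal use the builder is not called directly: registering a template on an instance triggers build_from_template. The PyMongoInstance and the pymongo handle below are illustrative.

import umongo
from umongo import Document, fields

instance = umongo.PyMongoInstance(db)    # `db`: a pymongo Database handle

@instance.register                       # internally calls builder.build_from_template()
class Person(Document):
    name = fields.StrField(required=True)

Person.opts.collection_name              # 'person' (camel_to_snake of the class name)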
jcyk/amr-gs
stog/utils/environment.py
move_to_device
python
def move_to_device(obj, device):
    if not has_tensor(obj):
        return obj
    elif isinstance(obj, torch.Tensor):
        return obj.to(device)
    elif isinstance(obj, dict):
        return {key: move_to_device(value, device) for key, value in obj.items()}
    elif isinstance(obj, list):
        return [move_to_device(item, device) for item in obj]
    elif isinstance(obj, tuple):
        return tuple([move_to_device(item, device) for item in obj])
    else:
        return obj
Given a structure (possibly) containing Tensors on the CPU, move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
https://github.com/jcyk/amr-gs/blob/5666215b04151cadf121917826376acc16cb8b30/stog/utils/environment.py#L183-L199
import os import sys import random import subprocess try: import resource except ImportError: resource = None import numpy import torch from torch import cuda from stog.utils import logging from stog.utils.logging import TeeLogger from stog.utils.tqdm import Tqdm from stog.utils.checks import ConfigurationError logger = logging.init_logger() def set_seed(params): seed, numpy_seed, torch_seed = params['seed'], params['numpy_seed'], params['torch_seed'] if seed is not None: random.seed(seed) if numpy_seed is not None: numpy.random.seed(numpy_seed) if torch_seed is not None: torch.manual_seed(torch_seed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(torch_seed) logger.info('Init random seeds => tseed: {seed} numpy_seed: {numpy_seed} torch_seed: {torch_seed}'.format( seed=seed, numpy_seed=numpy_seed, torch_seed=torch_seed )) def prepare_global_logging(params) -> None: serialization_dir = params['serialization_dir'] file_friendly_logging = params['file_friendly_logging'] Tqdm.set_slower_interval(file_friendly_logging) std_out_file = os.path.join(serialization_dir, "stdout.log") sys.stdout = TeeLogger(std_out_file, sys.stdout, file_friendly_logging) sys.stderr = TeeLogger(os.path.join(serialization_dir, "stderr.log"), sys.stderr, file_friendly_logging) logging.init_logger(log_file=std_out_file) def check_for_gpu(params) -> object: device_id = params['cuda_device'] if device_id is not None and device_id >= cuda.device_count(): raise ConfigurationError("Experiment specified a GPU but none is available;" " if you want to run on CPU use the override" " 'trainer.cuda_device=-1' in the json config file.") def device_mapping(cuda_device: int): def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage: if cuda_device >= 0: return storage.cuda(cuda_device) else: return storage return inner_device_mapping def peak_memory_mb() -> float: if resource is None or sys.platform not in ('linux', 'darwin'): return 0.0 peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss if sys.platform == 'darwin': return peak / 1_000_000 else: return peak / 1_000 def gpu_memory_mb() -> dict: try: result = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader'], encoding='utf-8') gpu_memory = [int(x) for x in result.strip().split('\n')] return {gpu: memory for gpu, memory in enumerate(gpu_memory)} except FileNotFoundError: return {} except: logger.exception("unable to check gpu_memory_mb(), continuing") return {} def get_frozen_and_tunable_parameter_names(model: torch.nn.Module): frozen_parameter_names = [] tunable_parameter_names = [] for name, parameter in model.named_parameters(): if not parameter.requires_grad: frozen_parameter_names.append(name) else: tunable_parameter_names.append(name) return [frozen_parameter_names, tunable_parameter_names] def has_tensor(obj) -> bool: if isinstance(obj, torch.Tensor): return True elif isinstance(obj, dict): return any(has_tensor(value) for value in obj.values()) elif isinstance(obj, (list, tuple)): return any(has_tensor(item) for item in obj) else: return False
MIT License
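A minimal usage sketch of the recursive device-moving pattern documented in the record above. This is illustrative only: it re-implements the idea as a stand-alone helper rather than importing `stog`, and it assumes PyTorch is installed (the example structure and the name `to_device` are hypothetical).

import torch

# Hypothetical nested batch: tensors mixed with plain Python containers.
batch = {
    "tokens": torch.zeros(2, 5, dtype=torch.long),
    "masks": [torch.ones(2, 5), torch.ones(2, 5)],
    "meta": {"lengths": [5, 3]},  # non-tensor data is left untouched
}

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def to_device(obj, device):
    # Same idea as move_to_device above: recurse through containers,
    # call .to(device) on every tensor encountered.
    if isinstance(obj, torch.Tensor):
        return obj.to(device)
    if isinstance(obj, dict):
        return {k: to_device(v, device) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(to_device(v, device) for v in obj)
    return obj

moved = to_device(batch, device)
print(moved["tokens"].device)  # device the tensors now live on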
olitheolix/aiokubernetes
aiokubernetes/models/v1beta2_stateful_set_condition.py
V1beta2StatefulSetCondition.last_transition_time
python
def last_transition_time(self, last_transition_time):
    self._last_transition_time = last_transition_time
Sets the last_transition_time of this V1beta2StatefulSetCondition.

Last time the condition transitioned from one status to another.  # noqa: E501

:param last_transition_time: The last_transition_time of this V1beta2StatefulSetCondition.  # noqa: E501
:type: datetime
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1beta2_stateful_set_condition.py#L79-L88
import pprint import re class V1beta2StatefulSetCondition(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'last_transition_time': 'datetime', 'message': 'str', 'reason': 'str', 'status': 'str', 'type': 'str' } attribute_map = { 'last_transition_time': 'lastTransitionTime', 'message': 'message', 'reason': 'reason', 'status': 'status', 'type': 'type' } def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None): self._last_transition_time = None self._message = None self._reason = None self._status = None self._type = None self.discriminator = None if last_transition_time is not None: self.last_transition_time = last_transition_time if message is not None: self.message = message if reason is not None: self.reason = reason self.status = status self.type = type @property def last_transition_time(self): return self._last_transition_time @last_transition_time.setter
Apache License 2.0
olitheolix/aiokubernetes
aiokubernetes/models/v1_config_map_key_selector.py
V1ConfigMapKeySelector.__init__
python
def __init__(self, key=None, name=None, optional=None):
    self._key = None
    self._name = None
    self._optional = None
    self.discriminator = None

    self.key = key
    if name is not None:
        self.name = name
    if optional is not None:
        self.optional = optional
V1ConfigMapKeySelector - a model defined in Swagger
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1_config_map_key_selector.py#L44-L56
import pprint import re class V1ConfigMapKeySelector(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'key': 'str', 'name': 'str', 'optional': 'bool' } attribute_map = { 'key': 'key', 'name': 'name', 'optional': 'optional' }
Apache License 2.0
pyansys/pymapdl
ansys/mapdl/core/inline_functions/normals_queries.py
_NodeNormalQueries.normny
python
def normny(self, n1: int, n2: int, n3: int) -> float:
    return self._run_query(f"NORMNY({n1}, {n2}, {n3})", integer=False)
Y-direction cosine of the normal to the plane containing the given nodes.

Y-direction cosine of the normal to the plane containing nodes
`n1`, `n2`, and `n3`, reported in the global Cartesian coordinate
system.

Parameters
----------
n1 : int
    Node number
n2 : int
    Node number
n3 : int
    Node number

Returns
-------
float
    Y-direction cosine of the normal

Examples
--------
Here we create three nodes in the x-z plane and interrogate the
y-component of the normal to that plane, which is trivially 1.0.

>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl()
>>> mapdl.prep7()
>>> n1 = mapdl.n(1, 0, 0, 0)
>>> n2 = mapdl.n(2, 1, 0, 0)
>>> n3 = mapdl.n(3, 1, 0, 1)
>>> q = mapdl.queries
>>> q.normny(n1, n2, n3)
1.0
https://github.com/pyansys/pymapdl/blob/e5cc21471c3a8fcef1f7b88359e38aa89cd63f73/ansys/mapdl/core/inline_functions/normals_queries.py#L47-L85
from .core import _QueryExecution


class _NodeNormalQueries(_QueryExecution):
    _mapdl = None

    def normnx(self, n1: int, n2: int, n3: int) -> float:
        return self._run_query(f"NORMNX({n1}, {n2}, {n3})", integer=False)
MIT License
pythonistaguild/wavelink
examples/advanced.py
InteractiveController.volup_command
python
async def volup_command(self, payload: discord.RawReactionActionEvent):
    ctx = self.update_context(payload)

    command = self.bot.get_command('vol_up')
    ctx.command = command

    await self.bot.invoke(ctx)
Volume up button
https://github.com/pythonistaguild/wavelink/blob/3e11c16516dd89791c1247032045385979736554/examples/advanced.py#L276-L283
import asyncio import async_timeout import copy import datetime import discord import math import random import re import typing import wavelink from discord.ext import commands, menus URL_REG = re.compile(r'https?://(?:www\.)?.+') class NoChannelProvided(commands.CommandError): pass class IncorrectChannelError(commands.CommandError): pass class Track(wavelink.Track): __slots__ = ('requester', ) def __init__(self, *args, **kwargs): super().__init__(*args) self.requester = kwargs.get('requester') class Player(wavelink.Player): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.context: commands.Context = kwargs.get('context', None) if self.context: self.dj: discord.Member = self.context.author self.queue = asyncio.Queue() self.controller = None self.waiting = False self.updating = False self.pause_votes = set() self.resume_votes = set() self.skip_votes = set() self.shuffle_votes = set() self.stop_votes = set() async def do_next(self) -> None: if self.is_playing or self.waiting: return self.pause_votes.clear() self.resume_votes.clear() self.skip_votes.clear() self.shuffle_votes.clear() self.stop_votes.clear() try: self.waiting = True with async_timeout.timeout(300): track = await self.queue.get() except asyncio.TimeoutError: return await self.teardown() await self.play(track) self.waiting = False await self.invoke_controller() async def invoke_controller(self) -> None: if self.updating: return self.updating = True if not self.controller: self.controller = InteractiveController(embed=self.build_embed(), player=self) await self.controller.start(self.context) elif not await self.is_position_fresh(): try: await self.controller.message.delete() except discord.HTTPException: pass self.controller.stop() self.controller = InteractiveController(embed=self.build_embed(), player=self) await self.controller.start(self.context) else: embed = self.build_embed() await self.controller.message.edit(content=None, embed=embed) self.updating = False def build_embed(self) -> typing.Optional[discord.Embed]: track = self.current if not track: return channel = self.bot.get_channel(int(self.channel_id)) qsize = self.queue.qsize() embed = discord.Embed(title=f'Music Controller | {channel.name}', colour=0xebb145) embed.description = f'Now Playing:\n**`{track.title}`**\n\n' embed.set_thumbnail(url=track.thumb) embed.add_field(name='Duration', value=str(datetime.timedelta(milliseconds=int(track.length)))) embed.add_field(name='Queue Length', value=str(qsize)) embed.add_field(name='Volume', value=f'**`{self.volume}%`**') embed.add_field(name='Requested By', value=track.requester.mention) embed.add_field(name='DJ', value=self.dj.mention) embed.add_field(name='Video URL', value=f'[Click Here!]({track.uri})') return embed async def is_position_fresh(self) -> bool: try: async for message in self.context.channel.history(limit=5): if message.id == self.controller.message.id: return True except (discord.HTTPException, AttributeError): return False return False async def teardown(self): try: await self.controller.message.delete() except discord.HTTPException: pass self.controller.stop() try: await self.destroy() except KeyError: pass class InteractiveController(menus.Menu): def __init__(self, *, embed: discord.Embed, player: Player): super().__init__(timeout=None) self.embed = embed self.player = player def update_context(self, payload: discord.RawReactionActionEvent): ctx = copy.copy(self.ctx) ctx.author = payload.member return ctx def reaction_check(self, payload: discord.RawReactionActionEvent): if 
payload.event_type == 'REACTION_REMOVE': return False if not payload.member: return False if payload.member.bot: return False if payload.message_id != self.message.id: return False if payload.member not in self.bot.get_channel(int(self.player.channel_id)).members: return False return payload.emoji in self.buttons async def send_initial_message(self, ctx: commands.Context, channel: discord.TextChannel) -> discord.Message: return await channel.send(embed=self.embed) @menus.button(emoji='\u25B6') async def resume_command(self, payload: discord.RawReactionActionEvent): ctx = self.update_context(payload) command = self.bot.get_command('resume') ctx.command = command await self.bot.invoke(ctx) @menus.button(emoji='\u23F8') async def pause_command(self, payload: discord.RawReactionActionEvent): ctx = self.update_context(payload) command = self.bot.get_command('pause') ctx.command = command await self.bot.invoke(ctx) @menus.button(emoji='\u23F9') async def stop_command(self, payload: discord.RawReactionActionEvent): ctx = self.update_context(payload) command = self.bot.get_command('stop') ctx.command = command await self.bot.invoke(ctx) @menus.button(emoji='\u23ED') async def skip_command(self, payload: discord.RawReactionActionEvent): ctx = self.update_context(payload) command = self.bot.get_command('skip') ctx.command = command await self.bot.invoke(ctx) @menus.button(emoji='\U0001F500') async def shuffle_command(self, payload: discord.RawReactionActionEvent): ctx = self.update_context(payload) command = self.bot.get_command('shuffle') ctx.command = command await self.bot.invoke(ctx) @menus.button(emoji='\u2795')
MIT License
kroger/pyknon
pyknon/MidiFile.py
MIDITrack.writeEventsToStream
python
def writeEventsToStream(self): for event in self.MIDIEventList: if event.type == "NoteOn": code = 0x9 << 4 | event.channel varTime = writeVarLength(event.time) for timeByte in varTime: self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte) self.MIDIdata = self.MIDIdata + struct.pack('>B',code) self.MIDIdata = self.MIDIdata + struct.pack('>B',event.pitch) self.MIDIdata = self.MIDIdata + struct.pack('>B',event.volume) elif event.type == "NoteOff": code = 0x8 << 4 | event.channel varTime = writeVarLength(event.time) for timeByte in varTime: self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte) self.MIDIdata = self.MIDIdata + struct.pack('>B',code) self.MIDIdata = self.MIDIdata + struct.pack('>B',event.pitch) self.MIDIdata = self.MIDIdata + struct.pack('>B',event.volume) elif event.type == "Tempo": code = 0xFF subcode = 0x51 fourbite = struct.pack('>L', event.tempo) threebite = fourbite[1:4] varTime = writeVarLength(event.time) for timeByte in varTime: self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte) self.MIDIdata = self.MIDIdata + struct.pack('>B',code) self.MIDIdata = self.MIDIdata + struct.pack('>B',subcode) self.MIDIdata = self.MIDIdata + struct.pack('>B', 0x03) self.MIDIdata = self.MIDIdata + threebite elif event.type == 'ProgramChange': code = 0xC << 4 | event.channel varTime = writeVarLength(event.time) for timeByte in varTime: self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte) self.MIDIdata = self.MIDIdata + struct.pack('>B',code) self.MIDIdata = self.MIDIdata + struct.pack('>B',event.programNumber) elif event.type == 'TrackName': varTime = writeVarLength(event.time) for timeByte in varTime: self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte) self.MIDIdata = self.MIDIdata + struct.pack('B',0xFF) self.MIDIdata = self.MIDIdata + struct.pack('B',0X03) dataLength = len(event.trackName) dataLenghtVar = writeVarLength(dataLength) for i in range(0,len(dataLenghtVar)): self.MIDIdata = self.MIDIdata + struct.pack("b",dataLenghtVar[i]) if PYTHON3: self.MIDIdata = self.MIDIdata + event.trackName.encode() else: self.MIDIdata = self.MIDIdata + event.trackName elif event.type == "ControllerEvent": code = 0xB << 4 | event.channel varTime = writeVarLength(event.time) for timeByte in varTime: self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte) self.MIDIdata = self.MIDIdata + struct.pack('>B',code) self.MIDIdata = self.MIDIdata + struct.pack('>B',event.eventType) self.MIDIdata = self.MIDIdata + struct.pack('>B',event.paramerter1) elif event.type == "SysEx": code = 0xF0 varTime = writeVarLength(event.time) for timeByte in varTime: self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte) self.MIDIdata = self.MIDIdata + struct.pack('>B', code) payloadLength = writeVarLength(len(event.payload)+2) for lenByte in payloadLength: self.MIDIdata = self.MIDIdata + struct.pack('>B',lenByte) self.MIDIdata = self.MIDIdata + struct.pack('>B', event.manID) self.MIDIdata = self.MIDIdata + event.payload self.MIDIdata = self.MIDIdata + struct.pack('>B',0xF7) elif event.type == "UniversalSysEx": code = 0xF0 varTime = writeVarLength(event.time) for timeByte in varTime: self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte) self.MIDIdata = self.MIDIdata + struct.pack('>B', code) payloadLength = writeVarLength(len(event.payload)+5) for lenByte in payloadLength: self.MIDIdata = self.MIDIdata + struct.pack('>B',lenByte) if event.realTime : self.MIDIdata = self.MIDIdata + struct.pack('>B', 0x7F) else: self.MIDIdata = self.MIDIdata + struct.pack('>B', 0x7E) self.MIDIdata = 
self.MIDIdata + struct.pack('>B', event.sysExChannel) self.MIDIdata = self.MIDIdata + struct.pack('>B', event.code) self.MIDIdata = self.MIDIdata + struct.pack('>B', event.subcode) self.MIDIdata = self.MIDIdata + event.payload self.MIDIdata = self.MIDIdata + struct.pack('>B',0xF7)
Write the events in MIDIEvents to the MIDI stream.
https://github.com/kroger/pyknon/blob/14dfe74c95a271c5bb98841ef3b0dbfa10d55128/pyknon/MidiFile.py#L484-L582
import struct import sys import math PYTHON3 = True if sys.version_info[0] == 3 else False TICKSPERBEAT = 128 controllerEventTypes = { 'pan' : 0x0a } class MIDIEvent: def __init__(self): self.type='unknown' self.time=0 self.ord = 0 def __lt__(self, other): if self.time < other.time: return True elif self.time > other.time: return False else: if self.ord < other.ord: return True elif self.ord > other.ord: return False else: return False def __cmp__(self, other): if self.time < other.time: return -1 elif self.time > other.time: return 1 else: if self.ord < other.ord: return -1 elif self.ord > other.ord: return 1 else: return 0 class GenericEvent(): def __init__(self,time): self.time = time self.type = 'Unknown' def __eq__(self, other): if self.time != other.time or self.type != other.type: return False if self.type == 'note': if self.pitch != other.pitch or self.channel != other.channel: return False if self.type == 'tempo': if self.tempo != other.tempo: return False if self.type == 'programChange': if self.programNumber != other.programNumber or self.channel != other.channel: return False if self.type == 'trackName': if self.trackName != other.trackName: return False if self.type == 'controllerEvent': if self.parameter1 != other.parameter1 or self.parameter2 != other.parameter2 or self.channel != other.channel or self.eventType != other.eventType: return False if self.type == 'SysEx': if self.manID != other.manID: return False if self.type == 'UniversalSysEx': if self.code != other.code or self.subcode != other.subcode or self.sysExChannel != other.sysExChannel: return False return True def __hash__(self): a = int(self.time) a = (a+0x7ed55d16) + (a<<12) a = (a^0xc761c23c) ^ (a>>19) a = (a+0x165667b1) + (a<<5) a = (a+0xd3a2646c) ^ (a<<9) a = (a+0xfd7046c5) + (a<<3) a = (a^0xb55a4f09) ^ (a>>16) return a class MIDITrack: class note(GenericEvent): def __init__(self,channel, pitch,time,duration,volume): GenericEvent.__init__(self,time) self.pitch = pitch self.duration = duration self.volume = volume self.type = 'note' self.channel = channel def compare(self, other): if self.pitch == other.pitch and self.time == other.time and self.duration == other.duration and self.volume == other.volume and self.type == other.type and self.channel == other.channel: return True else: return False class tempo(GenericEvent): def __init__(self,time,tempo): GenericEvent.__init__(self,time) self.type = 'tempo' self.tempo = int(60000000 / tempo) class programChange(GenericEvent): def __init__(self, channel, time, programNumber): GenericEvent.__init__(self, time,) self.type = 'programChange' self.programNumber = programNumber self.channel = channel class SysExEvent(GenericEvent): def __init__(self, time, manID, payload): GenericEvent.__init__(self, time,) self.type = 'SysEx' self.manID = manID self.payload = payload class UniversalSysExEvent(GenericEvent): def __init__(self, time, realTime, sysExChannel, code, subcode, payload): GenericEvent.__init__(self, time,) self.type = 'UniversalSysEx' self.realTime = realTime self.sysExChannel = sysExChannel self.code = code self.subcode = subcode self.payload = payload class ControllerEvent(GenericEvent): def __init__(self, channel, time, eventType, parameter1,): GenericEvent.__init__(self, time,) self.type = 'controllerEvent' self.parameter1 = parameter1 self.channel = channel self.eventType = eventType class trackName(GenericEvent): def __init__(self, time, trackName): GenericEvent.__init__(self, time,) self.type = 'trackName' self.trackName = trackName def __init__(self, 
removeDuplicates, deinterleave): self.headerString = struct.pack('cccc',b'M',b'T',b'r',b'k') self.dataLength = 0 if PYTHON3: self.MIDIdata = b"" else: self.MIDIdata = "" self.closed = False self.eventList = [] self.MIDIEventList = [] self.remdep = removeDuplicates self.deinterleave = deinterleave def addNoteByNumber(self,channel, pitch,time,duration,volume): self.eventList.append(MIDITrack.note(channel, pitch,time,duration,volume)) def addControllerEvent(self,channel,time,eventType, paramerter1): self.eventList.append(MIDITrack.ControllerEvent(channel,time,eventType, paramerter1)) def addTempo(self,time,tempo): self.eventList.append(MIDITrack.tempo(time,tempo)) def addSysEx(self,time,manID, payload): self.eventList.append(MIDITrack.SysExEvent(time, manID, payload)) def addUniversalSysEx(self,time,code, subcode, payload, sysExChannel=0x7F, realTime=False): self.eventList.append(MIDITrack.UniversalSysExEvent(time, realTime, sysExChannel, code, subcode, payload)) def addProgramChange(self,channel, time, program): self.eventList.append(MIDITrack.programChange(channel, time, program)) def addTrackName(self,time,trackName): self.eventList.append(MIDITrack.trackName(time,trackName)) def changeNoteTuning(self, tunings, sysExChannel=0x7F, realTime=False, tuningProgam=0): payload = struct.pack('>B', tuningProgam) payload = payload + struct.pack('>B', len(tunings)) for (noteNumber, frequency) in tunings: payload = payload + struct.pack('>B', noteNumber) MIDIFreqency = frequencyTransform(frequency) for byte in MIDIFreqency: payload = payload + struct.pack('>B', byte) self.eventList.append(MIDITrack.UniversalSysExEvent(0, realTime, sysExChannel, 8, 2, payload)) def processEventList(self): for thing in self.eventList: if thing.type == 'note': event = MIDIEvent() event.type = "NoteOn" event.time = thing.time * TICKSPERBEAT event.pitch = thing.pitch event.volume = thing.volume event.channel = thing.channel event.ord = 3 self.MIDIEventList.append(event) event = MIDIEvent() event.type = "NoteOff" event.time = (thing.time + thing.duration) * TICKSPERBEAT event.pitch = thing.pitch event.volume = thing.volume event.channel = thing.channel event.ord = 2 self.MIDIEventList.append(event) elif thing.type == 'tempo': event = MIDIEvent() event.type = "Tempo" event.time = thing.time * TICKSPERBEAT event.tempo = thing.tempo event.ord = 3 self.MIDIEventList.append(event) elif thing.type == 'programChange': event = MIDIEvent() event.type = "ProgramChange" event.time = thing.time * TICKSPERBEAT event.programNumber = thing.programNumber event.channel = thing.channel event.ord = 1 self.MIDIEventList.append(event) elif thing.type == 'trackName': event = MIDIEvent() event.type = "TrackName" event.time = thing.time * TICKSPERBEAT event.trackName = thing.trackName event.ord = 0 self.MIDIEventList.append(event) elif thing.type == 'controllerEvent': event = MIDIEvent() event.type = "ControllerEvent" event.time = thing.time * TICKSPERBEAT event.eventType = thing.eventType event.channel = thing.channel event.paramerter1 = thing.parameter1 event.ord = 1 self.MIDIEventList.append(event) elif thing.type == 'SysEx': event = MIDIEvent() event.type = "SysEx" event.time = thing.time * TICKSPERBEAT event.manID = thing.manID event.payload = thing.payload event.ord = 1 self.MIDIEventList.append(event) elif thing.type == 'UniversalSysEx': event = MIDIEvent() event.type = "UniversalSysEx" event.realTime = thing.realTime event.sysExChannel = thing.sysExChannel event.time = thing.time * TICKSPERBEAT event.code = thing.code event.subcode = 
thing.subcode event.payload = thing.payload event.ord = 1 self.MIDIEventList.append(event) else: print ("Error in MIDITrack: Unknown event type") sys.exit(2) if PYTHON3: self.MIDIEventList.sort(key=lambda x: (x.time)) else: self.MIDIEventList.sort(lambda x, y: int( 1000 * (x.time - y.time))) if self.deinterleave: self.deInterleaveNotes() def removeDuplicates(self): tempDict = {} for item in self.eventList: tempDict[item] = 1 if PYTHON3: self.eventList = list(tempDict.keys()) else: self.eventList = tempDict.keys() if PYTHON3: self.eventList.sort(key=lambda x: (x.type)) self.eventList.sort(key=lambda x: (x.time)) else: self.eventList.sort(lambda x, y: cmp(x.type , y.type)) self.eventList.sort(lambda x, y: int( 1000 * (x.time - y.time))) def closeTrack(self): if self.closed == True: return self.closed = True if self.remdep: self.removeDuplicates() self.processEventList() def writeMIDIStream(self): self.writeEventsToStream() self.MIDIdata = self.MIDIdata + struct.pack('BBBB',0x00,0xFF, 0x2F,0x00) self.dataLength = struct.pack('>L',len(self.MIDIdata))
MIT License
cta-observatory/ctapipe
ctapipe/io/dl1eventsource.py
DL1EventSource.has_simulated_dl1
python
def has_simulated_dl1(self):
    if self.is_simulation:
        if "telescope" in self.file_.root.simulation.event:
            return True
    return False
True for files with telescope-wise event information in the simulation group
https://github.com/cta-observatory/ctapipe/blob/8851e1214409eac4564996cc0f4b76dfe05cf9cf/ctapipe/io/dl1eventsource.py#L173-L180
import astropy.units as u from astropy.utils.decorators import lazyproperty import logging import numpy as np import tables from ..core import Container, Field from ..instrument import SubarrayDescription from ..containers import ( ConcentrationContainer, ArrayEventContainer, DL1CameraContainer, EventIndexContainer, CameraHillasParametersContainer, HillasParametersContainer, IntensityStatisticsContainer, LeakageContainer, MorphologyContainer, SimulationConfigContainer, SimulatedShowerContainer, SimulatedEventContainer, PeakTimeStatisticsContainer, CameraTimingParametersContainer, TimingParametersContainer, TriggerContainer, ImageParametersContainer, ) from .eventsource import EventSource from .hdf5tableio import HDF5TableReader from .datalevels import DataLevel from ..utils import IndexFinder __all__ = ["DL1EventSource"] logger = logging.getLogger(__name__) COMPATIBLE_DL1_VERSIONS = [ "v1.0.0", "v1.0.1", "v1.0.2", "v1.0.3", "v1.1.0", "v1.2.0", "v2.0.0", "v2.1.0", ] class DL1EventSource(EventSource): def __init__(self, input_url=None, config=None, parent=None, **kwargs): super().__init__(input_url=input_url, config=config, parent=parent, **kwargs) self.file_ = tables.open_file(self.input_url) self._full_subarray_info = SubarrayDescription.from_hdf(self.input_url) if self.allowed_tels: self._subarray_info = self._full_subarray_info.select_subarray( self.allowed_tels ) else: self._subarray_info = self._full_subarray_info self._simulation_configs = self._parse_simulation_configs() self.datamodel_version = self.file_.root._v_attrs[ "CTA PRODUCT DATA MODEL VERSION" ] params = "parameters" in self.file_.root.dl1.event.telescope images = "images" in self.file_.root.dl1.event.telescope if params and images: self._datalevels = (DataLevel.DL1_IMAGES, DataLevel.DL1_PARAMETERS) elif params: self._datalevels = (DataLevel.DL1_PARAMETERS,) elif images: self._datalevels = (DataLevel.DL1_IMAGES,) def __exit__(self, exc_type, exc_val, exc_tb): self.close() def close(self): self.file_.close() @staticmethod def is_compatible(file_path): with open(file_path, "rb") as f: magic_number = f.read(8) if magic_number != b"\x89HDF\r\n\x1a\n": return False with tables.open_file(file_path) as f: metadata = f.root._v_attrs if "CTA PRODUCT DATA LEVEL" not in metadata._v_attrnames: return False if "DL1" not in metadata["CTA PRODUCT DATA LEVEL"]: return False if "CTA PRODUCT DATA MODEL VERSION" not in metadata._v_attrnames: return False version = metadata["CTA PRODUCT DATA MODEL VERSION"] if version not in COMPATIBLE_DL1_VERSIONS: logger.error( f"File is DL1 file but has unsupported version {version}" f", supported versions are {COMPATIBLE_DL1_VERSIONS}" ) return False return True @property def is_simulation(self): return "simulation" in self.file_.root @property
BSD 3-Clause New or Revised License
timorthi/pyfootball
pyfootball/globals.py
update_prev_response
python
def update_prev_response(r, endpoint):
    global prev_response
    prev_response = r.headers
    prev_response['Status-Code'] = r.status_code
    prev_response['Endpoint'] = endpoint
Sets the prev_response attribute to contain a dict that includes
the response status code and headers of the most recent HTTP request.

Arguments:
r -- The response object (of the latest HTTP request).
endpoint -- The endpoint used (in the latest HTTP request).
https://github.com/timorthi/pyfootball/blob/22ed081720c52ef58ec4d1b9ddeb0bf4542a03b4/pyfootball/globals.py#L20-L32
api_key = ""
headers = {}
prev_response = {}

_base = 'http://api.football-data.org'

endpoints = {
    'fixture': _base + '/v1/fixtures/{}',
    'all_fixtures': _base + '/v1/fixtures/',
    'competition': _base + '/v1/competitions/{}',
    'all_competitions': _base + '/v1/competitions/',
    'comp_teams': _base + '/v1/competitions/{}/teams',
    'comp_fixtures': _base + '/v1/competitions/{}/fixtures',
    'team': _base + '/v1/teams/{}',
    'team_players': _base + '/v1/teams/{}/players',
    'team_fixtures': _base + '/v1/teams/{}/fixtures/',
    'league_table': _base + '/v1/competitions/{}/leagueTable'
}
MIT License
vbwagner/ctypescrypto
ctypescrypto/x509.py
X509_EXT.__init__
python
def __init__(self, ptr, copy=False):
    if copy:
        self.ptr = libcrypto.X509_EXTENSION_dup(ptr)
    else:
        self.ptr = cast(ptr, POINTER(_x509_ext))
Initializes from the pointer to X509_EXTENSION. If copy is True, creates a copy, otherwise just stores pointer.
https://github.com/vbwagner/ctypescrypto/blob/90b32fda368a119c1a1e44b8c501653beaa2ca97/ctypescrypto/x509.py#L250-L258
from ctypes import c_void_p, c_long, c_ulong, c_int, POINTER, c_char_p, Structure, cast from ctypescrypto.bio import Membio from ctypescrypto.pkey import PKey from ctypescrypto.oid import Oid from ctypescrypto.exception import LibCryptoError from ctypescrypto import libcrypto, pyver, chartype, inttype, bintype from datetime import datetime import sys try: from pytz import utc except ImportError: from datetime import timedelta, tzinfo ZERO = timedelta(0) class UTC(tzinfo): def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO utc = UTC() __all__ = ['X509', 'X509Error', 'X509Name', 'X509Store', 'StackOfX509'] if hasattr(libcrypto,"X509_get_version"): _X509_get_version = libcrypto.X509_get_version _X509_get_version.restype = c_long _X509_get_version.argtypes = (c_void_p,) _X509_get_notBefore=libcrypto.X509_getm_notBefore _X509_get_notBefore.restype = c_void_p _X509_get_notBefore.argtypes = (c_void_p,) _X509_get_notAfter=libcrypto.X509_getm_notAfter _X509_get_notAfter.restype = c_void_p _X509_get_notAfter.argtypes = (c_void_p,) else: class _validity(Structure): _fields_ = [('notBefore', c_void_p), ('notAfter', c_void_p)] class _cinf(Structure): _fields_ = [('version', c_void_p), ('serialNumber', c_void_p), ('sign_alg', c_void_p), ('issuer', c_void_p), ('validity', POINTER(_validity)), ('subject', c_void_p), ('pubkey', c_void_p), ('issuerUID', c_void_p), ('subjectUID', c_void_p), ('extensions', c_void_p), ] class _x509(Structure): _fields_ = [('cert_info', POINTER(_cinf)), ('sig_alg', c_void_p), ('signature', c_void_p), ] _px509 = POINTER(_x509) def _X509_get_version(ptr): asn1int = cast(ptr, _px509)[0].cert_info[0].version return libcrypto.ASN1_INTEGER_get(asn1int) def _X509_get_notBefore(ptr): return cast(ptr, _px509)[0].cert_info[0].validity[0].notBefore def _X509_get_notAfter(ptr): return cast(ptr, _px509)[0].cert_info[0].validity[0].notAfter if hasattr(libcrypto,'sk_num'): sk_num = libcrypto.sk_num sk_set = libcrypto.sk_set sk_value = libcrypto.sk_value sk_delete = libcrypto.sk_delete sk_new_null = libcrypto.sk_new_null sk_pop_free = libcrypto.sk_pop_free sk_push = libcrypto.sk_push else: sk_num = libcrypto.OPENSSL_sk_num sk_set = libcrypto.OPENSSL_sk_set sk_value = libcrypto.OPENSSL_sk_value sk_delete = libcrypto.OPENSSL_sk_delete sk_new_null = libcrypto.OPENSSL_sk_new_null sk_pop_free = libcrypto.OPENSSL_sk_pop_free sk_push = libcrypto.OPENSSL_sk_push class X509Error(LibCryptoError): pass class X509Name(object): PRINT_FLAG = 0x10010 ESC_MSB = 4 def __init__(self, ptr=None, copy=False): if ptr is not None: self.ptr = ptr self.need_free = copy self.writable = False else: self.ptr = libcrypto.X509_NAME_new() self.need_free = True self.writable = True def __del__(self): if self.need_free: libcrypto.X509_NAME_free(self.ptr) def __bytes__(self): bio = Membio() libcrypto.X509_NAME_print_ex(bio.bio, self.ptr, 0, self.PRINT_FLAG | self.ESC_MSB) return bio.__bytes__() def __unicode__(self): bio = Membio() libcrypto.X509_NAME_print_ex(bio.bio, self.ptr, 0, self.PRINT_FLAG) return bio.__unicode__() if pyver == 2: __str__ = __bytes__ else: __str__ = __unicode__ def __len__(self): return libcrypto.X509_NAME_entry_count(self.ptr) def __cmp__(self, other): return libcrypto.X509_NAME_cmp(self.ptr, other.ptr) def __eq__(self, other): return libcrypto.X509_NAME_cmp(self.ptr, other.ptr) == 0 def __gt__(self, other): return libcrypto.X509_NAME_cmp(self.ptr, other.ptr) > 0 def __lt__(self, other): return libcrypto.X509_NAME_cmp(self.ptr, other.ptr) < 0 def 
__getitem__(self, key): if isinstance(key, Oid): idx = libcrypto.X509_NAME_get_index_by_NID(self.ptr, key.nid, -1) if idx < 0: raise KeyError("Key not found " + str(Oid)) entry = libcrypto.X509_NAME_get_entry(self.ptr, idx) value = libcrypto.X509_NAME_ENTRY_get_data(entry) bio = Membio() libcrypto.ASN1_STRING_print_ex(bio.bio, value, self.PRINT_FLAG) return chartype(bio) elif isinstance(key, inttype): entry = libcrypto.X509_NAME_get_entry(self.ptr, key) if entry is None: raise IndexError("name entry index out of range") oid = Oid.fromobj(libcrypto.X509_NAME_ENTRY_get_object(entry)) value = libcrypto.X509_NAME_ENTRY_get_data(entry) bio = Membio() libcrypto.ASN1_STRING_print_ex(bio.bio, value, self.PRINT_FLAG) return (oid, chartype(bio)) else: raise TypeError("X509 NAME can be indexed by Oids or integers only") def __setitem__(self, key, val): if not self.writable: raise ValueError("Attempt to modify constant X509 object") else: raise NotImplementedError def __delitem__(self, key): if not self.writable: raise ValueError("Attempt to modify constant X509 object") else: raise NotImplementedError def __hash__(self): return libcrypto.X509_NAME_hash(self.ptr) class _x509_ext(Structure): _fields_ = [("object", c_void_p), ("critical", c_int), ("value", c_void_p) ] class X509_EXT(object):
MIT License
daxm/fmcapi
fmcapi/api_objects/object_services/ranges.py
Ranges.parse_kwargs
python
def parse_kwargs(self, **kwargs):
    super().parse_kwargs(**kwargs)
    logging.debug("In parse_kwargs() for AccessRules class.")
    if "value" in kwargs:
        value_type = get_networkaddress_type(kwargs["value"])
        if value_type == "host" or value_type == "network":
            logging.warning(
                f"value, {kwargs['value']}, is of type {value_type}. Limited functionality for this "
                f"object due to it being created via the Ranges function."
            )
        if validate_ip_bitmask_range(value=kwargs["value"], value_type=value_type):
            self.value = kwargs["value"]
        else:
            logging.error(
                f"Provided value, {kwargs['value']}, has an error with the IP address(es)."
            )
Parse the kwargs and set self variables to match.

:return: None
https://github.com/daxm/fmcapi/blob/fc4bad7ff733a6283e83970d7844c73e7e88a50c/fmcapi/api_objects/object_services/ranges.py#L28-L48
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.helper_functions import *
import logging


class Ranges(APIClassTemplate):
    VALID_JSON_DATA = ["id", "name", "value", "description"]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/object/ranges"
    REQUIRED_FOR_POST = ["name", "value"]

    def __init__(self, fmc, **kwargs):
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for Ranges class.")
        self.parse_kwargs(**kwargs)
BSD 3-Clause New or Revised License
mac-automl/xnas
xnas/core/logging.py
load_log_data
python
def load_log_data(log_file, data_types_to_skip=()):
    assert os.path.exists(log_file), "Log file not found: {}".format(log_file)
    with open(log_file, "r") as f:
        lines = f.readlines()
    lines = [_l[_l.find(_TAG) + len(_TAG):] for _l in lines if _TAG in _l]
    lines = [simplejson.loads(_l) for _l in lines]
    lines = [_l for _l in lines if _TYPE in _l and not _l[_TYPE] in data_types_to_skip]
    data_types = [_l[_TYPE] for _l in lines]
    data = {t: [] for t in data_types}
    for t, line in zip(data_types, lines):
        del line[_TYPE]
        data[t].append(line)
    for t in data:
        metrics = sorted(data[t][0].keys())
        err_str = "Inconsistent metrics in log for _type={}: {}".format(
            t, metrics)
        assert all(sorted(d.keys()) == metrics for d in data[t]), err_str
        data[t] = {m: [d[m] for d in data[t]] for m in metrics}
    return data
Loads log data into a dictionary of the form data[data_type][metric][index].
https://github.com/mac-automl/xnas/blob/0b8f8680af89c236acdb2be9b54b7e96fd5bda1f/xnas/core/logging.py#L95-L119
import builtins import decimal import logging import os import sys import xnas.core.distributed as dist import simplejson from xnas.core.config import cfg _FORMAT = "[%(filename)s: %(lineno)3d]: %(message)s" _LOG_FILE = "stdout.log" _TAG = "json_stats: " _TYPE = "_type" def _suppress_print(): def ignore(*_objects, _sep=" ", _end="\n", _file=sys.stdout, _flush=False): pass builtins.print = ignore def setup_logging(): if dist.is_master_proc(): logging.root.handlers = [] logging_config = {"level": logging.INFO, "format": _FORMAT} if cfg.LOG_DEST == "stdout": logging_config["stream"] = sys.stdout else: logging_config["filename"] = os.path.join(cfg.OUT_DIR, _LOG_FILE) logging.basicConfig(**logging_config) else: _suppress_print() def get_logger(name): return logging.getLogger(name) def dump_log_data(data, data_type, prec=4): data[_TYPE] = data_type data = float_to_decimal(data, prec) data_json = simplejson.dumps(data, sort_keys=True, use_decimal=True) return "{:s}{:s}".format(_TAG, data_json) def float_to_decimal(data, prec=4): if isinstance(data, dict): return {k: float_to_decimal(v, prec) for k, v in data.items()} if isinstance(data, float): return decimal.Decimal(("{:." + str(prec) + "f}").format(data)) else: return data def get_log_files(log_dir, name_filter=""): names = [n for n in sorted(os.listdir(log_dir)) if name_filter in n] files = [os.path.join(log_dir, n, _LOG_FILE) for n in names] f_n_ps = [(f, n) for (f, n) in zip(files, names) if os.path.exists(f)] files, names = zip(*f_n_ps) if f_n_ps else [], [] return files, names
MIT License
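A small stand-alone sketch of the `json_stats:` log format that the `load_log_data` record above parses, based on the `dump_log_data` helper visible in its context. Illustrative only: the two example lines, the metric names (`epoch`, `top1_err`), and the use of the stdlib `json` module in place of `simplejson` are assumptions.

import json

# Two fabricated lines in the "json_stats: {...}" format written by dump_log_data().
lines = [
    'json_stats: {"_type": "train_epoch", "epoch": 1, "top1_err": 42.0}',
    'json_stats: {"_type": "train_epoch", "epoch": 2, "top1_err": 37.5}',
]
tag = "json_stats: "

# Strip the tag, decode the JSON payloads, and group records by their _type.
records = [json.loads(l[l.find(tag) + len(tag):]) for l in lines if tag in l]
grouped = {}
for rec in records:
    t = rec.pop("_type")
    grouped.setdefault(t, []).append(rec)

# Pivot into data[data_type][metric] -> list of values, as the docstring describes.
data = {t: {m: [d[m] for d in rows] for m in rows[0]} for t, rows in grouped.items()}
print(data["train_epoch"]["top1_err"])  # [42.0, 37.5]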
calebbell/thermo
thermo/phases/phase.py
Phase.dP_dV
python
def dP_dV(self):
    raise NotImplementedError("Must be implemented by subphases")
r'''Method to calculate and return the first volume derivative of
pressure of the phase.

Returns
-------
dP_dV : float
    First volume derivative of pressure, [Pa*mol/m^3]
https://github.com/calebbell/thermo/blob/554425bd7b6fae231b9659f09fce392f347505fd/thermo/phases/phase.py#L441-L450
__all__ = [ 'Phase', 'derivatives_thermodynamic', 'derivatives_thermodynamic_mass', 'derivatives_jacobian', ] from fluids.constants import R, R_inv from math import sqrt from thermo.serialize import arrays_to_lists from fluids.numerics import (horner, horner_log, jacobian, poly_fit_integral_value, poly_fit_integral_over_T_value, newton_system, trunc_exp, is_micropython) from chemicals.utils import (log, Cp_minus_Cv, phase_identification_parameter, Joule_Thomson, speed_of_sound, dxs_to_dns, dns_to_dn_partials, hash_any_primitive) from thermo.utils import POLY_FIT from thermo import phases from .phase_utils import object_lookups class Phase(object): INCOMPRESSIBLE_CONST = 1e30 R = R R2 = R*R R_inv = R_inv is_solid = False ideal_gas_basis = False T_REF_IG = 298.15 T_REF_IG_INV = 1.0/T_REF_IG P_REF_IG = 101325. P_REF_IG_INV = 1.0/P_REF_IG LOG_P_REF_IG = log(P_REF_IG) T_MAX_FIXED = 10000.0 T_MIN_FIXED = 1e-3 P_MAX_FIXED = 1e9 P_MIN_FIXED = 1e-2 V_MIN_FIXED = 1e-9 V_MAX_FIXED = 1e9 T_MIN_FLASH = 1e-300 force_phase = None _Psats_data = None _Cpgs_data = None Psats_poly_fit = False Cpgs_poly_fit = False composition_independent = False scalar = True pure_references = () pure_reference_types = () obj_references = () pointer_references = () pointer_reference_dicts = () reference_pointer_dicts = () if not is_micropython: def __init_subclass__(cls): cls.__full_path__ = "%s.%s" %(cls.__module__, cls.__qualname__) else: __full_path__ = None def __str__(self): s = '<%s, ' %(self.__class__.__name__) try: s += 'T=%g K, P=%g Pa' %(self.T, self.P) except: pass s += '>' return s def as_json(self): d = self.__dict__.copy() if not self.scalar: d = arrays_to_lists(d) for obj_name in self.obj_references: o = d[obj_name] if type(o) is list: d[obj_name] = [v.as_json() for v in o] else: d[obj_name] = o.as_json() for prop_name in self.pure_references: l = d[prop_name] if l: d[prop_name] = [v.as_json() for v in l] for ref_name, ref_lookup in zip(self.pointer_references, self.reference_pointer_dicts): d[ref_name] = ref_lookup[d[ref_name]] d["py/object"] = self.__full_path__ d['json_version'] = 1 return d @classmethod def from_json(cls, json_repr): d = json_repr phase_name = d['py/object'] del d['py/object'] del d['json_version'] phase = phases.phase_full_path_dict[phase_name] new = phase.__new__(phase) for obj_name, obj_cls in zip(new.pure_references, new.pure_reference_types): l = d[obj_name] if l: for i, v in enumerate(l): l[i] = obj_cls.from_json(v) for obj_name in new.obj_references: o = d[obj_name] if type(o) is list: d[obj_name] = [object_lookups[v['py/object']].from_json(v) for v in o] else: obj_cls = object_lookups[o['py/object']] d[obj_name] = obj_cls.from_json(o) for ref_name, ref_lookup in zip(new.pointer_references, new.pointer_reference_dicts): d[ref_name] = ref_lookup[d[ref_name]] new.__dict__ = d return new def __hash__(self): self.model_hash(False) self.model_hash(True) self.state_hash() d = self.__dict__ ans = hash_any_primitive((self.__class__.__name__, d)) return ans def __eq__(self, other): return self.__hash__() == hash(other) def state_hash(self): return hash_any_primitive((self.model_hash(), self.T, self.P, self.V(), self.zs)) def model_hash(self, ignore_phase=False): if ignore_phase: try: return self._model_hash_ignore_phase except AttributeError: pass else: try: return self._model_hash except AttributeError: pass to_hash = [getattr(self, v) for v in self.model_attributes] self._model_hash_ignore_phase = h = hash_any_primitive(to_hash) self._model_hash = hash((self.__class__.__name__, h)) if 
ignore_phase: return self._model_hash_ignore_phase else: return self._model_hash def value(self, name): if name in ('beta_mass',): return self.result.value(name, self) v = getattr(self, name) try: v = v() except: pass return v def to_TP_zs(self, T, P, zs): raise NotImplementedError("Must be implemented by subphases") def to(self, zs, T=None, P=None, V=None): raise NotImplementedError("Must be implemented by subphases") def V(self): raise NotImplementedError("Must be implemented by subphases") def dP_dT(self): raise NotImplementedError("Must be implemented by subphases")
MIT License
cc1-cloud/cc1
src/cm/models/command.py
Command.execute
python
def execute(name, user_id, vm_id, **kwargs):
    vm = VM.get(user_id, vm_id)

    try:
        cmd = Command.add_command(name, user_id, vm_id, **kwargs)
        transaction.commit()
        log.debug(user_id, "Command state %s for machine %s" % (cmd.state, vm_id))

        dom = vm.lv_domain()
        dom.sendKey(0, 500, [113], 1, 0)

        retry = 3
        retry_factor = 1.2
        retry_time = 1
        try:
            while retry > 0:
                log.debug(user_id, "Check if command %s is finished for machine %s" % (cmd.id, vm_id))
                Command.objects.update()
                cmd = Command.objects.get(id=cmd.id)
                log.debug(user_id, "Checked command status: %s, %s, %s"
                          % (cmd.state, command_states['finished'], bool(cmd.state == command_states['finished'])))
                if cmd.state == command_states['finished']:
                    log.debug(user_id, "Response %s from machine %s" % (cmd.response, vm_id))
                    break
                elif cmd.state == command_states['failed']:
                    raise CMException('ctx_' + name)

                retry -= 1
                retry_time *= retry_factor
                sleep(retry_time)
        except:
            raise
        finally:
            cmd.delete()

        if retry == 0:
            log.debug(user_id, "Command %s for machine %s - TIMEOUT" % (name, vm_id))
            raise CMException('ctx_timeout')

        return cmd.response or ''
    except CMException:
        raise
    except Exception:
        log.exception(user_id, 'Execute command')
        raise CMException('ctx_execute_command')
Method executes command @prm{name} on the specified VM. User with id
@prm{user_id} must be the owner of that VM.

@parameter{name,string} name of the function to execute
@parameter{user_id,long} id of the declared VM owner
@parameter{vm_id,int} id of the VM on which command needs to be executed
@parameter{kwargs,dict} keyword args for the called function

@raises{ctx_timeout,CMException}
@raises{ctx_execute_command,CMException}
https://github.com/cc1-cloud/cc1/blob/8113673fa13b6fe195cea99dedab9616aeca3ae8/src/cm/models/command.py#L87-L141
import json import threading from time import sleep from django.db import models, transaction from cm.models.vm import VM from cm.utils import log from cm.utils.exception import CMException from cm.utils.threads.vm import VMThread from common import response from common.states import command_states, vm_states, farm_states from cm.utils import message class Command(models.Model): name = models.CharField(max_length=1000) args = models.CharField(max_length=100000) state = models.IntegerField() response = models.CharField(max_length=100000, null=True) vm = models.ForeignKey(VM) class Meta: app_label = 'cm' def __unicode__(self): return "%s %s" % (self.name, self.args) def dict(self): r = {} r['id'] = self.id r['name'] = self.name r['args'] = self.args r['state'] = self.state r['response'] = self.response return r @staticmethod def add_command(name, user_id, vm_id, **kwargs): cmd = Command() cmd.vm_id = vm_id cmd.name = name cmd.args = json.dumps(kwargs) cmd.state = command_states['pending'] cmd.response = None log.debug(user_id, "Add command %s for machine %s" % (name, vm_id)) cmd.save() return cmd @staticmethod
Apache License 2.0
sec-edgar/sec-edgar
secedgar/client.py
NetworkClient.rate_limit
python
def rate_limit(self):
    return self._rate_limit
int: Number of requests to limit client to per second.
https://github.com/sec-edgar/sec-edgar/blob/7cba98531d730f75588d1ec64691a82d5db209d3/secedgar/client.py#L119-L121
import asyncio import os import time import aiohttp import requests import tqdm from bs4 import BeautifulSoup from requests.adapters import HTTPAdapter from urllib3.util.retry import Retry from secedgar.exceptions import EDGARQueryError from secedgar.utils import make_path class NetworkClient: _BASE = "http://www.sec.gov/" def __init__(self, user_agent, retry_count=3, batch_size=10, backoff_factor=0, rate_limit=10): self.retry_count = retry_count self.batch_size = batch_size self.backoff_factor = backoff_factor self.rate_limit = rate_limit self.user_agent = user_agent @property def retry_count(self): return self._retry_count @retry_count.setter def retry_count(self, value): if not isinstance(value, int): raise TypeError("Retry count must be int. Given type {0}.".format( type(value))) elif value < 0: raise ValueError( "Retry count must be greater than 0. Given {0}.".format(value)) self._retry_count = value @property def batch_size(self): return self._batch_size @batch_size.setter def batch_size(self, value): if not isinstance(value, int): raise TypeError("Batch size must be int. Given type {0}".format( type(value))) elif value < 1: raise ValueError("Batch size must be positive integer.") self._batch_size = value @property def backoff_factor(self): return self._backoff_factor @backoff_factor.setter def backoff_factor(self, value): if not isinstance(value, (int, float)): raise TypeError( "Backoff factor must be int or float. Given type {0}".format( type(value))) self._backoff_factor = value @property
Apache License 2.0
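For context on the record above, a generic sketch of the validated-property pattern that `NetworkClient` uses for settings such as `retry_count` and `batch_size` (visible in the context field). The class and attribute names here are invented for illustration and are not part of the secedgar API.

# Illustrative only: a setter validates the value, the getter returns the
# private attribute, mirroring NetworkClient's rate_limit/retry_count style.
class Throttled:
    def __init__(self, rate_limit=10):
        self.rate_limit = rate_limit  # routed through the setter below

    @property
    def rate_limit(self):
        return self._rate_limit

    @rate_limit.setter
    def rate_limit(self, value):
        if not isinstance(value, int):
            raise TypeError("rate_limit must be int. Given type {0}.".format(type(value)))
        if value < 1:
            raise ValueError("rate_limit must be a positive integer.")
        self._rate_limit = value

t = Throttled(rate_limit=5)
print(t.rate_limit)  # 5
# t.rate_limit = 0   # would raise ValueError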
jmchilton/galaxy-central
galaxy/datatypes/data.py
Text.delete
python
def delete(self):
    obj.DBObj.delete(self)
    try:
        os.remove(self.file_name)
    except OSError, e:
        log.critical('%s delete error %s' % (self.__class__.__name__, e))
Remove the file that corresponds to this data
https://github.com/jmchilton/galaxy-central/blob/31e2fd3a32b06ddfba06ae5b044efdce1d93f08c/galaxy/datatypes/data.py#L74-L80
import logging, os, sys, time, sets, tempfile from galaxy import util from cgi import escape log = logging.getLogger(__name__) DATA_NEW, DATA_OK, DATA_FAKE = 'new', 'ok', 'fake' class Data( object ): def set_peek( self, dataset ): dataset.peek = '' dataset.blurb = 'data' def init_meta( self, dataset ): pass def missing_meta( self, dataset): return False def bed_viewport( self, dataset ): raise Exception( "'bed_viewport' not supported for this datatype" ) def as_bedfile( self, dataset ): raise Exception( "'as_bedfile' not supported for this datatype" ) def display_peek(self, dataset): try: return escape(dataset.peek) except: return "peek unavailable" def display_name(self, dataset): try: return escape(dataset.name) except: return "name unavailable" def display_info(self, dataset): try: return escape(dataset.info) except: return "info unavailable" def get_ucsc_sites(self, dataset): return util.get_ucsc_by_build(dataset.dbkey) class Text( Data ): def write_from_stream(self, stream): fd, temp_name = tempfile.mkstemp() while 1: chunk = stream.read(1048576) if not chunk: break os.write(fd, chunk) os.close(fd) fp = open(self.file_name, 'wt') for line in file(temp_name, "U"): line = line.strip() + '\n' fp.write(line) fp.close() def set_raw_data(self, data): fd, temp_name = tempfile.mkstemp() os.write(fd, data) os.close(fd) fp = open(self.file_name, 'wt') for line in file(temp_name, "U"): line = line.strip() + '\n' fp.write(line) fp.close() os.remove( temp_name )
MIT License
google/uncertainty-baselines
uncertainty_baselines/datasets/genomics_ood.py
GenomicsOodDataset.__init__
python
def __init__(self,
             split: str,
             shuffle_buffer_size: Optional[int] = None,
             num_parallel_parser_calls: int = 64,
             eval_filter_class_id: int = -1,
             data_mode: str = 'ind',
             data_dir: Optional[str] = None,
             is_training: Optional[bool] = None,
             validation_percent: Optional[float] = None,
             normalize_by_cifar: Optional[bool] = None):
    del validation_percent
    del normalize_by_cifar
    if data_dir is None:
        builder = tfds.builder('genomics_ood')
        data_dir = builder.data_dir

    super().__init__(
        name='genomics_ood',
        dataset_builder=_GenomicsOodDatasetBuilder(data_dir, data_mode),
        split=split,
        is_training=is_training,
        shuffle_buffer_size=shuffle_buffer_size,
        num_parallel_parser_calls=num_parallel_parser_calls,
        download_data=False)
Create an Genomics OOD tf.data.Dataset builder.

Args:
  split: a dataset split, either a custom tfds.Split or one of the
    tfds.Split enums [TRAIN, VALIDAITON, TEST] or their lowercase string
    names.
  shuffle_buffer_size: the number of example to use in the shuffle buffer
    for tf.data.Dataset.shuffle().
  num_parallel_parser_calls: the number of parallel threads to use while
    preprocessing in tf.data.Dataset.map().
  eval_filter_class_id: evalulate inputs from a particular class only.
  data_mode: either 'ind' or 'ood' to decide whether to read
    in-distribution data or out-of-domain data.
  data_dir: path to a directory containing the Genomics OOD dataset, with
    filenames train-*-of-*', 'validate.tfr', 'test.tfr'.
  is_training: Whether or not the given `split` is the training split. Only
    required when the passed split is not one of ['train', 'validation',
    'test', tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST].
  validation_percent: the percent of the training set to use as a
    validation set. It is not used.
  normalize_by_cifar: Whether normalize SVHN by CIFAR statistics. It is
    not used.
https://github.com/google/uncertainty-baselines/blob/d37c17c4b08a88d6546bbf299b59127a03398404/uncertainty_baselines/datasets/genomics_ood.py#L136-L182
import os from typing import Dict, Optional import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds from uncertainty_baselines.datasets import base _NUM_TRAIN = 100000 * 10 _NUM_VAL = 10000 * 10 _NUM_TEST = 10000 * 10 _TRAIN_FILEPATTERN = 'genomics_ood-train.tfrecord*' _VAL_FILEPATTERN = 'genomics_ood-validation.tfrecord*' _TEST_FILEPATTERN = 'genomics_ood-test.tfrecord*' _VAL_OOD_FILEPATTERN = 'genomics_ood-validation_ood.tfrecord*' _TEST_OOD_FILEPATTERN = 'genomics_ood-test_ood.tfrecord*' def _tfrecord_filepattern(split, data_mode): if split == tfds.Split.TRAIN and data_mode == 'ind': return _TRAIN_FILEPATTERN elif split == tfds.Split.VALIDATION and data_mode == 'ind': return _VAL_FILEPATTERN elif split == tfds.Split.TEST and data_mode == 'ind': return _TEST_FILEPATTERN elif split == tfds.Split.VALIDATION and data_mode == 'ood': return _VAL_OOD_FILEPATTERN elif split == tfds.Split.TEST and data_mode == 'ood': return _TEST_OOD_FILEPATTERN else: raise ValueError( 'No such a combination of split={} and data_mode={}'.format( split, data_mode)) class _GenomicsOodDatasetBuilder(tfds.core.DatasetBuilder): VERSION = tfds.core.Version('1.0.0') RELEASE_NOTES = { '1.0.0': 'Initial release.', } def __init__(self, data_dir, data_mode, **kwargs): super().__init__( data_dir=data_dir, **kwargs) self._data_dir = data_dir self._data_mode = data_mode def _download_and_prepare(self, dl_manager, download_config=None): raise NotImplementedError( 'Must provide a data_dir with the files already downloaded to.') def _as_dataset( self, split: tfds.Split, decoders=None, read_config=None, shuffle_files=False) -> tf.data.Dataset: del decoders del read_config del shuffle_files file_pattern = _tfrecord_filepattern(split, self._data_mode) file_list = tf.io.gfile.glob(os.path.join(self._data_dir, file_pattern)) dataset = tf.data.TFRecordDataset(file_list) return dataset def _info(self) -> tfds.core.DatasetInfo: features = { 'seq': tfds.features.Tensor(shape=[], dtype=tf.string), 'label': tfds.features.ClassLabel(num_classes=10), 'seq_info': tfds.features.Tensor(shape=(None,), dtype=tf.string), 'domain': tfds.features.Tensor(shape=(None,), dtype=tf.string), } info = tfds.core.DatasetInfo( builder=self, description='Genomics OOD dataset.', features=tfds.features.FeaturesDict(features), metadata=tfds.core.MetadataDict()) split_infos = [ tfds.core.SplitInfo( name=tfds.Split.VALIDATION, shard_lengths=[_NUM_VAL], num_bytes=0, ), tfds.core.SplitInfo( name=tfds.Split.TEST, shard_lengths=[_NUM_TEST], num_bytes=0, ), tfds.core.SplitInfo( name=tfds.Split.TRAIN, shard_lengths=[_NUM_TRAIN], num_bytes=0, ), ] split_dict = tfds.core.SplitDict( split_infos, dataset_name='__genomics_ood_dataset_builder') info.set_splits(split_dict) return info class GenomicsOodDataset(base.BaseDataset):
Apache License 2.0
python-useful-helpers/exec-helpers
exec_helpers/subprocess.py
_SubprocessExecuteContext.__enter__
python
def __enter__(self) -> SubprocessExecuteAsyncResult:
    started = datetime.datetime.utcnow()

    self.__process = subprocess.Popen(
        args=self.command,
        stdout=subprocess.PIPE if self.open_stdout else subprocess.DEVNULL,
        stderr=subprocess.PIPE if self.open_stderr else subprocess.DEVNULL,
        stdin=subprocess.PIPE,
        shell=True,
        cwd=self.__cwd,
        env=self.__env,
        universal_newlines=False,
        **_subprocess_helpers.subprocess_kw,
    )
    process = self.__process.__enter__()

    if self.stdin is not None:
        if process.stdin is None:
            self.logger.warning("STDIN pipe is not set, but STDIN data is available to send.")
        else:
            try:
                process.stdin.write(self.stdin)
            except BrokenPipeError:
                self.logger.warning("STDIN Send failed: broken PIPE")
            except OSError as exc:
                if exc.errno == errno.EINVAL:
                    self.logger.warning("STDIN Send failed: closed PIPE")
                else:
                    _subprocess_helpers.kill_proc_tree(process.pid)
                    process.kill()
                    raise

            try:
                process.stdin.close()
            except BrokenPipeError:
                self.logger.warning("STDIN Send failed: broken PIPE")
            except OSError as exc:
                if exc.errno != errno.EINVAL:
                    process.kill()
                    raise

    return SubprocessExecuteAsyncResult(
        interface=process,
        stdin=None,
        stderr=process.stderr,
        stdout=process.stdout,
        started=started,
    )
Context manager enter.

:return: raw execution information
:rtype: SshExecuteAsyncResult
:raises OSError: stdin write failed/stdin close failed

Command is executed only in context manager to be sure,
that everything will be cleaned up properly.
https://github.com/python-useful-helpers/exec-helpers/blob/3e0adfa7dded72ac1c9c93bd88db070f4c9050b6/exec_helpers/subprocess.py#L176-L234
from __future__ import annotations import concurrent.futures import copy import datetime import errno import logging import os import pathlib import subprocess import typing import warnings from exec_helpers import api from exec_helpers import constants from exec_helpers import exceptions from exec_helpers import exec_result from exec_helpers import proc_enums from . import _log_templates from . import _subprocess_helpers if typing.TYPE_CHECKING: import types from exec_helpers.api import CalledProcessErrorSubClassT from exec_helpers.api import CommandT from exec_helpers.api import ErrorInfoT from exec_helpers.api import ExpectedExitCodesT from exec_helpers.api import LogMaskReT from exec_helpers.api import OptionalStdinT from exec_helpers.api import OptionalTimeoutT __all__ = ("Subprocess", "SubprocessExecuteAsyncResult", "EnvT", "CwdT") EnvT = typing.Optional[ typing.Union[typing.Mapping[bytes, typing.Union[bytes, str]], typing.Mapping[str, typing.Union[bytes, str]]] ] CwdT = typing.Optional[typing.Union[str, bytes, pathlib.Path]] _OptionalIOBytes = typing.Optional[typing.IO[bytes]] class SubprocessExecuteAsyncResult(api.ExecuteAsyncResult): __slots__ = () @property def interface(self) -> subprocess.Popen[bytes]: return super().interface @property def stdin(self) -> _OptionalIOBytes: warnings.warn("stdin access deprecated: FIFO is often closed on execution and direct access is not expected.") return super().stdin @property def stderr(self) -> _OptionalIOBytes: return super().stderr @property def stdout(self) -> _OptionalIOBytes: return super().stdout class _SubprocessExecuteContext(api.ExecuteContext, typing.ContextManager[SubprocessExecuteAsyncResult]): __slots__ = ("__cwd", "__env", "__process") def __init__( self, *, command: str, stdin: typing.Optional[bytes] = None, open_stdout: bool = True, open_stderr: bool = True, cwd: CwdT = None, env: EnvT = None, logger: logging.Logger, **kwargs: typing.Any, ) -> None: super().__init__( command=command, stdin=stdin, open_stdout=open_stdout, open_stderr=open_stderr, logger=logger, **kwargs, ) self.__cwd = cwd self.__env = env self.__process: typing.Optional[subprocess.Popen[bytes]] = None def __repr__(self) -> str: return ( f"<Subprocess().open_execute_context(" f"command={self.command!r}, " f"stdin={self.stdin!r}, " f"open_stdout={self.open_stdout!r}, " f"open_stderr={self.open_stderr!r}, " f"cwd={self.__cwd!r}, " f"env={self.__env!r}, " f"logger={self.logger!r}) " f"at {id(self)}>" )
Apache License 2.0
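A simplified stand-alone sketch of the pattern implemented by `__enter__` in the record above: start a shell command with piped streams, feed it stdin, close the pipe, then read the output. This bypasses exec_helpers entirely, uses only the standard library, and assumes a POSIX shell where `cat -` is available; it is not the package's API.

import subprocess

# Start the child with PIPEd stdin/stdout/stderr, as the context manager above does.
with subprocess.Popen(
    args="cat -",                  # echoes stdin back (assumed POSIX shell)
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    stdin=subprocess.PIPE,
    shell=True,
) as process:
    try:
        process.stdin.write(b"hello\n")
        process.stdin.close()      # close the pipe so the child sees EOF
    except BrokenPipeError:
        pass                       # child may already have exited
    out, err = process.stdout.read(), process.stderr.read()

print(out)  # b'hello\n'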
idealo/imagededup
imagededup/handlers/metrics/classification.py
_make_positive_duplicate_pairs
python
def _make_positive_duplicate_pairs(ground_truth: Dict, retrieved: Dict) -> List[Tuple]:
    pairs = []
    for mapping in [ground_truth, retrieved]:
        valid_pairs = []
        for k, v in mapping.items():
            valid_pairs.extend(list(zip([k]*len(v), v)))
        pairs.append(_get_unique_ordered_tuples(valid_pairs))
    return pairs[0], pairs[1]
Given ground_truth and retrieved dictionary, generate all unique positive pairs.
https://github.com/idealo/imagededup/blob/3465540cc5c8fdf9254aff76069e28641dfc515f/imagededup/handlers/metrics/classification.py#L36-L49
import itertools from typing import Dict, List, Tuple import numpy as np from sklearn.metrics import ( classification_report, precision_score, recall_score, precision_recall_fscore_support, ) from imagededup.utils.logger import return_logger logger = return_logger(__name__) def _get_unique_ordered_tuples(unique_tuples: List[Tuple]) -> List[Tuple]: return list(set([tuple(sorted(i)) for i in unique_tuples])) def _make_all_unique_possible_pairs(ground_truth_dict: Dict) -> List[Tuple]: all_files = list(ground_truth_dict.keys()) all_tuples = [i for i in itertools.product(all_files, all_files) if i[0] != i[1]] return _get_unique_ordered_tuples(all_tuples)
Apache License 2.0
qiskit/qiskit-aqua
qiskit/aqua/operators/legacy/common.py
evolution_instruction
python
def evolution_instruction(pauli_list, evo_time, num_time_slices, controlled=False, power=1, use_basis_gates=True, shallow_slicing=False, barrier=False): if not isinstance(power, int) or power < 1: raise AquaError("power must be an integer and greater or equal to 1.") state_registers = QuantumRegister(pauli_list[0][1].num_qubits) if controlled: inst_name = 'Controlled-Evolution^{}'.format(power) ancillary_registers = QuantumRegister(1) qc_slice = QuantumCircuit(state_registers, ancillary_registers, name=inst_name) else: inst_name = 'Evolution^{}'.format(power) qc_slice = QuantumCircuit(state_registers, name=inst_name) cnot_qubit_pairs = [None] * len(pauli_list) top_xyz_pauli_indices = [-1] * len(pauli_list) for pauli_idx, pauli in enumerate(reversed(pauli_list)): n_qubits = pauli[1].num_qubits nontrivial_pauli_indices = [] for qubit_idx in range(n_qubits): if not pauli[1].z[qubit_idx] and not pauli[1].x[qubit_idx]: continue if cnot_qubit_pairs[pauli_idx] is None: nontrivial_pauli_indices.append(qubit_idx) if pauli[1].x[qubit_idx]: if not pauli[1].z[qubit_idx]: if use_basis_gates: qc_slice.h(state_registers[qubit_idx]) else: qc_slice.h(state_registers[qubit_idx]) elif pauli[1].z[qubit_idx]: if use_basis_gates: qc_slice.u(pi / 2, -pi / 2, pi / 2, state_registers[qubit_idx]) else: qc_slice.rx(pi / 2, state_registers[qubit_idx]) elif pauli[1].z[qubit_idx] and not pauli[1].x[qubit_idx]: pass else: raise ValueError('Unrecognized pauli: {}'.format(pauli[1])) if nontrivial_pauli_indices: top_xyz_pauli_indices[pauli_idx] = nontrivial_pauli_indices[-1] if cnot_qubit_pairs[pauli_idx] is None: cnot_qubit_pairs[pauli_idx] = list(zip( sorted(nontrivial_pauli_indices)[:-1], sorted(nontrivial_pauli_indices)[1:] )) for pair in cnot_qubit_pairs[pauli_idx]: qc_slice.cx(state_registers[pair[0]], state_registers[pair[1]]) if top_xyz_pauli_indices[pauli_idx] >= 0: if isinstance(evo_time, (Parameter, ParameterExpression)): lam = 2.0 * pauli[0] / num_time_slices lam = lam.real if lam.imag == 0 else lam lam = lam * evo_time else: lam = (2.0 * pauli[0] * evo_time / num_time_slices).real if not controlled: if use_basis_gates: qc_slice.p(lam, state_registers[top_xyz_pauli_indices[pauli_idx]]) else: qc_slice.rz(lam, state_registers[top_xyz_pauli_indices[pauli_idx]]) else: if use_basis_gates: qc_slice.p(lam / 2, state_registers[top_xyz_pauli_indices[pauli_idx]]) qc_slice.cx(ancillary_registers[0], state_registers[top_xyz_pauli_indices[pauli_idx]]) qc_slice.p(-lam / 2, state_registers[top_xyz_pauli_indices[pauli_idx]]) qc_slice.cx(ancillary_registers[0], state_registers[top_xyz_pauli_indices[pauli_idx]]) else: qc_slice.crz(lam, ancillary_registers[0], state_registers[top_xyz_pauli_indices[pauli_idx]]) for pair in reversed(cnot_qubit_pairs[pauli_idx]): qc_slice.cx(state_registers[pair[0]], state_registers[pair[1]]) for qubit_idx in range(n_qubits): if pauli[1].x[qubit_idx]: if not pauli[1].z[qubit_idx]: if use_basis_gates: qc_slice.h(state_registers[qubit_idx]) else: qc_slice.h(state_registers[qubit_idx]) elif pauli[1].z[qubit_idx]: if use_basis_gates: qc_slice.u(-pi / 2, -pi / 2, pi / 2, state_registers[qubit_idx]) else: qc_slice.rx(-pi / 2, state_registers[qubit_idx]) if shallow_slicing: logger.info('Under shallow slicing mode, the qc.data reference is repeated shallowly. 
' 'Thus, changing gates of one slice of the output circuit might affect ' 'other slices.') if barrier: qc_slice.barrier(state_registers) qc_slice.data *= (num_time_slices * power) qc = qc_slice else: qc = QuantumCircuit(*qc_slice.qregs, name=inst_name) for _ in range(num_time_slices * power): qc.append(qc_slice, qc.qubits) if barrier: qc.barrier(state_registers) return qc.to_instruction()
Construct the evolution circuit according to the supplied specification.

Args:
    pauli_list (list([[complex, Pauli]])): The list of Pauli terms corresponding to a single
        time slice to be evolved.
    evo_time (Union(complex, float, Parameter, ParameterExpression)): The evolution time.
    num_time_slices (int): The number of time slices for the expansion.
    controlled (bool, optional): Whether to build the controlled circuit.
    power (int, optional): The power to which the unitary operator is to be raised.
    use_basis_gates (bool, optional): Whether to use only basis gates when building the circuit.
    shallow_slicing (bool, optional): Whether to repeat the shallow qc.data reference for slicing.
    barrier (bool, optional): Whether to add a barrier after every slice.

Returns:
    Instruction: The Instruction corresponding to the specified evolution.

Raises:
    AquaError: power must be an integer and greater or equal to 1.
    ValueError: Unrecognized pauli.
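A rough usage sketch follows; it is not taken from the repository, it assumes a qiskit-terra version in which Pauli accepts a label string directly, and the coefficients and time value are arbitrary.

from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators.legacy.common import evolution_instruction

# Weighted Pauli terms for a single time slice: 0.5*ZZ + 0.25*XI (values chosen arbitrarily).
pauli_list = [[0.5, Pauli('ZZ')], [0.25, Pauli('XI')]]

# Build the evolution instruction for evo_time=1.0 with a single Trotter slice.
evo = evolution_instruction(pauli_list, evo_time=1.0, num_time_slices=1)

# Append it to a two-qubit circuit.
qr = QuantumRegister(2)
qc = QuantumCircuit(qr)
qc.append(evo, qr)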
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/aqua/operators/legacy/common.py#L230-L384
import copy import logging import numpy as np from qiskit.quantum_info import Pauli from qiskit import QuantumCircuit, QuantumRegister from qiskit.qasm import pi from qiskit.circuit import Parameter, ParameterExpression from qiskit.aqua import AquaError logger = logging.getLogger(__name__) def pauli_measurement(circuit, pauli, qr, cr, barrier=False): num_qubits = pauli.num_qubits for qubit_idx in range(num_qubits): if pauli.x[qubit_idx]: if pauli.z[qubit_idx]: circuit.sdg(qr[qubit_idx]) circuit.h(qr[qubit_idx]) else: circuit.h(qr[qubit_idx]) if barrier: circuit.barrier(qr[qubit_idx]) circuit.measure(qr[qubit_idx], cr[qubit_idx]) return circuit def measure_pauli_z(data, pauli): observable = 0.0 num_shots = sum(data.values()) p_z_or_x = np.logical_or(pauli.z, pauli.x) for key, value in data.items(): bitstr = np.asarray(list(key))[::-1].astype(int).astype(bool) sign = -1.0 if np.logical_xor.reduce(np.logical_and(bitstr, p_z_or_x)) else 1.0 observable += sign * value observable /= num_shots return observable def covariance(data, pauli_1, pauli_2, avg_1, avg_2): cov = 0.0 num_shots = sum(data.values()) if num_shots == 1: return cov p1_z_or_x = np.logical_or(pauli_1.z, pauli_1.x) p2_z_or_x = np.logical_or(pauli_2.z, pauli_2.x) for key, value in data.items(): bitstr = np.asarray(list(key))[::-1].astype(int).astype(bool) sign_1 = -1.0 if np.logical_xor.reduce(np.logical_and(bitstr, p1_z_or_x)) else 1.0 sign_2 = -1.0 if np.logical_xor.reduce(np.logical_and(bitstr, p2_z_or_x)) else 1.0 cov += (sign_1 - avg_1) * (sign_2 - avg_2) * value cov /= (num_shots - 1) return cov def row_echelon_F2(matrix_in): size = matrix_in.shape for i in range(size[0]): pivot_index = 0 for j in range(size[1]): if matrix_in[i, j] == 1: pivot_index = j break for k in range(size[0]): if k != i and matrix_in[k, pivot_index] == 1: matrix_in[k, :] = np.mod(matrix_in[k, :] + matrix_in[i, :], 2) matrix_out_temp = copy.deepcopy(matrix_in) indices = [] matrix_out = np.zeros(size) for i in range(size[0] - 1): if np.array_equal(matrix_out_temp[i, :], np.zeros(size[1])): indices.append(i) for row in np.sort(indices)[::-1]: matrix_out_temp = np.delete(matrix_out_temp, (row), axis=0) matrix_out[0:size[0] - len(indices), :] = matrix_out_temp matrix_out = matrix_out.astype(int) return matrix_out def kernel_F2(matrix_in): size = matrix_in.shape kernel = [] matrix_in_id = np.vstack((matrix_in, np.identity(size[1]))) matrix_in_id_ech = (row_echelon_F2(matrix_in_id.transpose())).transpose() for col in range(size[1]): if (np.array_equal(matrix_in_id_ech[0:size[0], col], np.zeros(size[0])) and not np.array_equal(matrix_in_id_ech[size[0]:, col], np.zeros(size[1]))): kernel.append(matrix_in_id_ech[size[0]:, col]) return kernel def suzuki_expansion_slice_pauli_list(pauli_list, lam_coef, expansion_order): if expansion_order == 1: half = [[lam_coef / 2 * c, p] for c, p in pauli_list] return half + list(reversed(half)) else: p_k = (4 - 4 ** (1 / (2 * expansion_order - 1))) ** -1 side_base = suzuki_expansion_slice_pauli_list( pauli_list, lam_coef * p_k, expansion_order - 1 ) side = side_base * 2 middle = suzuki_expansion_slice_pauli_list( pauli_list, lam_coef * (1 - 4 * p_k), expansion_order - 1 ) return side + middle + side def check_commutativity(op_1, op_2, anti=False): com = op_1 * op_2 - op_2 * op_1 if not anti else op_1 * op_2 + op_2 * op_1 com.simplify() return bool(com.is_empty())
Apache License 2.0
google-research/language
language/relation_learning/data/fewrel.py
FewRelProcessor.process_file
python
def process_file(self, file_name):
    data = self._read_json(os.path.join(file_name))
    num_classes = len(data[0]["meta_train"])
    num_examples_per_class = len(data[0]["meta_train"][0])
    tf.logging.info("Number of tests: %s, classes: %s, examples per class %s",
                    len(data), num_classes, num_examples_per_class)

    output = []
    for entry in data:
        candidate_sets = entry["meta_train"]
        query = entry["meta_test"]
        query_example = self._json_entry_to_example(query, -1, -1)
        output_sets = {}
        for (candidate_index, candidate_set) in enumerate(candidate_sets):
            output_candidate_set = []
            for (example_index, candidate) in enumerate(candidate_set):
                relation_input_example = self._json_entry_to_example(
                    candidate, candidate_index, example_index)
                output_candidate_set.append(relation_input_example)
            output_sets[candidate_index] = output_candidate_set
        output.append(
            FewShotRelationInputExample(
                guid="", sets=output_sets, query=query_example))
    return output, num_classes, num_examples_per_class
Processes a FewRel-format JSON file into a collection of `FewShotRelationInputExample`s, returning the examples together with the number of classes and the number of examples per class.
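A hedged usage sketch (not from the repository): `tokenizer` stands for any BERT-style tokenizer object exposing a tokenize(text) method, and the file path is a placeholder for a FewRel-format JSON file.

# Constructor signature taken from the context shown below in this record.
processor = FewRelProcessor(tokenizer, max_seq_length=128, add_entity_markers=True)

examples, num_classes, num_per_class = processor.process_file('/path/to/fewrel_eval.json')

for few_shot_example in examples:
    query = few_shot_example.query          # RelationInputExample built from "meta_test"
    candidate_sets = few_shot_example.sets  # dict: class index -> list of RelationInputExample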
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/relation_learning/data/fewrel.py#L165-L197
from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from absl import flags import tensorflow.compat.v1 as tf FLAGS = flags.FLAGS _RESERVED_WORDPIECES = [ "[PAD]", "[CLS]", "[SEP]", "[MASK]", "[E1]", "[/E1]", "[E2]", "[/E2]" ] def reserved_wordpieces(): return _RESERVED_WORDPIECES def _tokenize_with_entity_markers(tokens, tokenizer, e1, e2): def tokenize(start, end): return tokenizer.tokenize(" ".join(tokens[start:end])) if e1[0] < e2[0]: return (tokenize(0, e1[0]) + ["[E1]"] + tokenize(e1[0], e1[1] + 1) + ["[/E1]"] + tokenize(e1[1] + 1, e2[0]) + ["[E2]"] + tokenize(e2[0], e2[1] + 1) + ["[/E2]"] + tokenize(e2[1] + 1, None)) else: return (tokenize(0, e2[0]) + ["[E2]"] + tokenize(e2[0], e2[1] + 1) + ["[/E2]"] + tokenize(e2[1] + 1, e1[0]) + ["[E1]"] + tokenize(e1[0], e1[1] + 1) + ["[/E1]"] + tokenize(e1[1] + 1, None)) class RelationInputExample(object): def __init__(self, guid, wordpieces, label, e1, e2): self.guid = guid self.wordpieces = wordpieces self.label = label self.e1 = e1 self.e2 = e2 def __str__(self): return "%s %s %s %s %s" % (self.guid, " ".join(self.wordpieces), self.label, self.e1, self.e2) class FewShotRelationInputExample(object): def __init__(self, guid, sets, query): self.guid = guid self.sets = sets self.query = query def __str__(self): out = "%s" % self.query for _, setlist in self.sets.iteritems(): for item in setlist: out += "%s" % item return out class FewRelProcessor(object): def __init__(self, tokenizer, max_seq_length, add_entity_markers=False): self._tokenizer = tokenizer self._max_seq_length = max_seq_length self._add_entity_markers = add_entity_markers @classmethod def _read_json(cls, intput_file): with tf.gfile.Open(intput_file, "r") as f: return json.load(f) def _json_entry_to_example(self, entry, candidate_index, example_index): text = " ".join(entry["tokens"]) e1_tokens = [entry["h"][2][0][0], entry["h"][2][0][-1]] e2_tokens = [entry["t"][2][0][0], entry["t"][2][0][-1]] e1 = [0, 0] e2 = [0, 0] if self._add_entity_markers: wordpieces = _tokenize_with_entity_markers( entry["tokens"], self._tokenizer, e1_tokens, e2_tokens) else: wordpieces = self._tokenizer.tokenize(text) if len(wordpieces) > self._max_seq_length - 2: tf.logging.info("[_create_entrys] Truncating sentence [%s] %s", len(wordpieces), " ".join(wordpieces).encode("utf-8")) wordpieces = wordpieces[0:(self._max_seq_length - 2)] wordpieces = ["[CLS]"] + wordpieces + ["[SEP]"] token_index = 0 cur_token_length = 0 for i, wordpiece in enumerate(wordpieces): if wordpiece in reserved_wordpieces(): continue if e1[0] == 0 and e1_tokens[0] == token_index: e1[0] = i if e1_tokens[1] == token_index: e1[1] = i if e2[0] == 0 and e2_tokens[0] == token_index: e2[0] = i if e2_tokens[1] == token_index: e2[1] = i cur_token_length += len(wordpiece) - 2 * int(wordpiece.startswith("##")) if cur_token_length == len(entry["tokens"][token_index]): token_index += 1 cur_token_length = 0 return RelationInputExample( guid="%s:%s" % (candidate_index, example_index), wordpieces=wordpieces, label=candidate_index, e1=e1, e2=e2)
Apache License 2.0
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/camera/august.py
AugustCamera.is_recording
python
def is_recording(self):
    return self._doorbell.has_subscription
Return true if the device is recording.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/camera/august.py#L48-L50
from datetime import timedelta

import requests

from homeassistant.components.august import DATA_AUGUST, DEFAULT_TIMEOUT
from homeassistant.components.camera import Camera

DEPENDENCIES = ['august']

SCAN_INTERVAL = timedelta(seconds=5)


def setup_platform(hass, config, add_devices, discovery_info=None):
    data = hass.data[DATA_AUGUST]
    devices = []

    for doorbell in data.doorbells:
        devices.append(AugustCamera(data, doorbell, DEFAULT_TIMEOUT))

    add_devices(devices, True)


class AugustCamera(Camera):
    def __init__(self, data, doorbell, timeout):
        super().__init__()
        self._data = data
        self._doorbell = doorbell
        self._timeout = timeout
        self._image_url = None
        self._image_content = None

    @property
    def name(self):
        return self._doorbell.device_name

    @property
MIT License
nlitsme/zipdump
urlstream.py
urlstream.clearrange
python
def clearrange(self):
    if hasattr(self.req, 'remove_header'):
        self.req.remove_header('Range')
    else:
        self.req.headers.pop('Range', None)
Remove Range header from request.
https://github.com/nlitsme/zipdump/blob/3526c8253171c963517a6bf0d3b415b4b899fe30/urlstream.py#L77-L84
import sys import re from errno import EINVAL, ENOENT from os import SEEK_SET, SEEK_CUR, SEEK_END if sys.version_info[0] == 3: import urllib.request from urllib.request import Request urllib2 = urllib.request import urllib.parse urllib2.quote = urllib.parse.quote else: import urllib2 from urllib2 import Request Request.urlopen = urllib2.urlopen debuglog = False def open(url, mode=None): m = re.match(r'(\w+://)([^/]+?)(?::([^/]+))?@(.*)', url) if m: url = m.group(1)+m.group(4) authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm() authinfo.add_password(None, url, m.group(2), m.group(3)) urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(authinfo))) m = re.match(r'^(\S+?://)(.*?)(\?.*)?$', url) if not m: print("unrecognized url: %s" % url) return method = m.group(1) basepath = urllib2.quote(m.group(2)) query = m.group(3) or "" return urlstream(Request(method + basepath + query)) class urlstream(object): def __init__(self, req): self.req = req self.absolutepos = 0 self.buffer = None self.bufferstart = None if debuglog: print("URL: %s" % req.get_full_url())
MIT License
esterni/pyracing
pyracing/client.py
Client.race_laps_driver
python
async def race_laps_driver(
        self,
        cust_id,
        subsession_id,
        sim_session_type=ct.SimSessionType.race.value
):
    payload = {
        'subsessionid': subsession_id,
        'simsessnum': sim_session_type,
        'groupid': cust_id
    }
    url = ct.URL_LAPS_SINGLE
    response = await self._build_request(url, payload)

    return session_data.RaceLapsDriver(response.json())
Returns data for all laps completed by a single driver in the given subsession. sim_session_type specifies whether the laps come from practice, qualifying, or the race.
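A minimal usage sketch, assuming Client is importable from pyracing.client (the file shown in this record); the credentials, customer ID, and subsession ID are placeholders.

import asyncio

from pyracing import constants as ct
from pyracing.client import Client


async def main():
    client = Client('your_iracing_username', 'your_iracing_password')
    # cust_id and subsession_id below are placeholders.
    laps = await client.race_laps_driver(
        cust_id=123456,
        subsession_id=987654,
        sim_session_type=ct.SimSessionType.race.value,
    )
    return laps


laps = asyncio.run(main())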
https://github.com/esterni/pyracing/blob/fc75579edb0c689d62228ed511fd22dd62807d47/pyracing/client.py#L659-L676
from pyracing import constants as ct from pyracing import logger from pyracing.helpers import now_five_min_floor from pyracing.response_objects import ( career_stats, chart_data, historical_data, iracing_data, session_data, upcoming_events, league_data ) from .exceptions.authentication_error import AuthenticationError from datetime import datetime import httpx import time class Client: def __init__(self, username: str, password: str): self.username = username self.password = password self.session = httpx.AsyncClient() async def _authenticate(self): logger.info('Authenticating...') login_data = { 'username': self.username, 'password': self.password, 'utcoffset': round(abs(time.localtime().tm_gmtoff / 60)), 'todaysdate': '' } auth_response = await self.session.post(ct.URL_LOGIN2, data=login_data) if 'failedlogin' in str(auth_response.url): logger.warning( 'The login POST request was redirected to /failedlogin, ' 'indicating an authentication failure. If credentials are ' 'correct, check that a captcha is not required by manually ' 'visiting members.iracing.com') raise AuthenticationError('Login Failed', auth_response) else: logger.info('Login successful') async def _build_request(self, url, params): if not self.session.cookies.__bool__(): logger.info("No cookies in cookie jar.") await self._authenticate() logger.info(f'Request being sent to: {url} with params: {params}') response = await self.session.get( url, params=params, allow_redirects=False, timeout=10.0 ) logger.info(f'Request sent for URL: {response.url}') logger.info(f'Status code of response: {response.status_code}') logger.debug(f'Contents of the response object: {response.__dict__}') if response.is_error or response.is_redirect: logger.info( 'Request was redirected, indicating that the cookies are ' 'invalid. Initiating authentication and retrying the request.' 
) await self._authenticate() return await self._build_request(url, params) return response async def active_op_counts( self, count_max=250, include_empty='n', cust_id=None ): url = ct.URL_ACTIVEOP_COUNT payload = { 'custid': cust_id, 'maxcount': count_max, 'include_empty': include_empty, 'excludeLite': None } response = await self._build_request(url, payload) return [upcoming_events.OpenPractice(x) for x in response.json()["d"]] async def all_subsessions(self, subsession_id): payload = {'subsessionid': subsession_id} url = ct.URL_ALL_SUBSESSIONS response = await self._build_request(url, payload) return [x['subsessionid'] for x in response.json()] async def car_class(self, car_class_id=0): payload = {'carclassid': car_class_id} url = ct.URL_CAR_CLASS response = await self._build_request(url, payload) return iracing_data.CarClass(response.json()['carclass']) async def career_stats(self, cust_id): payload = {'custid': cust_id} url = ct.URL_CAREER_STATS response = await self._build_request(url, payload) if not response.json(): return [] return [career_stats.CareerStats(x) for x in response.json()] async def current_seasons( self, only_active=True, series_name_short=True, cat_id=True, season_id=True, year=True, quarter=True, series_id=True, active=True, license_eligible=True, is_lite=True, car_classes=True, tracks=True, start=True, end=True, cars=True, race_week=True, category=True, series_lic_group_id=True, car_id=True ): field_dict = { 'year': year, 'quarter': quarter, 'seriesshortname': series_name_short, 'seriesid': series_id, 'active': active, 'catid': cat_id, 'licenseeligible': license_eligible, 'islite': is_lite, 'carclasses': car_classes, 'tracks': tracks, 'start': start, 'end': end, 'cars': cars, 'raceweek': race_week, 'category': category, 'serieslicgroupid': series_lic_group_id, 'carid': car_id, 'seasonid': season_id, } key_list = [key for key in field_dict if field_dict.get(key)] key_list = ','.join(key_list) payload = { 'onlyActive': 1 if only_active else 0, 'fields': key_list } url = ct.URL_CURRENT_SEASONS response = await self._build_request(url, payload) return [iracing_data.Season(x) for x in response.json()] async def driver_stats( self, search='null', country='null', category=ct.Category.road.value, class_low=None, class_high=None, irating_low=None, irating_high=None, ttrating_low=None, ttrating_high=None, starts_avg_low=None, starts_avg_high=None, finish_avg_low=None, finish_avg_high=None, points_avg_low=None, points_avg_high=None, inc_avg_low=None, inc_avg_high=None, result_num_low=1, result_num_high=25, sort=ct.Sort.irating.value, order=ct.Sort.descending.value, active=1, friend=None, watched=None, recent=None, cust_id=None ): payload = { 'search': search, 'friend': friend, 'watched': watched, 'recent': recent, 'country': country, 'category': category, 'classlow': class_low, 'classhigh': class_high, 'iratinglow': irating_low, 'iratinghigh': irating_high, 'ttratinglow': ttrating_low, 'ttratinghigh': ttrating_high, 'avgstartlow': starts_avg_low, 'avgstarthigh': starts_avg_high, 'avgfinishlow': finish_avg_low, 'avgfinishhigh': finish_avg_high, 'avgpointslow': points_avg_low, 'avgpointshigh': points_avg_high, 'avgincidentslow': inc_avg_low, 'avgincidentshigh': inc_avg_high, 'custid': cust_id, 'lowerbound': result_num_low, 'upperbound': result_num_high, 'sort': sort, 'order': order, 'active': active } url = ct.URL_DRIVER_STATS response = await self._build_request(url, payload) return [historical_data.DriverStats(x) for x in response.json()["d"]["r"]] async def event_results( 
self, cust_id, quarter, show_races=1, show_quals=None, show_tts=None, show_ops=None, show_official=1, show_unofficial=None, show_rookie=1, show_class_d=1, show_class_c=1, show_class_b=1, show_class_a=1, show_pro=1, show_prowc=1, result_num_low=1, result_num_high=25, sort=ct.Sort.start_time.value, order=ct.Sort.descending.value, data_format='json', category=ct.Category.road.value, year=datetime.today().year, race_week=None, track_id=None, car_class=None, car_id=None, start_low=None, start_high=None, finish_low=None, finish_high=None, incidents_low=None, incidents_high=None, points_champ_low=None, points_champ_high=None ): payload = { 'custid': cust_id, 'showraces': show_races, 'showquals': show_quals, 'showtts': show_tts, 'showops': show_ops, 'showofficial': show_official, 'showunofficial': show_unofficial, 'showrookie': show_rookie, 'showclassd': show_class_d, 'showclassc': show_class_c, 'showclassb': show_class_b, 'showclassa': show_class_a, 'showpro': show_pro, 'showprowc': show_prowc, 'lowerbound': result_num_low, 'upperbound': result_num_high, 'sort': sort, 'order': order, 'format': data_format, 'category[]': category, 'seasonyear': year, 'seasonquarter': quarter, 'raceweek': race_week, 'trackid': track_id, 'carclassid': car_class, 'carid': car_id, 'start_low': start_low, 'start_high': start_high, 'finish_low': finish_low, 'finish_high': finish_high, 'incidents_low': incidents_low, 'incidents_high': incidents_high, 'champpoints_low': points_champ_low, 'champpoints_high': points_champ_high } url = ct.URL_RESULTS response = await self._build_request(url, payload) event_result_dict = response.json()['d'] if event_result_dict: return [historical_data.EventResults(x) for x in response.json()["d"]["r"]] else: return [] async def irating(self, cust_id, category) -> chart_data.ChartData[chart_data.IRating]: chart_type = ct.ChartType.irating.value response = await self._stats_chart(cust_id, category, chart_type) ir_list = [] for irating in response.json(): ir_list.append( chart_data.IRating(timestamp=irating[0], value=irating[1]) ) return chart_data.ChartData( category=category, type=chart_type, content=ir_list) async def ttrating(self, cust_id, category) -> chart_data.ChartData[chart_data.TTRating]: chart_type = ct.ChartType.ttrating.value response = await self._stats_chart(cust_id, category, chart_type) ttrating_list = [] for ttrating in response.json(): ttrating_list.append( chart_data.TTRating(timestamp=ttrating[0], value=ttrating[1]) ) return chart_data.ChartData( category=category, type=chart_type, content=ttrating_list) async def license_class(self, cust_id, category) -> chart_data.ChartData[chart_data.LicenseClass]: chart_type = ct.ChartType.license_class.value response = await self._stats_chart(cust_id, category, chart_type) license_class_list = [] for license_class in response.json(): license_class_list.append( chart_data.LicenseClass( timestamp=license_class[0], license_number=license_class[1] )) return chart_data.ChartData( category=category, type=chart_type, content=license_class_list) async def _stats_chart(self, cust_id, category, chart_type): payload = { 'custId': cust_id, 'catId': category, 'chartType': chart_type } url = ct.URL_STATS_CHART return await self._build_request(url, payload) async def last_races_stats(self, cust_id): payload = {'custid': cust_id} url = ct.URL_LASTRACE_STATS response = await self._build_request(url, payload) if not response.json(): return [] return [career_stats.LastRacesStats(x) for x in response.json()] async def last_series(self, cust_id): payload 
= {'custid': cust_id} url = ct.URL_LAST_SERIES response = await self._build_request(url, payload) return [career_stats.LastSeries(x) for x in response.json()] async def member_cars_driven(self, cust_id): payload = {'custid': cust_id} url = ct.URL_CARS_DRIVEN response = await self._build_request(url, payload) return response.json() async def member_division(self, cust_id, season_id): payload = { 'seasonid': season_id, 'custid': cust_id, 'pointstype': 'race' } url = ct.URL_MEM_DIVISION response = await self._build_request(url, payload) return [iracing_data.MemberDivision(x) for x in response.json()] async def member_subsession_id_from_session(self, cust_id, session_id): payload = {'custid': cust_id, 'sessionID': session_id} url = ct.URL_MEM_SUBSESSID response = await self._build_request(url, payload) return response.json() async def driver_status(self, cust_id): payload = {'custId': cust_id} url = ct.URL_DRIVER_STATUS response = await self._build_request(url, payload) return iracing_data.DriverStatus(response.json()) async def next_event( self, series_id, event_type=ct.EventType.race.value, date=now_five_min_floor() ): payload = { 'seriesID': series_id, 'evtType': event_type, 'date': date } url = ct.URL_NEXT_EVENT response = await self._build_request(url, payload) return upcoming_events.NextEvent(response.json()) async def next_session_times(self, season_id): payload = {'season': season_id} url = ct.URL_SESSION_TIMES response = await self._build_request(url, payload) return [upcoming_events.NextSessionTimes(x) for x in response.json()["d"]["r"]] async def personal_bests(self, cust_id, car_id): payload = {'custid': cust_id, 'carid': car_id} url = ct.URL_PERSONAL_BESTS response = await self._build_request(url, payload) return [career_stats.PersonalBests(x) for x in response.json()] async def private_results( self, cust_id, start_time_lower, start_time_upper, result_num_low=1, result_num_high=25, sort=ct.Sort.session_name.value, order=ct.Sort.ascending.value ): payload = { 'participant_custid': cust_id, 'start_time_lowerbound': start_time_lower, 'start_time_upperbound': start_time_upper, 'lowerbound': result_num_low, 'upperbound': result_num_high, 'sort': sort, 'order': order } url = ct.URL_PRIVATE_RESULTS response = await self._build_request(url, payload) return [historical_data.PrivateResults(x) for x in response.json()['rows']] async def race_guide( self, rookie=None, class_d=None, class_c=None, class_b=None, class_a=None, class_pro=None, class_prowc=None, oval=None, road=None, dirt_oval=None, dirt_road=None, fixed=None, multiclass=None, meets_mpr=None, populated=None, eligible=None, official=None, time=now_five_min_floor() ): payload = { 'at': time, 'showRookie': rookie, 'showClassD': class_d, 'showClassC': class_c, 'showClassB': class_b, 'showClassA': class_a, 'showPro': class_pro, 'showProWC': class_prowc, 'showOval': oval, 'showRoad': road, 'showDirtOval': dirt_oval, 'showDirtRoad': dirt_road, 'hideNotFixedSetup': fixed, 'hideNotMultiClass': multiclass, 'meetsMPR': meets_mpr, 'hideUnpopulated': populated, 'hideIneligible': eligible, 'showOfficial': official } url = ct.URL_RACEGUIDE response = await self._build_request(url, payload) return [upcoming_events.RaceGuide(x) for x in response.json()['series']] async def race_laps_all( self, subsession_id, car_class_id=None, sim_session_type=ct.SimSessionType.race.value ): payload = { 'subsessionid': subsession_id, 'carclassid': car_class_id, 'simsesnum': sim_session_type } url = ct.URL_LAPS_ALL response = await self._build_request(url, payload) 
return session_data.RaceLapsAll(response.json())
MIT License
jacekm-git/betboy
tools/odds_net/bb_engine.py
Database.__init__
python
def __init__(self, parent=None): Shared.__init__(self) self.mybase = sqlite3.connect(':memory:') self.relations_base = self.mybase.cursor() self.stop_action = 0 self.relations_base.execute('''CREATE TABLE results (id INTEGER PRIMARY KEY, date_txt txt, date_num FLOAT, home TEXT, away TEXT, gHomeEnd INTEGER, gAwayEnd INTEGER, odd_home FLOAT, odd_draw FLOAT, odd_away FLOAT fake TEXT NOT NULL DEFAULT "-")''') self.relations_base.execute('''CREATE TABLE league (id INTEGER PRIMARY KEY, team TEXT, matches INTEGER DEFAULT 000.0, matchesHome INTEGER DEFAULT 000.0, matchesAway INTEGER DEFAULT 000.0, points INTEGER DEFAULT 0, pointsHome INTEGER DEFAULT 0, pointsAway INTEGER DEFAULT 0, form INTEGER DEFAULT 000.0, formHome INTEGER DEFAULT 000.0, formAway INTEGER DEFAULT 000.0, pointsBB FLOAT DEFAULT 0.000, pointsBBHome FLOAT DEFAULT 000.0, pointsBBAway FLOAT DEFAULT 000.0, formBB FLOAT DEFAULT 000.0, formBBHome FLOAT DEFAULT 000.0, formBBAway FLOAT DEFAULT 000.0, wins INTEGER DEFAULT 000.0, draws INTEGER DEFAULT 000.0, loses INTEGER DEFAULT 000.0, winhome INTEGER DEFAULT 000.0, drawhome INTEGER DEFAULT 000.0, losehome INTEGER DEFAULT 000.0, winaway INTEGER DEFAULT 000.0, drawaway INTEGER VARCHAR(2) NOT NULL DEFAULT 0, loseaway INTEGER VARCHAR(2) NOT NULL DEFAULT 0, goalsscored INTEGER NOT NULL DEFAULT 0, goalslost INTEGER NOT NULL DEFAULT 0, goalsscoredhome INTEGER NOT NULL DEFAULT 0, goalslosthome INTEGER NOT NULL DEFAULT 0, goalsscoredaway INTEGER NOT NULL DEFAULT 0, goalslostaway INTEGER NOT NULL DEFAULT 0, mowins FLOAT DEFAULT 0.0, moloses FLOAT DEFAULT 0.0, diffgwins FLOAT DEFAULT 0.0, diffgloses FLOAT DEFAULT 0.0, mowinsHome FLOAT DEFAULT 0.0, molosesHome FLOAT DEFAULT 0.0, diffgwinsHome FLOAT DEFAULT 0.0, diffglosesHome FLOAT DEFAULT 0.0, mowinsAway FLOAT DEFAULT 0.0, molosesAway FLOAT DEFAULT 0.0, diffgwinsAway FLOAT DEFAULT 0.0, diffglosesAway FLOAT DEFAULT 0.0, f1 INTEGER DEFAULT 000.0, f2 INTEGER DEFAULT 000.0, f3 INTEGER DEFAULT 000.0, f4 INTEGER DEFAULT 000.0, f1Home INTEGER DEFAULT 000.0, f2Home INTEGER DEFAULT 000.0, f1Away INTEGER DEFAULT 000.0, f2Away INTEGER DEFAULT 000.0, f1BB FLOAT DEFAULT 000.0, f2BB FLOAT DEFAULT 000.0, f3BB FLOAT DEFAULT 000.0, f4BB FLOAT DEFAULT 000.0, f1BBHome FLOAT DEFAULT 000.0, f2BBHome FLOAT DEFAULT 000.0, f1BBAway FLOAT DEFAULT 000.0, f2BBAway FLOAT DEFAULT 000.0, f1op TEXT, f2op TEXT, f3op TEXT, f4op TEXT, f1opHome TEXT, f2opHome TEXT, f1opAway TEXT, f2opAway TEXT, bts INTEGER DEFAULT 000.0, btsHome INTEGER DEFAULT 000.0, btsAway INTEGER DEFAULT 000.0, over25 INTEGER DEFAULT 000.0, over25Home INTEGER DEFAULT 000.0, over25Away INTEGER DEFAULT 000.0, under25 INTEGER DEFAULT 000.0, under25Home INTEGER DEFAULT 000.0, under25Away INTEGER DEFAULT 000.0, fake TEXT NOT NULL DEFAULT "-")''') self.relations_base.execute('''CREATE TABLE series (id INTEGER PRIMARY KEY, team TEXT, series_wins INTEGER NOT NULL DEFAULT 0, series_draws INTEGER NOT NULL DEFAULT 0, series_loses INTEGER NOT NULL DEFAULT 0, series_winshome INTEGER NOT NULL DEFAULT 0, series_drawshome INTEGER NOT NULL DEFAULT 0, series_loseshome INTEGER NOT NULL DEFAULT 0, series_winsaway INTEGER NOT NULL DEFAULT 0, series_drawsaway INTEGER NOT NULL DEFAULT 0, series_losesaway INTEGER NOT NULL DEFAULT 0, series_noloses INTEGER NOT NULL DEFAULT 0, series_noloseshome INTEGER NOT NULL DEFAULT 0, series_nolosesaway INTEGER NOT NULL DEFAULT 0, series_nowins INTEGER NOT NULL DEFAULT 0, series_nowinshome INTEGER NOT NULL DEFAULT 0, series_nowinsaway INTEGER NOT NULL DEFAULT 0, series_nodraws INTEGER NOT NULL DEFAULT 
0, series_nodrawshome INTEGER NOT NULL DEFAULT 0, series_nodrawsaway INTEGER NOT NULL DEFAULT 0, series_bts INTEGER DEFAULT 000.0, series_btsHome INTEGER DEFAULT 000.0, series_btsAway INTEGER DEFAULT 000.0, series_over25 INTEGER DEFAULT 000.0, series_over25Home INTEGER DEFAULT 000.0, series_over25Away INTEGER DEFAULT 000.0, series_under25 INTEGER DEFAULT 000.0, series_under25Home INTEGER DEFAULT 000.0, series_under25Away INTEGER DEFAULT 000.0)''') self.relations_base.execute('''CREATE TABLE scaled (id INTEGER PRIMARY KEY, team TEXT, matches FLOAT DEFAULT 000.0, points FLOAT DEFAULT 000.0, pointsHome FLOAT DEFAULT 000.0, pointsAway FLOAT DEFAULT 000.0, pointsBB FLOAT DEFAULT 0.000, pointsBBHome FLOAT DEFAULT 000.0, pointsBBAway FLOAT DEFAULT 000.0, form FLOAT DEFAULT 000.0, formHome FLOAT DEFAULT 000.0, formAway FLOAT DEFAULT 000.0, formBB FLOAT DEFAULT 000.0, formBBHome FLOAT DEFAULT 000.0, formBBAway FLOAT DEFAULT 000.0, points_b FLOAT DEFAULT 000.0, pointsHome_b FLOAT DEFAULT 000.0, pointsAway_b FLOAT DEFAULT 000.0, pointsBB_b FLOAT DEFAULT 0.000, pointsBBHome_b FLOAT DEFAULT 000.0, pointsBBAway_b FLOAT DEFAULT 000.0, form_b FLOAT DEFAULT 000.0, formHome_b FLOAT DEFAULT 000.0, formAway_b FLOAT DEFAULT 000.0, formBB_b FLOAT DEFAULT 000.0, formBBHome_b FLOAT DEFAULT 000.0, formBBAway_b FLOAT DEFAULT 000.0, winhome FLOAT DEFAULT 000.0, drawhome FLOAT DEFAULT 000.0, losehome FLOAT DEFAULT 000.0, winaway FLOAT DEFAULT 000.0, drawaway FLOAT DEFAULT 000.0, loseaway FLOAT DEFAULT 000.0, winhome_b FLOAT DEFAULT 000.0, drawhome_b FLOAT DEFAULT 000.0, losehome_b FLOAT DEFAULT 000.0, winaway_b FLOAT DEFAULT 000.0, drawaway_b FLOAT DEFAULT 000.0, loseaway_b FLOAT DEFAULT 000.0, goalsscored FLOAT NOT NULL DEFAULT 0, goalslost FLOAT NOT NULL DEFAULT 0, goalsscoredhome FLOAT NOT NULL DEFAULT 0, goalslosthome FLOAT NOT NULL DEFAULT 0, goalsscoredaway FLOAT NOT NULL DEFAULT 0, goalslostaway FLOAT NOT NULL DEFAULT 0, goalsscored_b FLOAT NOT NULL DEFAULT 0, goalslost_b FLOAT NOT NULL DEFAULT 0, goalsscoredhome_b FLOAT NOT NULL DEFAULT 0, goalslosthome_b FLOAT NOT NULL DEFAULT 0, goalsscoredaway_b FLOAT NOT NULL DEFAULT 0, goalslostaway_b FLOAT NOT NULL DEFAULT 0, mowins FLOAT DEFAULT 0.0, moloses FLOAT DEFAULT 0.0, mowinsHome FLOAT DEFAULT 0.0, molosesHome FLOAT DEFAULT 0.0, mowinsAway FLOAT DEFAULT 0.0, molosesAway FLOAT DEFAULT 0.0, f1 FLOAT DEFAULT 000.0, f2 FLOAT DEFAULT 000.0, f3 FLOAT DEFAULT 000.0, f4 FLOAT DEFAULT 000.0, f1Home FLOAT DEFAULT 000.0, f2Home FLOAT DEFAULT 000.0, f1Away FLOAT DEFAULT 000.0, f2Away FLOAT DEFAULT 000.0, f1BB FLOAT DEFAULT 000.0, f2BB FLOAT DEFAULT 000.0, f3BB FLOAT DEFAULT 000.0, f4BB FLOAT DEFAULT 000.0, f1BBHome FLOAT DEFAULT 000.0, f2BBHome FLOAT DEFAULT 000.0, f1BBAway FLOAT DEFAULT 000.0, f2BBAway FLOAT DEFAULT 000.0, bts INTEGER DEFAULT 000.0, btsHome INTEGER DEFAULT 000.0, btsAway INTEGER DEFAULT 000.0, over25 INTEGER DEFAULT 000.0, over25Home INTEGER DEFAULT 000.0, over25Away INTEGER DEFAULT 000.0, under25 INTEGER DEFAULT 000.0, under25Home INTEGER DEFAULT 000.0, under25Away INTEGER DEFAULT 000.0, series_wins FLOAT DEFAULT 000.0, series_draws FLOAT DEFAULT 000.0, series_loses FLOAT DEFAULT 000.0, series_winshome FLOAT DEFAULT 000.0, series_drawshome FLOAT DEFAULT 000.0, series_loseshome FLOAT DEFAULT 000.0, series_winsaway FLOAT DEFAULT 000.0, series_drawsaway FLOAT DEFAULT 000.0, series_losesaway FLOAT DEFAULT 000.0, series_noloses FLOAT DEFAULT 000.0, series_noloseshome FLOAT DEFAULT 000.0, series_nolosesaway FLOAT DEFAULT 000.0, series_nowins FLOAT DEFAULT 
000.0, series_nowinshome FLOAT DEFAULT 000.0, series_nowinsaway FLOAT DEFAULT 000.0, series_nodraws FLOAT DEFAULT 000.0, series_nodrawshome FLOAT DEFAULT 000.0, series_nodrawsaway FLOAT DEFAULT 000.0, series_bts INTEGER DEFAULT 000.0, series_btsHome INTEGER DEFAULT 000.0, series_btsAway INTEGER DEFAULT 000.0, series_over25 INTEGER DEFAULT 000.0, series_over25Home INTEGER DEFAULT 000.0, series_over25Away INTEGER DEFAULT 000.0, series_under25 INTEGER DEFAULT 000.0, series_under25Home INTEGER DEFAULT 000.0, series_under25Away INTEGER DEFAULT 000.0, fake TEXT NOT NULL DEFAULT "-")''') self.relations_base.execute('''CREATE TABLE odds (id INTEGER PRIMARY KEY, name TETX, odd_home FLOAT DEFAULT 000.0, odd_draw FLOAT DEFAULT 000.0, odd_away FLOAT DEFAULT 000.0)''')
Creates all necessary SQL tables in the in-memory SQLite database.
https://github.com/jacekm-git/betboy/blob/d7a83d34743fbdde40b50c9c3772510a272abbff/tools/odds_net/bb_engine.py#L41-L285
import sqlite3
from csv import reader
import os
import locale
import codecs
import unicodedata

locale.setlocale(locale.LC_ALL, "C")

import platform

system = platform.system()
if system == 'Windows':
    from pyfann_win import libfann
elif system == 'Linux':
    from pyfann import libfann
else:
    from pyfann import libfann

from bb_shared import Shared
from PySide import QtGui


class Database(Shared):
Apache License 2.0
ngalongc/openapi_security_scanner
schemathesis/specs/openapi/schemas.py
in_scopes
python
def in_scopes(resolver: jsonschema.RefResolver, scopes: List[str]) -> Generator[None, None, None]:
    with ExitStack() as stack:
        for scope in scopes:
            stack.enter_context(resolver.in_scope(scope))
        yield
Push all available scopes onto the resolver. There could be an additional scope change while resolving a schema in `get_response_schema`, so the stack holds at most two scopes in total. This context manager handles both cases (1 or 2 scope changes) in the same way.
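A small sketch of how this context manager is meant to be used, with a hypothetical resolver and scope list (the URLs are made up; in_scopes is the function defined in this module):

import jsonschema

resolver = jsonschema.RefResolver(base_uri='https://example.com/openapi.json', referrer={})
scopes = ['https://example.com/openapi.json', 'https://example.com/components.json']

with in_scopes(resolver, scopes):
    # Both scopes are pushed; relative $refs now resolve against the innermost one.
    assert resolver.resolution_scope == scopes[-1]
# On exit the ExitStack pops every scope that was pushed.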
https://github.com/ngalongc/openapi_security_scanner/blob/9ba2244bf0e52db6f149243de403c8c7c157216f/schemathesis/specs/openapi/schemas.py#L370-L380
import itertools from collections import defaultdict from contextlib import ExitStack, contextmanager from copy import deepcopy from difflib import get_close_matches from json import JSONDecodeError from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Type, Union from urllib.parse import urlsplit import jsonschema import requests from hypothesis.strategies import SearchStrategy from ...constants import DataGenerationMethod from ...exceptions import ( InvalidSchema, get_missing_content_type_error, get_response_parsing_error, get_schema_validation_error, ) from ...hooks import HookContext, HookDispatcher from ...models import Case, Endpoint, EndpointDefinition from ...schemas import BaseSchema from ...stateful import APIStateMachine, Feedback, Stateful, StatefulTest from ...types import FormData from ...utils import GenericResponse, get_response_payload, is_json_media_type from . import links, serialization from ._hypothesis import get_case_strategy from .converter import to_json_schema_recursive from .examples import get_strategies_from_examples from .filters import ( should_skip_by_operation_id, should_skip_by_tag, should_skip_deprecated, should_skip_endpoint, should_skip_method, ) from .parameters import ( OpenAPI20Body, OpenAPI20CompositeBody, OpenAPI20Parameter, OpenAPI30Body, OpenAPI30Parameter, OpenAPIParameter, ) from .references import ConvertingResolver from .security import BaseSecurityProcessor, OpenAPISecurityProcessor, SwaggerSecurityProcessor from .stateful import create_state_machine class BaseOpenAPISchema(BaseSchema): nullable_name: str links_field: str operations: Tuple[str, ...] security: BaseSecurityProcessor parameter_cls: Type[OpenAPIParameter] _endpoints_by_operation_id: Dict[str, Endpoint] @property def spec_version(self) -> str: raise NotImplementedError def get_stateful_tests( self, response: GenericResponse, endpoint: Endpoint, stateful: Optional[Stateful] ) -> Sequence[StatefulTest]: if stateful == Stateful.links: return links.get_links(response, endpoint, field=self.links_field) return [] def __repr__(self) -> str: info = self.raw_schema["info"] return f"{self.__class__.__name__} for {info['title']} ({info['version']})" def get_all_endpoints(self) -> Generator[Endpoint, None, None]: try: paths = self.raw_schema["paths"] context = HookContext() for path, methods in paths.items(): full_path = self.get_full_path(path) if should_skip_endpoint(full_path, self.endpoint): continue self.dispatch_hook("before_process_path", context, path, methods) scope, raw_methods = self._resolve_methods(methods) methods = self.resolver.resolve_all(methods) common_parameters = get_common_parameters(methods) for method, resolved_definition in methods.items(): if ( method not in self.operations or should_skip_method(method, self.method) or should_skip_deprecated( resolved_definition.get("deprecated", False), self.skip_deprecated_endpoints ) or should_skip_by_tag(resolved_definition.get("tags"), self.tag) or should_skip_by_operation_id(resolved_definition.get("operationId"), self.operation_id) ): continue parameters = self.collect_parameters( itertools.chain(resolved_definition.get("parameters", ()), common_parameters), resolved_definition, ) raw_definition = EndpointDefinition(raw_methods[method], resolved_definition, scope, parameters) yield self.make_endpoint(path, method, parameters, raw_definition) except (KeyError, AttributeError, jsonschema.exceptions.RefResolutionError) as exc: raise InvalidSchema("Schema parsing failed. 
Please check your schema.") from exc def collect_parameters( self, parameters: Iterable[Dict[str, Any]], endpoint_definition: Dict[str, Any] ) -> List[OpenAPIParameter]: raise NotImplementedError def _resolve_methods(self, methods: Dict[str, Any]) -> Tuple[str, Dict[str, Any]]: if "$ref" in methods: return deepcopy(self.resolver.resolve(methods["$ref"])) return self.resolver.resolution_scope, deepcopy(methods) def make_endpoint( self, path: str, method: str, parameters: List[OpenAPIParameter], raw_definition: EndpointDefinition, ) -> Endpoint: base_url = self.get_base_url() endpoint: Endpoint[OpenAPIParameter] = Endpoint( path=path, method=method, definition=raw_definition, base_url=base_url, app=self.app, schema=self, ) for parameter in parameters: endpoint.add_parameter(parameter) self.security.process_definitions(self.raw_schema, endpoint, self.resolver) return endpoint @property def resolver(self) -> ConvertingResolver: if not hasattr(self, "_resolver"): self._resolver = ConvertingResolver(self.location or "", self.raw_schema, nullable_name=self.nullable_name) return self._resolver def get_content_types(self, endpoint: Endpoint, response: GenericResponse) -> List[str]: raise NotImplementedError def get_strategies_from_examples(self, endpoint: Endpoint) -> List[SearchStrategy[Case]]: raise NotImplementedError def get_response_schema(self, definition: Dict[str, Any], scope: str) -> Tuple[List[str], Optional[Dict[str, Any]]]: raise NotImplementedError def get_endpoint_by_operation_id(self, operation_id: str) -> Endpoint: if not hasattr(self, "_endpoints_by_operation_id"): self._endpoints_by_operation_id = dict(self._group_endpoints_by_operation_id()) return self._endpoints_by_operation_id[operation_id] def _group_endpoints_by_operation_id(self) -> Generator[Tuple[str, Endpoint], None, None]: for path, methods in self.raw_schema["paths"].items(): scope, raw_methods = self._resolve_methods(methods) methods = self.resolver.resolve_all(methods) common_parameters = get_common_parameters(methods) for method, resolved_definition in methods.items(): if method not in self.operations or "operationId" not in resolved_definition: continue parameters = self.collect_parameters( itertools.chain(resolved_definition.get("parameters", ()), common_parameters), resolved_definition ) raw_definition = EndpointDefinition(raw_methods[method], resolved_definition, scope, parameters) yield resolved_definition["operationId"], self.make_endpoint(path, method, parameters, raw_definition) def get_endpoint_by_reference(self, reference: str) -> Endpoint: scope, data = self.resolver.resolve(reference) path, method = scope.rsplit("/", maxsplit=2)[-2:] path = path.replace("~1", "/").replace("~0", "~") resolved_definition = self.resolver.resolve_all(data) parent_ref, _ = reference.rsplit("/", maxsplit=1) _, methods = self.resolver.resolve(parent_ref) common_parameters = get_common_parameters(methods) parameters = self.collect_parameters( itertools.chain(resolved_definition.get("parameters", ()), common_parameters), resolved_definition ) raw_definition = EndpointDefinition(data, resolved_definition, scope, parameters) return self.make_endpoint(path, method, parameters, raw_definition) def get_case_strategy( self, endpoint: Endpoint, hooks: Optional[HookDispatcher] = None, feedback: Optional[Feedback] = None, data_generation_method: DataGenerationMethod = DataGenerationMethod.default(), ) -> SearchStrategy: return get_case_strategy( endpoint=endpoint, hooks=hooks, feedback=feedback, 
data_generation_method=data_generation_method ) def get_parameter_serializer(self, endpoint: Endpoint, location: str) -> Optional[Callable]: definitions = [item for item in endpoint.definition.resolved.get("parameters", []) if item["in"] == location] security_parameters = self.security.get_security_definitions_as_parameters( self.raw_schema, endpoint, self.resolver, location ) if security_parameters: definitions.extend(security_parameters) if definitions: return self._get_parameter_serializer(definitions) return None def _get_parameter_serializer(self, definitions: List[Dict[str, Any]]) -> Optional[Callable]: raise NotImplementedError def _get_response_definitions(self, endpoint: Endpoint, response: GenericResponse) -> Optional[Dict[str, Any]]: try: responses = endpoint.definition.resolved["responses"] except KeyError as exc: raise InvalidSchema("Schema parsing failed. Please check your schema.") from exc status_code = str(response.status_code) if status_code in responses: return responses[status_code] if "default" in responses: return responses["default"] return None def get_headers(self, endpoint: Endpoint, response: GenericResponse) -> Optional[Dict[str, Dict[str, Any]]]: definitions = self._get_response_definitions(endpoint, response) if not definitions: return None return definitions.get("headers") def as_state_machine(self) -> Type[APIStateMachine]: return create_state_machine(self) def add_link( self, source: Endpoint, target: Union[str, Endpoint], status_code: Union[str, int], parameters: Optional[Dict[str, str]] = None, request_body: Any = None, ) -> None: if parameters is None and request_body is None: raise ValueError("You need to provide `parameters` or `request_body`.") if hasattr(self, "_endpoints"): delattr(self, "_endpoints") for endpoint, methods in self.raw_schema["paths"].items(): if endpoint == source.path: methods = self.resolver.resolve_all(methods) found = False for method, definition in methods.items(): if method.upper() == source.method.upper(): found = True links.add_link( definition["responses"], self.links_field, parameters, request_body, status_code, target ) self.raw_schema["paths"][endpoint][method] = definition self.raw_schema["paths"][endpoint].pop("$ref", None) if found: return message = f"No such endpoint: `{source.verbose_name}`." possibilities = [e.verbose_name for e in self.get_all_endpoints()] matches = get_close_matches(source.verbose_name, possibilities) if matches: message += f" Did you mean `{matches[0]}`?" message += " Check if the requested endpoint passes the filters in the schema." raise ValueError(message) def get_links(self, endpoint: Endpoint) -> Dict[str, Dict[str, Any]]: result: Dict[str, Dict[str, Any]] = defaultdict(dict) for status_code, link in links.get_all_links(endpoint): result[status_code][link.name] = link return result def validate_response(self, endpoint: Endpoint, response: GenericResponse) -> None: responses = {str(key): value for key, value in endpoint.definition.raw.get("responses", {}).items()} status_code = str(response.status_code) if status_code in responses: definition = responses[status_code] elif "default" in responses: definition = responses["default"] else: return scopes, schema = self.get_response_schema(definition, endpoint.definition.scope) if not schema: return content_type = response.headers.get("Content-Type") if content_type is None: media_types = "\n ".join(self.get_content_types(endpoint, response)) raise get_missing_content_type_error()( "The response is missing the `Content-Type` header. 
The schema defines the following media types:\n\n" f" {media_types}" ) if not is_json_media_type(content_type): return try: if isinstance(response, requests.Response): data = response.json() else: data = response.json except JSONDecodeError as exc: exc_class = get_response_parsing_error(exc) payload = get_response_payload(response) raise exc_class( f"The received response is not valid JSON:\n\n {payload}\n\nException: \n\n {exc}" ) from exc with in_scopes(self.resolver, scopes): try: jsonschema.validate(data, schema, cls=jsonschema.Draft4Validator, resolver=self.resolver) except jsonschema.ValidationError as exc: exc_class = get_schema_validation_error(exc) raise exc_class( f"The received response does not conform to the defined schema!\n\nDetails: \n\n{exc}" ) from exc return None @contextmanager
Apache License 2.0
quansight-labs/udiff
src/udiff/_uarray_plug.py
DiffArrayBackend.self_implementations
python
def self_implementations(self):
    return {unumpy.ClassOverrideMeta.overridden_class.fget: self.overridden_class}
Map the `overridden_class` multimethod to this backend's own `overridden_class` implementation.
https://github.com/quansight-labs/udiff/blob/711d2d45ac901ca4e749b8321a08c293458e2603/src/udiff/_uarray_plug.py#L44-L48
import functools
from uarray import wrap_single_convertor_instance
from unumpy import ufunc, ndarray, numpy_backend
import unumpy
import unumpy as np
import uarray as ua

from ._diff_array import DiffArray, VJPDiffArray, JVPDiffArray
from ._vjp_diffs import nograd_functions, raw_functions

from typing import Dict

_ufunc_mapping: Dict[ufunc, np.ufunc] = {}


class DiffArrayBackend:
    __ua_domain__ = "numpy"

    _implementations: Dict = {
        unumpy.asarray: DiffArray,
    }

    @property
    @functools.lru_cache(None)
BSD 3-Clause New or Revised License
funnyzhou/fpn-pytorch
lib/datasets/roidb.py
_compute_targets
python
def _compute_targets(entry):
    rois = entry['boxes']
    overlaps = entry['max_overlaps']
    labels = entry['max_classes']
    gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    if len(gt_inds) == 0:
        return targets

    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
    ex_gt_overlaps = box_utils.bbox_overlaps(
        rois[ex_inds, :].astype(dtype=np.float32, copy=False),
        rois[gt_inds, :].astype(dtype=np.float32, copy=False))
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets[ex_inds, 0] = (
        1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
    targets[ex_inds, 1:] = box_utils.bbox_transform_inv(
        ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
    return targets
Compute bounding-box regression targets for an image.
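A schematic example of the roidb entry this function expects; the numbers are invented and the call assumes the Detectron-style cfg has already been loaded as it would be during training.

import numpy as np

# Two RoIs: the first is a ground-truth box of class 3, the second a proposal that overlaps it.
entry = {
    'boxes':        np.array([[10, 10, 50, 50], [12, 8, 48, 55]], dtype=np.float32),
    'gt_classes':   np.array([3, 0]),       # 0 marks a non-ground-truth (proposal) box
    'is_crowd':     np.array([0, 0]),
    'max_overlaps': np.array([1.0, 0.8]),   # IoU of each RoI with its best-matching GT box
    'max_classes':  np.array([3, 3]),       # class label of that best-matching GT box
}

targets = _compute_targets(entry)
# targets has shape (num_rois, 5): column 0 is the class label (or 1 when
# cfg.MODEL.CLS_AGNOSTIC_BBOX_REG is set), columns 1-4 are the regression deltas.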
https://github.com/funnyzhou/fpn-pytorch/blob/423a4499c4e826d17367762e821b51b9b1b0f2f3/lib/datasets/roidb.py#L195-L226
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import six import logging import numpy as np import utils.boxes as box_utils import utils.keypoints as keypoint_utils import utils.segms as segm_utils import utils.blob as blob_utils from core.config import cfg from .json_dataset import JsonDataset logger = logging.getLogger(__name__) def combined_roidb_for_training(dataset_names, proposal_files): def get_roidb(dataset_name, proposal_file): ds = JsonDataset(dataset_name) roidb = ds.get_roidb( gt=True, proposal_file=proposal_file, crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH ) if cfg.TRAIN.USE_FLIPPED: logger.info('Appending horizontally-flipped training examples...') extend_with_flipped_entries(roidb, ds) logger.info('Loaded dataset: {:s}'.format(ds.name)) return roidb if isinstance(dataset_names, six.string_types): dataset_names = (dataset_names, ) if isinstance(proposal_files, six.string_types): proposal_files = (proposal_files, ) if len(proposal_files) == 0: proposal_files = (None, ) * len(dataset_names) assert len(dataset_names) == len(proposal_files) roidbs = [get_roidb(*args) for args in zip(dataset_names, proposal_files)] roidb = roidbs[0] for r in roidbs[1:]: roidb.extend(r) roidb = filter_for_training(roidb) if cfg.TRAIN.ASPECT_GROUPING or cfg.TRAIN.ASPECT_CROPPING: logger.info('Computing image aspect ratios and ordering the ratios...') ratio_list, ratio_index = rank_for_training(roidb) logger.info('done') else: ratio_list, ratio_index = None, None logger.info('Computing bounding-box regression targets...') add_bbox_regression_targets(roidb) logger.info('done') _compute_and_log_stats(roidb) return roidb, ratio_list, ratio_index def extend_with_flipped_entries(roidb, dataset): flipped_roidb = [] for entry in roidb: width = entry['width'] boxes = entry['boxes'].copy() oldx1 = boxes[:, 0].copy() oldx2 = boxes[:, 2].copy() boxes[:, 0] = width - oldx2 - 1 boxes[:, 2] = width - oldx1 - 1 assert (boxes[:, 2] >= boxes[:, 0]).all() flipped_entry = {} dont_copy = ('boxes', 'segms', 'gt_keypoints', 'flipped') for k, v in entry.items(): if k not in dont_copy: flipped_entry[k] = v flipped_entry['boxes'] = boxes flipped_entry['segms'] = segm_utils.flip_segms( entry['segms'], entry['height'], entry['width'] ) if dataset.keypoints is not None: flipped_entry['gt_keypoints'] = keypoint_utils.flip_keypoints( dataset.keypoints, dataset.keypoint_flip_map, entry['gt_keypoints'], entry['width'] ) flipped_entry['flipped'] = True flipped_roidb.append(flipped_entry) roidb.extend(flipped_roidb) def filter_for_training(roidb): def is_valid(entry): overlaps = entry['max_overlaps'] fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0] bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0] valid = len(fg_inds) > 0 or len(bg_inds) > 0 if cfg.MODEL.KEYPOINTS_ON: valid = valid and entry['has_visible_keypoints'] return valid num = len(roidb) filtered_roidb = [entry for entry in roidb if is_valid(entry)] num_after = len(filtered_roidb) logger.info('Filtered {} roidb entries: {} -> {}'. 
format(num - num_after, num, num_after)) return filtered_roidb def rank_for_training(roidb): RATIO_HI = cfg.TRAIN.ASPECT_HI RATIO_LO = cfg.TRAIN.ASPECT_LO need_crop_cnt = 0 ratio_list = [] for entry in roidb: width = entry['width'] height = entry['height'] ratio = width / float(height) if cfg.TRAIN.ASPECT_CROPPING: if ratio > RATIO_HI: entry['need_crop'] = True ratio = RATIO_HI need_crop_cnt += 1 elif ratio < RATIO_LO: entry['need_crop'] = True ratio = RATIO_LO need_crop_cnt += 1 else: entry['need_crop'] = False else: entry['need_crop'] = False ratio_list.append(ratio) if cfg.TRAIN.ASPECT_CROPPING: logging.info('Number of entries that need to be cropped: %d. Ratio bound: [%.2f, %.2f]', need_crop_cnt, RATIO_LO, RATIO_HI) ratio_list = np.array(ratio_list) ratio_index = np.argsort(ratio_list) return ratio_list[ratio_index], ratio_index def add_bbox_regression_targets(roidb): for entry in roidb: entry['bbox_targets'] = _compute_targets(entry)
MIT License
jdkandersson/openalchemy
open_alchemy/models_file/artifacts/type_.py
typed_dict
python
def typed_dict(*, artifacts: schemas_artifacts.types.TAnyPropertyArtifacts) -> str:
    model_type: str
    if artifacts.type != types.PropertyType.BACKREF:
        model_type = model(artifacts=artifacts)
    else:
        inner_type = "typing.Dict[str, typing.Union[int, float, str, bool]]"
        if artifacts.sub_type == schemas_artifacts.types.BackrefSubType.OBJECT:
            model_type = f"typing.Optional[{inner_type}]"
        else:
            model_type = f"typing.Sequence[{inner_type}]"

    if artifacts.type == types.PropertyType.JSON:
        return model_type

    if artifacts.type == types.PropertyType.RELATIONSHIP:
        model_type = model_type.replace(
            f"T{artifacts.parent}", f"{artifacts.parent}Dict"
        )

    if artifacts.type == types.PropertyType.SIMPLE:
        if artifacts.open_api.format == "binary":
            model_type = model_type.replace("bytes", "str")
        if artifacts.open_api.format == "date":
            model_type = model_type.replace("datetime.date", "str")
        if artifacts.open_api.format == "date-time":
            model_type = model_type.replace("datetime.datetime", "str")

    return model_type
Calculate the Python type of a column for a TypedDict.

Args:
    artifacts: The artifacts from the schema of the column.

Returns:
    The equivalent Python type for the TypedDict.
https://github.com/jdkandersson/openalchemy/blob/40f52d003e40ad79e67dcb305aef3dd4debefcc9/open_alchemy/models_file/artifacts/type_.py#L104-L145
from open_alchemy import types from open_alchemy.helpers import calculate_nullable from open_alchemy.helpers import type_ as type_helper from open_alchemy.schemas import artifacts as schemas_artifacts _SIMPLE_TYPE_STRING_FORMAT_MAPPING = { "binary": "bytes", "date": "datetime.date", "date-time": "datetime.datetime", } _SIMPLE_TYPE_MAPPING = { "string": lambda format_: _SIMPLE_TYPE_STRING_FORMAT_MAPPING[format_] if format_ in _SIMPLE_TYPE_STRING_FORMAT_MAPPING else "str", "integer": lambda _: "int", "number": lambda _: "float", "boolean": lambda _: "bool", } def _model_simple_property( *, artifacts: schemas_artifacts.types.SimplePropertyArtifacts ) -> str: assert artifacts.open_api.type in type_helper.SIMPLE_TYPES type_ = _SIMPLE_TYPE_MAPPING[artifacts.open_api.type](artifacts.open_api.format) optional = calculate_nullable.calculate_nullable( nullable=artifacts.open_api.nullable, generated=artifacts.extension.autoincrement is True, required=artifacts.required, defaulted=artifacts.open_api.default is not None or artifacts.extension.server_default is not None, ) if optional: return f"typing.Optional[{type_}]" return type_ _RELATIONSHIP_TYPE_MAPPING = { types.RelationshipType.MANY_TO_ONE: lambda parent: f'"T{parent}"', types.RelationshipType.ONE_TO_ONE: lambda parent: f'"T{parent}"', types.RelationshipType.ONE_TO_MANY: ( lambda parent: f'typing.Sequence["T{parent}"]' ), types.RelationshipType.MANY_TO_MANY: ( lambda parent: f'typing.Sequence["T{parent}"]' ), } def _model_relationship_property( *, artifacts: schemas_artifacts.types.TAnyRelationshipPropertyArtifacts ) -> str: type_ = _RELATIONSHIP_TYPE_MAPPING[artifacts.sub_type](artifacts.parent) if ( artifacts.sub_type == types.RelationshipType.ONE_TO_MANY or artifacts.sub_type == types.RelationshipType.MANY_TO_MANY ): return type_ optional = calculate_nullable.calculate_nullable( nullable=artifacts.nullable, generated=False, required=artifacts.required, defaulted=False, ) if optional: return f"typing.Optional[{type_}]" return type_ def model(*, artifacts: schemas_artifacts.types.TAnyPropertyArtifacts) -> str: assert artifacts.type != types.PropertyType.BACKREF if artifacts.type == types.PropertyType.SIMPLE: return _model_simple_property(artifacts=artifacts) if artifacts.type == types.PropertyType.RELATIONSHIP: return _model_relationship_property(artifacts=artifacts) return "typing.Any"
Apache License 2.0
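The format-dependent substitutions above can be shown in isolation. The sketch below is a standalone illustration only: `to_typed_dict_type` is a hypothetical helper that mirrors the replace() calls made for simple string columns, not an OpenAlchemy API.

# Standalone sketch; the mapping mirrors the substitutions in typed_dict above.
_TYPED_DICT_OVERRIDES = {
    "binary": ("bytes", "str"),
    "date": ("datetime.date", "str"),
    "date-time": ("datetime.datetime", "str"),
}


def to_typed_dict_type(model_type, format_):
    """Rewrite a model type string into its TypedDict equivalent."""
    old, new = _TYPED_DICT_OVERRIDES.get(format_, (None, None))
    return model_type if old is None else model_type.replace(old, new)


assert to_typed_dict_type("typing.Optional[datetime.date]", "date") == "typing.Optional[str]"
assert to_typed_dict_type("int", None) == "int"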
trustyjaid/trusty-cogs-archive
halo/halo.py
Halo._halowars
python
async def _halowars(self, ctx):
    if ctx.invoked_subcommand is None:
        await self.bot.send_cmd_help(ctx)
Get information from Halo Wars 2
https://github.com/trustyjaid/trusty-cogs-archive/blob/aa1d267b9315a7a58f22c10bc7e1a68a1e6d384b/halo/halo.py#L37-L40
import discord import aiohttp from discord.ext import commands from .utils.chat_formatting import pagify from .utils.dataIO import dataIO from .utils import checks import os from random import choice as randchoice numbs = { "next": "➡", "back": "⬅", "exit": "❌" } class Halo(): def __init__(self, bot): self.bot = bot self.session = aiohttp.ClientSession(loop=self.bot.loop) self.settings = dataIO.load_json("data/halo/settings.json") self.api_token = self.settings["api_token"] async def request_url(self, url, params=None): async with self.session.get(url, params=params, headers=self.api_token) as resp: return await resp.json() @commands.group(pass_context=True, name='halo5') @checks.admin_or_permissions(manage_server=True) async def _halo5(self, ctx): if ctx.invoked_subcommand is None: await self.bot.send_cmd_help(ctx) @commands.group(pass_context=True, name='halowars') @checks.admin_or_permissions(manage_server=True)
MIT License
wjohnson/pyapacheatlas
pyapacheatlas/core/entity.py
AtlasProcess.inputs
python
def inputs(self, value):
    if value is not None:
        self.attributes["inputs"] = self._parse_atlas_entity(value)
    else:
        self.attributes["inputs"] = None
Set the inputs attribute for the process.

If you pass in a list of dicts, each dict should have the keys: guid,
typeName, qualifiedName. If you pass in a list of AtlasEntity objects,
they are automatically converted to dicts.

Setting the value to None results in no change to the Process inputs you
are targeting after upload. Setting it to an empty list `[]` erases all
the inputs.

:param value: List of dicts or atlas entities to set as the inputs.
:type value:
    list(Union(dict, :class:`~pyapacheatlas.core.entity.AtlasEntity`))
https://github.com/wjohnson/pyapacheatlas/blob/31925f305ee10ebc39ca41bbfa54a0d17bd3a0ec/pyapacheatlas/core/entity.py#L393-L409
import warnings from .util import AtlasUnInit class AtlasEntity(): def __init__(self, name, typeName, qualified_name, guid=None, **kwargs): super().__init__() self.attributes = kwargs.get("attributes", {}) self.attributes.update({"name": None, "qualifiedName": None}) self.businessAttributes = kwargs.get( "businessAttributes", AtlasUnInit()) self.classifications = kwargs.get("classifications", AtlasUnInit()) self.contacts = kwargs.get("contacts", AtlasUnInit()) self.createTime = kwargs.get("createTime", AtlasUnInit()) self.createdBy = kwargs.get("createdBy", AtlasUnInit()) self.customAttributes = kwargs.get("customAttributes", AtlasUnInit()) self.guid = guid self.homeId = kwargs.get("homeId", AtlasUnInit()) self.isIncomplete = kwargs.get("isIncomplete", AtlasUnInit()) self.labels = kwargs.get("labels", AtlasUnInit()) self.lastModifiedTS = kwargs.get("lastModifiedTS", AtlasUnInit()) self.provenanceType = kwargs.get("provenanceType", AtlasUnInit()) self.proxy = kwargs.get("proxy", AtlasUnInit()) self.relationshipAttributes = kwargs.get( "relationshipAttributes", AtlasUnInit()) self.source = kwargs.get("source", AtlasUnInit()) self.sourceDetails = kwargs.get("sourceDetails", AtlasUnInit()) self.status = kwargs.get("status", AtlasUnInit()) self.typeName = typeName self.updateTime = kwargs.get("updateTime", AtlasUnInit()) self.updatedBy = kwargs.get("updatedBy", AtlasUnInit()) self.version = kwargs.get("version", AtlasUnInit()) self.name = name self.qualifiedName = qualified_name if "description" in kwargs: self.attributes.update({"description": kwargs["description"]}) def __eq__(self, other): return self.qualifiedName == other def __hash__(self): return hash(self.qualifiedName) def __ne__(self, other): return self.qualifiedName != other def __repr__(self): return "AtlasEntity({type_name},{qual_name})".format( type_name=self.typeName, qual_name=self.qualifiedName ) def __str__(self): return "AtlasEntity({type_name},{qual_name})".format( type_name=self.typeName, qual_name=self.qualifiedName ) @property def name(self): return self.attributes["name"] @name.setter def name(self, value): self.attributes["name"] = value @property def qualifiedName(self): return self.attributes["qualifiedName"] @qualifiedName.setter def qualifiedName(self, value): self.attributes["qualifiedName"] = value def addBusinessAttribute(self, **kwargs): businessAttributes_was_uninitialized = isinstance( self.businessAttributes, AtlasUnInit) if businessAttributes_was_uninitialized: self.businessAttributes = {} try: self.businessAttributes.update(kwargs) except Exception as e: if businessAttributes_was_uninitialized: self.businessAttributes = AtlasUnInit() raise e def addClassification(self, *args): classification_was_uninitialized = isinstance( self.classifications, AtlasUnInit) classifications_to_add = [] if classification_was_uninitialized: self.classifications = [] try: for arg in args: if isinstance(arg, dict): classifications_to_add.append(arg) elif isinstance(arg, str): classifications_to_add.append( AtlasClassification(arg).to_json()) elif isinstance(arg, AtlasClassification): classifications_to_add.append(arg.to_json()) else: raise TypeError( f"The type {type(arg)} for value {arg} can't be converted to a classification dict." 
) self.classifications.extend(classifications_to_add) except Exception as e: if classification_was_uninitialized: self.classifications = AtlasUnInit() raise e def addCustomAttribute(self, **kwargs): customAttributes_was_uninitialized = isinstance( self.customAttributes, AtlasUnInit) if customAttributes_was_uninitialized: self.customAttributes = {} try: self.customAttributes.update(kwargs) except Exception as e: if customAttributes_was_uninitialized: self.customAttributes = AtlasUnInit() raise e def addRelationship(self, **kwargs): relationshipAttributes_was_uninitialized = isinstance( self.customAttributes, AtlasUnInit) relationships_to_add = {} if relationshipAttributes_was_uninitialized: self.relationshipAttributes = {} try: for k, v in kwargs.items(): val = v.to_json(minimum=True) if isinstance( v, AtlasEntity) else v relationships_to_add[k] = val self.relationshipAttributes.update(relationships_to_add) except Exception as e: if relationshipAttributes_was_uninitialized: self.relationshipAttributes = AtlasUnInit() def to_json(self, minimum=False): if minimum and self.guid is not None: output = { "typeName": self.typeName, "guid": self.guid, "qualifiedName": self.attributes["qualifiedName"] } elif minimum and self.guid is None: output = { "typeName": self.typeName, "uniqueAttributes": { "qualifiedName": self.qualifiedName } } else: output = { "typeName": self.typeName, "guid": self.guid, "attributes": self.attributes } for k, v in vars(self).items(): is_uninitialized = isinstance(v, AtlasUnInit) is_asset_attribute = k in ["name", "qualifiedName"] if is_uninitialized or is_asset_attribute: continue output[k] = v return output @classmethod def from_json(cls, entity_json): local_entity = entity_json.copy() guid = local_entity.pop("guid") typeName = local_entity.pop("typeName") name = local_entity["attributes"]["name"] qualified_name = local_entity["attributes"]["qualifiedName"] ae = cls( name=name, typeName=typeName, qualified_name=qualified_name, guid=guid, inputs=local_entity["attributes"].get("inputs"), outputs=local_entity["attributes"].get("outputs"), **local_entity ) return ae def merge(self, other): if self.qualifiedName != other.qualifiedName: raise TypeError("Type:{} cannot be merged with {}".format( type(other), type(self))) self.guid = other.guid _other_attr_keys = set(other.attributes.keys()) _self_attr_keys = set(self.attributes.keys()) _new_keys_in_other = _other_attr_keys.difference(_self_attr_keys) self.attributes.update( {k: v for k, v in other.attributes.items() if k in _new_keys_in_other}) if other.classifications: self.classifications = (self.classifications or []).extend( self.classifications) class AtlasProcess(AtlasEntity): def __init__(self, name, typeName, qualified_name, inputs, outputs, guid=None, **kwargs): super().__init__(name, typeName, qualified_name, guid=guid, **kwargs) self.attributes.update({"inputs": None, "outputs": None}) self.inputs = inputs self.outputs = outputs def _parse_atlas_entity(self, iterable): return [ e.to_json(minimum=True) if isinstance(e, AtlasEntity) else e for e in iterable ] @property def inputs(self): return self.attributes.get("inputs") @inputs.setter
MIT License
melloddy/sparsechem
sparsechem/utils.py
train_binary
python
def train_binary(net, optimizer, loader, loss, dev, task_weights, normalize_loss=None, num_int_batches=1, progress=True): net.train() logloss_sum = 0.0 logloss_count = 0 int_count = 0 for b in tqdm(loader, leave=False, disable=(progress == False)): if int_count == 0: optimizer.zero_grad() X = torch.sparse_coo_tensor( b["x_ind"], b["x_data"], size = [b["batch_size"], loader.dataset.input_size]).to(dev) y_ind = b["y_ind"].to(dev) y_w = task_weights[y_ind[1]] y_data = b["y_data"].to(dev) yhat_all = net(X) yhat = yhat_all[y_ind[0], y_ind[1]] norm = normalize_loss if norm is None: norm = b["batch_size"] * num_int_batches output = (loss(yhat, y_data) * y_w).sum() output_n = output / norm output_n.backward() int_count += 1 if int_count == num_int_batches: optimizer.step() int_count = 0 logloss_sum += output.detach() / y_data.shape[0] logloss_count += 1 if int_count > 0: optimizer.step() return logloss_sum / logloss_count
Args: net pytorch network optimizer optimizer to use loader data loader with training data dev device task_weights weights of the tasks normalize_loss normalization value, if None then use batch size num_int_batches number of internal batches to use progress whether to show a progress bar
https://github.com/melloddy/sparsechem/blob/2569d1bbf769d4bf8b590d4d7bca1804bbf54642/sparsechem/utils.py#L292-L343
import sklearn.metrics from tqdm import tqdm import pandas as pd import numpy as np import torch import scipy.sparse import scipy.io import scipy.special import types import json import warnings import torch.nn.functional as F from sparsechem import censored_mse_loss_numpy from collections import namedtuple from scipy.sparse import csr_matrix class Nothing(object): def __getattr__(self, name): return Nothing() def __call__(self, *args, **kwargs): return Nothing() def __repr__(self): return "Nothing" class Nothing(object): def __getattr__(self, name): return Nothing() def __call__(self, *args, **kwargs): return Nothing() def __repr__(self): return "Nothing" def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def calc_acc_kappa(recall, fpr, num_pos, num_neg): num_all = num_neg + num_pos tp = np.round(recall * num_pos).astype(np.int) fn = num_pos - tp fp = np.round(fpr * num_neg).astype(np.int) tn = num_neg - fp acc = (tp + tn) / num_all pexp = num_pos / num_all * (tp + fp) / num_all + num_neg / num_all * (tn + fn) / num_all kappa = (acc - pexp) / (1 - pexp) return acc, kappa def all_metrics(y_true, y_score): if len(y_true) <= 1 or (y_true[0] == y_true).all(): df = pd.DataFrame({"roc_auc_score": [np.nan], "auc_pr": [np.nan], "avg_prec_score": [np.nan], "f1_max": [np.nan], "p_f1_max": [np.nan], "kappa": [np.nan], "kappa_max": [np.nan], "p_kappa_max": [np.nan], "bceloss": [np.nan]}) return df fpr, tpr, tpr_thresholds = sklearn.metrics.roc_curve(y_true=y_true, y_score=y_score) roc_auc_score = sklearn.metrics.auc(x=fpr, y=tpr) precision, recall, pr_thresholds = sklearn.metrics.precision_recall_curve(y_true = y_true, probas_pred = y_score) bceloss = F.binary_cross_entropy_with_logits( input = torch.FloatTensor(y_score), target = torch.FloatTensor(y_true), reduction="none").mean().item() F1_score = np.zeros(len(precision)) mask = precision > 0 F1_score[mask] = 2 * (precision[mask] * recall[mask]) / (precision[mask] + recall[mask]) f1_max_idx = F1_score.argmax() f1_max = F1_score[f1_max_idx] p_f1_max = scipy.special.expit(pr_thresholds[f1_max_idx]) auc_pr = sklearn.metrics.auc(x = recall, y = precision) avg_prec_score = sklearn.metrics.average_precision_score( y_true = y_true, y_score = y_score) y_classes = np.where(y_score >= 0.0, 1, 0) acc, kappas = calc_acc_kappa(recall=tpr, fpr=fpr, num_pos=(y_true==1).sum(), num_neg=(y_true==0).sum()) kappa_max_idx = kappas.argmax() kappa_max = kappas[kappa_max_idx] p_kappa_max = scipy.special.expit(tpr_thresholds[kappa_max_idx]) kappa = sklearn.metrics.cohen_kappa_score(y_true, y_classes) df = pd.DataFrame({"roc_auc_score": [roc_auc_score], "auc_pr": [auc_pr], "avg_prec_score": [avg_prec_score], "f1_max": [f1_max], "p_f1_max": [p_f1_max], "kappa": [kappa], "kappa_max": [kappa_max], "p_kappa_max": p_kappa_max, "bceloss": bceloss}) return df def compute_corr(x, y): if len(y) <= 1: return np.nan ystd = y.std() xstd = x.std() if ystd == 0 or xstd == 0: return np.nan return np.dot((x - x.mean()), (y - y.mean())) / len(y) / y.std() / x.std() def all_metrics_regr(y_true, y_score, y_censor=None): if len(y_true) <= 1: df = pd.DataFrame({"rmse": [np.nan], "rmse_uncen": [np.nan], "rsquared": [np.nan], "corrcoef": [np.nan]}) return df censor0 = y_censor == 0 if y_censor is not None else slice(None) mse_cen = censored_mse_loss_numpy(target=y_true, input=y_score, censor=y_censor).mean() with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) mse = ((y_true[censor0] - y_score[censor0])**2).mean() 
yvar = y_true[censor0].var() if yvar == 0 or np.isnan(yvar): rsquared = np.nan corr = np.nan else: rsquared = 1 - mse / yvar corr = compute_corr(y_true[censor0], y_score[censor0]) df = pd.DataFrame({ "rmse": [np.sqrt(mse_cen)], "rmse_uncen": [np.sqrt(mse)], "rsquared": [rsquared], "corrcoef": [corr], }) return df def compute_metrics(cols, y_true, y_score, num_tasks): if len(cols) < 1: return pd.DataFrame({ "roc_auc_score": np.nan, "auc_pr": np.nan, "avg_prec_score": np.nan, "f1_max": np.nan, "p_f1_max": np.nan, "kappa": np.nan, "kappa_max": np.nan, "p_kappa_max": np.nan, "bceloss": np.nan}, index=np.arange(num_tasks)) df = pd.DataFrame({"task": cols, "y_true": y_true, "y_score": y_score}) metrics = df.groupby("task", sort=True).apply(lambda g: all_metrics( y_true = g.y_true.values, y_score = g.y_score.values)) metrics.reset_index(level=-1, drop=True, inplace=True) return metrics.reindex(np.arange(num_tasks)) def compute_metrics_regr(cols, y_true, y_score, num_tasks, y_censor=None): if len(cols) < 1: return pd.DataFrame({ "rmse": np.nan, "rmse_uncen": np.nan, "rsquared": np.nan, "corrcoef": np.nan, }, index=np.arange(num_tasks)) df = pd.DataFrame({ "task": cols, "y_true": y_true, "y_score": y_score, "y_censor": y_censor, }) metrics = df.groupby("task", sort=True).apply(lambda g: all_metrics_regr( y_true = g.y_true.values, y_score = g.y_score.values, y_censor = g.y_censor.values if y_censor is not None else None)) metrics.reset_index(level=-1, drop=True, inplace=True) return metrics.reindex(np.arange(num_tasks)) def class_fold_counts(y_class, folding): folds = np.unique(folding) num_pos = [] num_neg = [] for fold in folds: yf = y_class[folding == fold] num_pos.append( np.array((yf == +1).sum(0)).flatten() ) num_neg.append( np.array((yf == -1).sum(0)).flatten() ) return np.row_stack(num_pos), np.row_stack(num_neg) def print_metrics(epoch, train_time, metrics_tr, metrics_va, header): if metrics_tr is None: if header: print("Epoch\tlogl_va | auc_va | aucpr_va | maxf1_va | tr_time") output_fstr = ( f"{epoch}.\t{metrics_va['logloss']:.5f}" f" | {metrics_va['roc_auc_score']:.5f}" f" | {metrics_va['auc_pr']:.5f}" f" | {metrics_va['f1_max']:.5f}" f" | {train_time:6.1f}" ) print(output_fstr) return if header: print("Epoch\tlogl_tr logl_va | auc_tr auc_va | aucpr_tr aucpr_va | maxf1_tr maxf1_va | tr_time") output_fstr = ( f"{epoch}.\t{metrics_tr['logloss']:.5f} {metrics_va['logloss']:.5f}" f" | {metrics_tr['roc_auc_score']:.5f} {metrics_va['roc_auc_score']:.5f}" f" | {metrics_tr['auc_pr']:.5f} {metrics_va['auc_pr']:.5f}" f" | {metrics_tr['f1_max']:.5f} {metrics_va['f1_max']:.5f}" f" | {train_time:6.1f}" ) print(output_fstr) def print_table(formats, data): for key, fmt in formats.items(): print(fmt.format(data[key]), end="") Column = namedtuple("Column", "key size dec title") columns_cr = [ Column("epoch", size=6, dec= 0, title="Epoch"), Column(None, size=1, dec=-1, title="|"), Column("logloss", size=8, dec= 5, title="logl"), Column("bceloss", size=8, dec= 5, title="bceloss"), Column("roc_auc_score", size=8, dec= 5, title="aucroc"), Column("auc_pr", size=8, dec= 5, title="aucpr"), Column("f1_max", size=8, dec= 5, title="f1_max"), Column(None, size=1, dec=-1, title="|"), Column("rmse", size=9, dec= 5, title="rmse"), Column("rsquared", size=9, dec= 5, title="rsquared"), Column("corrcoef", size=9, dec= 5, title="corrcoef"), Column(None, size=1, dec=-1, title="|"), Column("train_time", size=6, dec= 1, title="tr_time"), ] def print_cell(value, size, dec, left, end=" "): align = "<" if left else ">" if 
type(value) == str: print(("{:" + align + str(size) + "}").format(value), end=end) else: print(("{:" + align + str(size) + "." + str(dec) + "f}").format(value), end=end) def print_metrics_cr(epoch, train_time, results_tr, results_va, header): data = pd.concat([results_va["classification_agg"], results_va["regression_agg"]]) data["train_time"] = train_time data["epoch"] = epoch if header: for i, col in enumerate(columns_cr): print_cell(col.title, col.size, dec=0, left=(i==0)) print() for i, col in enumerate(columns_cr): print_cell(data.get(col.key, col.title), col.size, dec=col.dec, left=(i==0)) print() def evaluate_binary(net, loader, loss, dev, progress=True): net.eval() logloss_sum = 0.0 logloss_count = 0 y_ind_list = [] y_true_list = [] y_hat_list = [] num_tasks = loader.dataset.y.shape[1] with torch.no_grad(): for b in tqdm(loader, leave=False, disable=(progress == False)): X = torch.sparse_coo_tensor( b["x_ind"], b["x_data"], size = [b["batch_size"], loader.dataset.input_size]).to(dev) y_ind = b["y_ind"].to(dev) y_data = b["y_data"].to(dev) y_hat_all = net(X) y_hat = y_hat_all[y_ind[0], y_ind[1]] output = loss(y_hat, y_data).sum() logloss_sum += output logloss_count += y_data.shape[0] y_ind_list.append(b["y_ind"]) y_true_list.append(b["y_data"]) y_hat_list.append(y_hat.cpu()) if len(y_ind_list) == 0: return { "metrics": compute_metrics([], y_true=[], y_score=[], num_tasks=num_tasks), "logloss": np.nan, } y_ind = torch.cat(y_ind_list, dim=1).numpy() y_true = torch.cat(y_true_list, dim=0).numpy() y_hat = torch.cat(y_hat_list, dim=0).numpy() metrics = compute_metrics(y_ind[1], y_true=y_true, y_score=y_hat, num_tasks=num_tasks) return { 'metrics': metrics, 'logloss': logloss_sum.cpu().numpy() / logloss_count }
MIT License
mila-iqia/babyai
babyai/levels/levelgen.py
RoomGridLevel.gen_mission
python
def gen_mission(self): raise NotImplementedError
Generate a mission (instructions and matching environment) Derived level classes should implement this method
https://github.com/mila-iqia/babyai/blob/863f3529371ba45ef0148a48b48f5ae6e61e06cc/babyai/levels/levelgen.py#L157-L162
import random from collections import OrderedDict from copy import deepcopy import gym from gym_minigrid.roomgrid import RoomGrid from .verifier import * class RejectSampling(Exception): pass class RoomGridLevel(RoomGrid): def __init__( self, room_size=8, **kwargs ): super().__init__( room_size=room_size, **kwargs ) def reset(self, **kwargs): obs = super().reset(**kwargs) self.instrs.reset_verifier(self) nav_time_room = self.room_size ** 2 nav_time_maze = nav_time_room * self.num_rows * self.num_cols num_navs = self.num_navs_needed(self.instrs) self.max_steps = num_navs * nav_time_maze return obs def step(self, action): obs, reward, done, info = super().step(action) if action == self.actions.drop: self.update_objs_poss() status = self.instrs.verify(action) if status == 'success': done = True reward = self._reward() elif status == 'failure': done = True reward = 0 return obs, reward, done, info def update_objs_poss(self, instr=None): if instr is None: instr = self.instrs if isinstance(instr, BeforeInstr) or isinstance(instr, AndInstr) or isinstance(instr, AfterInstr): self.update_objs_poss(instr.instr_a) self.update_objs_poss(instr.instr_b) else: instr.update_objs_poss() def _gen_grid(self, width, height): while True: try: super()._gen_grid(width, height) self.gen_mission() self.validate_instrs(self.instrs) except RecursionError as error: print('Timeout during mission generation:', error) continue except RejectSampling as error: continue break self.surface = self.instrs.surface(self) self.mission = self.surface def validate_instrs(self, instr): if hasattr(self, 'unblocking') and self.unblocking: colors_of_locked_doors = [] for i in range(self.num_cols): for j in range(self.num_rows): room = self.get_room(i, j) for door in room.doors: if door and door.is_locked: colors_of_locked_doors.append(door.color) if isinstance(instr, PutNextInstr): instr.reset_verifier(self) if set(instr.desc_move.obj_set).intersection( set(instr.desc_fixed.obj_set)): raise RejectSampling( "there are objects that match both lhs and rhs of PutNext") if instr.objs_next(): raise RejectSampling('objs already next to each other') move = instr.desc_move fixed = instr.desc_fixed if len(move.obj_set) == 1 and len(fixed.obj_set) == 1: if move.obj_set[0] is fixed.obj_set[0]: raise RejectSampling('cannot move an object next to itself') if isinstance(instr, ActionInstr): if not hasattr(self, 'unblocking') or not self.unblocking: return potential_objects = ('desc', 'desc_move', 'desc_fixed') for attr in potential_objects: if hasattr(instr, attr): obj = getattr(instr, attr) if obj.type == 'key' and obj.color in colors_of_locked_doors: raise RejectSampling('cannot do anything with/to a key that can be used to open a door') return if isinstance(instr, SeqInstr): self.validate_instrs(instr.instr_a) self.validate_instrs(instr.instr_b) return assert False, "unhandled instruction type"
BSD 3-Clause New or Revised License
lenddoefl/filters
filters/extensions.py
is_filter_type
python
def is_filter_type(target): if not is_class(target): return 'not a class' if not issubclass(target, BaseFilter): return 'does not extend BaseFilter' if is_abstract(target): return 'abstract class' return True
Returns whether the specified object can be registered as a filter. :return: Returns ``True`` if the object is a filter. Otherwise, returns a string indicating why it is not valid.
https://github.com/lenddoefl/filters/blob/36c2a2b1cffa3a37279053cf181709045fd6683a/filters/extensions.py#L121-L139
from __future__ import absolute_import, division, print_function, unicode_literals from inspect import getmembers as get_members, isabstract as is_abstract, isclass as is_class, ismodule as is_module from logging import getLogger from typing import Any, Dict, Generator, Text, Tuple, Type, Union from warnings import warn from class_registry import EntryPointClassRegistry from pkg_resources import EntryPoint, iter_entry_points from filters.base import BaseFilter __all__ = [ 'FilterExtensionRegistry', 'GROUP_NAME', ] GROUP_NAME = 'filters.extensions' logger = getLogger(__name__) legacy_warned = False class FilterExtensionRegistry(EntryPointClassRegistry): def __init__(self, group=GROUP_NAME): super(FilterExtensionRegistry, self).__init__(group) def __getattr__(self, item): return self[item] def __repr__(self): return repr(self._get_cache()) def _get_cache(self): if self._cache is None: self._cache = {} try: for target in iter_entry_points(self.group): filter_ = target.load() ift_result = is_filter_type(filter_) if ift_result is True: logger.debug( 'Registering extension filter ' '{cls.__module__}.{cls.__name__} as {name}.'.format( cls = filter_, name = target.name, ), ) self._cache[target.name] = filter_ else: logger.debug( 'Using legacy extension loader for ' '{target.name} ({reason}).'.format( reason = ift_result, target = target, ), ) self._cache.update(iter_filters_in(filter_)) except DeprecationWarning: self._cache = None raise return self._cache @staticmethod def create_instance(class_, *args, **kwargs): if args or kwargs: return class_(*args, **kwargs) return class_
MIT License
elsigh/browserscope
base/util.py
CategoryTestDriver
python
def CategoryTestDriver(request): category = request.GET.get('category') test_set = all_test_sets.GetTestSet(category) params = { 'category': test_set.category, 'category_name': test_set.category_name, 'page_title': '%s - Test Driver' % test_set.category_name, 'continue': request.GET.get('continue', ''), 'autorun': request.GET.get('autorun', ''), 'test_page': test_set.test_page, 'testurl': request.GET.get('testurl', ''), 'csrf_token': request.session.get('csrf_token'), 'hide_footer': True } return Render(request, TEST_DRIVER_TPL, params, category)
Loads the test driver for a category.
https://github.com/elsigh/browserscope/blob/f0b3670d4692742d5f2e6cf605bce9b1a4b8ca1b/base/util.py#L168-L183
__author__ = 'elsigh@google.com (Lindsey Simon)' import hashlib import logging import random import os import pickle import random import re import sys from threading import Timer import time import urllib import urlparse from google.appengine.api import memcache from google.appengine.api import users from google.appengine.api import urlfetch from google.appengine.api.labs import taskqueue from google.appengine.ext import deferred from google.appengine.ext import db from google.appengine.runtime import DeadlineExceededError import django from django import http from django import shortcuts from django.template import loader, Context from django.utils import simplejson import settings import models.user_test import models.result import models.user_agent from models import result_stats from models import user_agent_release_dates from categories import all_test_sets from categories import test_set_params from base import decorators from base import manage_dirty from base import custom_filters from third_party.gviz import gviz_api from third_party.gaefy.db import pager from django.template import add_to_builtins add_to_builtins('base.custom_filters') RESULTS_STRING_MEMCACHE_NS = 'results_str' MULTI_TEST_DRIVER_TEST_PAGE = '/multi_test_frameset' ABOUT_TPL = 'about.html' TEST_DRIVER_TPL = 'test_driver.html' MULTI_TEST_FRAMESET_TPL = 'multi_test_frameset.html' MULTI_TEST_DRIVER_TPL = 'multi_test_driver.html' VALID_STATS_OUTPUTS = ('html', 'pickle', 'xhr', 'csv', 'js', 'json', 'jsonp', 'gviz_table', 'gviz_table_data', 'gviz_timeline_data') def Render(request, template, params={}, category=None): params['app_title'] = settings.APP_TITLE params['version_id'] = os.environ['CURRENT_VERSION_ID'] params['build'] = settings.BUILD params['resource_version'] = custom_filters.get_resource_version() params['template'] = template.replace('.html', '').replace('_', '-') params['epoch'] = int(time.time()) params['request_path'] = request.get_full_path().replace('&o=xhr', '') params['request_path_lastbit'] = re.sub('^.+\/([^\/]+$)', '\\1', request.path) params['request_path_noparams'] = request.path params['server'] = GetServer(request) params['current_ua_string'] = request.META.get('HTTP_USER_AGENT') if params['current_ua_string']: js_user_agent_string = request.REQUEST.get('js_ua') js_user_agent_family = request.REQUEST.get('js_user_agent_family') js_user_agent_v1 = request.REQUEST.get('js_user_agent_v1') js_user_agent_v2 = request.REQUEST.get('js_user_agent_v2') js_user_agent_v3 = request.REQUEST.get('js_user_agent_v3') ua = models.user_agent.UserAgent.factory( params['current_ua_string'], js_user_agent_string=js_user_agent_string, js_user_agent_family=js_user_agent_family, js_user_agent_v1=js_user_agent_v1, js_user_agent_v2=js_user_agent_v2, js_user_agent_v3=js_user_agent_v3) params['current_ua'] = ua params['chromeframe_enabled'] = request.COOKIES.get( 'browserscope-chromeframe-enabled', '0') params['app_categories'] = [] params['is_admin'] = users.is_current_user_admin() current_user = users.get_current_user() if current_user: params['user_id'] = current_user.user_id() params['is_elsigh'] = current_user.nickname() == 'elsigh' else: params['user_id'] = None params['is_elsigh'] = False params['user'] = current_user params['sign_in'] = users.create_login_url(request.get_full_path()) params['sign_out'] = users.create_logout_url('/') forced_categories = [ c for c in (category, params.get('stats_table_category', None)) if c] for test_set in all_test_sets.GetVisibleTestSets(forced_categories): 
params['app_categories'].append((test_set.category, test_set.category_name)) if category == test_set.category: params['app_category'] = test_set.category params['app_category_name'] = test_set.category_name if (category and template not in (TEST_DRIVER_TPL, MULTI_TEST_DRIVER_TPL, MULTI_TEST_FRAMESET_TPL, ABOUT_TPL)): template = '%s/%s' % (category, template) mimetype = 'text/html' if 'mimetype' in params: mimetype = params['mimetype'] return shortcuts.render_to_response(template, params, mimetype=mimetype) def CategoryTest(request): category = re.sub('\/test.*', '', request.path)[1:] test_set = all_test_sets.GetTestSet(category) testurl = '' test_key = request.GET.get('test_key') if test_key: test = test_set.GetTest(test_key) testurl = test.url params = { 'category': test_set.category, 'page_title': '%s - Tests' % test_set.category_name, 'continue': request.GET.get('continue', ''), 'autorun': request.GET.get('autorun', ''), 'testurl': testurl, 'test_page': test_set.test_page } return Render(request, 'test_frameset.html', params) @decorators.provide_csrf
Apache License 2.0
pratulsrinivasan/lighthouse
data_loader.py
from_quaternion
python
def from_quaternion(quaternion): quaternion = tf.convert_to_tensor(value=quaternion) w, x, y, z = tf.unstack(quaternion, axis=-1) tx = 2.0 * x ty = 2.0 * y tz = 2.0 * z twx = tx * w twy = ty * w twz = tz * w txx = tx * x txy = ty * x txz = tz * x tyy = ty * y tyz = tz * y tzz = tz * z matrix = tf.stack((1.0 - (tyy + tzz), txy - twz, txz + twy, txy + twz, 1.0 - (txx + tzz), tyz - twx, txz - twy, tyz + twx, 1.0 - (txx + tyy)), axis=-1) output_shape = tf.concat((tf.shape(input=quaternion)[:-1], (3, 3)), axis=-1) return tf.reshape(matrix, shape=output_shape)
Convert from a quaternion.
https://github.com/pratulsrinivasan/lighthouse/blob/21e048a2913558c176a49d4e5479f8cf1affbbe2/data_loader.py#L794-L815
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import numpy as np import tensorflow as tf def dataset_to_tensors(dataset, capacity, map_fn=None, parallelism=None): with tf.name_scope(None, 'dataset_to_tensors', [dataset, capacity, map_fn, parallelism]): if map_fn is not None: dataset = dataset.map(map_fn, num_parallel_calls=parallelism) return tf.contrib.data.get_single_element(dataset.batch(capacity)) class ViewTrip( collections.namedtuple('ViewTrip', [ 'scene_id', 'sequence_id', 'timestamp', 'rgb', 'pano', 'depth', 'normal', 'mask', 'pose', 'intrinsics', 'resolution' ])): def overlap_mask(self): intrinsics = self.intrinsics * tf.constant([[1., 1., 1.], [1., -1., 1.], [1., 1., 1.]]) mask1_in_2, mask2_in_1 = image_overlap(self.depth[0], self.pose[0], self.depth[1], self.pose[1], intrinsics) masks = tf.stack([mask1_in_2, mask2_in_1], 0) return ViewTrip(self.scene_id, self.sequence_id, self.timestamp, self.rgb, self.pano, self.depth, self.normal, masks, self.pose, self.intrinsics, self.resolution) def reverse(self): return ViewTrip(self.scene_id, self.sequence_id, tf.reverse(self.timestamp, [0]), tf.reverse(self.rgb, [0]), tf.reverse(self.pano, [0]), tf.reverse(self.depth, [0]), tf.reverse(self.normal, [0]), tf.reverse(self.mask, [0]), tf.reverse(self.pose, [0]), self.intrinsics, self.resolution) def random_reverse(self): uniform_random = tf.random_uniform([], 0, 1.0) condition = tf.less(uniform_random, 0.5) return tf.cond(condition, lambda: self, lambda: self.reverse()) def deterministic_reverse(self): return tf.cond( self.hash_in_range(2, 0, 1), lambda: self, lambda: self.reverse()) def hash_in_range(self, buckets, base, limit): hash_bucket = tf.string_to_hash_bucket_fast(self.scene_id, buckets) return tf.logical_and( tf.greater_equal(hash_bucket, base), tf.less(hash_bucket, limit)) class ViewSequence( collections.namedtuple('ViewSequence', [ 'scene_id', 'sequence_id', 'timestamp', 'rgb', 'pano', 'depth', 'normal', 'pose', 'intrinsics', 'resolution' ])): def subsequence(self, stride): return ViewSequence( self.scene_id, self.sequence_id, tf.strided_slice( self.timestamp, [0], [self.length()], strides=[stride]), tf.strided_slice(self.rgb, [0], [self.length()], strides=[stride]), tf.strided_slice(self.pano, [0], [self.length()], strides=[stride]), tf.strided_slice(self.depth, [0], [self.length()], strides=[stride]), tf.strided_slice(self.normal, [0], [self.length()], strides=[stride]), tf.strided_slice(self.pose, [0], [self.length()], strides=[stride]), tf.strided_slice( self.intrinsics, [0], [self.length()], strides=[stride]), tf.strided_slice( self.resolution, [0], [self.length()], strides=[stride])) def random_subsequence(self, min_stride, max_stride): random_stride = tf.random_uniform([], minval=min_stride, maxval=max_stride, dtype=tf.int32) return self.subsequence(random_stride) def generate_trips(self, min_gap=1, max_gap=5): def mapper(timestamp_trips, rgb_trips, pano_trips, depth_trips, normal_trips, pose_trips): return ViewTrip(self.scene_id, self.sequence_id, timestamp_trips, rgb_trips, pano_trips, depth_trips, normal_trips, tf.zeros([1]), pose_trips, self.intrinsics[0], self.resolution[0]) with tf.control_dependencies( [tf.Assert(tf.less(max_gap, self.length()), [max_gap, self.length()])]): timestamp_trips = [] rgb_trips = [] pano_trips = [] depth_trips = [] normal_trips = [] pose_trips = [] for stride in range(min_gap, max_gap + 1): inds = tf.range(stride, self.length() - stride) inds_jitter 
= tf.random.uniform( minval=-40, maxval=40, shape=[self.length() - 2 * stride], dtype=tf.int32) rand_inds = tf.minimum( tf.maximum(inds + inds_jitter, 0), self.length() - 1) timestamp = tf.stack([ self.timestamp[:-2 * stride], self.timestamp[2 * stride:], self.timestamp[stride:-stride], tf.gather(self.timestamp, rand_inds) ], axis=1) rgb = tf.stack([ self.rgb[:-2 * stride], self.rgb[2 * stride:], self.rgb[stride:-stride], tf.gather(self.rgb, rand_inds) ], axis=1) pano = tf.stack([ self.pano[:-2 * stride], self.pano[2 * stride:], self.pano[stride:-stride], tf.gather(self.pano, rand_inds) ], axis=1) depth = tf.stack([ self.depth[:-2 * stride], self.depth[2 * stride:], self.depth[stride:-stride], tf.gather(self.depth, rand_inds) ], axis=1) normal = tf.stack([ self.normal[:-2 * stride], self.normal[2 * stride:], self.normal[stride:-stride], tf.gather(self.normal, rand_inds) ], axis=1) pose = tf.stack([ self.pose[:-2 * stride], self.pose[2 * stride:], self.pose[stride:-stride], tf.gather(self.pose, rand_inds) ], axis=1) timestamp_trips.append(timestamp) rgb_trips.append(rgb) pano_trips.append(pano) depth_trips.append(depth) normal_trips.append(normal) pose_trips.append(pose) timestamp_trips = tf.concat(timestamp_trips, 0) rgb_trips = tf.concat(rgb_trips, 0) pano_trips = tf.concat(pano_trips, 0) depth_trips = tf.concat(depth_trips, 0) normal_trips = tf.concat(normal_trips, 0) pose_trips = tf.concat(pose_trips, 0) dataset = tf.data.Dataset.from_tensor_slices( (timestamp_trips, rgb_trips, pano_trips, depth_trips, normal_trips, pose_trips)) return dataset.map(mapper) def length(self): return tf.shape(self.timestamp)[0] def reverse(self): return ViewSequence(self.scene_id, self.sequence_id, tf.reverse(self.timestamp, [0]), tf.reverse(self.rgb, [0]), tf.reverse(self.pano, [0]), tf.reverse(self.depth, [0]), tf.reverse(self.normal, [0]), tf.reverse(self.pose, [0]), tf.reverse(self.intrinsics, [0]), tf.reverse(self.resolution, [0])) def random_reverse(self): uniform_random = tf.random_uniform([], 0, 1.0) condition = tf.less(uniform_random, 0.5) return tf.cond(condition, lambda: self, lambda: self.reverse()) def deterministic_reverse(self): return tf.cond( self.hash_in_range(2, 0, 1), lambda: self, lambda: self.reverse()) def hash_in_range(self, buckets, base, limit): hash_bucket = tf.string_to_hash_bucket_fast(self.scene_id, buckets) return tf.logical_and( tf.greater_equal(hash_bucket, base), tf.less(hash_bucket, limit)) def check_cam_coherence(path): cam_gt = path + 'cam0_gt.visim' cam_render = path + 'cam0.render' lines = tf.string_split([tf.read_file(cam_render)], '\n').values lines = lines[3:] lines = tf.strided_slice(lines, [0], [lines.shape_as_list()[0]], [2]) fields = tf.reshape(tf.string_split(lines, ' ').values, [-1, 10]) timestamp_from_render, numbers = tf.split(fields, [1, 9], -1) numbers = tf.strings.to_number(numbers) eye, lookat, up = tf.split(numbers, [3, 3, 3], -1) up_vector = tf.nn.l2_normalize(up - eye) lookat_vector = tf.nn.l2_normalize(lookat - eye) rotation_from_lookat = lookat_matrix(up_vector, lookat_vector) lines = tf.string_split([tf.read_file(cam_gt)], '\n').values lines = lines[1:] fields = tf.reshape(tf.string_split(lines, ',').values, [-1, 8]) timestamp_from_gt, numbers = tf.split(fields, [1, 7], -1) numbers = tf.strings.to_number(numbers) position, quaternion = tf.split(numbers, [3, 4], -1) rotation_from_quaternion = from_quaternion(quaternion) assert tf.reduce_all(tf.equal(timestamp_from_render, timestamp_from_gt)) assert tf.reduce_all(tf.equal(eye, position)) so3_diff = 
(tf.trace( tf.matmul( rotation_from_lookat, rotation_from_quaternion, transpose_a=True)) - 1) / 2 tf.assert_near(so3_diff, tf.ones_like(so3_diff)) def lookat_matrix(up, lookat_direction): z = tf.linalg.l2_normalize(-lookat_direction, axis=-1) x = tf.linalg.l2_normalize(tf.cross(up, z), axis=-1) y = tf.cross(z, x) lookat = tf.stack([x, y, z], axis=-1) return lookat def load_sequence(sequence_dir, data_dir, parallelism=10): n_timestamp = 1000 v = tf.string_split([sequence_dir], '/').values scene_id, sequence_id = v[-2], v[-1] camera_dir = data_dir + 'GroundTruth_HD1-HD6/' + scene_id + '/' trajectory_name = 'velocity_angular' + tf.strings.substr(v[-1], -4, -4) + '/' camera_dir = camera_dir + trajectory_name camera_timestamp_path = camera_dir + 'cam0.timestamp' timestamp, img_name = read_timestamp(camera_timestamp_path) rgb_paths = sequence_dir + '/cam0/data/' + img_name pano_paths = sequence_dir + '/cam0_pano/data/' + img_name depth_paths = sequence_dir + '/depth0/data/' + img_name normal_paths = sequence_dir + '/normal0/data/' + img_name camera_parameters_path = camera_dir + 'cam0.ccam' pose_matrix, intrinsic_matrix, resolution = read_camera_parameters( camera_parameters_path, n_timestamp, parallel_camera_process=parallelism) return ViewSequence(scene_id, sequence_id, timestamp, rgb_paths, pano_paths, depth_paths, normal_paths, pose_matrix, intrinsic_matrix, resolution) def read_timestamp(path): lines = tf.string_split([tf.read_file(path)], '\n').values lines = lines[1:] fields = tf.reshape(tf.string_split(lines, ',').values, [-1, 2]) timestamp, img_name = tf.split(fields, [1, 1], -1) timestamp = tf.squeeze(timestamp, -1) img_name = tf.squeeze(img_name, -1) return timestamp, img_name def read_camera_parameters(path, n_timestamp, parallel_camera_process=10): lines = tf.string_split([tf.read_file(path)], '\n').values lines = lines[6:] fields = tf.reshape(tf.string_split(lines, ' ').values, [-1, 15]) fields = tf.strings.to_number(fields) camera_info, orientation, position, resolution = tf.split( fields, [6, 4, 3, 2], -1) camera_ds = tf.data.Dataset.from_tensor_slices( (camera_info, orientation, position, resolution)) def process_camera_parameters(camera_info, orientation, position, resolution): rotation_matrix = from_quaternion(orientation) pose_matrix = tf.concat([rotation_matrix, tf.expand_dims(position, -1)], -1) intrinsic_matrix = build_intrinsic_matrix(camera_info[0], camera_info[1], camera_info[2]) return (pose_matrix, intrinsic_matrix, resolution) return dataset_to_tensors( camera_ds, capacity=n_timestamp, map_fn=process_camera_parameters, parallelism=parallel_camera_process) def build_intrinsic_matrix(f, cx, cy): return tf.stack( [tf.stack([f, 0., cx]), tf.stack([0., f, cy]), tf.constant([0., 0., 1.])]) def load_image_data(trip): def load_single_image(filename, shape): image = tf.image.decode_png(tf.read_file(filename), 3) image = tf.image.convert_image_dtype(image, tf.float32) image.set_shape(shape) return image def load_depth(filename, shape): depth = tf.image.decode_png(tf.read_file(filename), 3, tf.dtypes.uint16) depth = tf.cast(depth, tf.float32) / 1000 depth.set_shape(shape) return depth def load_surface_normal(filename, shape): normal = tf.image.decode_png(tf.read_file(filename), 3, tf.dtypes.uint16) normal = 2 * tf.cast(normal, tf.float32) / (2**16 - 1) - 1 normal.set_shape(shape) return normal trip_length = 4 rgb = dataset_to_tensors( tf.data.Dataset.from_tensor_slices(trip.rgb), trip_length, lambda filename: load_single_image(filename, [480, 640, 3]), parallelism=4) pano = 
dataset_to_tensors( tf.data.Dataset.from_tensor_slices(trip.pano), trip_length, lambda filename: load_single_image(filename, [1500, 3000, 3]), parallelism=4) depth = dataset_to_tensors( tf.data.Dataset.from_tensor_slices(trip.depth), trip_length, lambda filename: load_depth(filename, [480, 640, 3]), parallelism=4) depth = depth[:, :, :, :1] normal = dataset_to_tensors( tf.data.Dataset.from_tensor_slices(trip.normal), trip_length, lambda filename: load_surface_normal(filename, [480, 640, 3]), parallelism=4) return ViewTrip(trip.scene_id, trip.sequence_id, trip.timestamp, rgb, pano, depth, normal, trip.mask, trip.pose, trip.intrinsics, trip.resolution) def small_translation_condition(trip, translation_threshold): positions = trip.pose[:, :, -1] t_norm = tf.norm(positions[0] - positions[1], axis=-1) return tf.greater(t_norm, translation_threshold) def too_close_condition(trip, depth_threshold=0.1): depths = trip.depth[:3, :, :, 0] depthmax = tf.reduce_max(depths) depths = tf.where( tf.equal(depths, 0.0), depthmax * tf.ones_like(depths), depths) return tf.greater(tf.reduce_min(depths), depth_threshold) def pano_forwards_condition(trip): ref_pose = trip.pose[1, :, :] pano_pose = trip.pose[3, :, :] ref_twds = -1.0 * ref_pose[:, 2] t_vec = pano_pose[:, 3] - ref_pose[:, 3] ref_depth = trip.depth[1, :, :, 0] ref_depth = tf.where( tf.equal(ref_depth, 0.0), tf.reduce_max(ref_depth) * tf.ones_like(ref_depth), ref_depth) max_depth = tf.reduce_max(ref_depth) median_depth = tf.contrib.distributions.percentile(ref_depth, 0.5) min_depth_cond = tf.greater(tf.reduce_sum(ref_twds * t_vec), median_depth) max_depth_cond = tf.less(tf.reduce_sum(ref_twds * t_vec), max_depth) return tf.logical_and(min_depth_cond, max_depth_cond) def dark_trip_condition(trip, threshold=0.1): cond = tf.math.greater(image_brightness(trip.rgb), threshold) return tf.math.reduce_all(cond) def image_brightness(image): r, g, b = tf.split(image, [1, 1, 1], -1) brightness = tf.sqrt(0.299 * (r**2) + 0.587 * (g**2) + 0.114 * (b**2)) avg_brightness = tf.reduce_mean(brightness, axis=[1, 2, 3]) return avg_brightness def filter_random_lighting(sequence_dir): sequence_name = tf.string_split([sequence_dir], '/').values[-1] lighting = tf.substr(sequence_name, 0, 6) return tf.not_equal(lighting, 'random') def filter_seq_length(sequence_dir): img_files = tf.data.Dataset.list_files(sequence_dir + '/cam0/data/*.png') pano_files = tf.data.Dataset.list_files(sequence_dir + '/cam0_pano/data/*.png') num_imgs = tf.data.experimental.cardinality(img_files) num_panos = tf.data.experimental.cardinality(pano_files) return tf.logical_and(tf.equal(num_imgs, 1000), tf.equal(num_panos, 1000)) def prepare_training_set( dataset, min_gap, max_gap, min_stride, max_stride, batch_size, epochs, min_overlap, max_overlap, translation_threshold, luminence_threshold, depth_threshold, parallel_image_reads, prefetch_buffer, filter_envmap=True): dataset = dataset.map( lambda sequence: sequence.random_subsequence(min_stride, max_stride)) dataset = dataset.flat_map( lambda sequence: sequence.generate_trips(min_gap, max_gap)) dataset = dataset.shuffle(1000000).repeat(epochs) dataset = dataset.filter( lambda trip: small_translation_condition(trip, translation_threshold)) dataset = dataset.map(load_image_data, parallel_image_reads).apply( tf.data.experimental.ignore_errors()) dataset = dataset.filter( lambda trip: dark_trip_condition(trip, luminence_threshold)) if filter_envmap: dataset = dataset.filter(pano_forwards_condition) dataset = dataset.filter( lambda trip: 
too_close_condition(trip, depth_threshold)) dataset = dataset.batch( batch_size, drop_remainder=True).prefetch(prefetch_buffer) return dataset def prepare_eval_set( dataset, min_gap, max_gap, min_stride, max_stride, batch_size, min_overlap, max_overlap, translation_threshold, luminence_threshold, depth_threshold, parallel_image_reads, prefetch_buffer): stride = (min_stride + max_stride) // 2 dataset = dataset.map(lambda sequence: sequence.subsequence(stride)) dataset = dataset.flat_map( lambda sequence: sequence.generate_trips(min_gap, max_gap)) dataset = dataset.filter( lambda trip: small_translation_condition(trip, translation_threshold)) dataset = dataset.map(load_image_data, parallel_image_reads).apply( tf.data.experimental.ignore_errors()) dataset = dataset.filter( lambda trip: dark_trip_condition(trip, luminence_threshold)) dataset = dataset.filter(pano_forwards_condition) dataset = dataset.filter( lambda trip: too_close_condition(trip, depth_threshold)) dataset = dataset.batch( batch_size, drop_remainder=True).prefetch(prefetch_buffer) return dataset def world_to_camera_projection(p_world, intrinsics, world_to_camera): shape = p_world.shape.as_list() height, width = shape[0], shape[1] p_world_homogeneous = tf.concat([p_world, tf.ones([height, width, 1])], -1) intrinsics = tf.tile(intrinsics[tf.newaxis, tf.newaxis, :], [height, width, 1, 1]) world_to_camera = tf.tile(world_to_camera[tf.newaxis, tf.newaxis, :], [height, width, 1, 1]) p_camera = tf.squeeze( tf.matmul(world_to_camera, tf.expand_dims(p_world_homogeneous, -1)), -1) p_camera_z = p_camera * tf.constant([1., 1., -1.], shape=[1, 1, 3]) p_image = tf.squeeze( tf.matmul(intrinsics, tf.expand_dims(p_camera_z, -1)), -1) return p_image[:, :, :2] / (p_image[:, :, -1:] + 1e-8), p_image[:, :, -1] def camera_to_world_projection(depth, intrinsics, camera_to_world): shape = depth.shape.as_list() height, width = shape[0], shape[1] xx, yy = tf.meshgrid( tf.lin_space(0., width - 1., width), tf.lin_space(0., height - 1., height)) p_pixel = tf.stack([xx, yy], axis=-1) p_pixel_homogeneous = tf.concat([p_pixel, tf.ones([height, width, 1])], -1) camera_to_world = tf.tile(camera_to_world[tf.newaxis, tf.newaxis, :], [height, width, 1, 1]) intrinsics = tf.tile(intrinsics[tf.newaxis, tf.newaxis, :], [height, width, 1, 1]) p_image = tf.squeeze( tf.matmul( tf.matrix_inverse(intrinsics), tf.expand_dims(p_pixel_homogeneous, -1)), -1) lookat_axis = tf.tile( tf.constant([0., 0., 1.], shape=[1, 1, 3]), [height, width, 1]) z = depth * tf.reduce_sum( tf.math.l2_normalize(p_image, axis=-1) * lookat_axis, axis=-1, keepdims=True) p_camera = z * p_image p_camera = p_camera * tf.constant([1., 1., -1.], shape=[1, 1, 3]) p_camera_homogeneous = tf.concat( [p_camera, tf.ones(shape=[height, width, 1])], -1) p_world = tf.squeeze( tf.matmul(camera_to_world, tf.expand_dims(p_camera_homogeneous, -1)), -1) return p_world def image_overlap(depth1, pose1_c2w, depth2, pose2_c2w, intrinsics): pose1_w2c = tf.matrix_inverse( tf.concat([pose1_c2w, tf.constant([[0., 0., 0., 1.]])], 0))[:3] pose2_w2c = tf.matrix_inverse( tf.concat([pose2_c2w, tf.constant([[0., 0., 0., 1.]])], 0))[:3] p_world1 = camera_to_world_projection(depth1, intrinsics, pose1_c2w) p_image1_in_2, z1_c2 = world_to_camera_projection(p_world1, intrinsics, pose2_w2c) p_world2 = camera_to_world_projection(depth2, intrinsics, pose2_c2w) p_image2_in_1, z2_c1 = world_to_camera_projection(p_world2, intrinsics, pose1_w2c) shape = depth1.shape.as_list() height, width = shape[0], shape[1] height = tf.cast(height, 
tf.float32) width = tf.cast(width, tf.float32) mask_h2_in_1 = tf.logical_and( tf.less_equal(p_image2_in_1[:, :, 1], height), tf.greater_equal(p_image2_in_1[:, :, 1], 0.)) mask_w2_in_1 = tf.logical_and( tf.less_equal(p_image2_in_1[:, :, 0], width), tf.greater_equal(p_image2_in_1[:, :, 0], 0.)) mask2_in_1 = tf.logical_and( tf.logical_and(mask_h2_in_1, mask_w2_in_1), z2_c1 > 0) mask_h1_in_2 = tf.logical_and( tf.less_equal(p_image1_in_2[:, :, 1], height), tf.greater_equal(p_image1_in_2[:, :, 1], 0.)) mask_w1_in_2 = tf.logical_and( tf.less_equal(p_image1_in_2[:, :, 0], width), tf.greater_equal(p_image1_in_2[:, :, 0], 0.)) mask1_in_2 = tf.logical_and( tf.logical_and(mask_h1_in_2, mask_w1_in_2), z1_c2 > 0) return mask1_in_2, mask2_in_1 def images_have_overlap(trip, min_ratio, max_ratio): mask1_in_2, mask2_in_1 = trip.mask[0], trip.mask[1] shape = mask1_in_2.shape.as_list() height, width = shape[0], shape[1] ratio1 = tf.reduce_sum(tf.cast(mask1_in_2, tf.float32)) / (height * width) ratio2 = tf.reduce_sum(tf.cast(mask2_in_1, tf.float32)) / (height * width) cond1 = tf.logical_and( tf.less_equal(ratio1, max_ratio), tf.less_equal(ratio2, max_ratio)) cond2 = tf.logical_and( tf.greater_equal(ratio1, min_ratio), tf.greater_equal(ratio2, min_ratio)) return tf.logical_and(cond1, cond2) def data_loader(parent_dir='', dataset_list=('HD1', 'HD2', 'HD3', 'HD4', 'HD5', 'HD6'), min_gap=1, max_gap=4, min_stride=1, max_stride=2, epochs=-1, batch_size=1, random_lighting=False, luminence_threshold=0.1, depth_threshold=0.1, min_overlap=0.3, max_overlap=1.0, min_translation=0.05, validation_percentage=0, test_percentage=10, parallelism=20, parallel_image_reads=100, prefetch_buffer=20, filter_envmap=True): datasets = collections.namedtuple('datasets', ['training', 'validation', 'test']) test_start = 100 - test_percentage val_start = test_start - validation_percentage data_dir = os.path.join(parent_dir, dataset_list[0]) scenes = tf.data.Dataset.list_files(os.path.join(data_dir, '*')) for dataset in dataset_list[1:]: data_dir = os.path.join(parent_dir, dataset) scenes = scenes.concatenate( tf.data.Dataset.list_files(os.path.join(data_dir, '*'))) sequences = scenes.flat_map( lambda scene_dir: tf.data.Dataset.list_files(scene_dir + '/*')).apply( tf.data.experimental.ignore_errors()) if not random_lighting: sequences = sequences.filter(filter_random_lighting) sequences = sequences.filter(filter_seq_length).apply( tf.data.experimental.ignore_errors()) sequences = sequences.map( lambda sequence_dir: load_sequence(sequence_dir, parent_dir, parallelism), num_parallel_calls=parallelism) training = sequences.filter( lambda sequence: sequence.hash_in_range(100, 0, val_start)) validation = sequences.filter( lambda sequence: sequence.hash_in_range(100, val_start, test_start)) test = sequences.filter( lambda sequence: sequence.hash_in_range(100, test_start, 100)) training = prepare_training_set(training, min_gap, max_gap, min_stride, max_stride, batch_size, epochs, min_overlap, max_overlap, min_translation, luminence_threshold, depth_threshold, parallel_image_reads, prefetch_buffer, filter_envmap) validation = prepare_eval_set(validation, min_gap, max_gap, min_stride, max_stride, batch_size, min_overlap, max_overlap, min_translation, luminence_threshold, depth_threshold, parallel_image_reads, prefetch_buffer) test = prepare_eval_set(test, min_gap, max_gap, min_stride, max_stride, batch_size, min_overlap, max_overlap, min_translation, luminence_threshold, depth_threshold, parallel_image_reads, prefetch_buffer) return 
datasets(training, validation, test) def relative_pose(element): r1_c2w, t1_world = tf.split(element.pose[:, 0], [3, 1], -1) r2_c2w, t2_world = tf.split(element.pose[:, 1], [3, 1], -1) relative_rotation_c2toc1 = tf.matmul(r1_c2w, r2_c2w, transpose_a=True) translation_c1 = tf.matmul(r1_c2w, t2_world - t1_world, transpose_a=True) translation_c1 = tf.math.l2_normalize(tf.squeeze(translation_c1, -1), axis=-1) return relative_rotation_c2toc1, translation_c1 def quaternion_to_matrix(quaternion): quaternion = tf.nn.l2_normalize(quaternion, axis=-1) w, x, y, z = tf.unstack(quaternion, axis=-1) return tf.stack([ tf.stack([ 1 - 2 * y**2 - 2 * z**2, 2 * x * y - 2 * z * w, 2 * x * z + 2 * y * w ], -1), tf.stack([ 2 * x * y + 2 * z * w, 1 - 2 * x**2 - 2 * z**2, 2 * y * z - 2 * x * w ], -1), tf.stack([ 2 * x * z - 2 * y * w, 2 * y * z + 2 * x * w, 1 - 2 * x**2 - 2 * y**2 ], -1) ], 1)
Apache License 2.0
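A hedged numerical check of the conversion above, assuming eager TensorFlow and that from_quaternion is importable from data_loader (the module path shown for this record); note the (w, x, y, z) component order implied by the unstack call.

import tensorflow as tf

from data_loader import from_quaternion  # assumed import path

identity_q = tf.constant([1.0, 0.0, 0.0, 0.0])   # w, x, y, z
half_turn_z = tf.constant([0.0, 0.0, 0.0, 1.0])  # 180 degrees about the z axis

print(from_quaternion(identity_q))   # ~ 3x3 identity matrix
print(from_quaternion(half_turn_z))  # ~ diag(-1, -1, 1)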
dbt-labs/dbt
core/dbt/context/base.py
BaseContext.debug
python
def debug():
    import sys
    import ipdb
    frame = sys._getframe(3)
    ipdb.set_trace(frame)
    return ''
Enter a debugger at this line in the compiled jinja code.
https://github.com/dbt-labs/dbt/blob/ea07729bbf0a3481e220615644b7955659001529/core/dbt/context/base.py#L292-L298
import json import os from typing import ( Any, Dict, NoReturn, Optional, Mapping ) from dbt import flags from dbt import tracking from dbt.clients.jinja import undefined_error, get_rendered from dbt.clients.yaml_helper import ( yaml, safe_load, SafeLoader, Loader, Dumper ) from dbt.contracts.graph.compiled import CompiledResource from dbt.exceptions import raise_compiler_error, MacroReturn from dbt.logger import GLOBAL_LOGGER as logger from dbt.version import __version__ as dbt_version import pytz import datetime import re def get_pytz_module_context() -> Dict[str, Any]: context_exports = pytz.__all__ return { name: getattr(pytz, name) for name in context_exports } def get_datetime_module_context() -> Dict[str, Any]: context_exports = [ 'date', 'datetime', 'time', 'timedelta', 'tzinfo' ] return { name: getattr(datetime, name) for name in context_exports } def get_re_module_context() -> Dict[str, Any]: context_exports = re.__all__ return { name: getattr(re, name) for name in context_exports } def get_context_modules() -> Dict[str, Dict[str, Any]]: return { 'pytz': get_pytz_module_context(), 'datetime': get_datetime_module_context(), 're': get_re_module_context(), } class ContextMember: def __init__(self, value, name=None): self.name = name self.inner = value def key(self, default): if self.name is None: return default return self.name def contextmember(value): if isinstance(value, str): return lambda v: ContextMember(v, name=value) return ContextMember(value) def contextproperty(value): if isinstance(value, str): return lambda v: ContextMember(property(v), name=value) return ContextMember(property(value)) class ContextMeta(type): def __new__(mcls, name, bases, dct): context_members = {} context_attrs = {} new_dct = {} for base in bases: context_members.update(getattr(base, '_context_members_', {})) context_attrs.update(getattr(base, '_context_attrs_', {})) for key, value in dct.items(): if isinstance(value, ContextMember): context_key = value.key(key) context_members[context_key] = value.inner context_attrs[context_key] = key value = value.inner new_dct[key] = value new_dct['_context_members_'] = context_members new_dct['_context_attrs_'] = context_attrs return type.__new__(mcls, name, bases, new_dct) class Var: UndefinedVarError = "Required var '{}' not found in config:\nVars " "supplied to {} = {}" _VAR_NOTSET = object() def __init__( self, context: Mapping[str, Any], cli_vars: Mapping[str, Any], node: Optional[CompiledResource] = None ) -> None: self._context: Mapping[str, Any] = context self._cli_vars: Mapping[str, Any] = cli_vars self._node: Optional[CompiledResource] = node self._merged: Mapping[str, Any] = self._generate_merged() def _generate_merged(self) -> Mapping[str, Any]: return self._cli_vars @property def node_name(self): if self._node is not None: return self._node.name else: return '<Configuration>' def get_missing_var(self, var_name): dct = {k: self._merged[k] for k in self._merged} pretty_vars = json.dumps(dct, sort_keys=True, indent=4) msg = self.UndefinedVarError.format( var_name, self.node_name, pretty_vars ) raise_compiler_error(msg, self._node) def has_var(self, var_name: str): return var_name in self._merged def get_rendered_var(self, var_name): raw = self._merged[var_name] if not isinstance(raw, str): return raw return get_rendered(raw, self._context) def __call__(self, var_name, default=_VAR_NOTSET): if self.has_var(var_name): return self.get_rendered_var(var_name) elif default is not self._VAR_NOTSET: return default else: return self.get_missing_var(var_name) 
class BaseContext(metaclass=ContextMeta): def __init__(self, cli_vars): self._ctx = {} self.cli_vars = cli_vars def generate_builtins(self): builtins: Dict[str, Any] = {} for key, value in self._context_members_.items(): if hasattr(value, '__get__'): value = value.__get__(self) builtins[key] = value return builtins def to_dict(self): self._ctx['context'] = self._ctx builtins = self.generate_builtins() self._ctx['builtins'] = builtins self._ctx.update(builtins) return self._ctx @contextproperty def dbt_version(self) -> str: return dbt_version @contextproperty def var(self) -> Var: return Var(self._ctx, self.cli_vars) @contextmember @staticmethod def env_var(var: str, default: Optional[str] = None) -> str: if var in os.environ: return os.environ[var] elif default is not None: return default else: msg = f"Env var required but not provided: '{var}'" undefined_error(msg) if os.environ.get('DBT_MACRO_DEBUGGING'): @contextmember @staticmethod
Apache License 2.0
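Editor's note on the `Var` helper in the context sample above: its lookup order (explicit value, then caller-supplied default, then an error listing the known vars) can be checked with a small standalone sketch. The `simple_var_lookup` helper below is hypothetical and only mirrors the control flow of `Var.__call__`; it does not use dbt's real `raise_compiler_error` or Jinja rendering machinery.

import json

_NOTSET = object()  # sentinel so an explicit default of None is still honoured


def simple_var_lookup(merged_vars, var_name, default=_NOTSET):
    # Mirrors Var.__call__'s fallback order: stored value, then default, then error.
    if var_name in merged_vars:
        return merged_vars[var_name]
    if default is not _NOTSET:
        return default
    pretty = json.dumps(merged_vars, sort_keys=True, indent=4)
    raise KeyError(f"Required var '{var_name}' not found in config:\nVars supplied = {pretty}")


print(simple_var_lookup({"target_schema": "analytics"}, "target_schema"))  # analytics
print(simple_var_lookup({}, "target_schema", default="dev"))               # dev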
galarzaa90/tibia.py
tibiapy/tournament.py
Tournament._parse_rank_range
python
def _parse_rank_range(rank_text):
        m = RANGE_PATTERN.search(rank_text)
        first = int(m.group(1))
        last = first
        if m.group(2):
            last = int(m.group(2))
        return first, last
Parse the rank range text from the reward set table.

        Parameters
        ----------
        rank_text: :class:`str`
            The string describing the ranks.

        Returns
        -------
        :class:`tuple` of :class:`int`
            A tuple containing the highest and lowest rank for this reward bracket.
            If the reward is for a single rank, both tuple elements will be the same.
https://github.com/galarzaa90/tibia.py/blob/babcb1648fb99bf5ac0fd0162b38244cbcd21b9d/tibiapy/tournament.py#L582-L602
import datetime
import math
import re
from typing import List, TYPE_CHECKING

from tibiapy import abc
from tibiapy.enums import PvpType, TournamentPhase, Vocation
from tibiapy.errors import InvalidContent
from tibiapy.utils import get_tibia_url, parse_integer, parse_popup, parse_tibia_datetime, parse_tibia_full_date, \
    parse_tibiacom_content, split_list, try_enum

if TYPE_CHECKING:
    import bs4

__all__ = (
    "TournamentLeaderboardEntry",
    "TournamentEntry",
    "RewardEntry",
    "RuleSet",
    "ScoreSet",
    "Tournament",
    "TournamentLeaderboard",
)

RANGE_PATTERN = re.compile(r'(\d+)(?:-(\d+))?')
CUP_PATTERN = re.compile(r'(\w+ cup)')
DEED_PATTERN = re.compile(r'(\w+ deed)')
ARCHIVE_LIST_PATTERN = re.compile(r'([\w\s]+)\s\(([^-]+)-\s([^)]+)\)')
RANK_PATTERN = re.compile(r'(\d+)\.\s\(\+?(-?\d+)\)')
RESULTS_PATTERN = re.compile(r'Results: (\d+)')
CURRENT_TOURNAMENT_PATTERN = re.compile(r'(?:.*- (\w+))')

TOURNAMENT_LEADERBOARDS_URL = "https://www.tibia.com/community/?subtopic=tournamentleaderboard"


class TournamentLeaderboardEntry(abc.BaseCharacter, abc.Serializable):
    __slots__ = (
        "name",
        "rank",
        "change",
        "vocation",
        "score",
    )

    def __init__(self, **kwargs):
        self.name: str = kwargs.get("name")
        self.rank: int = kwargs.get("rank")
        self.change: int = kwargs.get("change")
        self.vocation: Vocation = kwargs.get("vocation")
        self.score: int = kwargs.get("score")

    def __repr__(self):
        return "<{0.__class__.__name__} rank={0.rank} name={0.name!r} vocation={0.vocation!r} " \
               "points={0.score}>".format(self)


class TournamentEntry(abc.BaseTournament, abc.Serializable):
    __slots__ = (
        "title",
        "cycle",
        "start_date",
        "end_date",
    )

    _serializable_properties = ("duration",)

    def __init__(self, title, start_date, end_date, **kwargs):
        self.title: str = title
        self.start_date: datetime.date = start_date
        self.end_date: datetime.date = end_date
        self.cycle: int = kwargs.get("cycle", 0)

    def __repr__(self):
        return "<{0.__class__.__name__} title={0.title!r} cycle={0.cycle} start_date={0.start_date!r} " \
               "end_date={0.end_date!r}>".format(self)

    @property
    def duration(self):
        if self.start_date and self.end_date:
            return self.end_date - self.start_date
        return None


class RewardEntry(abc.Serializable):
    __slots__ = (
        "initial_rank",
        "last_rank",
        "tibia_coins",
        "tournament_coins",
        "tournament_ticker_voucher",
        "cup",
        "deed",
        "other_rewards",
    )

    def __init__(self, **kwargs):
        self.initial_rank = kwargs.get("initial_rank", 0)
        self.last_rank = kwargs.get("last_rank", 0)
        self.tibia_coins = kwargs.get("tibia_coins", 0)
        self.tournament_coins = kwargs.get("tournament_coins", 0)
        self.tournament_ticker_voucher = kwargs.get("tournament_ticker_voucher", 0)
        self.cup = kwargs.get("cup")
        self.deed = kwargs.get("deed")
        self.other_rewards = kwargs.get("other_rewards")

    def __repr__(self):
        attributes = ""
        for attr in self.__slots__:
            v = getattr(self, attr)
            attributes += f" {attr}={v!r}"
        return f"<{self.__class__.__name__}{attributes}>"


class RuleSet(abc.Serializable):
    __slots__ = (
        "pvp_type",
        "daily_tournament_playtime",
        "total_tournament_playtime",
        "playtime_reduced_only_in_combat",
        "death_penalty_modifier",
        "xp_multiplier",
        "skill_multiplier",
        "spawn_rate_multiplier",
        "loot_probability",
        "rent_percentage",
        "house_auction_durations",
        "shared_xp_bonus",
    )

    def __init__(self, **kwargs):
        self.pvp_type = try_enum(PvpType, kwargs.get("pvp_type"))
        self.daily_tournament_playtime = self._try_parse_interval(kwargs.get("daily_tournament_playtime"))
        self.total_tournament_playtime = self._try_parse_interval(kwargs.get("total_tournament_playtime"))
        self.playtime_reduced_only_in_combat = kwargs.get("playtime_reduced_only_in_combat")
        self.death_penalty_modifier = kwargs.get("death_penalty_modifier")
        self.xp_multiplier = kwargs.get("xp_multiplier")
        self.skill_multiplier = kwargs.get("skill_multiplier")
        self.spawn_rate_multiplier = kwargs.get("spawn_rate_multiplier")
        self.loot_probability = kwargs.get("loot_probability")
        self.rent_percentage = kwargs.get("rent_percentage")
        self.house_auction_durations = kwargs.get("house_auction_durations")
        self.shared_xp_bonus = kwargs.get("shared_xp_bonus")

    def __repr__(self):
        attributes = ""
        for attr in self.__slots__:
            v = getattr(self, attr)
            attributes += f" {attr}={v!r}"
        return f"<{self.__class__.__name__}{attributes}>"

    @staticmethod
    def _try_parse_interval(interval):
        if interval is None:
            return None
        if isinstance(interval, datetime.timedelta):
            return interval
        try:
            t = datetime.datetime.strptime(interval, "%H:%M:%S")
            return datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)
        except ValueError:
            return None


class ScoreSet(abc.Serializable):
    __slots__ = (
        "creature_kills",
        "level_gain_loss",
        "skill_gain_loss",
        "charm_point_multiplier",
        "character_death",
        "area_discovery",
    )

    def __init__(self, **kwargs):
        self.creature_kills = kwargs.get("creature_kills", {})
        self.level_gain_loss = kwargs.get("level_gain_loss", 0)
        self.skill_gain_loss = kwargs.get("skill_gain_loss", 0)
        self.charm_point_multiplier = kwargs.get("charm_point_multiplier", 0)
        self.character_death = kwargs.get("character_death", 0)
        self.area_discovery = kwargs.get("area_discovery", 0)

    def __repr__(self):
        attributes = ""
        for attr in self.__slots__:
            v = getattr(self, attr)
            attributes += f" {attr}={v!r}"
        return f"<{self.__class__.__name__}{attributes}>"


class Tournament(abc.BaseTournament, abc.Serializable):
    __slots__ = (
        "phase",
        "start_date",
        "end_date",
        "worlds",
        "rule_set",
        "score_set",
        "reward_set",
        "archived_tournaments",
    )

    _serializable_properties = (
        "duration",
        "rewards_range",
    )

    def __init__(self, **kwargs):
        self.title = kwargs.get("title")
        self.cycle = kwargs.get("cycle", 0)
        self.phase = try_enum(TournamentPhase, kwargs.get("phase"))
        self.start_date: datetime.datetime = kwargs.get("start_date")
        self.end_date: datetime.datetime = kwargs.get("end_date")
        self.worlds: List[str] = kwargs.get("worlds")
        self.rule_set: RuleSet = kwargs.get("rule_set")
        self.score_set: ScoreSet = kwargs.get("score_set")
        self.reward_set: List[RewardEntry] = kwargs.get("reward_set", [])
        self.archived_tournaments: List[TournamentEntry] = kwargs.get("archived_tournaments", [])

    def __repr__(self):
        return ("<{0.__class__.__name__} title={0.title!r} phase={0.phase!r} start_date={0.start_date!r} "
                "end_date={0.start_date!r}>").format(self)

    @property
    def rewards_range(self):
        return (self.reward_set[0].initial_rank, self.reward_set[-1].last_rank) if self.reward_set else (0, 0)

    @property
    def duration(self):
        return self.end_date - self.start_date

    def rewards_for_rank(self, rank):
        for rewards in self.reward_set:
            if rewards.initial_rank <= rank <= rewards.last_rank:
                return rewards
        return None

    @classmethod
    def from_content(cls, content):
        try:
            if "An internal error has occurred" in content:
                return None
            if "Currently there is no Tournament running." in content:
                return None
            parsed_content = parse_tibiacom_content(content, builder='html5lib')
            box_content = parsed_content.find("div", attrs={"class": "BoxContent"})
            tables = box_content.find_all('table', attrs={"class": "Table5"})
            archive_table = box_content.find('table', attrs={"class": "Table4"})
            tournament_details_table = tables[-1]
            info_tables = tournament_details_table.find_all('table', attrs={'class': 'TableContent'})
            main_info = info_tables[0]
            rule_set = info_tables[1]
            score_set = info_tables[2]
            reward_set = info_tables[3]
            tournament = cls()
            tournament._parse_tournament_info(main_info)
            tournament._parse_tournament_rules(rule_set)
            tournament._parse_tournament_scores(score_set)
            tournament._parse_tournament_rewards(reward_set)
            if archive_table:
                tournament._parse_archive_list(archive_table)
            return tournament
        except IndexError as e:
            raise InvalidContent("content does not belong to the Tibia.com's tournament section", e)

    def _parse_tournament_info(self, table):
        rows = table.find_all('tr')
        date_fields = ("start_date", "end_date")
        list_fields = ("worlds",)
        for row in rows:
            cols_raw = row.find_all('td')
            cols = [ele.text.strip() for ele in cols_raw]
            field, value = cols
            field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
            value = value.replace("\xa0", " ")
            if field in date_fields:
                value = parse_tibia_datetime(value)
            if field in list_fields:
                value = split_list(value, ",", ",")
            if field == "phase":
                value = try_enum(TournamentPhase, value)
            try:
                setattr(self, field, value)
            except AttributeError:
                pass

    def _parse_tournament_rules(self, table):
        rows = table.find_all('tr')
        bool_fields = ("playtime_reduced_only_in_combat", "shared_xp_bonus")
        float_fields = (
            "death_penalty_modifier",
            "xp_multiplier",
            "skill_multiplier",
            "spawn_rate_multiplier",
            "loot_probability",
        )
        int_fields = ("rent_percentage", "house_auction_durations")
        rules = {}
        for row in rows[1:]:
            cols_raw = row.find_all('td')
            cols = [ele.text.strip() for ele in cols_raw]
            field, value, *_ = cols
            field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
            value = value.replace("\xa0", " ")
            if field in bool_fields:
                value = value.lower() == "yes"
            if field in float_fields:
                value = float(value.replace("x", ""))
            if field in int_fields:
                value = int(value.replace("%", ""))
            rules[field] = value
        self.rule_set = RuleSet(**rules)

    def _parse_tournament_scores(self, table):
        creatures = {}
        rows = table.find_all('tr')
        rules = {}
        for row in rows[1:]:
            cols_raw = row.find_all('td')
            cols = [ele.text.strip() for ele in cols_raw]
            field, value, *_ = cols
            icon = cols_raw[2].find("span")
            field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").replace("/", "_").lower()
            value = re.sub(r'[^-0-9]', '', value.replace("+/-", ""))
            if not icon:
                creatures[field.replace("_", " ")] = int(value)
            else:
                rules[field] = parse_integer(value)
        if "creature_kills" in rules:
            rules["creature_kills"] = creatures
        self.score_set = ScoreSet(**rules)

    def _parse_tournament_rewards(self, table):
        rows = table.find_all('tr')
        rewards = []
        for row in rows[1:]:
            cols_raw = row.find_all('td')
            rank_row, *rewards_cols = cols_raw
            rank_text = rank_row.text
            if not rank_text:
                break
            first, last = self._parse_rank_range(rank_text)
            entry = RewardEntry(initial_rank=first, last_rank=last)
            for col in rewards_cols:
                self._parse_rewards_column(col, entry)
            rewards.append(entry)
        self.reward_set = rewards

    @classmethod
    def _parse_rewards_column(cls, column, entry):
        col_str = str(column)
        img = column.find('img')
        if img and "tibiacoin" in img["src"]:
            entry.tibia_coins = parse_integer(column.text)
        if img and "tournamentcoin" in img["src"]:
            entry.tournament_coins = parse_integer(column.text)
        if img and "tournamentvoucher" in img["src"]:
            entry.tournament_ticker_voucher = parse_integer(column.text)
        if img and "trophy" in img["src"]:
            m = CUP_PATTERN.search(col_str)
            if m:
                entry.cup = m.group(1)
            m = DEED_PATTERN.search(col_str)
            if m:
                entry.deed = m.group(1)
        if img and "reward" in img["src"]:
            span = column.find('span', attrs={"class": "HelperDivIndicator"})
            mouse_over = span["onmouseover"]
            title, popup = parse_popup(mouse_over)
            label = popup.find('div', attrs={'class': 'ItemOverLabel'})
            entry.other_rewards = label.text.strip()

    @staticmethod
Apache License 2.0
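Editor's note on the `_parse_rank_range` record above: the rank-range regex accepts either a single rank ("100") or a dash-separated range ("1-5"), and a single rank collapses to an identical pair. The standalone sketch below re-declares the same pattern instead of importing tibiapy, since only the code quoted in this record is assumed to be available; the function name here is a local stand-in for the static method.

import re

RANGE_PATTERN = re.compile(r'(\d+)(?:-(\d+))?')  # same pattern as in the record above


def parse_rank_range(rank_text):
    # Mirrors the control flow of Tournament._parse_rank_range shown above.
    m = RANGE_PATTERN.search(rank_text)
    first = int(m.group(1))
    last = first
    if m.group(2):
        last = int(m.group(2))
    return first, last


print(parse_rank_range("1-5"))   # (1, 5)
print(parse_rank_range("100"))   # (100, 100)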