column               type            lengths / classes
repository_name      stringlengths   7 to 107
function_path        stringlengths   4 to 190
function_identifier  stringlengths   1 to 236
language             stringclasses   1 value
function             stringlengths   9 to 647k
docstring            stringlengths   5 to 488k
function_url         stringlengths   71 to 285
context              stringlengths   0 to 2.51M
license              stringclasses   5 values
thefourgreaterrors/alpha-rptr
src/binance_futures_api.py
Client.futures_cancel_orders
python
def futures_cancel_orders(self, **params):
    return self._request_futures_api('delete', 'batchOrders', True, data=params)
Cancel multiple futures orders https://binance-docs.github.io/apidocs/futures/en/#cancel-multiple-orders-trade
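A hypothetical usage sketch, assuming valid API credentials; the `symbol`/`orderIdList` parameter names follow the linked Binance endpoint, and the JSON-string encoding of the ID list is an assumption rather than something taken from this repository:

client = Client(api_key='YOUR_KEY', api_secret='YOUR_SECRET')
result, response = client.futures_cancel_orders(
    symbol='BTCUSDT',
    orderIdList='[283194212, 283194213]',  # assumed JSON-encoded list of order ids
)
# note: this fork's _handle_response returns a (parsed_json, raw_response) tuple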
https://github.com/thefourgreaterrors/alpha-rptr/blob/17fc54b1fb044978aab337a0a4814840e328b6f9/src/binance_futures_api.py#L425-L429
from urllib.parse import urlparse import time, hashlib, hmac import requests from operator import itemgetter from .exceptions import BinanceAPIException, BinanceRequestException, BinanceWithdrawException class Client(object): API_URL = 'https://api.binance.{}/api' WITHDRAW_API_URL = 'https://api.binance.{}/wapi' MARGIN_API_URL = 'https://api.binance.{}/sapi' WEBSITE_URL = 'https://www.binance.{}' FUTURES_URL = 'https://fapi.binance.{}/fapi' PUBLIC_API_VERSION = 'v1' PRIVATE_API_VERSION = 'v3' WITHDRAW_API_VERSION = 'v3' MARGIN_API_VERSION = 'v1' FUTURES_API_VERSION = 'v1' FUTURES_API_VERSION_V2 = 'v2' SYMBOL_TYPE_SPOT = 'SPOT' ORDER_STATUS_NEW = 'NEW' ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED' ORDER_STATUS_FILLED = 'FILLED' ORDER_STATUS_CANCELED = 'CANCELED' ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL' ORDER_STATUS_REJECTED = 'REJECTED' ORDER_STATUS_EXPIRED = 'EXPIRED' KLINE_INTERVAL_1MINUTE = '1m' KLINE_INTERVAL_3MINUTE = '3m' KLINE_INTERVAL_5MINUTE = '5m' KLINE_INTERVAL_15MINUTE = '15m' KLINE_INTERVAL_30MINUTE = '30m' KLINE_INTERVAL_1HOUR = '1h' KLINE_INTERVAL_2HOUR = '2h' KLINE_INTERVAL_4HOUR = '4h' KLINE_INTERVAL_6HOUR = '6h' KLINE_INTERVAL_8HOUR = '8h' KLINE_INTERVAL_12HOUR = '12h' KLINE_INTERVAL_1DAY = '1d' KLINE_INTERVAL_3DAY = '3d' KLINE_INTERVAL_1WEEK = '1w' KLINE_INTERVAL_1MONTH = '1M' SIDE_BUY = 'BUY' SIDE_SELL = 'SELL' ORDER_TYPE_LIMIT = 'LIMIT' ORDER_TYPE_MARKET = 'MARKET' ORDER_TYPE_STOP_LOSS = 'STOP_LOSS' ORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT' ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT' ORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT' ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER' TIME_IN_FORCE_GTC = 'GTC' TIME_IN_FORCE_IOC = 'IOC' TIME_IN_FORCE_FOK = 'FOK' ORDER_RESP_TYPE_ACK = 'ACK' ORDER_RESP_TYPE_RESULT = 'RESULT' ORDER_RESP_TYPE_FULL = 'FULL' AGG_ID = 'a' AGG_PRICE = 'p' AGG_QUANTITY = 'q' AGG_FIRST_TRADE_ID = 'f' AGG_LAST_TRADE_ID = 'l' AGG_TIME = 'T' AGG_BUYER_MAKES = 'm' AGG_BEST_MATCH = 'M' def __init__(self, api_key=None, api_secret=None, requests_params=None, tld='com'): self.API_URL = self.API_URL.format(tld) self.WITHDRAW_API_URL = self.WITHDRAW_API_URL.format(tld) self.MARGIN_API_URL = self.MARGIN_API_URL.format(tld) self.WEBSITE_URL = self.WEBSITE_URL.format(tld) self.FUTURES_URL = self.FUTURES_URL.format(tld) self.API_KEY = api_key self.API_SECRET = api_secret self.session = self._init_session() self._requests_params = requests_params self.response = None def _init_session(self): session = requests.session() session.headers.update({'Accept': 'application/json', 'User-Agent': 'binance/python', 'X-MBX-APIKEY': self.API_KEY}) return session def _create_api_uri(self, path, signed=True, version=PUBLIC_API_VERSION): v = self.PRIVATE_API_VERSION if signed else version return self.API_URL + '/' + v + '/' + path def _create_withdraw_api_uri(self, path): return self.WITHDRAW_API_URL + '/' + self.WITHDRAW_API_VERSION + '/' + path def _create_margin_api_uri(self, path): return self.MARGIN_API_URL + '/' + self.MARGIN_API_VERSION + '/' + path def _create_website_uri(self, path): return self.WEBSITE_URL + '/' + path def _create_futures_api_uri(self, path, v2): if v2 == False: return self.FUTURES_URL + '/' + self.FUTURES_API_VERSION + '/' + path else: return self.FUTURES_URL + '/' + self.FUTURES_API_VERSION_V2 + '/' + path def _generate_signature(self, data): ordered_data = self._order_params(data) query_string = '&'.join(["{}={}".format(d[0], d[1]) for d in ordered_data]) m = hmac.new(self.API_SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256) 
return m.hexdigest() def _order_params(self, data): has_signature = False params = [] for key, value in data.items(): if key == 'signature': has_signature = True else: params.append((key, value)) params.sort(key=itemgetter(0)) if has_signature: params.append(('signature', data['signature'])) return params def _request(self, method, uri, signed, force_params=False, **kwargs): kwargs['timeout'] = 10 if self._requests_params: kwargs.update(self._requests_params) data = kwargs.get('data', None) if data and isinstance(data, dict): kwargs['data'] = data if 'requests_params' in kwargs['data']: kwargs.update(kwargs['data']['requests_params']) del(kwargs['data']['requests_params']) if signed: kwargs['data']['timestamp'] = int(time.time() * 1000) kwargs['data']['signature'] = self._generate_signature(kwargs['data']) if data: kwargs['data'] = self._order_params(kwargs['data']) null_args = [i for i, (key, value) in enumerate(kwargs['data']) if value is None] for i in reversed(null_args): del kwargs['data'][i] if data and (method == 'get' or force_params): kwargs['params'] = '&'.join('%s=%s' % (data[0], data[1]) for data in kwargs['data']) del(kwargs['data']) self.response = getattr(self.session, method)(uri, **kwargs) return self._handle_response() def _request_api(self, method, path, signed=False, version=PUBLIC_API_VERSION, **kwargs): uri = self._create_api_uri(path, signed, version) return self._request(method, uri, signed, **kwargs) def _request_withdraw_api(self, method, path, signed=False, **kwargs): uri = self._create_withdraw_api_uri(path) return self._request(method, uri, signed, True, **kwargs) def _request_margin_api(self, method, path, signed=False, **kwargs): uri = self._create_margin_api_uri(path) return self._request(method, uri, signed, **kwargs) def _request_website(self, method, path, signed=False, **kwargs): uri = self._create_website_uri(path) return self._request(method, uri, signed, **kwargs) def _request_futures_api(self, method, path, signed=False, v2=False, **kwargs): uri = self._create_futures_api_uri(path, v2) return self._request(method, uri, signed, True, **kwargs) def _handle_response(self): if not str(self.response.status_code).startswith('2'): raise BinanceAPIException(self.response) try: return self.response.json(),self.response except ValueError: raise BinanceRequestException('Invalid Response: %s' % self.response.text) def _get(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs): return self._request_futures_api('get', path, signed, **kwargs) def _post(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs): return self._request_futures_api('post', path, signed, **kwargs) def _put(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs): return self._request_futures_api('put', path, signed, **kwargs) def _delete(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs): return self._request_futures_api('delete', path, signed, **kwargs) def stream_get_listen_key(self): ret, res = self._post('listenKey', False, data={}) return ret['listenKey'] def stream_keepalive(self): return self._put('listenKey', False, data={}) def futures_ping(self): return self._request_futures_api('get', 'ping') def futures_time(self): return self._request_futures_api('get', 'time') def futures_exchange_info(self): return self._request_futures_api('get', 'exchangeInfo') def futures_order_book(self, **params): return self._request_futures_api('get', 'depth', data=params) def futures_recent_trades(self, **params): return self._request_futures_api('get', 'trades', 
data=params) def futures_historical_trades(self, **params): return self._request_futures_api('get', 'historicalTrades', data=params) def futures_aggregate_trades(self, **params): return self._request_futures_api('get', 'aggTrades', data=params) def futures_klines(self, **params): return self._request_futures_api('get', 'klines', data=params) def futures_mark_price(self, **params): return self._request_futures_api('get', 'premiumIndex', data=params) def futures_funding_rate(self, **params): return self._request_futures_api('get', 'fundingRate', data=params) def futures_ticker(self, **params): return self._request_futures_api('get', 'ticker/24hr', data=params) def futures_symbol_ticker(self, **params): return self._request_futures_api('get', 'ticker/price', data=params) def futures_orderbook_ticker(self, **params): return self._request_futures_api('get', 'ticker/bookTicker', data=params) def futures_liquidation_orders(self, **params): return self._request_futures_api('get', 'ticker/allForceOrders', data=params) def futures_open_interest(self, **params): return self._request_futures_api('get', 'ticker/openInterest', data=params) def futures_leverage_bracket(self, **params): return self._request_futures_api('get', 'ticker/leverageBracket', data=params) def transfer_history(self, **params): return self._request_margin_api('get', 'futures/transfer', True, data=params) def futures_create_order(self, **params): return self._request_futures_api('post', 'order', True, data=params) def futures_get_order(self, **params): return self._request_futures_api('get', 'order', True, data=params) def futures_get_open_orders(self, **params): return self._request_futures_api('get', 'openOrders', True, data=params) def futures_get_all_orders(self, **params): return self._request_futures_api('get', 'allOrders', True, data=params) def futures_cancel_order(self, **params): return self._request_futures_api('delete', 'order', True, data=params) def futures_cancel_all_open_orders(self, **params): return self._request_futures_api('delete', 'allOpenOrders', True, data=params)
MIT License
holoclean/holoclean-legacy-deprecated
holoclean/holoclean.py
HoloClean._init_spark
python
def _init_spark(self):
    conf = SparkConf()
    conf.set("spark.executor.extraClassPath", self.holoclean_path + "/" + self.pg_driver)
    conf.set("spark.driver.extraClassPath", self.holoclean_path + "/" + self.pg_driver)
    conf.set('spark.driver.memory', '20g')
    conf.set('spark.executor.memory', '20g')
    conf.set("spark.network.timeout", "6000")
    conf.set("spark.rpc.askTimeout", "99999")
    conf.set("spark.worker.timeout", "60000")
    conf.set("spark.driver.maxResultSize", '70g')
    conf.set("spark.ui.showConsoleProgress", "false")
    if self.spark_cluster:
        conf.set("spark.master", self.spark_cluster)
    sc = SparkContext(conf=conf)
    sc.setLogLevel("OFF")
    sql_ctxt = SQLContext(sc)
    return sql_ctxt.sparkSession, sql_ctxt
Set Spark configuration :return: Spark session :return: Spark SQL context
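A hypothetical usage sketch; the keyword values are simply this module's own argument defaults, and the toy DataFrame is illustrative:

hc = HoloClean(db_user='holocleanuser', db_pwd='abcd1234', db_name='holo', verbose=True)
spark_session = hc.spark_session   # pyspark.sql.SparkSession returned by _init_spark()
sql_ctxt = hc.spark_sql_ctxt       # pyspark.sql.SQLContext
df = spark_session.createDataFrame([(1, 'hospital')], ['id', 'domain'])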
https://github.com/holoclean/holoclean-legacy-deprecated/blob/057bc41b5c6b2bd84dc46b7fd009bcea9fa5f7ea/holoclean/holoclean.py#L227-L257
import logging from pyspark import SparkContext, SparkConf from pyspark.sql import SQLContext import time import torch.nn.functional as F import torch from dataengine import DataEngine from dataset import Dataset from featurization.database_worker import DatabaseWorker, PopulateTensor from utils.pruning import Pruning from utils.parser_interface import ParserInterface, DenialConstraint import multiprocessing from errordetection.errordetector_wrapper import ErrorDetectorsWrapper from featurization.initfeaturizer import SignalInit from featurization.dcfeaturizer import SignalDC from featurization.cooccurrencefeaturizer import SignalCooccur from global_variables import GlobalVariables from learning.softmax import SoftMax from learning.accuracy import Accuracy arguments = [ (('-path', '--holoclean_path'), {'metavar': 'HOLOCLEAN_PATH', 'dest': 'holoclean_path', 'default': '.', 'type': str, 'help': 'Path of HoloCLean'}), (('-u', '--db_user'), {'metavar': 'DB_USER', 'dest': 'db_user', 'default': 'holocleanuser', 'type': str, 'help': 'User for DB used to persist state'}), (('-p', '--password', '--pass'), {'metavar': 'PASSWORD', 'dest': 'db_pwd', 'default': 'abcd1234', 'type': str, 'help': 'Password for DB used to persist state'}), (('-h', '--host'), {'metavar': 'HOST', 'dest': 'db_host', 'default': 'localhost', 'type': str, 'help': 'Host for DB used to persist state'}), (('-d', '--database'), {'metavar': 'DATABASE', 'dest': 'db_name', 'default': 'holo', 'type': str, 'help': 'Name of DB used to persist state'}), (('-m', '--mysql_driver'), {'metavar': 'MYSQL_DRIVER', 'dest': 'mysql_driver', 'default': 'holoclean/lib/mysql-connector-java-5.1.44-bin.jar', 'type': str, 'help': 'Path for MySQL driver'}), (('-pg', '--pg_driver'), {'metavar': 'PG_DRIVER', 'dest': 'pg_driver', 'default': 'holoclean/lib/postgresql-42.2.2.jar', 'type': str, 'help': 'Path for Postgresql PySpark driver'}), (('-s', '--spark_cluster'), {'metavar': 'SPARK', 'dest': 'spark_cluster', 'default': None, 'type': str, 'help': 'Spark cluster address'}), (('-k', '--first_k'), {'metavar': 'FIRST_K', 'dest': 'first_k', 'default': 1, 'type': int, 'help': 'The final output will show the k-first results ' '(if it is 0 it will show everything)'}), (('-l', '--learning-rate'), {'metavar': 'LEARNING_RATE', 'dest': 'learning_rate', 'default': 0.001, 'type': float, 'help': 'The learning rate holoclean will use during training'}), (('-pt1', '--pruning-threshold1'), {'metavar': 'PRUNING_THRESHOLD1', 'dest': 'pruning_threshold1', 'default': 0.0, 'type': float, 'help': 'Threshold1 used for domain pruning step'}), (('-pt2', '--pruning-threshold2'), {'metavar': 'PRUNING_THRESHOLD2', 'dest': 'pruning_threshold2', 'default': 0.1, 'type': float, 'help': 'Threshold2 used for domain pruning step'}), (('-pdb', '--pruning-dk-breakoff'), {'metavar': 'DK_BREAKOFF', 'dest': 'pruning_dk_breakoff', 'default': 5, 'type': float, 'help': 'DK breakoff used for domain pruning step'}), (('-pcb', '--pruning-clean-breakoff'), {'metavar': 'CLEAN_BREAKOFF', 'dest': 'pruning_clean_breakoff', 'default': 5, 'type': float, 'help': 'Clean breakoff used for domain pruning step'}), (('-it', '--learning-iterations'), {'metavar': 'LEARNING_ITERATIONS', 'dest': 'learning_iterations', 'default': 20, 'type': float, 'help': 'Number of iterations used for softmax'}), (('-w', '--weight_decay'), {'metavar': 'WEIGHT_DECAY', 'dest': 'weight_decay', 'default': 0.9, 'type': float, 'help': 'The L2 penalty HoloClean will use during training'}), (('-p', '--momentum'), {'metavar': 'MOMENTUM', 'dest': 
'momentum', 'default': 0.0, 'type': float, 'help': 'The momentum term in the loss function'}), (('-b', '--batch-size'), {'metavar': 'BATCH_SIZE', 'dest': 'batch_size', 'default': 1, 'type': int, 'help': 'The batch size during training'}), (('-ki', '--k-inferred'), {'metavar': 'K_INFERRED', 'dest': 'k_inferred', 'default': 1, 'type': int, 'help': 'Number of inferred values'}), (('-t', '--timing-file'), {'metavar': 'TIMING_FILE', 'dest': 'timing_file', 'default': None, 'type': str, 'help': 'File to save timing infomrmation'}), ] flags = [ (('-q', '--quiet'), {'default': False, 'dest': 'quiet', 'action': 'store_true', 'help': 'quiet'}), (tuple(['--verbose']), {'default': False, 'dest': 'verbose', 'action': 'store_true', 'help': 'verbose'}) ] class HoloClean: def __init__(self, **kwargs): arg_defaults = {} for arg, opts in arguments: if 'directory' in arg[0]: arg_defaults['directory'] = opts['default'] else: arg_defaults[opts['dest']] = opts['default'] for arg, opts in flags: arg_defaults[opts['dest']] = opts['default'] for key in kwargs: arg_defaults[key] = kwargs[key] for (arg, default) in arg_defaults.items(): setattr(self, arg, kwargs.get(arg, default)) if self.verbose: logging.basicConfig(filename="logger.log", filemode='w', level=logging.INFO) else: logging.basicConfig(filename="logger.log", filemode='w', level=logging.ERROR) self.logger = logging.getLogger("__main__") self.spark_session, self.spark_sql_ctxt = self._init_spark() self.dataengine = self._init_dataengine() self.session = {} def _init_dataengine(self): data_engine = DataEngine(self) return data_engine
Apache License 2.0
devopshq/tfs
tfs/resources.py
TFSObject.__getattr__
python
def __getattr__(self, name):
    if self.data and name in self.data.get('_links', {}):
        return self.__get_object_by_links(name)
    raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, name))
If the object does not have the attribute, try searching in `_links` and return a new TFSObject :param name: :return: mapped or unknown TFS object
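A hypothetical illustration of the `_links` dispatch this method implements; the URL and the `connection` object are invented for the sketch:

data = {'_links': {'workItemType': {'href': 'https://tfs.example.com/_apis/wit/workItemTypes/Bug'}}}
obj = TFSObject(data=data, tfs=connection)  # `connection` stands in for a real TFS client object
linked = obj.workItemType                   # resolved lazily via connection.get_tfs_resource(href)
# obj.not_a_link would raise AttributeError, as in the method above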
https://github.com/devopshq/tfs/blob/56644a36dd34457dec6922eb144c21db320f16e7/tfs/resources.py#L59-L67
import os import re from copy import deepcopy from requests.structures import CaseInsensitiveDict from six import iteritems class TFSObject(object): def __init__(self, data=None, tfs=None, uri='', underProject=None): self.tfs = tfs self._uri = uri self._underProject = underProject self.data = data self._links_attrs = [] self._data = self.data def __dir__(self): original = super(TFSObject, self).__dir__() if not self.data: return original extend = [x for x in self.data.get('_links', {}) if x in self._links_attrs] return original + extend def __get_object_by_links(self, name): links = self.data.get('_links', {}) url = links[name]['href'] return self.tfs.get_tfs_resource(url)
MIT License
dgerosa/precession
precession/precession.py
Jresonances
python
def Jresonances(r, chieff, q, chi1, chi2):
    u = eval_u(r, q)
    kappamin, kappamax = kapparesonances(u, chieff, q, chi1, chi2)
    Jmin = eval_J(kappa=kappamin, r=r, q=q)
    Jmax = eval_J(kappa=kappamax, r=r, q=q)
    return np.stack([Jmin, Jmax])
Total angular momentum of the two spin-orbit resonances. The resonances minimize and maximize J for a given value of chieff. The minimum corresponds to deltaphi=pi and the maximum corresponds to deltaphi=0.

Call
----
Jmin,Jmax = Jresonances(r,chieff,q,chi1,chi2)

Parameters
----------
r: float
    Binary separation.
chieff: float
    Effective spin.
q: float
    Mass ratio: 0<=q<=1.
chi1: float
    Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
    Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.

Returns
-------
Jmin: float
    Minimum value of the total angular momentum J.
Jmax: float
    Maximum value of the total angular momentum J.
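A hypothetical usage sketch with illustrative input values (none of these numbers come from the source):

import numpy as np

r = 10.0               # binary separation
chieff = 0.1           # effective spin
q = 0.8                # mass ratio m2/m1 <= 1
chi1, chi2 = 0.9, 0.8  # dimensionless spins
Jmin, Jmax = Jresonances(r, chieff, q, chi1, chi2)
# deltaphi = pi at Jmin and deltaphi = 0 at Jmax, per the docstring above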
https://github.com/dgerosa/precession/blob/35c9226c78b5b73a06d26cc02e5234a93c12b1c7/precession/precession.py#L888-L922
import warnings import numpy as np import scipy.special import scipy.integrate from sympy import elliptic_pi def roots_vec(p): p = np.atleast_1d(p) n = p.shape[-1] A = np.zeros(p.shape[:1] + (n-1, n-1), float) A[..., 1:, :-1] = np.eye(n-2) A[..., 0, :] = -p[..., 1:]/p[..., None, 0] return np.linalg.eigvals(A) def norm_nested(x): return np.linalg.norm(x, axis=1) def normalize_nested(x): return x/norm_nested(x)[:, None] def dot_nested(x, y): return np.einsum('ij, ij->i', x, y) def sample_unitsphere(N=1): vec = np.random.randn(3, N) vec /= np.linalg.norm(vec, axis=0) return vec.T def wraproots(coefficientfunction, *args, **kwargs): coeffs = coefficientfunction(*args, **kwargs) sols = np.sort_complex(roots_vec(coeffs.T)) sols = np.real(np.where(np.isreal(sols), sols, np.nan)) return sols @np.vectorize def ellippi(n, phi, m): return float(elliptic_pi(n, phi, m)) def rotate_zaxis(vec, angle): newx = vec[:, 0]*np.cos(angle) - vec[:, 1]*np.sin(angle) newy = vec[:, 0]*np.sin(angle) + vec[:, 1]*np.cos(angle) newz = vec[:, 2] newvec = np.transpose([newx, newy, newz]) return newvec def ismonotonic(vec, which): if which == '<': return np.all(vec[:-1] < vec[1:]) elif which == '<=': return np.all(vec[:-1] <= vec[1:]) elif which == '>': return np.all(vec[:-1] > vec[1:]) elif which == '>=': return np.all(vec[:-1] >= vec[1:]) else: raise ValueError("`which` needs to be one of the following: `>`, `>=`, `<`, `<=`.") def eval_m1(q): q = np.atleast_1d(q) m1 = 1/(1+q) return m1 def eval_m2(q): q = np.atleast_1d(q) m2 = q/(1+q) return m2 def masses(q): m1 = eval_m1(q) m2 = eval_m2(q) return np.stack([m1, m2]) def eval_q(m1, m2): m1 = np.atleast_1d(m1) m2 = np.atleast_1d(m2) q = m2/m1 assert (q < 1).all(), "The convention used in this code is q=m2/m1<1." return q def eval_eta(q): q = np.atleast_1d(q) eta = q/(1+q)**2 return eta def eval_S1(q, chi1): chi1 = np.atleast_1d(chi1) S1 = chi1*(eval_m1(q))**2 return S1 def eval_S2(q, chi2): chi2 = np.atleast_1d(chi2) S2 = chi2*(eval_m2(q))**2 return S2 def spinmags(q, chi1, chi2): S1 = eval_S1(q, chi1) S2 = eval_S2(q, chi2) return np.stack([S1, S2]) def eval_L(r, q): r = np.atleast_1d(r) L = eval_m1(q)*eval_m2(q)*r**0.5 return L def eval_v(r): r = np.atleast_1d(r) v = 1/r**0.5 return v def eval_r(L=None, u=None, q=None): if L is not None and u is None and q is not None: L = np.atleast_1d(L) m1, m2 = masses(q) r = (L / (m1 * m2))**2 elif L is None and u is not None and q is not None: u = np.atleast_1d(u) r = (2*eval_m1(q)*eval_m2(q)*u)**(-2) else: raise TypeError("Provide either (L,q) or (u,q).") return r def Jlimits_LS1S2(r, q, chi1, chi2): S1, S2 = spinmags(q, chi1, chi2) L = eval_L(r, q) Jmin = np.maximum.reduce([np.zeros(L.shape), L-S1-S2, np.abs(S1-S2)-L]) Jmax = L+S1+S2 return np.stack([Jmin, Jmax]) def kappadiscriminant_coefficients(u, chieff, q, chi1, chi2): u = np.atleast_1d(u) q = np.atleast_1d(q) chieff = np.atleast_1d(chieff) S1, S2 = spinmags(q, chi1, chi2) coeff5 = -256 * q**3 * ((1 + q))**6 * u coeff4 = 16 * q**2 * ((1 + q))**4 * (((-1 + q**2))**2 + (-16 * ((1 + q))**2 * (q * (-5 + 3 * q) * S1**2 + (3 + -5 * q) * S2**2) * u**2 + (40 * q * ((1 + q))**2 * u * chieff + 16 * q**2 * u**2 * chieff**2))) coeff3 = -32 * q * ((1 + q))**4 * (2 * q**6 * S1**2 * u * (-5 + 12 * S1**2 * u**2) + (2 * S2**2 * u * (-5 + 12 * S2**2 * u**2) + (2 * q**2 * u * (40 * S1**4 * u**2 + (-44 * S2**4 * u**2 + (8 * chieff**2 + (S1**2 * (-5 + (-8 * S2**2 * u**2 + 40 * u * chieff)) + -2 * S2**2 * (-5 + 4 * u * chieff * (1 + u * chieff)))))) + (2 * q**3 * (32 * S1**4 * u**3 + (32 * S2**4 * 
u**3 + (chieff * (-1 + 8 * u * chieff * (3 + u * chieff)) + (2 * S2**2 * u * (-1 + u * chieff * (17 + 8 * u * chieff)) + 2 * S1**2 * u * (-1 + (40 * S2**2 * u**2 + u * chieff * (17 + 8 * u * chieff))))))) + (q * (chieff + 2 * u * (S1**2 * (1 + -48 * S2**2 * u**2) + S2**2 * (1 + -2 * u * (12 * S2**2 * u + chieff)))) + (q**5 * (chieff + 2 * u * (S2**2 + S1**2 * (1 + -2 * u * (12 * (S1**2 + 2 * S2**2) * u + chieff)))) + -2 * q**4 * u * (5 * S2**2 + (44 * S1**4 * u**2 + (-8 * (5 * S2**4 * u**2 + (5 * S2**2 * u * chieff + chieff**2)) + 2 * S1**2 * (-5 + 4 * u * (chieff + u * (S2**2 + chieff**2)))))))))))) coeff2 = -16 * ((1 + q))**2 * (16 * (-1 + q) * q**3 * ((1 + q))**4 * (10 + (-8 + q) * q) * S1**6 * u**4 + (-16 * ((-1 + q))**3 * ((1 + q))**4 * S2**6 * u**4 + (-1 * ((-1 + q**2))**2 * S2**4 * u**2 * (((1 + q))**2 * (-8 + (-20 + q) * q) + (8 * (-4 + q) * q * (1 + q) * u * chieff + 16 * q**2 * u**2 * chieff**2)) + (-1 * q**2 * (((1 + q) * S2**2 * u + q * chieff))**2 * ((-1 + q) * ((1 + q))**2 * (-1 + (q + 48 * S2**2 * u**2)) + (8 * q * (1 + q) * (5 + q) * u * chieff + 16 * q**2 * u**2 * chieff**2)) + (2 * q**2 * ((1 + q))**2 * S1**4 * u**2 * ((-1 + q) * ((1 + q))**2 * ((-1 + q) * (-3 + (30 * q + 4 * q**2)) + -72 * (2 + (-2 + q) * q) * S2**2 * u**2) + (4 * q * (1 + q) * (-30 + q * (39 + q * (-19 + 4 * q))) * u * chieff + -8 * q**2 * (6 + (-6 + q) * q) * u**2 * chieff**2)) + (-4 * q * (-1 * (1 + q) * S2**2 * u + -1 * q * chieff) * (-1 * ((-1 + q))**2 * ((1 + q))**3 * S2**2 * u * (-10 + (q + 24 * S2**2 * u**2)) + (-1 * (-1 + q) * q * ((1 + q))**2 * (-1 + (q + 4 * (1 + 2 * q) * S2**2 * u**2)) * chieff + (-8 * q**2 * (1 + q) * u * (2 + (q + 2 * (-1 + q) * S2**2 * u**2)) * chieff**2 + -16 * q**3 * u**2 * chieff**3))) + (q * (1 + q) * S1**2 * ((-1 + q) * ((1 + q))**3 * (((-1 + q))**3 * q + (4 * (-1 + q) * (15 + q * (-29 + 15 * q)) * S2**2 * u**2 + 144 * (1 + 2 * (-1 + q) * q) * S2**4 * u**4)) + (2 * q * ((1 + q))**2 * u * (((-1 + q))**2 * (-3 + q * (23 + 4 * q)) + 12 * (1 + q) * (1 + q**2) * S2**2 * u**2) * chieff + (8 * q**2 * (1 + q) * u**2 * (-12 + (-2 * q + (-11 * q**2 + (q**3 + 4 * (3 + q * (-5 + 3 * q)) * S2**2 * u**2)))) * chieff**2 + -32 * q**3 * (3 + (-1 + q) * q) * u**3 * chieff**3))) + (S2**2 * (((-1 + q**2))**4 + (2 * ((-1 + q))**2 * q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (8 * (-1 + q) * q**2 * ((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 + 32 * q**3 * (-1 + q**2) * u**3 * chieff**3))) + -1 * q**2 * chieff**2 * (1 + q * (8 * u * chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u * chieff))**2)))))))))))) coeff1 = -16 * (1 + q) * (-16 * ((-1 + q))**2 * q**3 * ((1 + q))**5 * (-5 + 2 * q) * S1**8 * u**5 + (-4 * (-1 + q) * q**2 * ((1 + q))**3 * S1**6 * u**3 * ((-1 + q) * ((1 + q))**2 * (-1 + (15 * q + (4 * q**2 + 8 * (6 + (-1 + q) * q) * S2**2 * u**2))) + (2 * q * (1 + q) * (20 + q * (-29 + 12 * q)) * u * chieff + -8 * (-2 + q) * q**2 * u**2 * chieff**2)) + (-2 * q * (((1 + q) * S2**2 * u + q * chieff))**2 * (-1 * ((-1 + q))**2 * ((1 + q))**3 * S2**2 * u * (-10 + (q + 24 * S2**2 * u**2)) + (-1 * (-1 + q) * q * ((1 + q))**2 * (-1 + (q + 4 * (1 + 2 * q) * S2**2 * u**2)) * chieff + (-8 * q**2 * (1 + q) * u * (2 + (q + 2 * (-1 + q) * S2**2 * u**2)) * chieff**2 + -16 * q**3 * u**2 * chieff**3))) + (-2 * q * ((1 + q))**2 * S1**4 * u * (((-1 + q))**2 * ((1 + q))**3 * (((-1 + q))**2 * q + (2 * (15 + q * (-55 + 2 * q * (9 + 2 * q))) * S2**2 * u**2 + -72 * (1 + q**2) * S2**4 * u**4)) + ((-1 + q) * q * ((1 + q))**2 * u * (3 + (-52 * q + (33 * q**2 + (16 * q**3 + 4 * (-3 + 2 * q**2 * (-7 + 4 * q)) 
* S2**2 * u**2)))) * chieff + (-8 * q**2 * (1 + q) * u**2 * (6 + (-16 * q + (18 * q**2 + (-5 * q**3 + 2 * (-1 + q) * (3 + (-1 + q) * q) * S2**2 * u**2)))) * chieff**2 + -16 * q**3 * (3 + q * (-5 + 3 * q)) * u**3 * chieff**3))) + (S1**2 * (-32 * ((-1 + q))**2 * ((1 + q))**5 * (1 + q * (-1 + 6 * q)) * S2**6 * u**5 + (-4 * (-1 + q) * ((1 + q))**3 * S2**4 * u**3 * ((-1 + q) * ((1 + q))**2 * (4 + q * (18 + 5 * q * (-11 + 3 * q))) + (2 * q * (1 + q) * (-8 + (14 * q + 3 * q**3)) * u * chieff + 8 * q**2 * (1 + q * (-1 + 3 * q)) * u**2 * chieff**2)) + (2 * ((1 + q))**3 * S2**2 * u * (-1 * ((-1 + q))**4 * ((1 + q))**2 * (1 + (-12 + q) * q) + (-2 * q * ((-1 + q**2))**2 * (4 + q * (-7 + 4 * q)) * u * chieff + (-8 * q**2 * (1 + q * (-8 + q * (20 + (-8 + q) * q))) * u**2 * chieff**2 + 16 * (-2 + q) * q**3 * (-1 + 2 * q) * u**3 * chieff**3))) + 2 * q**2 * chieff * (-1 * ((-1 + q**2))**4 + (-1 * ((-1 + q))**2 * ((1 + q))**3 * (-1 + q * (18 + 7 * q)) * u * chieff + (4 * q * ((1 + q))**2 * (2 + q * (-5 + 19 * q)) * u**2 * chieff**2 + 16 * q**2 * (1 + q**2 * (2 + 3 * q)) * u**3 * chieff**3)))))) + -2 * (-1 * (1 + q) * S2**2 * u + -1 * q * chieff) * (16 * ((-1 + q))**3 * ((1 + q))**4 * S2**6 * u**4 + (((-1 + q**2))**2 * S2**4 * u**2 * (((1 + q))**2 * (-8 + (-20 + q) * q) + (8 * (-4 + q) * q * (1 + q) * u * chieff + 16 * q**2 * u**2 * chieff**2)) + (S2**2 * (-1 * ((-1 + q**2))**4 + (-2 * ((-1 + q))**2 * q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (-8 * (-1 + q) * q**2 * ((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 + -32 * q**3 * (-1 + q**2) * u**3 * chieff**3))) + q**2 * chieff**2 * (1 + q * (8 * u * chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u * chieff))**2)))))))))))) coeff0 = -16 * (16 * ((-1 + q))**3 * q**3 * ((1 + q))**6 * S1**10 * u**6 + (-1 * ((-1 + q))**2 * q**2 * ((1 + q))**4 * S1**8 * u**4 * (((1 + q))**2 * (1 + (-20 * q + (-8 * q**2 + 16 * (-3 + (q + 2 * q**2)) * S2**2 * u**2))) + (-8 * q * (1 + q) * (-5 + 8 * q) * u * chieff + 16 * q**2 * u**2 * chieff**2)) + ((-1 + q) * q * ((1 + q))**3 * S1**6 * u**2 * (q * ((-1 + q**2))**3 + (-4 * (-1 + q) * ((1 + q))**3 * (-5 + q * (27 + q * (-3 + 8 * q))) * S2**2 * u**2 + (16 * ((-1 + q))**2 * ((1 + q))**3 * (3 + q * (6 + q)) * S2**4 * u**4 + (-2 * (-1 + q) * q * ((1 + q))**2 * u * (1 + (-25 * q + (-12 * q**2 + 4 * (-1 + (q + 12 * q**2)) * S2**2 * u**2))) * chieff + (8 * q**2 * (1 + q) * u**2 * (4 + (-18 * q + (11 * q**2 + 4 * (-1 + q**2) * S2**2 * u**2))) * chieff**2 + 32 * (1 + -2 * q) * q**3 * u**3 * chieff**3))))) + (((1 + q))**2 * S1**4 * u * (-16 * ((-1 + q))**3 * ((1 + q))**4 * (1 + 3 * q * (2 + q)) * S2**6 * u**5 + (2 * S2**4 * u**3 * (((-1 + q))**2 * ((1 + q))**4 * (4 + q * (6 + q * (61 + (6 * q + 4 * q**2)))) + (4 * ((-1 + q))**2 * q * ((1 + q))**4 * (4 + (q + 4 * q**2)) * u * chieff + -8 * q**2 * ((-1 + q**2))**2 * (1 + q * (4 + q)) * u**2 * chieff**2)) + (chieff * (2 * ((-1 + q))**4 * q**2 * ((1 + q))**3 + (((q + -1 * q**3))**2 * (-1 + q * (40 + 23 * q)) * u * chieff + (8 * q**3 * (1 + q) * (-1 + q * (14 + 5 * (-4 + q) * q)) * u**2 * chieff**2 + -16 * q**4 * (1 + 6 * (-1 + q) * q) * u**3 * chieff**3))) + (-1 + q) * (1 + q) * S2**2 * u * (-1 * ((-1 + q**2))**3 * (-1 + 2 * q * (12 + 5 * q)) + (-2 * (-1 + q) * q * ((1 + q))**2 * (-4 + q * (29 + q * (-21 + 32 * q))) * u * chieff + (-8 * q**2 * (1 + q) * (1 + 2 * (-2 + q) * q * (1 + 4 * q)) * u**2 * chieff**2 + 32 * q**3 * (1 + q * (-1 + 3 * q)) * u**3 * chieff**3)))))) + ((1 + q) * S1**2 * (16 * ((-1 + q))**3 * ((1 + q))**5 * (2 + 3 * q) * S2**8 * u**6 + (q**2 * chieff**2 * (((-1 + q))**4 * 
((1 + q))**3 + (2 * q * (5 + 3 * q) * ((-1 + q**2))**2 * u * chieff + (-8 * q**2 * (1 + q) * (-4 + q * (7 + q)) * u**2 * chieff**2 + 32 * (1 + -2 * q) * q**3 * u**3 * chieff**3))) + ((-1 + q) * ((1 + q))**2 * S2**4 * u**2 * ((-10 + (-24 + q) * q) * ((-1 + q**2))**3 + (2 * (-1 + q) * q * ((1 + q))**2 * (-32 + q * (21 + q * (-29 + 4 * q))) * u * chieff + (8 * q**2 * (1 + q) * (8 + q * (-14 + (-4 + q) * q)) * u**2 * chieff**2 + -32 * q**3 * (3 + (-1 + q) * q) * u**3 * chieff**3))) + (S2**2 * (-1 * ((-1 + q))**6 * ((1 + q))**5 + (-10 * ((-1 + q))**4 * q * ((1 + q))**5 * u * chieff + (-2 * ((-1 + q))**2 * q**2 * ((1 + q))**3 * (11 + q * (-24 + 11 * q)) * u**2 * chieff**2 + (16 * q**3 * ((1 + q))**3 * (2 + q * (-3 + 2 * q)) * u**3 * chieff**3 + 32 * q**4 * (1 + q) * (3 + q * (-5 + 3 * q)) * u**4 * chieff**4)))) + 4 * ((-1 + q))**2 * ((1 + q))**4 * S2**6 * u**4 * (-8 + q * (-5 + (-24 * q + (-22 * q**2 + (5 * q**3 + (2 * (-4 + q) * (3 + q) * u * chieff + 8 * q * u**2 * chieff**2)))))))))) + -1 * (((1 + q) * S2**2 * u + q * chieff))**2 * (16 * ((-1 + q))**3 * ((1 + q))**4 * S2**6 * u**4 + (((-1 + q**2))**2 * S2**4 * u**2 * (((1 + q))**2 * (-8 + (-20 + q) * q) + (8 * (-4 + q) * q * (1 + q) * u * chieff + 16 * q**2 * u**2 * chieff**2)) + (S2**2 * (-1 * ((-1 + q**2))**4 + (-2 * ((-1 + q))**2 * q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (-8 * (-1 + q) * q**2 * ((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 + -32 * q**3 * (-1 + q**2) * u**3 * chieff**3))) + q**2 * chieff**2 * (1 + q * (8 * u * chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u * chieff))**2)))))))))))) return np.stack([coeff5, coeff4, coeff3, coeff2, coeff1, coeff0]) def kapparesonances(u, chieff, q, chi1, chi2): u = np.atleast_1d(u) chieff = np.atleast_1d(chieff) q = np.atleast_1d(q) chi1 = np.atleast_1d(chi1) chi2 = np.atleast_1d(chi2) kapparoots = wraproots(kappadiscriminant_coefficients, u, chieff, q, chi1, chi2) def _compute(kapparoots, u, chieff, q, chi1, chi2): kapparoots = kapparoots[np.isfinite(kapparoots)] Sroots = Satresonance(kappa=kapparoots, u=np.tile(u, kapparoots.shape), chieff=np.tile(chieff, kapparoots.shape), q=np.tile(q, kapparoots.shape), chi1=np.tile(chi1, kapparoots.shape), chi2=np.tile(chi2, kapparoots.shape)) Smin, Smax = Slimits_S1S2(np.tile(q, kapparoots.shape), np.tile(chi1, kapparoots.shape), np.tile(chi2, kapparoots.shape)) kappares = kapparoots[np.logical_and(Sroots > Smin, Sroots < Smax)] assert len(kappares) <= 2, "I found more than two resonances, this should not be possible." kappares = np.concatenate([kappares, np.repeat(np.nan, 2-len(kappares))]) return kappares kappamin, kappamax = np.array(list(map(_compute, kapparoots, u, chieff, q, chi1, chi2))).T return np.stack([kappamin, kappamax]) def kappainfresonances(chieff, q, chi1, chi2): chieff = np.atleast_1d(chieff) q = np.atleast_1d(q) S1, S2 = spinmags(q, chi1, chi2) kappainfmin = np.maximum((chieff - (q**-1-q)*S2)/(1+q), (chieff - (q**-1-q)*S1)/(1+q**-1)) kappainfmax = np.minimum((chieff + (q**-1-q)*S2)/(1+q), (chieff + (q**-1-q)*S1)/(1+q**-1)) return np.stack([kappainfmin, kappainfmax])
MIT License
radio-astro-tools/spectral-cube
spectral_cube/dask_spectral_cube.py
DaskSpectralCubeMixin.percentile
python
def percentile(self, q, axis=None, **kwargs):
    data = self._get_filled_data(fill=np.nan)
    if axis is None:
        self._warn_slow('percentile')
        return np.nanpercentile(data, q, **kwargs)
    else:
        data = data.rechunk([-1 if i == axis else 'auto' for i in range(3)])
        return self._compute(data.map_blocks(np.nanpercentile, q=q, drop_axis=axis, axis=axis, **kwargs))
Return percentiles of the data.

Parameters
----------
q : float
    The percentile to compute
axis : int, or None
    Which axis to compute percentiles over
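A hypothetical usage sketch; the file name is made up, and passing use_dask=True to SpectralCube.read to get a dask-backed cube is assumed from the package documentation rather than shown in this file:

from spectral_cube import SpectralCube

cube = SpectralCube.read('my_cube.fits', use_dask=True)   # assumed entry point for a dask-backed cube
p95_map = cube.percentile(95, axis=0)   # per-pixel 95th percentile along the spectral axis
p95_all = cube.percentile(95)           # single value over the whole cube (loads it into memory)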
https://github.com/radio-astro-tools/spectral-cube/blob/dc25a7eccd38d5f59bf35d13955e72117d68b3dd/spectral_cube/dask_spectral_cube.py#L651-L673
from __future__ import print_function, absolute_import, division import uuid import inspect import warnings import tempfile from functools import wraps from contextlib import contextmanager from astropy import units as u from astropy.io.fits import PrimaryHDU, HDUList from astropy.wcs.utils import proj_plane_pixel_area import numpy as np import dask import dask.array as da from astropy import stats from astropy import convolution from astropy import wcs from . import wcs_utils from .spectral_cube import SpectralCube, VaryingResolutionSpectralCube, SIGMA2FWHM, np2wcs from .utils import cached, VarianceWarning, SliceWarning, BeamWarning, SmoothingWarning, BeamUnitsError from .lower_dimensional_structures import Projection from .masks import BooleanArrayMask, is_broadcastable_and_smaller from .np_compat import allbadtonan __all__ = ['DaskSpectralCube', 'DaskVaryingResolutionSpectralCube'] try: from scipy import ndimage import scipy.interpolate SCIPY_INSTALLED = True except ImportError: SCIPY_INSTALLED = False try: import zarr import fsspec except ImportError: ZARR_INSTALLED = False else: ZARR_INSTALLED = True def nansum_allbadtonan(dask_array, axis=None, keepdims=None): return da.reduction(dask_array, allbadtonan(np.nansum), allbadtonan(np.nansum), axis=axis, dtype=dask_array.dtype) def ignore_warnings(function): @wraps(function) def wrapper(self, *args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter('ignore') return function(self, *args, **kwargs) return wrapper def add_save_to_tmp_dir_option(function): @wraps(function) def wrapper(self, *args, **kwargs): save_to_tmp_dir = kwargs.pop('save_to_tmp_dir', False) cube = function(self, *args, **kwargs) if save_to_tmp_dir and isinstance(cube, DaskSpectralCubeMixin): if not ZARR_INSTALLED: raise ImportError("saving the cube to a temporary directory " "requires the zarr and fsspec packages to " "be installed.") filename = tempfile.mktemp() with dask.config.set(**cube._scheduler_kwargs): cube._data.to_zarr(filename) cube._data = da.from_zarr(filename) return cube return wrapper def projection_if_needed(function): parameters = inspect.signature(function).parameters if 'projection' in parameters: default_projection = parameters['projection'].default else: default_projection = True if 'unit' in parameters: default_unit = parameters['unit'].default else: default_unit = 'self' @wraps(function) def wrapper(self, *args, **kwargs): projection = kwargs.get('projection', default_projection) unit = kwargs.get('unit', default_unit) if unit == 'self': unit = self.unit out = function(self, *args, **kwargs) axis = kwargs.get('axis') if isinstance(out, da.Array): out = self._compute(out) if axis is None: if unit is not None: return u.Quantity(out, unit=unit) else: return out elif projection and axis is not None and self._naxes_dropped(axis) in (1, 2): meta = {'collapse_axis': axis} meta.update(self._meta) if hasattr(axis, '__len__') and len(axis) == 2: if set(axis) == set((1, 2)): new_wcs = self._wcs.sub([wcs.WCSSUB_SPECTRAL]) header = self._nowcs_header if hasattr(self, '_beam') and self._beam is not None: bmarg = {'beam': self.beam} elif hasattr(self, '_beams') and self._beams is not None: bmarg = {'beams': self.unmasked_beams} else: bmarg = {} return self._oned_spectrum(value=out, wcs=new_wcs, copy=False, unit=unit, header=header, meta=meta, spectral_unit=self._spectral_unit, **bmarg ) else: warnings.warn("Averaging over a spatial and a spectral " "dimension cannot produce a Projection " "quantity (no units or WCS are preserved).", SliceWarning) 
return out else: new_wcs = wcs_utils.drop_axis(self._wcs, np2wcs[axis]) header = self._nowcs_header return Projection(out, copy=False, wcs=new_wcs, meta=meta, unit=unit, header=header) else: return out return wrapper class FilledArrayHandler: def __init__(self, cube, fill=np.nan): self._data = cube._data self._mask = cube._mask self._fill = fill self._wcs = cube._wcs self._wcs_tolerance = cube._wcs_tolerance self.shape = cube._data.shape self.dtype = cube._data.dtype self.ndim = len(self.shape) def __getitem__(self, view): if self._data[view].size == 0: return 0. else: return self._mask._filled(data=self._data, view=view, wcs=self._wcs, fill=self._fill, wcs_tolerance=self._wcs_tolerance) class MaskHandler: def __init__(self, cube): self._data = cube._data self._mask = cube.mask self.shape = cube._data.shape self.dtype = cube._data.dtype self.ndim = len(self.shape) def __getitem__(self, view): if self._data[view].size == 0: return False else: result = self._mask.include(view=view) if isinstance(result, da.Array): result = result.compute() return result class DaskSpectralCubeMixin: _scheduler_kwargs = {'scheduler': 'synchronous'} def _new_cube_with(self, *args, **kwargs): new_cube = super()._new_cube_with(*args, **kwargs) new_cube._scheduler_kwargs = self._scheduler_kwargs return new_cube @property def _data(self): return self.__data @_data.setter def _data(self, value): if not isinstance(value, da.Array): raise TypeError('_data should be set to a dask array') self.__data = value def use_dask_scheduler(self, scheduler, num_workers=None): original_scheduler_kwargs = self._scheduler_kwargs self._scheduler_kwargs = {'scheduler': scheduler} if num_workers is not None: self._scheduler_kwargs['num_workers'] = num_workers self._num_workers = num_workers class SchedulerHandler: def __init__(self, cube, original_scheduler_kwargs): self.cube = cube self.original_scheduler_kwargs = original_scheduler_kwargs def __enter__(self): pass def __exit__(self, *args): self.cube._scheduler_kwargs = self.original_scheduler_kwargs return SchedulerHandler(self, original_scheduler_kwargs) def _compute(self, array): return array.compute(**self._scheduler_kwargs) def _warn_slow(self, funcname): if self._is_huge and not self.allow_huge_operations: raise ValueError("This function ({0}) requires loading the entire " "cube into memory, and the cube is large ({1} " "pixels), so by default we disable this operation. " "To enable the operation, set " "`cube.allow_huge_operations=True` and try again." 
.format(funcname, self.size)) def _get_filled_data(self, view=(), fill=np.nan, check_endian=None, use_memmap=None): if check_endian: if not self._data.dtype.isnative: kind = str(self._data.dtype.kind) sz = str(self._data.dtype.itemsize) dt = '=' + kind + sz data = self._data.astype(dt) else: data = self._data else: data = self._data if self._mask is None: return data[view] else: return da.from_array(FilledArrayHandler(self, fill=fill), name='FilledArrayHandler ' + str(uuid.uuid4()), chunks=data.chunksize)[view] def __repr__(self): default_repr = super().__repr__() lines = default_repr.splitlines() lines[0] = lines[0][:-1] + ' and chunk size {0}:'.format(self._data.chunksize) return '\n'.join(lines) @add_save_to_tmp_dir_option def rechunk(self, chunks='auto', threshold=None, block_size_limit=None, **kwargs): newdata = self._data.rechunk(chunks=chunks, threshold=threshold, block_size_limit=block_size_limit) return self._new_cube_with(data=newdata) @add_save_to_tmp_dir_option @projection_if_needed def apply_function(self, function, axis=None, unit=None, projection=False, keep_shape=False, **kwargs): if axis is None: out = function(self.flattened(), **kwargs) if unit is not None: return u.Quantity(out, unit=unit) else: return out data = self._get_filled_data(fill=self._fill_value) if keep_shape: newdata = da.apply_along_axis(function, axis, data, shape=(self.shape[axis],)) else: newdata = da.apply_along_axis(function, axis, data) return newdata @add_save_to_tmp_dir_option @projection_if_needed def apply_numpy_function(self, function, fill=np.nan, projection=False, unit=None, check_endian=False, **kwargs): data = self._get_filled_data(fill=fill, check_endian=check_endian) if function.__module__.startswith('numpy'): return function(data, **kwargs) else: raise NotImplementedError() @add_save_to_tmp_dir_option def apply_function_parallel_spatial(self, function, accepts_chunks=False, **kwargs): if accepts_chunks: def wrapper(data_slices, **kwargs): if data_slices.size > 0: return function(data_slices, **kwargs) else: return data_slices else: def wrapper(data_slices, **kwargs): if data_slices.size > 0: out = np.zeros_like(data_slices) for index in range(data_slices.shape[0]): out[index] = function(data_slices[index], **kwargs) return out else: return data_slices return self._map_blocks_to_cube(wrapper, rechunk=('auto', -1, -1), fill=self._fill_value, **kwargs) @add_save_to_tmp_dir_option def apply_function_parallel_spectral(self, function, accepts_chunks=False, return_new_cube=True, **kwargs): _has_blockinfo = 'block_info' in inspect.signature(function).parameters if _has_blockinfo: def wrapper(data, block_info=None, **kwargs): if data.size > 0: return function(data, block_info=block_info, **kwargs) else: return data else: def wrapper(data, **kwargs): if data.size > 0: return function(data, **kwargs) else: return data if accepts_chunks: current_chunksize = self._data.chunksize if current_chunksize[0] == self.shape[0]: rechunk = None else: rechunk = (-1, 'auto', 'auto') return self._map_blocks_to_cube(wrapper, return_new_cube=return_new_cube, rechunk=rechunk, **kwargs) else: data = self._get_filled_data(fill=self._fill_value) data = data.rechunk((-1, 'auto', 'auto')) newdata = da.apply_along_axis(wrapper, 0, data, shape=(self.shape[0],)) if return_new_cube: return self._new_cube_with(data=newdata, wcs=self.wcs, mask=self.mask, meta=self.meta, fill_value=self.fill_value) else: return newdata @projection_if_needed @ignore_warnings def sum(self, axis=None, **kwargs): return 
self._compute(nansum_allbadtonan(self._get_filled_data(fill=np.nan), axis=axis, **kwargs)) @projection_if_needed @ignore_warnings def mean(self, axis=None, **kwargs): return self._compute(da.nanmean(self._get_filled_data(fill=np.nan), axis=axis, **kwargs)) @projection_if_needed @ignore_warnings def median(self, axis=None, **kwargs): data = self._get_filled_data(fill=np.nan) if axis is None: self._warn_slow('median') return np.nanmedian(self._compute(data), **kwargs) else: return self._compute(da.nanmedian(self._get_filled_data(fill=np.nan), axis=axis, **kwargs)) @projection_if_needed @ignore_warnings
BSD 3-Clause New or Revised License
awslabs/aws-deployment-framework
src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/generate_params.py
Parameters._parse
python
def _parse(filename):
    try:
        with open("{0}.json".format(filename)) as file:
            return json.load(file)
    except FileNotFoundError:
        try:
            with open("{0}.yml".format(filename)) as file:
                return yaml.load(file, Loader=yaml.FullLoader)
        except yaml.scanner.ScannerError:
            LOGGER.exception('Invalid Yaml for %s.yml', filename)
            raise
        except FileNotFoundError:
            return {'Parameters': {}, 'Tags': {}}
Attempt to parse the parameters file and return the default CloudFormation parameter base object if not found. Returning base CloudFormation parameters here since, if the user were using any other type (SC, ECS), they would require a parameter file (global.json) and thus this would not fail.
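A hypothetical illustration of the lookup behaviour: `_parse` prefers `<name>.json`, falls back to `<name>.yml`, and returns the empty base structure when neither file exists (the path is the module's own `global_path` default):

params = Parameters._parse('params/global')
# -> contents of params/global.json, else params/global.yml,
#    else {'Parameters': {}, 'Tags': {}} when neither file is present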
https://github.com/awslabs/aws-deployment-framework/blob/e152e43d99a91ac919b01790cc904f354d95eb0a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/generate_params.py#L99-L118
import json import secrets import string import os import ast import yaml import boto3 from resolver import Resolver from s3 import S3 from logger import configure_logger from parameter_store import ParameterStore LOGGER = configure_logger(__name__) DEPLOYMENT_ACCOUNT_REGION = os.environ["AWS_REGION"] SHARED_MODULES_BUCKET = os.environ["S3_BUCKET_NAME"] PROJECT_NAME = os.environ["ADF_PROJECT_NAME"] class Parameters: def __init__(self, build_name, parameter_store, s3, directory=None): self.cwd = directory or os.getcwd() self._create_params_folder() self.global_path = "params/global" self.parameter_store = parameter_store self.build_name = build_name self.s3 = s3 self.file_name = "".join( secrets.choice(string.ascii_lowercase + string.digits) for _ in range(6) ) [self.account_ous, self.regions] = self._fetch_initial_parameter() def _fetch_initial_parameter(self): return [ ast.literal_eval( self.s3.read_object("adf-parameters/deployment/{0}/account_ous.json".format(self.build_name)) ), ast.literal_eval( self.parameter_store.fetch_parameter( "/deployment/{0}/regions".format(self.build_name) ) ) ] def _create_params_folder(self): try: return os.mkdir('{0}/params'.format(self.cwd)) except FileExistsError: return None @staticmethod def _is_account_id(value): return str(value).isnumeric() def create_parameter_files(self): for account, ou in self.account_ous.items(): for region in self.regions: compare_params = {'Parameters': {}, 'Tags': {}} compare_params = self._param_updater( Parameters._parse("{0}/params/{1}".format(self.cwd, "{0}_{1}".format(account, region))), compare_params, ) compare_params = self._param_updater( Parameters._parse("{0}/params/{1}".format(self.cwd, account)), compare_params, ) if not Parameters._is_account_id(ou): compare_params = self._param_updater( Parameters._parse("{0}/params/{1}_{2}".format(self.cwd, ou, region)), compare_params ) compare_params = self._param_updater( Parameters._parse("{0}/params/{1}".format(self.cwd, ou)), compare_params ) compare_params = self._param_updater( Parameters._parse("{0}/params/global_{1}".format(self.cwd, region)), compare_params ) compare_params = self._param_updater( Parameters._parse(self.global_path), compare_params ) if compare_params is not None: self._update_params(compare_params, "{0}_{1}".format(account, region)) @staticmethod
Apache License 2.0
jrgoodle/clowder
clowder/git/git_repo.py
GitRepo.format_project_string
python
def format_project_string(self, path: Path) -> str:
    if not existing_git_repo(self.repo_path):
        return str(path)
    if not self.validate_repo():
        return f'{path}*'
    else:
        return str(path)
Return formatted project name

:param Path path: Relative project path
:return: Formatted project name
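A hypothetical usage sketch (the repository path is made up): a repo that fails validate_repo() is flagged with a trailing '*'.

from pathlib import Path

repo = GitRepo(Path('/projects/my-repo'), remote='origin')
print(repo.format_project_string(Path('my-repo')))   # 'my-repo', or 'my-repo*' if the repo fails validation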
https://github.com/jrgoodle/clowder/blob/864afacfc7122e937f7087e233c61d05fd007af2/clowder/git/git_repo.py#L196-L209
from pathlib import Path from subprocess import CalledProcessError from typing import Optional from git import Repo, GitCommandError, GitError import clowder.util.formatting as fmt from clowder.util.console import CONSOLE from clowder.util.execute import execute_command from clowder.util.file_system import remove_directory from clowder.util.logging import LOG from .git_ref import GitRef from .util import ( existing_git_repo, not_detached ) class GitRepo(object): def __init__(self, repo_path: Path, remote: str): self.repo_path = repo_path self.remote = remote self.repo = self._create_repo() if existing_git_repo(repo_path) else None def add(self, files: str) -> None: CONSOLE.stdout(' - Add files to git index') try: CONSOLE.stdout(self.repo.git.add(files)) except GitError: LOG.error("Failed to add files to git index") raise else: self.status_verbose() def checkout(self, ref: str, allow_failure: bool = False) -> None: try: CONSOLE.stdout(f' - Check out {fmt.ref(ref)}') CONSOLE.stdout(self.repo.git.checkout(ref)) except GitError: message = f'Failed to checkout {fmt.ref(ref)}' if allow_failure: CONSOLE.stdout(f' - {message}') return LOG.error(message) raise def clean(self, args: str = '') -> None: CONSOLE.stdout(' - Clean project') clean_args = '-f' if args == '' else '-f' + args self._clean(args=clean_args) CONSOLE.stdout(' - Reset project') self._reset_head() if self._is_rebase_in_progress: CONSOLE.stdout(' - Abort rebase in progress') self._abort_rebase() def commit(self, message: str) -> None: try: CONSOLE.stdout(' - Commit current changes') CONSOLE.stdout(self.repo.git.commit(message=message)) except GitError: LOG.error('Failed to commit current changes') raise @property def current_branch(self) -> str: return self.repo.head.ref.name def has_remote_branch(self, branch: str, remote: str) -> bool: try: origin = self.repo.remotes[remote] return branch in origin.refs except (GitError, IndexError) as err: LOG.debug(error=err) return False def has_local_branch(self, branch: str) -> bool: return branch in self.repo.heads def fetch(self, remote: str, ref: Optional[GitRef] = None, depth: int = 0, remove_dir: bool = False, allow_failure: bool = False) -> None: if depth == 0 or ref is None: CONSOLE.stdout(f' - Fetch from {fmt.remote(remote)}') error_message = f'Failed to fetch from remote {fmt.remote(remote)}' else: CONSOLE.stdout(f' - Fetch from {fmt.remote(remote)} {fmt.ref(ref.short_ref)}') error_message = f'Failed to fetch from {fmt.remote(remote)} {fmt.ref(ref.short_ref)}' try: if depth == 0: execute_command(f'git fetch {remote} --prune --tags', self.repo_path) elif ref is None: command = f'git fetch {remote} --depth {depth} --prune --tags' execute_command(command, self.repo_path) else: command = f'git fetch {remote} {ref.short_ref} --depth {depth} --prune --tags' execute_command(command, self.repo_path) except BaseException as err: LOG.error(error_message) if remove_dir: remove_directory(self.repo_path, check=False) if allow_failure: LOG.debug(error=err) return raise @property def formatted_ref(self) -> str: local_commits_count = self.new_commits_count() upstream_commits_count = self.new_commits_count(upstream=True) no_local_commits = local_commits_count == 0 or local_commits_count == '0' no_upstream_commits = upstream_commits_count == 0 or upstream_commits_count == '0' if no_local_commits and no_upstream_commits: status = '' else: local_commits_output = fmt.yellow(f'+{local_commits_count}') upstream_commits_output = fmt.red(f'-{upstream_commits_count}') status = 
f'({local_commits_output}/{upstream_commits_output})' if self.is_detached: return fmt.magenta(fmt.escape(f'[HEAD @ {self.sha()}]')) return fmt.magenta(fmt.escape(f'[{self.current_branch}]')) + status
MIT License
facebookincubator/reindeer
build/fbcode_builder/shell_quoting.py
shell_join
python
def shell_join(delim, it):
    return ShellQuoted(delim.join(raw_shell(s) for s in it))
Joins an iterable of ShellQuoted with a delimiter between each pair of elements
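A usage sketch built only from helpers defined in this module; the argument values are illustrative:

parts = [shell_quote('a b'), shell_quote("it's")]
cmd = shell_join(' ', parts)
print(raw_shell(cmd))   # 'a b' 'it'\''s'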
https://github.com/facebookincubator/reindeer/blob/d3a70b069cd6774f2be374fa19bea68a3cb6142c/build/fbcode_builder/shell_quoting.py#L87-L89
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os from collections import namedtuple class ShellQuoted(namedtuple("ShellQuoted", ("do_not_use_raw_str",))): def __new__(cls, s): return super(ShellQuoted, cls).__new__( cls, s.do_not_use_raw_str if isinstance(s, ShellQuoted) else s ) def __str__(self): raise RuntimeError( "One does not simply convert {0} to a string -- use path_join() " "or ShellQuoted.format() instead".format(repr(self)) ) def __repr__(self): return "{0}({1})".format(self.__class__.__name__, repr(self.do_not_use_raw_str)) def format(self, **kwargs): return ShellQuoted( self.do_not_use_raw_str.format( **dict( (k, shell_quote(v).do_not_use_raw_str) for k, v in kwargs.items() ) ) ) def shell_quote(s): return ( s if isinstance(s, ShellQuoted) else ShellQuoted("'" + str(s).replace("'", "'\\''") + "'") ) def raw_shell(s): if isinstance(s, ShellQuoted): return s.do_not_use_raw_str raise RuntimeError("{0} should have been ShellQuoted".format(s))
MIT License
google-research/long-range-arena
lra_benchmarks/models/synthesizer/synthesizer.py
SynthesizerDecoder.apply
python
def apply(self,
          inputs,
          vocab_size,
          emb_dim=512,
          num_heads=8,
          num_layers=6,
          qkv_dim=512,
          mlp_dim=2048,
          max_len=2048,
          train=False,
          shift=True,
          dropout_rate=0.1,
          attention_dropout_rate=0.1,
          cache=None,
          max_length=512,
          ignore_dot_product=False,
          synthesizer_mode='random'):
  padding_mask = jnp.where(inputs > 0, 1, 0).astype(jnp.float32)[..., None]
  assert inputs.ndim == 2
  x = inputs
  if shift:
    x = common_layers.shift_right(x)
  x = x.astype('int32')
  x = common_layers.Embed(x, num_embeddings=vocab_size, features=emb_dim, name='embed')
  x = common_layers.AddPositionEmbs(
      x,
      max_len=max_len,
      posemb_init=common_layers.sinusoidal_init(max_len=max_len),
      cache=cache)
  x = nn.dropout(x, rate=dropout_rate, deterministic=not train)
  for _ in range(num_layers):
    x = SynthesizerBlock(
        x,
        qkv_dim=qkv_dim,
        mlp_dim=mlp_dim,
        num_heads=num_heads,
        causal_mask=True,
        padding_mask=padding_mask,
        dropout_rate=dropout_rate,
        attention_dropout_rate=attention_dropout_rate,
        deterministic=not train,
        cache=cache,
        max_length=max_length,
        ignore_dot_product=ignore_dot_product,
        synthesizer_mode=synthesizer_mode)
  x = nn.LayerNorm(x)
  logits = nn.Dense(
      x,
      vocab_size,
      kernel_init=nn.initializers.xavier_uniform(),
      bias_init=nn.initializers.normal(stddev=1e-6))
  return logits
Applies Synthesizer model on the inputs.

Args:
  inputs: input data
  vocab_size: size of the vocabulary
  emb_dim: dimension of embedding
  num_heads: number of heads
  num_layers: number of layers
  qkv_dim: dimension of the query/key/value
  mlp_dim: dimension of the mlp on top of attention block
  max_len: maximum length.
  train: bool: if model is training.
  shift: bool: if we right-shift input - this is only disabled for fast, looped single-token autoregressive decoding.
  dropout_rate: dropout rate
  attention_dropout_rate: dropout rate for attention weights
  cache: flax autoregressive cache for fast decoding.
  max_length: int, the maximum supported sequence length.
  ignore_dot_product: bool, to ignore the dot product attention or not.
  synthesizer_mode: str, supports 'dense', 'random', or 'dense+random'
Returns:
  output of a transformer decoder.
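The `shift` flag above right-shifts the targets for teacher forcing; a minimal stand-in illustration in plain JAX (common_layers.shift_right itself is not shown in this file, so this is an assumed equivalent):

import jax.numpy as jnp

tokens = jnp.array([[5, 7, 9, 2]])                   # (batch, seq_len) token ids
shifted = jnp.pad(tokens, ((0, 0), (1, 0)))[:, :-1]  # [[0, 5, 7, 9]]: position t now only sees tokens < t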
https://github.com/google-research/long-range-arena/blob/09c2916c3f33a07347dcc70c8839957d3c9d4062/lra_benchmarks/models/synthesizer/synthesizer.py#L332-L408
from flax import nn import jax.numpy as jnp from lra_benchmarks.models.layers import common_layers from lra_benchmarks.models.synthesizer import synthesizer_attention class SynthesizerBlock(nn.Module): def apply(self, inputs, qkv_dim, mlp_dim, num_heads, dtype=jnp.float32, inputs_segmentation=None, causal_mask=False, padding_mask=None, dropout_rate=0.1, attention_dropout_rate=0.1, deterministic=False, cache=None, max_length=512, ignore_dot_product=False, synthesizer_mode='random'): assert inputs.ndim == 3 x = nn.LayerNorm(inputs) x = synthesizer_attention.SynthesizerSelfAttention( x, num_heads=num_heads, qkv_features=qkv_dim, attention_axis=(1,), causal_mask=causal_mask, padding_mask=padding_mask, kernel_init=nn.initializers.xavier_uniform(), bias_init=nn.initializers.normal(stddev=1e-6), bias=False, broadcast_dropout=False, dropout_rate=attention_dropout_rate, deterministic=deterministic, cache=cache, max_length=max_length, ignore_dot_product=ignore_dot_product, synthesizer_mode=synthesizer_mode) x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic) x = x + inputs y = nn.LayerNorm(x) y = common_layers.MlpBlock( y, mlp_dim=mlp_dim, dropout_rate=dropout_rate, deterministic=deterministic) return x + y class SynthesizerEncoder(nn.Module): def apply(self, inputs, vocab_size, inputs_positions=None, inputs_segmentation=None, shared_embedding=None, use_bfloat16=False, emb_dim=512, num_heads=8, num_layers=6, qkv_dim=512, mlp_dim=2048, max_len=512, train=True, dropout_rate=0.1, attention_dropout_rate=0.1, ignore_dot_product=False, synthesizer_mode='random', learn_pos_emb=False, classifier=False, classifier_pool='CLS', num_classes=10, tied_weights=False, k=32): assert inputs.ndim == 2 src_padding_mask = (inputs > 0)[..., None] if shared_embedding is None: input_embed = nn.Embed.partial( num_embeddings=vocab_size, features=emb_dim, embedding_init=nn.initializers.normal(stddev=1.0)) else: input_embed = shared_embedding x = inputs.astype('int32') x = input_embed(x) if classifier and classifier_pool == 'CLS': cls = self.param('cls', (1, 1, emb_dim), nn.initializers.zeros) cls = jnp.tile(cls, [x.shape[0], 1, 1]) x = jnp.concatenate([cls, x], axis=1) max_len += 1 src_padding_mask = jnp.concatenate( [src_padding_mask[:, :1], src_padding_mask], axis=1) pe_init = nn.initializers.normal(stddev=0.02) if learn_pos_emb else None x = common_layers.AddPositionEmbs( x, inputs_positions=inputs_positions, posemb_init=pe_init, max_len=max_len, name='posembed_input') x = nn.dropout(x, rate=dropout_rate, deterministic=not train) if use_bfloat16: x = x.astype(jnp.bfloat16) dtype = jnp.bfloat16 else: dtype = jnp.float32 if tied_weights: encoder = SynthesizerBlock.shared( qkv_dim=qkv_dim, mlp_dim=mlp_dim, num_heads=num_heads, dtype=dtype, padding_mask=src_padding_mask, inputs_segmentation=inputs_segmentation, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, deterministic=not train, name='encoderblock', max_length=max_len, ignore_dot_product=ignore_dot_product, synthesizer_mode=synthesizer_mode) for lyr in range(num_layers): x = encoder(x) else: for lyr in range(num_layers): x = SynthesizerBlock( x, qkv_dim=qkv_dim, mlp_dim=mlp_dim, num_heads=num_heads, dtype=dtype, padding_mask=src_padding_mask, inputs_segmentation=inputs_segmentation, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, deterministic=not train, name=f'encoderblock_{lyr}', max_length=max_len, ignore_dot_product=ignore_dot_product, synthesizer_mode=synthesizer_mode) encoded = nn.LayerNorm(x, 
dtype=dtype, name='encoder_norm') if classifier: encoded = common_layers.classifier_head( encoded, num_classes, mlp_dim, pooling_mode=classifier_pool) return encoded class SynthesizerDualEncoder(nn.Module): def apply(self, inputs1, inputs2, vocab_size=None, inputs1_positions=None, inputs2_positions=None, inputs1_segmentation=None, inputs2_segmentation=None, use_bfloat16=False, emb_dim=512, num_heads=8, num_layers=6, qkv_dim=512, mlp_dim=2048, max_len=2048, train=False, dropout_rate=0.1, attention_dropout_rate=0.1, classifier=True, classifier_pool='CLS', num_classes=2, interaction=None, tied_weights=False): encoder = SynthesizerEncoder.shared( inputs_positions=inputs1_positions, inputs_segmentation=inputs1_segmentation, vocab_size=vocab_size, use_bfloat16=use_bfloat16, emb_dim=emb_dim, num_heads=num_heads, num_layers=num_layers, qkv_dim=qkv_dim, mlp_dim=mlp_dim, max_len=max_len, train=train, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, name='encoder', tied_weights=tied_weights) inputs1_encoded = encoder(inputs1) inputs2_encoded = encoder(inputs2) encoded = common_layers.classifier_head_dual( inputs1_encoded, inputs2_encoded, num_classes, mlp_dim, pooling_mode=classifier_pool, interaction=interaction) return encoded class SynthesizerDecoder(nn.Module):
Apache License 2.0
netket/netket
netket/utils/struct/dataclass.py
purge_cache_fields
python
def purge_cache_fields(clz):
    flds = getattr(clz, _FIELDS, None)
    if flds is not None:
        caches = getattr(clz, _CACHES)
        for name, _ in caches.items():
            cname = _cache_name(name)
            if cname in flds:
                flds.pop(cname)
Removes the cache fields generated by netket dataclass from the dataclass mechanism.
https://github.com/netket/netket/blob/74248a39e86bb501eaf6822e76107c4926321f80/netket/utils/struct/dataclass.py#L261-L271
from functools import partial import dataclasses from dataclasses import MISSING from flax import serialization import jax from .utils import _set_new_attribute, _create_fn, get_class_globals try: from dataclasses import _FIELDS except ImportError: _FIELDS = "__dataclass_fields__" _CACHES = "__dataclass_caches__" PRECOMPUTE_CACHED_PROPERTY_NAME = "_precompute_cached_properties" HASH_COMPUTE_NAME = "__dataclass_compute_hash__" _PRE_INIT_NAME = "__pre_init__" _DATACLASS_INIT_NAME = "__init_dataclass__" def _cache_name(property_name): return "__" + property_name + "_cache" def _hash_cache_name(class_name): return "__" + class_name + "_hash_cache" def _compute_cache_name(property_name): return "__" + property_name class _Uninitialized: def __repr__(self): return "Uninitialized" Uninitialized = _Uninitialized() jax.tree_util.register_pytree_node( _Uninitialized, lambda x: ((), Uninitialized), lambda *args: Uninitialized ) def field(pytree_node=True, serialize=True, cache=False, **kwargs): return dataclasses.field( metadata={"pytree_node": pytree_node, "serialize": serialize, "cache": cache}, **kwargs, ) class CachedProperty: def __init__(self, method, pytree_node=False): self.name = method.__name__ self.cache_name = _cache_name(self.name) self.method = method self.pytree_node = pytree_node self.type = method.__annotations__.get("return", MISSING) self.doc = method.__doc__ if self.type is MISSING: raise TypeError( "Cached property {method} requires a return type annotation." ) def __repr__(self): return ( f"CachedProperty(name={self.name}, " f"type={self.type}, pytree_node={self.pytree_node})" ) def property_cached(fun=None, pytree_node=False): if fun is None: return partial(property_cached, pytree_node=pytree_node) return CachedProperty(fun, pytree_node=pytree_node) def _set_annotation(clz, attr, typ): if "__annotations__" not in clz.__dict__: setattr(clz, "__annotations__", {}) if not hasattr(clz, attr): raise ValueError(f"Setting annotation for inexistant attribute {attr}") clz.__annotations__[attr] = typ def process_cached_properties(clz, globals=None): if globals is None: globals = {} cached_props = {} self_name = "self" for name, field_info in clz.__dict__.items(): if isinstance(field_info, CachedProperty): cached_props[name] = field_info for name, cp in cached_props.items(): _set_new_attribute(clz, _compute_cache_name(name), cp.method) for name, cp in cached_props.items(): cache_name = _cache_name(name) compute_name = _compute_cache_name(name) body_lines = [ f"if {self_name}.{cache_name} is Uninitialized:", f"\tBUILTINS.object.__setattr__({self_name},{cache_name!r},self.{compute_name}())", "", f"return {self_name}.{cache_name}", ] fun = _create_fn( name, [self_name], body_lines, return_type=cp.type, globals=globals, doc=cp.doc, ) prop_fun = property(fun) setattr(clz, name, prop_fun) for b in clz.__mro__[1:]: for (name, cp) in getattr(b, _CACHES, {}).items(): if name not in cached_props: cached_props[name] = cp for name, cp in cached_props.items(): cache_name = _cache_name(name) _cache = field( pytree_node=cp.pytree_node, serialize=False, cache=True, default=Uninitialized, repr=False, hash=False, init=True, compare=False, ) _set_new_attribute(clz, cache_name, _cache) _set_annotation(clz, cache_name, cp.type) _precompute_body_method = [] for name in cached_props.keys(): _precompute_body_method.append(f"{self_name}.{name}") if len(_precompute_body_method) == 0: _precompute_body_method.append("pass") fun = _create_fn( PRECOMPUTE_CACHED_PROPERTY_NAME, [self_name], _precompute_body_method, 
globals=globals, doc="Precompute the value of all cached properties", ) setattr(clz, PRECOMPUTE_CACHED_PROPERTY_NAME, fun) setattr(clz, _CACHES, cached_props)
Apache License 2.0
kubevirt/client-python
kubevirt/models/v1_developer_configuration.py
V1DeveloperConfiguration.feature_gates
python
def feature_gates(self):
    return self._feature_gates
Gets the feature_gates of this V1DeveloperConfiguration. :return: The feature_gates of this V1DeveloperConfiguration. :rtype: list[str]
https://github.com/kubevirt/client-python/blob/c9f9d3bc429f783076982b46b194d5f7669eab1b/kubevirt/models/v1_developer_configuration.py#L139-L146
from pprint import pformat from six import iteritems import re class V1DeveloperConfiguration(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'cpu_allocation_ratio': 'int', 'disk_verification': 'V1DiskVerification', 'feature_gates': 'list[str]', 'log_verbosity': 'V1LogVerbosity', 'memory_overcommit': 'int', 'minimum_cluster_tsc_frequency': 'int', 'minimum_reserve_pvc_bytes': 'int', 'node_selectors': 'dict(str, str)', 'pvc_tolerate_less_space_up_to_percent': 'int', 'use_emulation': 'bool' } attribute_map = { 'cpu_allocation_ratio': 'cpuAllocationRatio', 'disk_verification': 'diskVerification', 'feature_gates': 'featureGates', 'log_verbosity': 'logVerbosity', 'memory_overcommit': 'memoryOvercommit', 'minimum_cluster_tsc_frequency': 'minimumClusterTSCFrequency', 'minimum_reserve_pvc_bytes': 'minimumReservePVCBytes', 'node_selectors': 'nodeSelectors', 'pvc_tolerate_less_space_up_to_percent': 'pvcTolerateLessSpaceUpToPercent', 'use_emulation': 'useEmulation' } def __init__(self, cpu_allocation_ratio=None, disk_verification=None, feature_gates=None, log_verbosity=None, memory_overcommit=None, minimum_cluster_tsc_frequency=None, minimum_reserve_pvc_bytes=None, node_selectors=None, pvc_tolerate_less_space_up_to_percent=None, use_emulation=None): self._cpu_allocation_ratio = None self._disk_verification = None self._feature_gates = None self._log_verbosity = None self._memory_overcommit = None self._minimum_cluster_tsc_frequency = None self._minimum_reserve_pvc_bytes = None self._node_selectors = None self._pvc_tolerate_less_space_up_to_percent = None self._use_emulation = None if cpu_allocation_ratio is not None: self.cpu_allocation_ratio = cpu_allocation_ratio if disk_verification is not None: self.disk_verification = disk_verification if feature_gates is not None: self.feature_gates = feature_gates if log_verbosity is not None: self.log_verbosity = log_verbosity if memory_overcommit is not None: self.memory_overcommit = memory_overcommit if minimum_cluster_tsc_frequency is not None: self.minimum_cluster_tsc_frequency = minimum_cluster_tsc_frequency if minimum_reserve_pvc_bytes is not None: self.minimum_reserve_pvc_bytes = minimum_reserve_pvc_bytes if node_selectors is not None: self.node_selectors = node_selectors if pvc_tolerate_less_space_up_to_percent is not None: self.pvc_tolerate_less_space_up_to_percent = pvc_tolerate_less_space_up_to_percent if use_emulation is not None: self.use_emulation = use_emulation @property def cpu_allocation_ratio(self): return self._cpu_allocation_ratio @cpu_allocation_ratio.setter def cpu_allocation_ratio(self, cpu_allocation_ratio): self._cpu_allocation_ratio = cpu_allocation_ratio @property def disk_verification(self): return self._disk_verification @disk_verification.setter def disk_verification(self, disk_verification): self._disk_verification = disk_verification @property
Apache License 2.0
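A brief usage sketch for the feature_gates getter (my own, not from the kubevirt repository; the import path is assumed from the file path above, and the constructor keyword comes from the __init__ shown in the context):

# Hypothetical usage; module path assumed from kubevirt/models/v1_developer_configuration.py
from kubevirt.models.v1_developer_configuration import V1DeveloperConfiguration

config = V1DeveloperConfiguration(feature_gates=["Snapshot", "HotplugVolumes"])
print(config.feature_gates)  # ['Snapshot', 'HotplugVolumes']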
biorack/metatlas
metatlas/datastructures/metatlas_dataset.py
AnalysisIdentifiers.atlas
python
def atlas(self) -> AtlasName: return AtlasName( f"{'_'.join(self._exp_tokens[3:6])}_{self.output_type}_{self.short_polarity}_{self.analysis}" )
Atlas identifier (name)
https://github.com/biorack/metatlas/blob/d20f39a8d9f0c45db379d685cb6c96775f660ba8/metatlas/datastructures/metatlas_dataset.py#L230-L234
import datetime import getpass import glob import logging import os import pickle import shutil import uuid from pathlib import Path from typing import cast, Any, Dict, List, NewType, Optional, Tuple, TypedDict, Union import humanize import pandas as pd import traitlets from traitlets import TraitError, default, observe, validate from traitlets import Bool, Float, HasTraits, Instance, Int, TraitType, Unicode from traitlets.traitlets import ObserveHandler from metatlas.datastructures import metatlas_objects as metob from metatlas.datastructures import object_helpers as metoh from metatlas.io import metatlas_get_data_helper_fun as ma_data from metatlas.io import targeted_output from metatlas.io import write_utils from metatlas.plots import dill2plots as dp from metatlas.tools import parallel from metatlas.tools.util import or_default GroupList = Optional[List[metob.Group]] LcmsRunsList = Optional[List[metob.LcmsRun]] FileMatchList = List[str] GroupMatchList = List[str] Polarity = NewType("Polarity", str) ShortPolarity = NewType("ShortPolarity", str) Experiment = NewType("Experiment", str) OutputType = NewType("OutputType", str) AnalysisNumber = NewType("AnalysisNumber", int) AtlasName = NewType("AtlasName", str) PathString = NewType("PathString", str) Username = NewType("Username", str) MSMS_REFS_PATH = PathString( "/global/project/projectdirs/metatlas/projects/spectral_libraries/msms_refs_v3.tab" ) DEFAULT_GROUPS_CONTROLLED_VOCAB = cast(GroupMatchList, ["QC", "InjBl", "ISTD"]) OUTPUT_TYPES = [OutputType("ISTDsEtc"), OutputType("FinalEMA-HILIC"), OutputType("data_QC")] POLARITIES = [Polarity("positive"), Polarity("negative"), Polarity("fast-polarity-switching")] SHORT_POLARITIES = { Polarity("positive"): ShortPolarity("POS"), Polarity("negative"): ShortPolarity("NEG"), Polarity("fast-polarity-switching"): ShortPolarity("FPS"), } logger = logging.getLogger(__name__) class Proposal(TypedDict): owner: HasTraits value: object trait: TraitType class _LcmsRunDict(TypedDict): object: metob.LcmsRun group: str short_name: str class MsSummary(TypedDict): num_ms1_datapoints: int mz_peak: float rt_peak: float mz_centroid: float rt_centroid: float peak_height: float peak_area: float class Eic(TypedDict): mz: List[float] rt: List[float] intensity: List[float] class MsmsDataDict(TypedDict): mz: List[float] i: List[float] rt: List[float] polarity: List[float] precursor_MZ: List[float] precursor_intensity: List[float] collision_energy: List[float] class MsmsDict(TypedDict): data: MsmsDataDict class MsDataDict(TypedDict): msms: MsmsDict eic: Eic ms1_summary: MsSummary class CompoundDict(TypedDict): atlas_name: AtlasName atlas_unique_id: str lcmsrun: metob.LcmsRun group: metob.Group identification: metob.CompoundIdentification data: MsDataDict class AnalysisIdentifiers(HasTraits): source_atlas: Optional[AtlasName] = Unicode(allow_none=True, default_value=None) experiment: Experiment = Unicode() output_type: OutputType = Unicode() polarity: Polarity = Unicode(default_value="positive") analysis_number: AnalysisNumber = Int(default_value=0) username: Username = Unicode(default_value=getpass.getuser()) project_directory: PathString = Unicode() google_folder: str = Unicode() exclude_files: FileMatchList = traitlets.List(trait=Unicode(), default_value=[]) include_groups: GroupMatchList = traitlets.List() exclude_groups: GroupMatchList = traitlets.List() groups_controlled_vocab: GroupMatchList = traitlets.List( trait=Unicode(), default_value=DEFAULT_GROUPS_CONTROLLED_VOCAB ) _lcmsruns: LcmsRunsList = 
traitlets.List(allow_none=True, default_value=None) _all_groups: GroupList = traitlets.List(allow_none=True, default_value=None) _groups: GroupList = traitlets.List(allow_none=True, default_value=None) def __init__(self, **kwargs) -> None: super().__init__(**kwargs) self.exclude_groups = append_inverse(self.exclude_groups, self.polarity) logger.info( "IDs: source_atlas=%s, atlas=%s, short_experiment_analysis=%s, output_dir=%s", self.source_atlas, self.atlas, self.short_experiment_analysis, self.output_dir, ) self.store_all_groups(exist_ok=True) @default("include_groups") def _default_include_groups(self) -> List[OutputType]: if self.output_type == "data_QC": return [OutputType("QC")] return [] @default("exclude_groups") def _default_exclude_groups(self) -> GroupMatchList: out: GroupMatchList = ["InjBl", "InjBL"] if self.output_type != "data_QC": out.append("QC") return append_inverse(out, self.polarity) @validate("polarity") def _valid_polarity(self, proposal: Proposal) -> Polarity: if proposal["value"] not in POLARITIES: raise TraitError(f"Parameter polarity must be one of {', '.join(POLARITIES)}") return cast(Polarity, proposal["value"]) @validate("output_type") def _valid_output_type(self, proposal: Proposal) -> OutputType: if proposal["value"] not in OUTPUT_TYPES: raise TraitError(f"Parameter output_type must be one of {', '.join(OUTPUT_TYPES)}") return cast(OutputType, proposal["value"]) @validate("source_atlas") def _valid_source_atlas(self, proposal: Proposal) -> Optional[AtlasName]: if proposal["value"] is not None: proposed_name = cast(AtlasName, proposal["value"]) try: get_atlas(proposed_name, cast(Username, "*")) except ValueError as err: raise TraitError(str(err)) from err return proposed_name return None @validate("analysis_number") def _valid_analysis_number(self, proposal: Proposal) -> AnalysisNumber: value = cast(AnalysisNumber, proposal["value"]) if value < 0: raise TraitError("Parameter analysis_number cannot be negative.") return value @validate("experiment") def _valid_experiment(self, proposal: Proposal) -> Experiment: value = cast(str, proposal["value"]) if len(value.split("_")) != 9: raise TraitError('Parameter experiment does contain 9 fields when split on "_".') return cast(Experiment, value) @property def _exp_tokens(self) -> List[str]: return self.experiment.split("_") @property def project(self) -> int: return int(self._exp_tokens[3]) @property
BSD 3-Clause New or Revised License
montrealcorpustools/polyglotdb
polyglotdb/io/parsers/base.py
BaseParser.match_extension
python
def match_extension(self, filename):
    for x in self._extensions:
        if filename.lower().endswith(x):
            break
    else:
        return False
    return True
Checks that the filename ends with an acceptable extension.

Parameters
----------
filename : str
    The filename of the file being checked.

Returns
-------
boolean
    True if the filename has an acceptable extension, False otherwise.
https://github.com/montrealcorpustools/polyglotdb/blob/f16bd14d847eda6184b897edfa6e9587af7f96be/polyglotdb/io/parsers/base.py#L42-L61
from ..types.standardized import PGAnnotation, PGSubAnnotation, PGAnnotationType from ..types.parsing import Tobi, BreakIndex from ..discoursedata import DiscourseData from ...exceptions import ParseError class BaseParser(object): _extensions = ['.txt'] def __init__(self, annotation_tiers, hierarchy, make_transcription=True, make_label=False, stop_check=None, call_back=None): self.speaker_parser = None self.annotation_tiers = annotation_tiers self.hierarchy = hierarchy self.make_transcription = make_transcription self.make_label = make_label self.stop_check = stop_check self.call_back = call_back
MIT License
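A minimal sketch of match_extension in action, assuming the BaseParser defaults shown in the context (its _extensions list is ['.txt']); the filenames are made up:

# Illustrative only; BaseParser comes from polyglotdb/io/parsers/base.py above
parser = BaseParser(annotation_tiers=[], hierarchy=None)
parser.match_extension("session_01.TXT")  # True: comparison is case-insensitive
parser.match_extension("session_01.wav")  # False: no acceptable extension matched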
joelgrus/data-science-from-scratch
scratch/decision_trees.py
classify
python
def classify(tree: DecisionTree, input: Any) -> Any:
    if isinstance(tree, Leaf):
        return tree.value

    subtree_key = getattr(input, tree.attribute)

    if subtree_key not in tree.subtrees:
        return tree.default_value

    subtree = tree.subtrees[subtree_key]
    return classify(subtree, input)
classify the input using the given decision tree
https://github.com/joelgrus/data-science-from-scratch/blob/d5d0f117f41b3ccab3b07f1ee1fa21cfcf69afa1/scratch/decision_trees.py#L126-L142
from typing import List import math def entropy(class_probabilities: List[float]) -> float: return sum(-p * math.log(p, 2) for p in class_probabilities if p > 0) assert entropy([1.0]) == 0 assert entropy([0.5, 0.5]) == 1 assert 0.81 < entropy([0.25, 0.75]) < 0.82 from typing import Any from collections import Counter def class_probabilities(labels: List[Any]) -> List[float]: total_count = len(labels) return [count / total_count for count in Counter(labels).values()] def data_entropy(labels: List[Any]) -> float: return entropy(class_probabilities(labels)) assert data_entropy(['a']) == 0 assert data_entropy([True, False]) == 1 assert data_entropy([3, 4, 4, 4]) == entropy([0.25, 0.75]) def partition_entropy(subsets: List[List[Any]]) -> float: total_count = sum(len(subset) for subset in subsets) return sum(data_entropy(subset) * len(subset) / total_count for subset in subsets) from typing import NamedTuple, Optional class Candidate(NamedTuple): level: str lang: str tweets: bool phd: bool did_well: Optional[bool] = None inputs = [Candidate('Senior', 'Java', False, False, False), Candidate('Senior', 'Java', False, True, False), Candidate('Mid', 'Python', False, False, True), Candidate('Junior', 'Python', False, False, True), Candidate('Junior', 'R', True, False, True), Candidate('Junior', 'R', True, True, False), Candidate('Mid', 'R', True, True, True), Candidate('Senior', 'Python', False, False, False), Candidate('Senior', 'R', True, False, True), Candidate('Junior', 'Python', True, False, True), Candidate('Senior', 'Python', True, True, True), Candidate('Mid', 'Python', False, True, True), Candidate('Mid', 'Java', True, False, True), Candidate('Junior', 'Python', False, True, False) ] from typing import Dict, TypeVar from collections import defaultdict T = TypeVar('T') def partition_by(inputs: List[T], attribute: str) -> Dict[Any, List[T]]: partitions: Dict[Any, List[T]] = defaultdict(list) for input in inputs: key = getattr(input, attribute) partitions[key].append(input) return partitions def partition_entropy_by(inputs: List[Any], attribute: str, label_attribute: str) -> float: partitions = partition_by(inputs, attribute) labels = [[getattr(input, label_attribute) for input in partition] for partition in partitions.values()] return partition_entropy(labels) for key in ['level','lang','tweets','phd']: print(key, partition_entropy_by(inputs, key, 'did_well')) assert 0.69 < partition_entropy_by(inputs, 'level', 'did_well') < 0.70 assert 0.86 < partition_entropy_by(inputs, 'lang', 'did_well') < 0.87 assert 0.78 < partition_entropy_by(inputs, 'tweets', 'did_well') < 0.79 assert 0.89 < partition_entropy_by(inputs, 'phd', 'did_well') < 0.90 senior_inputs = [input for input in inputs if input.level == 'Senior'] assert 0.4 == partition_entropy_by(senior_inputs, 'lang', 'did_well') assert 0.0 == partition_entropy_by(senior_inputs, 'tweets', 'did_well') assert 0.95 < partition_entropy_by(senior_inputs, 'phd', 'did_well') < 0.96 from typing import NamedTuple, Union, Any class Leaf(NamedTuple): value: Any class Split(NamedTuple): attribute: str subtrees: dict default_value: Any = None DecisionTree = Union[Leaf, Split] hiring_tree = Split('level', { 'Junior': Split('phd', { False: Leaf(True), True: Leaf(False) }), 'Mid': Leaf(True), 'Senior': Split('tweets', { False: Leaf(False), True: Leaf(True) }) })
MIT License
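A short usage sketch based on the Candidate type and hiring_tree built in the context above; the candidate values are arbitrary:

# classify() walks the tree one attribute at a time
john = Candidate("Junior", "Java", True, False)
classify(hiring_tree, john)
# level='Junior' -> phd subtree, phd=False -> Leaf(True), so the result is True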
tensorflow/decision-forests
tensorflow_decision_forests/tensorflow/ops/inference/api.py
_InferenceArgsBuilder._all_feature_idxs
python
def _all_feature_idxs(feature_maps: FeatureMaps):
    idxs = []
    for field_name in feature_maps._fields:
        idxs.extend(getattr(feature_maps, field_name).keys())
    return idxs
Lists all the input feature indices.
https://github.com/tensorflow/decision-forests/blob/d530fd85eccc79b47712b8728c9c06f263191a9a/tensorflow_decision_forests/tensorflow/ops/inference/api.py#L614-L619
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import os from typing import Text, Dict, List, Any, Optional import uuid from absl import logging import six import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.training.tracking import base as trackable_base from tensorflow.python.training.tracking import tracking from tensorflow_decision_forests.tensorflow.ops.inference import op from yggdrasil_decision_forests.dataset import data_spec_pb2 from yggdrasil_decision_forests.model import abstract_model_pb2 from tensorflow.python.framework import load_library from tensorflow.python.platform import resource_loader tf.load_op_library(resource_loader.get_path_to_datafile("inference.so")) Tensor = Any InitOp = Tensor Task = abstract_model_pb2.Task ColumnType = data_spec_pb2.ColumnType ModelOutput = collections.namedtuple( "ModelOutput", [ "dense_predictions", "dense_col_representation", ]) MISSING_NON_INTEGERIZED_CATEGORICAL_STORED_AS_INT = 0x7FFFFFFF - 2 class Model(object): def __init__(self, model_path: Text, tensor_model_path: Optional[Tensor] = None, verbose: Optional[bool] = True): self._verbose: Optional[bool] = verbose if self._verbose: logging.info("Create inference model for %s", model_path) self.model_identifier = _create_model_identifier() self.input_builder = _InferenceArgsBuilder(verbose) self.input_builder.build_from_model_path(model_path) if tensor_model_path is None: tensor_model_path = model_path load_model_op = op.SimpleMLLoadModelFromPath( model_identifier=self.model_identifier, path=tensor_model_path) self._init_op = tf.group(self.input_builder.init_op(), load_model_op) def init_op(self) -> InitOp: return self._init_op def apply(self, features: Dict[Text, Tensor]) -> ModelOutput: if self._verbose: logging.info("Create inference op") inference_args = self.input_builder.build_inference_op_args(features) dense_predictions, dense_col_representation = op.SimpleMLInferenceOp( model_identifier=self.model_identifier, **inference_args) return ModelOutput( dense_predictions=dense_predictions, dense_col_representation=dense_col_representation) class ModelV2(tracking.AutoTrackable): def __init__(self, model_path: Text, verbose: Optional[bool] = True): super(ModelV2).__init__() self._input_builder = _InferenceArgsBuilder(verbose) self._input_builder.build_from_model_path(model_path) self._compiled_model = _CompiledSimpleMLModelResource( _DiskModelLoader(model_path)) def apply(self, features: Dict[Text, Tensor]) -> ModelOutput: inference_args = self._input_builder.build_inference_op_args(features) (dense_predictions, dense_col_representation) = op.SimpleMLInferenceOpWithHandle( model_handle=self._compiled_model.resource_handle, name="inference_op", **inference_args) return ModelOutput( dense_predictions=dense_predictions, dense_col_representation=dense_col_representation) def _create_model_identifier() -> Text: return "sml_{}".format(uuid.uuid4()) FeatureMaps = collections.namedtuple("FeatureMaps", [ "numerical_features", "boolean_features", "categorical_int_features", "categorical_set_int_features", ]) class _InferenceArgsBuilder(tracking.AutoTrackable): def __init__(self, verbose: Optional[bool] = True): super(_InferenceArgsBuilder).__init__() self._verbose: bool = verbose self._header: Optional[abstract_model_pb2.AbstractModel] = None self._data_spec: Optional[data_spec_pb2.DataSpecification] = None 
self._feature_name_to_idx = None self._init_ops: List[tf.Operation] = None self._dense_output_dim: Optional[int] = None super(_InferenceArgsBuilder, self).__init__() def build_from_model_path(self, model_path: Text): header = abstract_model_pb2.AbstractModel() with tf.io.gfile.GFile(os.path.join(model_path, "header.pb"), "rb") as f: header.ParseFromString(f.read()) data_spec = data_spec_pb2.DataSpecification() with tf.io.gfile.GFile(os.path.join(model_path, "data_spec.pb"), "rb") as f: data_spec.ParseFromString(f.read()) self.build_from_dataspec_and_header(data_spec, header) def build_from_dataspec_and_header(self, dataspec: data_spec_pb2.DataSpecification, header: abstract_model_pb2.AbstractModel): self._header = header self._data_spec = dataspec self._feature_name_to_idx = { self._data_spec.columns[feature_idx].name: feature_idx for feature_idx in self._header.input_features } self._init_ops = [] self._dense_output_dim = self._get_dense_output_dim() self._create_str_to_int_tables() def init_op(self) -> Tensor: if self._init_ops: return tf.group(*self._init_ops) else: return tf.no_op() def build_inference_op_args(self, features: Dict[Text, Tensor]) -> Dict[Text, Any]: if self._verbose: logging.info("\tApply model on features:\n%s", features) feature_maps = FeatureMaps( numerical_features={}, boolean_features={}, categorical_int_features={}, categorical_set_int_features={}) for feature_name, feature_tensor in features.items(): self._register_input_feature(feature_name, feature_tensor, feature_maps) self._check_all_input_features_are_provided(feature_maps) if feature_maps.numerical_features: numerical_features = tf.stack( self._dict_to_list_sorted_by_key(feature_maps.numerical_features), axis=1) else: numerical_features = tf.constant(0, dtype=tf.float32, shape=(0, 0)) if feature_maps.boolean_features: boolean_features = tf.stack( self._dict_to_list_sorted_by_key(feature_maps.boolean_features), axis=1) else: boolean_features = tf.constant(0, dtype=tf.float32, shape=(0, 0)) if feature_maps.categorical_int_features: categorical_int_features = tf.stack( self._dict_to_list_sorted_by_key( feature_maps.categorical_int_features), axis=1) else: categorical_int_features = tf.constant(0, dtype=tf.int32, shape=(0, 0)) if feature_maps.categorical_set_int_features: categorical_set_int_features = tf.stack( self._dict_to_list_sorted_by_key( feature_maps.categorical_set_int_features), axis=1) else: categorical_set_int_features = tf.ragged.constant([], dtype=tf.int32, ragged_rank=2) args = { "numerical_features": numerical_features, "boolean_features": boolean_features, "categorical_int_features": categorical_int_features, "categorical_set_int_features_values": categorical_set_int_features.values.values, "categorical_set_int_features_row_splits_dim_1": categorical_set_int_features.values.row_splits, "categorical_set_int_features_row_splits_dim_2": categorical_set_int_features.row_splits, "dense_output_dim": self._dense_output_dim, } if self._verbose: logging.info("Inference op arguments:\n%s", args) return args def _register_input_feature(self, name: Text, value: Tensor, feature_maps: FeatureMaps) -> None: feature_idx = self._feature_name_to_idx.get(name) if feature_idx is None: logging.warn("Registering feature \"%s\" not used by the model.", name) return if feature_idx in self._all_feature_idxs(feature_maps): raise Exception("The feature \"{}\" was already registered.".format(name)) feature_spec = self._data_spec.columns[feature_idx] if feature_spec.type == ColumnType.NUMERICAL: value = 
self._prepare_and_check_numerical_feature(name, value) feature_maps.numerical_features[feature_idx] = value elif feature_spec.type == ColumnType.BOOLEAN: value = self._prepare_and_check_boolean_feature(name, value) feature_maps.boolean_features[feature_idx] = value elif feature_spec.type == ColumnType.CATEGORICAL: value = self._prepare_and_check_categorical_feature( name, value, feature_spec) feature_maps.categorical_int_features[feature_idx] = value elif feature_spec.type == ColumnType.CATEGORICAL_SET: value = self._prepare_and_check_categorical_set_feature( name, value, feature_spec) feature_maps.categorical_set_int_features[feature_idx] = value else: raise Exception("No supported type \"{}\" for feature \"{}\"".format( ColumnType.Name(feature_spec.type), name)) def _create_str_to_int_tables(self): self.categorical_str_to_int_hashmaps = {} for feature_idx in self._header.input_features: feature_spec = self._data_spec.columns[feature_idx] if feature_spec.HasField( "categorical" ) and not feature_spec.categorical.is_already_integerized: vocabulary = [(key, item.index) for key, item in feature_spec.categorical.items.items() if item.index != 0] if "" not in feature_spec.categorical.items: vocabulary.append(("", -1)) vocabulary.append( (str(MISSING_NON_INTEGERIZED_CATEGORICAL_STORED_AS_INT), -1)) vocabulary.sort(key=lambda x: x[1]) vocabulary_keys = tf.constant(list(zip(*vocabulary))[0]) vocabulary_values = tf.constant(list(zip(*vocabulary))[1]) vocabulary_index = tf.lookup.KeyValueTensorInitializer( vocabulary_keys, vocabulary_values) vocabulary_hashmap = tf.lookup.StaticHashTable(vocabulary_index, 0) self._init_ops.append(vocabulary_index.initialize(vocabulary_hashmap)) self.categorical_str_to_int_hashmaps[ feature_spec.name] = vocabulary_hashmap @staticmethod def _dict_to_list_sorted_by_key(src: Dict[Any, Any]) -> List[Any]: return [value[1] for value in sorted(src.items())] @staticmethod
Apache License 2.0
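A small illustration of _all_feature_idxs, assuming the FeatureMaps namedtuple defined in the context; the dictionaries map dataspec column indices to tensors (irrelevant here, so left as None):

maps = FeatureMaps(
    numerical_features={0: None, 2: None},
    boolean_features={},
    categorical_int_features={5: None},
    categorical_set_int_features={},
)
_InferenceArgsBuilder._all_feature_idxs(maps)  # [0, 2, 5]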
maybeshewill-cv/dvcnn_lane_detection
DVCNN/cnn_util.py
_model_json_exist
python
def _model_json_exist(json_path): return os.path.isfile(json_path)
Check if the json file exists

:param json_path:
:return:
https://github.com/maybeshewill-cv/dvcnn_lane_detection/blob/b66a1a856ba69b0a0a82c7b53dd192e4906a375b/DVCNN/cnn_util.py#L34-L40
import tensorflow as tf import os import json import collections from DVCNN.model_def import dvcnn_global_variable def write_dvcnn_model(json_path): if _model_json_exist(json_path): print('{} already exist'.format(json_path)) return dvcnn_model = _convert_to_ordered_dict(model_dict=dvcnn_global_variable.DVCNN_ARCHITECTURE) jsonobj = json.dumps(dvcnn_model, indent=4) file = open(json_path, 'w') file.write(jsonobj) file.close()
Apache License 2.0
symjax/symjax
symjax/tensor/interpolation.py
hermite_2d
python
def hermite_2d(values, n_x, n_y):
    n, N, M = values.shape[:3]
    R_N = N - 1
    R_M = M - 1
    patches = T.extract_image_patches(values, (2, 2, 1))
    F = T.concatenate(
        [
            T.concatenate([patches[..., 0], patches[..., 1]], -1),
            T.concatenate([patches[..., 2], patches[..., 3]], -1),
        ],
        axis=-2,
    )
    M = T.Variable(
        array([[1, 0, 0, 0], [0, 0, 1, 0], [-3, 3, -2, -1], [2, -2, 1, 1]]).astype(
            "float32"
        ),
        trainable=False,
    )
    MFM = T.einsum("xnmij,ai,bj->xnmab", F, M, M)
    t_x = T.linspace(float32(0), float32(1), int32(n_x - 1))
    t_y = T.linspace(float32(0), float32(1), int32(n_y - 1))
    x = T.pow(t_x, T.arange(4)[:, None])
    y = T.pow(t_y, T.arange(4)[:, None])
    values = T.einsum("xnmij,ia,jb->xnamb", MFM, x, y)
    return T.reshape(values, (n, (R_N) * (n - 1), (R_M) * (n - 1)))
TODO: test and finalize this

Parameters
----------
values: array-like
    the values, and 2 directional derivatives and the cross derivative
    for the 4 knots per region, hence it should be of shape n,N,M,4
    values vx vy vxy
n_x: int
    the number of points in x per region
n_y: int
    the number of points in y per region

Returns
-------
interpolation: array-like
https://github.com/symjax/symjax/blob/d8778c2eb3254b478cef4f45d934bf921e695619/symjax/tensor/interpolation.py#L166-L218
import numpy as np from .. import tensor as T import jax from ..data.utils import as_tuple import symjax __author__ = "Randall Balestriero" _HERMITE = np.array( [[1, 0, -3, 2], [0, 0, 3, -2], [0, 1, -2, 1], [0, 0, -1, 1]], dtype="float32", ) def upsample_1d( tensor, repeat, axis=-1, mode="constant", value=0.0, boundary_condition="periodic", ): if axis < 0: axis = tensor.ndim + axis if repeat == 0: return tensor out_shape = list(tensor.shape) out_shape[axis] *= 1 + repeat if mode == "constant": zshape = list(tensor.shape) zshape.insert(axis + 1, repeat) tensor_aug = T.concatenate( [ T.expand_dims(tensor, axis + 1), T.full(zshape, value, dtype=tensor.dtype), ], axis + 1, ) elif mode == "nearest": if boundary_condition == "periodic": return T.roll(T.repeat(tensor, repeat + 1, axis), -repeat // 2, axis) else: raise NotImplemented elif mode == "linear": assert tensor.shape[axis] > 1 zshape = [1] * (tensor.ndim + 1) zshape[axis + 1] = repeat coefficients = T.linspace(0, 1, repeat + 2)[1:-1].reshape(zshape) augmented_tensor = T.expand_dims(tensor, axis + 1) if boundary_condition == "periodic": interpolated = ( augmented_tensor * (1 - coefficients) + T.roll(augmented_tensor, -1, axis) * coefficients ) elif boundary_condition == "mirror": assert axis == tensor.ndim - 1 other = T.index_update( T.roll(augmented_tensor, -1, axis), T.index[..., -1, :], augmented_tensor[..., -2, :], ) interpolated = augmented_tensor * (1 - coefficients) + other * coefficients tensor_aug = T.concatenate([augmented_tensor, interpolated], axis + 1) return tensor_aug.reshape(out_shape) def hermite_1d(samples, knots, values, derivatives): adj_knots = T.stack([knots[..., :-1], knots[..., 1:]], axis=-1) adj_v = T.stack([values[..., :-1], values[..., 1:]], axis=-1) adj_d = T.stack([derivatives[..., :-1], derivatives[..., 1:]], axis=-1) adj_vd = T.concatenate([adj_v, adj_d], axis=-1) yh = T.matmul(adj_vd, _HERMITE) if samples.ndim == 1: samples_ = samples.reshape([1] * knots.ndim + [-1]) else: samples_ = T.expand_dims(samples, -2) start = T.expand_dims(knots[..., :-1], -1) end = T.expand_dims(knots[..., 1:], -1) pos = (samples_ - start) / (end - start) mask = ((pos >= 0.0) * (pos <= 1.0)).astype("float32") mask = mask / T.maximum(1, mask.sum(-2, keepdims=True)) polynome = T.expand_dims(pos, -1) ** T.arange(4) mask_polynome = polynome * T.expand_dims(mask, -1) return (T.expand_dims(yh, -2) * mask_polynome).sum(axis=(-3, -1))
Apache License 2.0
ldv-klever/klever
klever/core/pfg/abstractions/__init__.py
Program.__init__
python
def __init__(self, logger, clade, source_paths, memory_efficient_mode=False, skip_missing_files=False):
    self.logger = logger
    self.clade = clade
    self.source_paths = source_paths

    self._files = dict()
    self._fragments = dict()

    self.__divide(skip_missing_files)
    if not memory_efficient_mode:
        self.logger.info("Extract dependencies between files from the program callgraph")
        self.__establish_dependencies()
The class that represents a program as different data structures: graphs of files or units.
Also, it provides common methods to extract, modify or delete fragments and other information.

:param logger: Logger object.
:param clade: Clade object.
:param source_paths: Iterable with paths to source code.
:param memory_efficient_mode: Can we build a call graph?
:param skip_missing_files: Tolerate errors when a CC input file is missing.
https://github.com/ldv-klever/klever/blob/160b1fe0a73dd5b2b0c220235f6c663045610edd/klever/core/pfg/abstractions/__init__.py#L28-L49
import os import glob from klever.core.utils import make_relative_path from klever.core.pfg.abstractions.files_repr import File from klever.core.pfg.abstractions.fragments_repr import Fragment class Program:
Apache License 2.0
ictu/quality-time
components/notifier/src/strategies/notification_strategy.py
NotificationFinder.status_changed
python
def status_changed(metric, most_recent_measurement_seen: datetime) -> bool:
    recent_measurements = metric.get("recent_measurements") or []
    if len(recent_measurements) < 2:
        return False
    scale = metric["scale"]
    metric_had_other_status = recent_measurements[-2][scale]["status"] != recent_measurements[-1][scale]["status"]
    change_was_recent = datetime.fromisoformat(recent_measurements[-1]["start"]) > most_recent_measurement_seen
    return bool(metric_had_other_status and change_was_recent)
Determine if a metric got a new status after the given timestamp.
https://github.com/ictu/quality-time/blob/4bd5df14f584dcc174276da0d2ddb6fcfaf1d427/components/notifier/src/strategies/notification_strategy.py#L31-L39
from datetime import datetime from models.notification import Notification from models.metric_notification_data import MetricNotificationData class NotificationFinder: def __init__(self, data_model): self.data_model = data_model def get_notifications(self, json, most_recent_measurement_seen: datetime) -> list[Notification]: notifications = [] for report in json["reports"]: notable_metrics = [] for subject in report["subjects"].values(): for metric in subject["metrics"].values(): if self.status_changed(metric, most_recent_measurement_seen): notable_metrics.append(MetricNotificationData(metric, subject, self.data_model)) if notable_metrics: for destination_uuid, destination in report.get("notification_destinations", {}).items(): notifications.append(Notification(report, notable_metrics, destination_uuid, destination)) return notifications @staticmethod
Apache License 2.0
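A hedged example of status_changed with a hand-built metric dictionary; the field layout mirrors the recent_measurements structure the function reads, but the values are invented:

from datetime import datetime

metric = {
    "scale": "count",
    "recent_measurements": [
        {"start": "2021-01-01T12:00:00", "count": {"status": "target_met"}},
        {"start": "2021-01-02T12:00:00", "count": {"status": "target_not_met"}},
    ],
}
last_seen = datetime.fromisoformat("2021-01-02T00:00:00")
NotificationFinder.status_changed(metric, last_seen)  # True: status flipped after last_seen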
zyli93/interhat
interhat/data_loader.py
DataLoader.__init__
python
def __init__(self, dataset, batch_size):
    self.dataset = dataset
    self.cfg = Config(dataset=dataset)
    self.batch_size = batch_size

    self.train_size, self.test_size, self.valid_size = 0, 0, 0
    self.batch_index = 0
    self.has_next = False

    self.train_ind, self.train_label = self.load_data("train")
    self.test_ind, self.test_label = self.load_data("test")
    self.val_ind, self.val_label = self.load_data("val")

    self.train_size = self.train_label.shape[0]
    self.test_size = self.test_label.shape[0]
    self.val_size = self.val_label.shape[0]

    self.feature_size, self.field_size = self.load_statistics()
    self.train_iter_count = self.train_size // self.batch_size
:param dataset: name of the dataset
:param use_graph: whether a graph needs to be built
:param batch_size: size of each training batch
https://github.com/zyli93/interhat/blob/1e40d8a7d0a089ae98d251fdb52e9d4047bdb128/interhat/data_loader.py#L17-L49
import pandas as pd from const import Constant, Config DATA_DIR = Constant.PARSE_DIR class DataLoader:
MIT License
cancerdhc/ccdhmodel
vendor/sheet2linkml/sheet2linkml/source/gsheetmodel/enum.py
Enum.__init__
python
def __init__(self, model, sheet: worksheet, name: str, rows: list[dict[str, str]]):
    self.model = model
    self.worksheet = sheet
    self.enum_name = name
    self.rows = rows
Create an enum based on a GSheetModel and a Google Sheet worksheet.

:param model: The GSheetModel that this enum is a part of.
:param sheet: A Google Sheet worksheet describing this enum.
:param name: The name of this enum.
:param rows: The rows in the spreadsheet describing this enum (as dictionaries of str -> str).
https://github.com/cancerdhc/ccdhmodel/blob/fe7d35226aca0cac619b8970de9df506327efe02/vendor/sheet2linkml/sheet2linkml/source/gsheetmodel/enum.py#L21-L34
import logging import re from functools import cached_property from linkml_runtime.linkml_model.meta import ( SchemaDefinition, EnumDefinition, PermissibleValue, ) from pygsheets import worksheet from sheet2linkml.model import ModelElement from sheet2linkml.source.gsheetmodel.mappings import MappingRelations, Mappings class Enum(ModelElement):
BSD 3-Clause New or Revised License
duetosymmetry/qnm
qnm/angular.py
give_M_matrix_elem_ufunc
python
def give_M_matrix_elem_ufunc(s, c, m):
    def elem(l, lprime):
        return M_matrix_elem(s, c, m, l, lprime)
    return np.frompyfunc(elem, 2, 1)
Legacy function. Gives ufunc that implements matrix elements of the
spherical-spheroidal decomposition matrix. This function is used by
:meth:`M_matrix_old`.

Parameters
----------
s: int
    Spin-weight of interest
c: complex
    Oblateness of the spheroidal harmonic
m: int
    Magnetic quantum number

Returns
-------
ufunc
    Implements elements of M matrix
https://github.com/duetosymmetry/qnm/blob/25ceed85b0674707d6e4831dc2cb99c7ac568b05/qnm/angular.py#L151-L176
from __future__ import division, print_function, absolute_import from numba import njit import numpy as np @njit(cache=True) def _calF(s, l, m): if ((0==s) and (0 == l+1)): return 0. return (np.sqrt( ((l+1)**2 - m*m) / (2*l+3) / (2*l+1) ) * np.sqrt( ( (l+1)**2 - s*s) / (l+1)**2 )) @njit(cache=True) def _calG(s, l, m): if (0 == l): return 0. return np.sqrt( ( l*l - m*m ) / (4*l*l - 1)) * np.sqrt(1 - s*s/l/l) @njit(cache=True) def _calH(s, l, m): if (0 == l) or (0 == s): return 0. return - m*s/l/(l+1) @njit(cache=True) def _calA(s, l, m): return _calF(s,l,m) * _calF(s,l+1,m) @njit(cache=True) def _calD(s, l, m): return _calF(s,l,m) * (_calH(s,l+1,m) + _calH(s,l,m)) @njit(cache=True) def _calB(s, l, m): return (_calF(s,l,m) * _calG(s,l+1,m) + _calG(s,l,m) * _calF(s,l-1,m) + _calH(s,l,m)**2) @njit(cache=True) def _calE(s, l, m): return _calG(s,l,m) * (_calH(s,l-1,m) + _calH(s,l,m)) @njit(cache=True) def _calC(s, l, m): return _calG(s,l,m) * _calG(s,l-1,m) @njit(cache=True) def swsphericalh_A(s, l, m): return l*(l+1) - s*(s+1) @njit(cache=True) def M_matrix_elem(s, c, m, l, lprime): if (lprime == l-2): return -c*c*_calA(s,lprime,m) if (lprime == l-1): return (-c*c*_calD(s,lprime,m) + 2*c*s*_calF(s,lprime,m)) if (lprime == l ): return (swsphericalh_A(s,lprime,m) - c*c*_calB(s,lprime,m) + 2*c*s*_calH(s,lprime,m)) if (lprime == l+1): return (-c*c*_calE(s,lprime,m) + 2*c*s*_calG(s,lprime,m)) if (lprime == l+2): return -c*c*_calC(s,lprime,m) return 0.
MIT License
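A usage sketch for the returned ufunc; the spin weight, oblateness, and m below are illustrative values, not taken from the repository:

import numpy as np

uf = give_M_matrix_elem_ufunc(-2, 0.1 + 0.05j, 2)
ells = np.arange(2, 6)
# Broadcasting fills a small block of the spherical-spheroidal mixing matrix
M_block = uf(ells[:, None], ells[None, :]).astype(complex)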
microsoft/qmt
qmt/infrastructure/data_utils.py
reduce_data
python
def reduce_data(reduce_function, task, dask_client):
    sweep_holder = task.computed_result
    sweep_vals = task.computed_result.sweep.sweep_list
    mappped_futures = list(
        map(lambda x: dask.delayed(reduce_function)(x), sweep_holder.futures)
    )
    extracted_data = list(
        map(lambda x: dask_client.compute(x), mappped_futures)
    )
    return sweep_vals, extracted_data
Given a task that has been or will be run, apply a reduce function to all of
its outputs in dask. By specifying a custom `reduce_function`, the user is
returning exactly what they want from a given run.

Parameters
----------
reduce_function : function
    A function that takes the output data type of the supplied task and
    returns a dictionary of objects that can be stored in hdf5.
task : Task
    The task that we would like to work on. Note that this function doesn't
    run the task, but this can be set up either before or after running.
dask_client :
    The client we are using for the calculation.

Returns
-------
sweep_vals, extracted_data
https://github.com/microsoft/qmt/blob/121fa3354c7bbd3b0257294a313057981cb88213/qmt/infrastructure/data_utils.py#L115-L147
import os import uuid import codecs import h5py import time import dask import dask.delayed import tempfile def serialize_file(path): with open(path, "rb") as f: serial_data = codecs.encode(f.read(), "base64").decode() return serial_data def write_deserialised(serial_obj, path): data = codecs.decode(serial_obj.encode(), "base64") with open(path, "wb") as f: f.write(data) def store_serial(obj, save_fct, ext_format, scratch_dir=None): if not scratch_dir: scratch_dir = tempfile.gettempdir() tmp_path = os.path.join(scratch_dir, uuid.uuid4().hex + "." + ext_format) save_fct(obj, tmp_path) serial_data = serialize_file(tmp_path) os.remove(tmp_path) return serial_data def load_serial(serial_obj, load_fct, ext_format=None, scratch_dir=None): if not ext_format: ext_format = "tmpdata" if not scratch_dir: scratch_dir = tempfile.gettempdir() tmp_path = os.path.join(scratch_dir, uuid.uuid4().hex + "." + ext_format) write_deserialised(serial_obj, tmp_path) obj = load_fct(tmp_path) os.remove(tmp_path) return obj
MIT License
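A heavily hedged sketch of calling reduce_data; `my_task` stands in for a qmt task that has been set up elsewhere, the reduce function is a placeholder, and a dask distributed Client is assumed to be available:

from dask.distributed import Client

client = Client()  # a local scheduler is enough for a sketch
sweep_vals, futures = reduce_data(lambda output: {"free_energy": output}, my_task, client)
results = [future.result() for future in futures]  # gather the reduced outputs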
hqsquantumsimulations/pyquest-cffi
pyquest_cffi/ops/ops.py
pauliX.call_interactive
python
def call_interactive(self, qureg: tqureg, qubit: int) -> None: quest.pauliX(qureg, qubit)
r"""Interactive call of PyQuest-cffi Args: qureg: quantum register qubit: qubit the unitary gate is applied to
https://github.com/hqsquantumsimulations/pyquest-cffi/blob/38dafab739364fd42b2e1f94e0c6617e11fe6229/pyquest_cffi/ops/ops.py#L76-L83
from pyquest_cffi.questlib import ( quest, _PYQUEST, ffi_quest, qreal, tqureg, tquestenv, paulihamil ) import numpy as np from typing import Sequence, Optional, Tuple from pyquest_cffi import cheat class hadamard(_PYQUEST): def call_interactive(self, qureg: tqureg, qubit: int) -> None: quest.hadamard(qureg, qubit) def matrix(self, **kwargs) -> np.ndarray: matrix = 1 / np.sqrt(2) * np.array([[1, 1], [1, -1]], dtype=complex) return matrix class pauliX(_PYQUEST):
Apache License 2.0
alteryx/snakeplane
snakeplane/interface_utilities.py
get_column_types_list
python
def get_column_types_list(record_info_in: sdk.RecordInfo) -> List[object]: return [field.type for field in record_info_in]
Collect the column types from an Alteryx record info object.

Parameters
----------
record_info_in : object
    An Alteryx RecordInfo object

Returns
-------
List[object]
    A list of the column types as per the AlteryxSDK
https://github.com/alteryx/snakeplane/blob/cefd87ab9fe6d18e3961ad1299319a6f8bb41b7e/snakeplane/interface_utilities.py#L125-L138
from snakeplane.constants import SNAKEPLANE_NULL_VALUE_PLACEHOLDER from typing import Any, List, Optional, Tuple import AlteryxPythonSDK as sdk type_dict = { "blob": "get_as_blob", "byte": "get_as_int32", "int16": "get_as_int32", "int32": "get_as_int32", "int64": "get_as_int64", "float": "get_as_double", "double": "get_as_double", "date": "get_as_string", "time": "get_as_string", "datetime": "get_as_string", "bool": "get_as_bool", "string": "get_as_string", "v_string": "get_as_string", "v_wstring": "get_as_string", "wstring": "get_as_string", "fixeddecimal": "get_as_double", "spatialobj": "get_as_blob", } def get_getter_from_field(field): return getattr(field, type_dict[str(field.type)]) def get_dynamic_type_value(field: sdk.Field, record: sdk.RecordRef) -> Any: try: return getattr(field, type_dict[str(field.type)])(record) except KeyError: err_str = f"""Failed to automatically convert field type "{str(field.type)}" due to unidentified type name. This is due to a currently unsupported type.""" raise TypeError(err_str) def get_column_names_list(record_info_in: sdk.RecordInfo) -> List[str]: return [field.name for field in record_info_in] def get_column_metadata(record_info_in: sdk.RecordInfo) -> dict: from snakeplane.helper_classes import AnchorMetadata metadata = AnchorMetadata() for field in record_info_in: metadata.add_column( field.name, field.type, size=field.size, source=field.source, scale=field.scale, description=field.description, ) return metadata
Apache License 2.0
xanaduai/strawberryfields
strawberryfields/apps/train/param.py
VGBS.add_A_init_samples
python
def add_A_init_samples(self, samples: np.ndarray):
    if samples is None:
        return

    shape = samples.shape
    if shape[1] != self.n_modes:
        raise ValueError("Must input samples of shape (number, {})".format(self.n_modes))

    if self.A_init_samples is None:
        self.A_init_samples = samples
    else:
        self.A_init_samples = np.vstack([self.A_init_samples, samples])
Add samples of the initial adjacency matrix to :attr:`A_init_samples`.

.. warning::

    The added samples must be from the *input* adjacency matrix and not the
    trained one :math:`A(\theta)`.

**Example usage:**

>>> samples = np.array([[0, 1, 0, 0], [0, 1, 1, 1]])
>>> vgbs.add_A_init_samples(samples)

Args:
    samples (array): samples from the initial adjacency matrix
https://github.com/xanaduai/strawberryfields/blob/c1eed81a93419cb9c28a6ca205925691063722ce/strawberryfields/apps/train/param.py#L279-L305
from typing import Optional import numpy as np import thewalrus.samples from thewalrus._hafnian import reduction from thewalrus._torontonian import tor from thewalrus.quantum import Qmat from thewalrus.quantum import adj_scaling as rescale from thewalrus.quantum import adj_scaling_torontonian as rescale_tor from thewalrus.quantum import photon_number_mean_vector, pure_state_amplitude import strawberryfields as sf def rescale_adjacency(A: np.ndarray, n_mean: float, threshold: bool) -> np.ndarray: scale = rescale_tor(A, n_mean) if threshold else rescale(A, n_mean) return A * scale def _Omat(A: np.ndarray) -> np.ndarray: return np.block([[np.zeros_like(A), np.conj(A)], [A, np.zeros_like(A)]]) def prob_click(A: np.ndarray, sample: np.ndarray): n = len(A) O = _Omat(A) sample_big = np.hstack([sample, sample]).astype("int") O_sub = reduction(O, sample_big) scale = np.sqrt(np.linalg.det(np.identity(2 * n) - O)) return scale * tor(O_sub) def prob_photon_sample(A: np.ndarray, sample: np.ndarray) -> float: n = len(A) mu = np.zeros(2 * n) cov = A_to_cov(A) sample = np.array(sample, dtype="int") return np.abs(pure_state_amplitude(mu, cov, sample, hbar=sf.hbar)) ** 2 def A_to_cov(A: np.ndarray) -> np.ndarray: n = len(A) I = np.identity(2 * n) return sf.hbar * (np.linalg.inv(I - _Omat(A)) - I / 2) class VGBS: def __init__( self, A: np.ndarray, n_mean: float, embedding, threshold: bool, samples: Optional[np.ndarray] = None, ): if not np.allclose(A, A.T): raise ValueError("Input must be a NumPy array corresponding to a symmetric matrix") self.A_init = rescale_adjacency(A, n_mean, threshold) self.A_init_samples = None self.embedding = embedding self.threshold = threshold self.n_modes = len(A) self.add_A_init_samples(samples) def W(self, params: np.ndarray) -> np.ndarray: return np.sqrt(np.diag(self.embedding(params))) def A(self, params: np.ndarray) -> np.ndarray: return self.W(params) @ self.A_init @ self.W(params) def generate_samples(self, A: np.ndarray, n_samples: int, **kwargs) -> np.ndarray: cov = A_to_cov(A) if self.threshold: samples = thewalrus.samples.torontonian_sample_state( cov, n_samples, hbar=sf.hbar, **kwargs ) else: samples = thewalrus.samples.hafnian_sample_state(cov, n_samples, hbar=sf.hbar, **kwargs) return samples
Apache License 2.0
seetaresearch/dragon
torch/core/nn/modules/activation.py
SELU.__init__
python
def __init__(self, inplace=False):
    super(SELU, self).__init__()
    self.inplace = inplace
Create a ``SELU`` module.

Parameters
----------
inplace : bool, optional, default=False
    Whether to do the operation in-place.
https://github.com/seetaresearch/dragon/blob/3dfb6ea55d90d2fb2da9b1b471f5e1e7d7667810/torch/core/nn/modules/activation.py#L612-L622
from __future__ import absolute_import from __future__ import division from __future__ import print_function from dragon.vm.torch.core.nn import functional as F from dragon.vm.torch.core.nn.init import xavier_uniform_ from dragon.vm.torch.core.nn.modules.linear import Linear from dragon.vm.torch.core.nn.modules.module import Module from dragon.vm.torch.core.nn.parameter import Parameter from dragon.vm.torch.core.ops import init_ops from dragon.vm.torch.core.tensor import Tensor class ELU(Module): def __init__(self, alpha=1., inplace=False): super(ELU, self).__init__() self.alpha = alpha self.inplace = inplace def extra_repr(self): inplace_str = ', inplace' if self.inplace else '' return 'alpha={}{}'.format(self.alpha, inplace_str) def forward(self, input): return F.elu(input, self.alpha, self.inplace) class GELU(Module): def __init__(self): super(GELU, self).__init__() def forward(self, input): return F.gelu(input) class GumbelSoftmax(Module): def __init__(self, tau=1, dim=None, inplace=False): super(GumbelSoftmax, self).__init__() self.tau = tau self.dim = dim self.inplace = inplace def extra_repr(self): inplace_str = ', inplace' if self.inplace else '' return 'dim={}{}'.format(self.dim, inplace_str) def forward(self, input): u_dist = init_ops.rand(input.shape, dtype=input.dtype, device=input.device) gumbel = -((-(u_dist.log())).log()) gumbel = (input + gumbel) / self.tau return F.softmax(gumbel, self.dim, self.inplace) class Hardsigmoid(Module): def __init__(self, inplace=False): super(Hardsigmoid, self).__init__() self.inplace = inplace def extra_repr(self): inplace_str = 'inplace' if self.inplace else '' return inplace_str def forward(self, input): return F.hardsigmoid(input, self.inplace) class Hardswish(Module): def __init__(self): super(Hardswish, self).__init__() def forward(self, input): return F.hardswish(input) class LeakyReLU(Module): def __init__(self, negative_slope=0.01, inplace=False): super(LeakyReLU, self).__init__() self.negative_slope = negative_slope self.inplace = inplace def extra_repr(self): inplace_str = ', inplace' if self.inplace else '' return 'negative_slope={}{}'.format(self.negative_slope, inplace_str) def forward(self, input): return F.leaky_relu(input, self.negative_slope, self.inplace) class LogSoftmax(Module): def __init__(self, dim, inplace=False): super(LogSoftmax, self).__init__() self.dim = dim self.inplace = inplace def extra_repr(self): inplace_str = ', inplace' if self.inplace else '' return 'dim={}{}'.format(self.dim, inplace_str) def forward(self, input): return F.log_softmax(input, self.dim, self.inplace) class MultiheadAttention(Module): def __init__( self, embed_dim, num_heads, dropout=0., bias=True, kdim=None, vdim=None, ): super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError('<embed_dim> must be divisible by <num_heads>.') if not self._qkv_same_embed_dim: self.q_proj_weight = Parameter(Tensor(embed_dim, embed_dim)) self.k_proj_weight = Parameter(Tensor(embed_dim, self.kdim)) self.v_proj_weight = Parameter(Tensor(embed_dim, self.vdim)) self.register_parameter('in_proj_weight', None) else: self.in_proj_weight = Parameter(Tensor(3 * embed_dim, embed_dim)) self.register_parameter('q_proj_weight', 
None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: self.in_proj_bias = Parameter(Tensor(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = Linear(embed_dim, embed_dim, bias=bias) self.reset_parameters() def reset_parameters(self): if self._qkv_same_embed_dim: xavier_uniform_(self.in_proj_weight) else: xavier_uniform_(self.q_proj_weight) xavier_uniform_(self.k_proj_weight) xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: self.in_proj_bias.zero_() self.out_proj.bias.zero_() def forward( self, query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None, ): return F.multi_head_attention_forward( query, key, value, embed_dim_to_check=self.embed_dim, num_heads=self.num_heads, in_proj_weight=self.in_proj_weight, in_proj_bias=self.in_proj_bias, out_proj_weight=self.out_proj.weight, out_proj_bias=self.out_proj.bias, dropout_p=self.dropout, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, use_separate_proj_weight=not self._qkv_same_embed_dim, q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight, ) class PReLU(Module): def __init__(self, num_parameters=1, init=0.25): super(PReLU, self).__init__() self.num_parameters = num_parameters self.weight = Parameter(Tensor(num_parameters).fill_(init)) def extra_repr(self): return 'num_parameters={}'.format(self.num_parameters) def forward(self, input): return F.prelu(input, self.weight) class ReLU(Module): def __init__(self, inplace=False): super(ReLU, self).__init__() self.inplace = inplace def extra_repr(self): inplace_str = 'inplace' if self.inplace else '' return inplace_str def forward(self, input): return F.relu(input, inplace=self.inplace) class ReLU6(Module): def __init__(self, inplace=False): super(ReLU6, self).__init__() self.inplace = inplace def extra_repr(self): inplace_str = 'inplace' if self.inplace else '' return inplace_str def forward(self, input): return F.relu6(input, inplace=self.inplace) class SELU(Module):
BSD 2-Clause Simplified License
dralshehri/hijri-converter
src/hijri_converter/convert.py
Hijri.day
python
def day(self) -> int: return self._day
Return day as an integer.
https://github.com/dralshehri/hijri-converter/blob/779516a06bcb3f72e4919d2deeed53d2c6db5e5b/src/hijri_converter/convert.py#L108-L110
import datetime from bisect import bisect from hijri_converter import helpers, locales, ummalqura class Hijri: __slots__ = "_year", "_month", "_day" def __init__(self, year: int, month: int, day: int, validate: bool = True): self._year = year self._month = month self._day = day if validate: self._check_date() def __repr__(self) -> str: class_name = self.__class__.__name__ return f"{class_name}({self._year}, {self._month}, {self._day})" def __str__(self) -> str: return self.isoformat() def __hash__(self) -> int: return hash(("Hijri", self._year, self._month, self._day)) def __eq__(self, other: object) -> bool: if not isinstance(other, Hijri): return NotImplemented return self._compare(other) == 0 def __gt__(self, other: object) -> bool: if not isinstance(other, Hijri): return NotImplemented return self._compare(other) > 0 def __ge__(self, other: object) -> bool: if not isinstance(other, Hijri): return NotImplemented return self._compare(other) >= 0 def __lt__(self, other: object) -> bool: if not isinstance(other, Hijri): return NotImplemented return self._compare(other) < 0 def __le__(self, other: object) -> bool: if not isinstance(other, Hijri): return NotImplemented return self._compare(other) <= 0 def _compare(self, other: "Hijri") -> int: x = self.datetuple() y = other.datetuple() return 0 if x == y else 1 if x > y else -1 @classmethod def fromisoformat(cls, date_string: str) -> "Hijri": year = int(date_string[0:4]) month = int(date_string[5:7]) day = int(date_string[8:10]) return cls(year, month, day) @classmethod def today(cls) -> "Hijri": return Gregorian.today().to_hijri() @property def year(self) -> int: return self._year @property def month(self) -> int: return self._month @property
MIT License
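A minimal usage sketch for the Hijri.day accessor in the hijri-converter record above, built only from the constructor and fromisoformat shown in its context; the example date is illustrative.

    from hijri_converter.convert import Hijri

    h = Hijri(1442, 9, 1)                 # year, month, day; validated on construction
    print(h.day)                          # -> 1, via the day property shown above
    print(Hijri.fromisoformat("1442-09-01").day)   # same accessor after ISO parsing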
sciapp/pymoldyn
src/config/configuration.py
ConfigFile.save
python
def save(self): self.file = configobj.ConfigObj(CONFIG_FILE) self.parse_node_to_section(self.config, self.file) try: self._create_needed_parent_directories(CONFIG_FILE) self.file.write() self.generate_configspec() self.file.write() except IOError as e: print "IOError in ConfigFile.save"
Recursively reads the configuration object, stores it in the ConfigFile object, and finally writes it to the configuration file.

https://github.com/sciapp/pymoldyn/blob/fba6ea91cb185f916b930cd25b4b1d28a22fb4c5/src/config/configuration.py#L142-L156
import configobj import validate import os import os.path import inspect CONFIG_DIRECTORY = '~/.pymoldyn/' CONFIG_FILE = os.path.expanduser('%s/config.cfg' % CONFIG_DIRECTORY) CONFIG_SPEC_FILE = os.path.expanduser('%s/config.spec' % CONFIG_DIRECTORY) type_dict = { int: ('integer', 'int'), float: ('float', 'float'), str: ('string', 'string'), unicode: ('string', 'string'), bool: ('boolean', 'bool'), } class ConfigNode(object): def __init__(self): pass class Configuration(ConfigNode): class Colors(ConfigNode): def __init__(self): self.surface_cavity = [0.2, 0.4, 1.] self.domain = [0., 1., 0.5] self.center_cavity = [0.9, 0.4, 0.2] self.background = [0.0, 0.0, 0.0] self.bounding_box = [1.0, 1.0, 1.0] self.bonds = [0.8, 0.8, 0.8] class OpenGL(ConfigNode): def __init__(self): self.gl_window_size = [1200, 400] self.atom_radius = 0.4 self.bond_radius = 0.1 pass class Computation(ConfigNode): def __init__(self): self.std_cutoff_radius = 2.8 self.std_resolution = 64 self.max_cachefiles = 0 class Path(ConfigNode): def __init__(self): self.cache_dir = os.path.join(CONFIG_DIRECTORY, 'cache') self.ffmpeg = '/usr/local/bin/ffmpeg' self.result_dir = os.path.join(CONFIG_DIRECTORY, 'results') def __init__(self): self.Colors = Configuration.Colors() self.OpenGL = Configuration.OpenGL() self.Computation = Configuration.Computation() self.Path = Configuration.Path() self.window_position = [-1, -1] self.recent_files = [''] self.max_files = 5 self._file = ConfigFile(self) def add_recent_file(self, filename): if len(self.recent_files) == 1 and not self.recent_files[0]: self.recent_files[0] = filename elif len(self.recent_files) == self.max_files: self.recent_files.pop(-1) self.recent_files.insert(0,filename) else: self.recent_files.insert(0, filename) self.save() def save(self): self._file.save() def read(self): self._file = ConfigFile(self) self._file.read() class ConfigFile(object): def __init__(self, cfg): self.config = cfg @staticmethod def _create_needed_parent_directories(filename): dirname = os.path.dirname(filename) if not os.path.exists(dirname): os.makedirs(dirname) def generate_configspec(self): spec_file = configobj.ConfigObj(CONFIG_SPEC_FILE) self.generate_spec_for_section(self.file, spec_file) try: self._create_needed_parent_directories(CONFIG_SPEC_FILE) spec_file.write() except IOError as e: print "IOError in ConfigFile.generate_configspec" def generate_spec_for_section(self, section, spec_section): for scalar in section.scalars: t = type(section[scalar]) type_string = type_dict[t][0] if t is not list else type_dict[type(section[scalar][0])][1] + '_list' spec_section[scalar] = type_string for sect in section.sections: spec_section[sect] = {} self.generate_spec_for_section(section[sect], spec_section[sect])
MIT License
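The save method in the pymoldyn record above delegates to parse_node_to_section (not shown here) to copy the ConfigNode tree into a configobj.ConfigObj before writing. A rough, self-contained sketch of that idea under stated assumptions: Node is a stand-in for ConfigNode, node_to_section is a hypothetical name, and only configobj's documented API (ConfigObj, section assignment, write) is relied on.

    import configobj

    class Node:                            # stand-in for a ConfigNode subtree
        pass

    colors = Node()
    colors.background = [0.0, 0.0, 0.0]
    root = Node()
    root.Colors = colors
    root.max_files = 5

    def node_to_section(node, section):
        # Copy scalar attributes; recurse into nested nodes as sub-sections.
        for name, value in vars(node).items():
            if isinstance(value, Node):
                section[name] = {}
                node_to_section(value, section[name])
            else:
                section[name] = value

    cfg = configobj.ConfigObj("config.cfg")
    node_to_section(root, cfg)
    cfg.write()                            # emits max_files plus a [Colors] section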
vollib/py_vollib
py_vollib/black_scholes/greeks/numerical.py
theta
python
def theta(flag, S, K, t, r, sigma): b = r return numerical_theta(flag, S, K, t, r, sigma, b, f)
Return Black-Scholes theta of an option. :param S: underlying asset price :type S: float :param K: strike price :type K: float :param sigma: annualized standard deviation, or volatility :type sigma: float :param t: time to expiration in years :type t: float :param r: risk-free interest rate :type r: float :param flag: 'c' or 'p' for call or put. :type flag: str
https://github.com/vollib/py_vollib/blob/f5f3a1ecec73c0ae98a5e5ec9f17a8e65a4dc476/py_vollib/black_scholes/greeks/numerical.py#L87-L107
from py_vollib.black_scholes import black_scholes from py_vollib.helpers.numerical_greeks import delta as numerical_delta from py_vollib.helpers.numerical_greeks import vega as numerical_vega from py_vollib.helpers.numerical_greeks import theta as numerical_theta from py_vollib.helpers.numerical_greeks import rho as numerical_rho from py_vollib.helpers.numerical_greeks import gamma as numerical_gamma from py_vollib.black_scholes.greeks.analytical import gamma as agamma from py_vollib.black_scholes.greeks.analytical import delta as adelta from py_vollib.black_scholes.greeks.analytical import vega as avega from py_vollib.black_scholes.greeks.analytical import rho as arho from py_vollib.black_scholes.greeks.analytical import theta as atheta f = lambda flag, S, K, t, r, sigma, b: black_scholes(flag, S, K, t, r, sigma) def delta(flag, S, K, t, r, sigma): b = r return numerical_delta(flag, S, K, t, r, sigma, b, f)
MIT License
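A usage sketch for the numerical theta wrapper in the py_vollib record above; the option parameters are arbitrary illustrative values, and the result is a finite-difference estimate rather than the closed-form Greek.

    from py_vollib.black_scholes.greeks.numerical import theta

    # Call and put: spot 100, strike 100, half a year to expiry, 1% rate, 20% vol.
    theta_call = theta('c', S=100.0, K=100.0, t=0.5, r=0.01, sigma=0.20)
    theta_put = theta('p', S=100.0, K=100.0, t=0.5, r=0.01, sigma=0.20)
    print(theta_call, theta_put)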
devopshq/teamcity
dohq_teamcity/models/build.py
Build.task_id
python
def task_id(self, task_id): self._task_id = task_id
Sets the task_id of this Build. :param task_id: The task_id of this Build. # noqa: E501 :type: int
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/models/build.py#L421-L429
from dohq_teamcity.custom.base_model import TeamCityObject class Build(TeamCityObject): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'int', 'task_id': 'int', 'build_type_id': 'str', 'build_type_internal_id': 'str', 'number': 'str', 'status': 'str', 'state': 'str', 'running': 'bool', 'composite': 'bool', 'failed_to_start': 'bool', 'personal': 'bool', 'percentage_complete': 'int', 'branch_name': 'str', 'default_branch': 'bool', 'unspecified_branch': 'bool', 'history': 'bool', 'pinned': 'bool', 'href': 'str', 'web_url': 'str', 'queue_position': 'int', 'limited_changes_count': 'int', 'artifacts_directory': 'str', 'links': 'Links', 'status_text': 'str', 'build_type': 'BuildType', 'comment': 'Comment', 'tags': 'Tags', 'pin_info': 'Comment', 'user': 'User', 'start_estimate': 'str', 'wait_reason': 'str', 'running_info': 'ProgressInfo', 'canceled_info': 'Comment', 'queued_date': 'str', 'start_date': 'str', 'finish_date': 'str', 'triggered': 'TriggeredBy', 'last_changes': 'Changes', 'changes': 'Changes', 'revisions': 'Revisions', 'versioned_settings_revision': 'Revision', 'artifact_dependency_changes': 'BuildChanges', 'agent': 'Agent', 'compatible_agents': 'Agents', 'test_occurrences': 'TestOccurrences', 'problem_occurrences': 'ProblemOccurrences', 'artifacts': 'Files', 'related_issues': 'IssuesUsages', 'properties': 'Properties', 'resulting_properties': 'Properties', 'attributes': 'Entries', 'statistics': 'Properties', 'metadata': 'Datas', 'snapshot_dependencies': 'Builds', 'artifact_dependencies': 'Builds', 'custom_artifact_dependencies': 'ArtifactDependencies', 'settings_hash': 'str', 'current_settings_hash': 'str', 'modification_id': 'str', 'chain_modification_id': 'str', 'replacement_ids': 'Items', 'related': 'Related', 'triggering_options': 'BuildTriggeringOptions', 'used_by_other_builds': 'bool', 'status_change_comment': 'Comment', 'locator': 'str' } attribute_map = { 'id': 'id', 'task_id': 'taskId', 'build_type_id': 'buildTypeId', 'build_type_internal_id': 'buildTypeInternalId', 'number': 'number', 'status': 'status', 'state': 'state', 'running': 'running', 'composite': 'composite', 'failed_to_start': 'failedToStart', 'personal': 'personal', 'percentage_complete': 'percentageComplete', 'branch_name': 'branchName', 'default_branch': 'defaultBranch', 'unspecified_branch': 'unspecifiedBranch', 'history': 'history', 'pinned': 'pinned', 'href': 'href', 'web_url': 'webUrl', 'queue_position': 'queuePosition', 'limited_changes_count': 'limitedChangesCount', 'artifacts_directory': 'artifactsDirectory', 'links': 'links', 'status_text': 'statusText', 'build_type': 'buildType', 'comment': 'comment', 'tags': 'tags', 'pin_info': 'pinInfo', 'user': 'user', 'start_estimate': 'startEstimate', 'wait_reason': 'waitReason', 'running_info': 'running-info', 'canceled_info': 'canceledInfo', 'queued_date': 'queuedDate', 'start_date': 'startDate', 'finish_date': 'finishDate', 'triggered': 'triggered', 'last_changes': 'lastChanges', 'changes': 'changes', 'revisions': 'revisions', 'versioned_settings_revision': 'versionedSettingsRevision', 'artifact_dependency_changes': 'artifactDependencyChanges', 'agent': 'agent', 'compatible_agents': 'compatibleAgents', 'test_occurrences': 'testOccurrences', 'problem_occurrences': 'problemOccurrences', 'artifacts': 'artifacts', 'related_issues': 'relatedIssues', 'properties': 'properties', 'resulting_properties': 
'resultingProperties', 'attributes': 'attributes', 'statistics': 'statistics', 'metadata': 'metadata', 'snapshot_dependencies': 'snapshot-dependencies', 'artifact_dependencies': 'artifact-dependencies', 'custom_artifact_dependencies': 'custom-artifact-dependencies', 'settings_hash': 'settingsHash', 'current_settings_hash': 'currentSettingsHash', 'modification_id': 'modificationId', 'chain_modification_id': 'chainModificationId', 'replacement_ids': 'replacementIds', 'related': 'related', 'triggering_options': 'triggeringOptions', 'used_by_other_builds': 'usedByOtherBuilds', 'status_change_comment': 'statusChangeComment', 'locator': 'locator' } def __init__(self, id=None, task_id=None, build_type_id=None, build_type_internal_id=None, number=None, status=None, state=None, running=False, composite=False, failed_to_start=False, personal=False, percentage_complete=None, branch_name=None, default_branch=False, unspecified_branch=False, history=False, pinned=False, href=None, web_url=None, queue_position=None, limited_changes_count=None, artifacts_directory=None, links=None, status_text=None, build_type=None, comment=None, tags=None, pin_info=None, user=None, start_estimate=None, wait_reason=None, running_info=None, canceled_info=None, queued_date=None, start_date=None, finish_date=None, triggered=None, last_changes=None, changes=None, revisions=None, versioned_settings_revision=None, artifact_dependency_changes=None, agent=None, compatible_agents=None, test_occurrences=None, problem_occurrences=None, artifacts=None, related_issues=None, properties=None, resulting_properties=None, attributes=None, statistics=None, metadata=None, snapshot_dependencies=None, artifact_dependencies=None, custom_artifact_dependencies=None, settings_hash=None, current_settings_hash=None, modification_id=None, chain_modification_id=None, replacement_ids=None, related=None, triggering_options=None, used_by_other_builds=False, status_change_comment=None, locator=None, teamcity=None): self._id = None self._task_id = None self._build_type_id = None self._build_type_internal_id = None self._number = None self._status = None self._state = None self._running = None self._composite = None self._failed_to_start = None self._personal = None self._percentage_complete = None self._branch_name = None self._default_branch = None self._unspecified_branch = None self._history = None self._pinned = None self._href = None self._web_url = None self._queue_position = None self._limited_changes_count = None self._artifacts_directory = None self._links = None self._status_text = None self._build_type = None self._comment = None self._tags = None self._pin_info = None self._user = None self._start_estimate = None self._wait_reason = None self._running_info = None self._canceled_info = None self._queued_date = None self._start_date = None self._finish_date = None self._triggered = None self._last_changes = None self._changes = None self._revisions = None self._versioned_settings_revision = None self._artifact_dependency_changes = None self._agent = None self._compatible_agents = None self._test_occurrences = None self._problem_occurrences = None self._artifacts = None self._related_issues = None self._properties = None self._resulting_properties = None self._attributes = None self._statistics = None self._metadata = None self._snapshot_dependencies = None self._artifact_dependencies = None self._custom_artifact_dependencies = None self._settings_hash = None self._current_settings_hash = None self._modification_id = None 
self._chain_modification_id = None self._replacement_ids = None self._related = None self._triggering_options = None self._used_by_other_builds = None self._status_change_comment = None self._locator = None self.discriminator = None if id is not None: self.id = id if task_id is not None: self.task_id = task_id if build_type_id is not None: self.build_type_id = build_type_id if build_type_internal_id is not None: self.build_type_internal_id = build_type_internal_id if number is not None: self.number = number if status is not None: self.status = status if state is not None: self.state = state if running is not None: self.running = running if composite is not None: self.composite = composite if failed_to_start is not None: self.failed_to_start = failed_to_start if personal is not None: self.personal = personal if percentage_complete is not None: self.percentage_complete = percentage_complete if branch_name is not None: self.branch_name = branch_name if default_branch is not None: self.default_branch = default_branch if unspecified_branch is not None: self.unspecified_branch = unspecified_branch if history is not None: self.history = history if pinned is not None: self.pinned = pinned if href is not None: self.href = href if web_url is not None: self.web_url = web_url if queue_position is not None: self.queue_position = queue_position if limited_changes_count is not None: self.limited_changes_count = limited_changes_count if artifacts_directory is not None: self.artifacts_directory = artifacts_directory if links is not None: self.links = links if status_text is not None: self.status_text = status_text if build_type is not None: self.build_type = build_type if comment is not None: self.comment = comment if tags is not None: self.tags = tags if pin_info is not None: self.pin_info = pin_info if user is not None: self.user = user if start_estimate is not None: self.start_estimate = start_estimate if wait_reason is not None: self.wait_reason = wait_reason if running_info is not None: self.running_info = running_info if canceled_info is not None: self.canceled_info = canceled_info if queued_date is not None: self.queued_date = queued_date if start_date is not None: self.start_date = start_date if finish_date is not None: self.finish_date = finish_date if triggered is not None: self.triggered = triggered if last_changes is not None: self.last_changes = last_changes if changes is not None: self.changes = changes if revisions is not None: self.revisions = revisions if versioned_settings_revision is not None: self.versioned_settings_revision = versioned_settings_revision if artifact_dependency_changes is not None: self.artifact_dependency_changes = artifact_dependency_changes if agent is not None: self.agent = agent if compatible_agents is not None: self.compatible_agents = compatible_agents if test_occurrences is not None: self.test_occurrences = test_occurrences if problem_occurrences is not None: self.problem_occurrences = problem_occurrences if artifacts is not None: self.artifacts = artifacts if related_issues is not None: self.related_issues = related_issues if properties is not None: self.properties = properties if resulting_properties is not None: self.resulting_properties = resulting_properties if attributes is not None: self.attributes = attributes if statistics is not None: self.statistics = statistics if metadata is not None: self.metadata = metadata if snapshot_dependencies is not None: self.snapshot_dependencies = snapshot_dependencies if artifact_dependencies is not None: 
self.artifact_dependencies = artifact_dependencies if custom_artifact_dependencies is not None: self.custom_artifact_dependencies = custom_artifact_dependencies if settings_hash is not None: self.settings_hash = settings_hash if current_settings_hash is not None: self.current_settings_hash = current_settings_hash if modification_id is not None: self.modification_id = modification_id if chain_modification_id is not None: self.chain_modification_id = chain_modification_id if replacement_ids is not None: self.replacement_ids = replacement_ids if related is not None: self.related = related if triggering_options is not None: self.triggering_options = triggering_options if used_by_other_builds is not None: self.used_by_other_builds = used_by_other_builds if status_change_comment is not None: self.status_change_comment = status_change_comment if locator is not None: self.locator = locator super(Build, self).__init__(teamcity=teamcity) @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def task_id(self): return self._task_id @task_id.setter
MIT License
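A small sketch of how the generated Build model above is used; the property/setter pair simply mirrors a private attribute, and attribute_map renames task_id to taskId on the wire. It assumes the TeamCityObject base class tolerates teamcity=None, as the default in the constructor signature suggests; the field values are placeholders.

    from dohq_teamcity.models.build import Build

    build = Build(id=42, task_id=1001, status='SUCCESS')   # placeholder values
    build.task_id = 1002                   # goes through the setter shown above
    print(build.task_id)                   # -> 1002
    print(Build.attribute_map['task_id'])  # -> 'taskId', the JSON key used in requests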
betterworks/django-anonymizer
anonymizer/replacers.py
positive_integer
python
def positive_integer(anon, obj, field, val): return anon.faker.positive_integer(field=field)
Returns a random positive integer (for a Django PositiveIntegerField)
https://github.com/betterworks/django-anonymizer/blob/2d25bb6e8b5e4230c58031c4b6d10cc536669b3e/anonymizer/replacers.py#L32-L36
def uuid(anon, obj, field, val): return anon.faker.uuid(field=field) def varchar(anon, obj, field, val): return anon.faker.varchar(field=field) def bool(anon, obj, field, val): return anon.faker.bool(field=field) def integer(anon, obj, field, val): return anon.faker.integer(field=field)
MIT License
alliefitter/boto3_type_annotations
boto3_type_annotations_with_docs/boto3_type_annotations/iot1click_projects/client.py
Client.get_devices_in_placement
python
def get_devices_in_placement(self, projectName: str, placementName: str) -> Dict: pass
Returns an object enumerating the devices in a placement. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/iot1click-projects-2018-05-14/GetDevicesInPlacement>`_ **Request Syntax** :: response = client.get_devices_in_placement( projectName='string', placementName='string' ) **Response Syntax** :: { 'devices': { 'string': 'string' } } **Response Structure** - *(dict) --* - **devices** *(dict) --* An object containing the devices (zero or more) within the placement. - *(string) --* - *(string) --* :type projectName: string :param projectName: **[REQUIRED]** The name of the project containing the placement. :type placementName: string :param placementName: **[REQUIRED]** The name of the placement to get the devices from. :rtype: dict :returns:
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/iot1click_projects/client.py#L412-L447
from typing import Optional from botocore.client import BaseClient from typing import Dict from botocore.paginate import Paginator from botocore.waiter import Waiter from typing import Union from typing import List class Client(BaseClient): def associate_device_with_placement(self, projectName: str, placementName: str, deviceId: str, deviceTemplateName: str) -> Dict: pass def can_paginate(self, operation_name: str = None): pass def create_placement(self, placementName: str, projectName: str, attributes: Dict = None) -> Dict: pass def create_project(self, projectName: str, description: str = None, placementTemplate: Dict = None, tags: Dict = None) -> Dict: pass def delete_placement(self, placementName: str, projectName: str) -> Dict: pass def delete_project(self, projectName: str) -> Dict: pass def describe_placement(self, placementName: str, projectName: str) -> Dict: pass def describe_project(self, projectName: str) -> Dict: pass def disassociate_device_from_placement(self, projectName: str, placementName: str, deviceTemplateName: str) -> Dict: pass def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None): pass
MIT License
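The stub above only annotates the real boto3 operation; a hedged sketch of calling the actual service client follows. The project name, placement name, and region are placeholders, and the response shape follows the docstring in the record.

    import boto3

    client = boto3.client('iot1click-projects', region_name='us-east-1')  # placeholder region
    response = client.get_devices_in_placement(
        projectName='demo-project',        # placeholder project
        placementName='lobby-placement',   # placeholder placement
    )
    for template_name, device_id in response['devices'].items():
        print(template_name, device_id)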
googleapis/python-tasks
google/cloud/tasks_v2beta3/services/cloud_tasks/async_client.py
CloudTasksAsyncClient.purge_queue
python
async def purge_queue( self, request: cloudtasks.PurgeQueueRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> queue.Queue: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloudtasks.PurgeQueueRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.purge_queue, default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response
r"""Purges a queue by deleting all of its tasks. All tasks created before this method is called are permanently deleted. Purge operations can take up to one minute to take effect. Tasks might be dispatched before the purge takes effect. A purge is irreversible. Args: request (:class:`google.cloud.tasks_v2beta3.types.PurgeQueueRequest`): The request object. Request message for [PurgeQueue][google.cloud.tasks.v2beta3.CloudTasks.PurgeQueue]. name (:class:`str`): Required. The queue name. For example: ``projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`` This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.tasks_v2beta3.types.Queue: A queue is a container of related tasks. Queues are configured to manage how those tasks are dispatched. Configurable properties include rate limits, retry options, queue types, and others.
https://github.com/googleapis/python-tasks/blob/8bebdfc7b6520a2485e618ff7dee665c211c041a/google/cloud/tasks_v2beta3/services/cloud_tasks/async_client.py#L646-L724
from collections import OrderedDict import functools import re from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.oauth2 import service_account from google.cloud.tasks_v2beta3.services.cloud_tasks import pagers from google.cloud.tasks_v2beta3.types import cloudtasks from google.cloud.tasks_v2beta3.types import queue from google.cloud.tasks_v2beta3.types import queue as gct_queue from google.cloud.tasks_v2beta3.types import target from google.cloud.tasks_v2beta3.types import task from google.cloud.tasks_v2beta3.types import task as gct_task from google.iam.v1 import iam_policy_pb2 from google.iam.v1 import policy_pb2 from google.protobuf import duration_pb2 from google.protobuf import field_mask_pb2 from google.protobuf import timestamp_pb2 from .transports.base import CloudTasksTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import CloudTasksGrpcAsyncIOTransport from .client import CloudTasksClient class CloudTasksAsyncClient: _client: CloudTasksClient DEFAULT_ENDPOINT = CloudTasksClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = CloudTasksClient.DEFAULT_MTLS_ENDPOINT queue_path = staticmethod(CloudTasksClient.queue_path) parse_queue_path = staticmethod(CloudTasksClient.parse_queue_path) task_path = staticmethod(CloudTasksClient.task_path) parse_task_path = staticmethod(CloudTasksClient.parse_task_path) common_billing_account_path = staticmethod( CloudTasksClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( CloudTasksClient.parse_common_billing_account_path ) common_folder_path = staticmethod(CloudTasksClient.common_folder_path) parse_common_folder_path = staticmethod(CloudTasksClient.parse_common_folder_path) common_organization_path = staticmethod(CloudTasksClient.common_organization_path) parse_common_organization_path = staticmethod( CloudTasksClient.parse_common_organization_path ) common_project_path = staticmethod(CloudTasksClient.common_project_path) parse_common_project_path = staticmethod(CloudTasksClient.parse_common_project_path) common_location_path = staticmethod(CloudTasksClient.common_location_path) parse_common_location_path = staticmethod( CloudTasksClient.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): return CloudTasksClient.from_service_account_info.__func__(CloudTasksAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): return CloudTasksClient.from_service_account_file.__func__(CloudTasksAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> CloudTasksTransport: return self._client.transport get_transport_class = functools.partial( type(CloudTasksClient).get_transport_class, type(CloudTasksClient) ) def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, CloudTasksTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: self._client = CloudTasksClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def list_queues( self, request: cloudtasks.ListQueuesRequest = None, 
*, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListQueuesAsyncPager: has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloudtasks.ListQueuesRequest(request) if parent is not None: request.parent = parent rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_queues, default_retry=retries.Retry( initial=0.1, maximum=10.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=20.0, ), default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = pagers.ListQueuesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) return response async def get_queue( self, request: cloudtasks.GetQueueRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> queue.Queue: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloudtasks.GetQueueRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_queue, default_retry=retries.Retry( initial=0.1, maximum=10.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=20.0, ), default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response async def create_queue( self, request: cloudtasks.CreateQueueRequest = None, *, parent: str = None, queue: gct_queue.Queue = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_queue.Queue: has_flattened_params = any([parent, queue]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = cloudtasks.CreateQueueRequest(request) if parent is not None: request.parent = parent if queue is not None: request.queue = queue rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_queue, default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response async def update_queue( self, request: cloudtasks.UpdateQueueRequest = None, *, queue: gct_queue.Queue = None, update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_queue.Queue: has_flattened_params = any([queue, update_mask]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloudtasks.UpdateQueueRequest(request) if queue is not None: request.queue = queue if update_mask is not None: request.update_mask = update_mask rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_queue, default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("queue.name", request.queue.name),) ), ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response async def delete_queue( self, request: cloudtasks.DeleteQueueRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = cloudtasks.DeleteQueueRequest(request) if name is not None: request.name = name rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_queue, default_retry=retries.Retry( initial=0.1, maximum=10.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=20.0, ), default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) await rpc( request, retry=retry, timeout=timeout, metadata=metadata, )
Apache License 2.0
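A sketch of driving the async purge_queue wrapper above from an event loop; the project, location, and queue IDs are placeholders, and the client picks up application-default credentials.

    import asyncio
    from google.cloud import tasks_v2beta3

    async def purge_demo_queue():
        client = tasks_v2beta3.CloudTasksAsyncClient()
        # queue_path is the helper exposed on the client (see the record's context).
        name = client.queue_path('my-project', 'us-central1', 'my-queue')  # placeholders
        queue = await client.purge_queue(name=name)
        print(queue.name)

    asyncio.run(purge_demo_queue())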
rmartin16/qbittorrent-api
tests/conftest.py
abort_if_qbittorrent_crashes
python
def abort_if_qbittorrent_crashes(client): try: _ = client.app.version yield except APIConnectionError: pytest.exit("qBittorrent crashed :(")
Abort tests if qbittorrent disappears during testing
https://github.com/rmartin16/qbittorrent-api/blob/613c21e0920b12876003a4f6b893e734e1c5d180/tests/conftest.py#L165-L171
from os import environ from os import path as os_path from sys import path as sys_path from time import sleep import pytest import six from qbittorrentapi import APIConnectionError from qbittorrentapi import Client from qbittorrentapi.request import Request qbt_version = "v" + environ["QBT_VER"] api_version_map = { "v4.1.0": "2.0", "v4.1.1": "2.0.1", "v4.1.2": "2.0.2", "v4.1.3": "2.1", "v4.1.4": "2.1.1", "v4.1.5": "2.2", "v4.1.6": "2.2", "v4.1.7": "2.2", "v4.1.8": "2.2", "v4.1.9": "2.2.1", "v4.1.9.1": "2.2.1", "v4.2.0": "2.3", "v4.2.1": "2.4", "v4.2.2": "2.4.1", "v4.2.3": "2.4.1", "v4.2.4": "2.5", "v4.2.5": "2.5.1", "v4.3.0": "2.6", "v4.3.0.1": "2.6", "v4.3.1": "2.6.1", "v4.3.2": "2.7", "v4.3.3": "2.7", "v4.3.4.1": "2.8.1", "v4.3.5": "2.8.2", "v4.3.6": "2.8.2", "v4.3.7": "2.8.2", "v4.3.8": "2.8.2", } _check_limit = 10 _orig_torrent_url = ( "http://releases.ubuntu.com/21.04/ubuntu-21.04-desktop-amd64.iso.torrent" ) _orig_torrent_hash = "64a980abe6e448226bb930ba061592e44c3781a1" with open( os_path.join(sys_path[0], "tests", "kubuntu-21.04-desktop-amd64.iso.torrent"), mode="rb", ) as f: torrent1_file = f.read() torrent1_url = "http://cdimage.ubuntu.com/kubuntu/releases/21.04/release/kubuntu-21.04-desktop-amd64.iso.torrent" torrent1_filename = torrent1_url.split("/")[-1] torrent1_hash = "d65d07329264aecb2d2be7a6c0e86b6613b2a600" torrent2_url = "http://cdimage.ubuntu.com/xubuntu/releases/21.04/release/xubuntu-21.04-desktop-amd64.iso.torrent" torrent2_filename = torrent2_url.split("/")[-1] torrent2_hash = "80d773cbf111e906608077967683a0ffcc3a7668" with open(os_path.join(sys_path[0], "tests", "root_folder.torrent"), mode="rb") as f: root_folder_torrent_file = f.read() root_folder_torrent_hash = "a14553bd936a6d496402082454a70ea7a9521adc" is_version_less_than = Request._is_version_less_than suppress_context = Request._suppress_context def get_func(client, func_str): func = client for attr in func_str.split("."): func = getattr(func, attr) return func def check( check_func, value, reverse=False, negate=False, any=False, check_limit=_check_limit ): def _do_check(_check_func_val, _v, _negate, _reverse): if _negate: if _reverse: assert _v not in _check_func_val else: assert _check_func_val not in (_v,) else: if _reverse: assert _v in _check_func_val else: assert _check_func_val in (_v,) if isinstance(value, (six.string_types, int)): value = (value,) try: for i in range(_check_limit): try: exp = None for v in value: exp = None if any else exp try: check_val = check_func() _do_check(check_val, v, negate, reverse) except AssertionError as e: exp = e if not any and exp: break if any and not exp: break if exp: raise exp break except AssertionError: if i >= check_limit - 1: raise sleep(1) except APIConnectionError: raise suppress_context(AssertionError("qBittrorrent crashed...")) def retry(retries=3): def inner(f): def wrapper(*args, **kwargs): for retry_count in range(retries): try: return f(*args, **kwargs) except: if retry_count >= (retries - 1): raise return wrapper return inner @pytest.fixture(autouse=True)
MIT License
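The autouse fixture above probes the client before handing control to each test and aborts the whole session if the probe fails. A minimal sketch of the same yield-fixture pattern with a stand-in service; FlakyService and its version method are invented for illustration.

    import pytest

    class FlakyService:                    # stand-in for the qBittorrent client
        def version(self):
            return "1.0"

    @pytest.fixture(autouse=True)
    def abort_if_service_crashes():
        service = FlakyService()
        try:
            service.version()              # probe connectivity before the test
            yield                          # hand control to the test body
        except Exception:
            pytest.exit("service crashed :(")

    def test_something():
        assert 1 + 1 == 2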
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/extensions_v1beta1_ingress_rule.py
ExtensionsV1beta1IngressRule.http
python
def http(self, http): self._http = http
Sets the http of this ExtensionsV1beta1IngressRule. :param http: The http of this ExtensionsV1beta1IngressRule. # noqa: E501 :type: ExtensionsV1beta1HTTPIngressRuleValue
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/extensions_v1beta1_ingress_rule.py#L94-L102
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class ExtensionsV1beta1IngressRule(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'host': 'str', 'http': 'ExtensionsV1beta1HTTPIngressRuleValue' } attribute_map = { 'host': 'host', 'http': 'http' } def __init__(self, host=None, http=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._host = None self._http = None self.discriminator = None if host is not None: self.host = host if http is not None: self.http = http @property def host(self): return self._host @host.setter def host(self, host): self._host = host @property def http(self): return self._http @http.setter
Apache License 2.0
andrewebdev/django-ostinato
ostinato/pages/templatetags/pages_tags.py
get_page
python
def get_page(ignore_sites=False, **kwargs): PAGES_SITE_TREEID = getattr(settings, 'OSTINATO_PAGES_SITE_TREEID', None) if PAGES_SITE_TREEID and not ignore_sites: kwargs.update({'tree_id': PAGES_SITE_TREEID}) try: return Page.objects.filter(**kwargs)[0] except IndexError: return None
A handy helper that returns the first page filtered by **kwargs
https://github.com/andrewebdev/django-ostinato/blob/2c435dea23319be6e9011e7381afca2b4092b5a2/ostinato/pages/templatetags/pages_tags.py#L53-L64
from django import template from django.conf import settings from ostinato.pages.models import Page register = template.Library() @register.inclusion_tag('pages/navbar.html', takes_context=True) def navbar(context, for_page=None): page = context.get('page', None) navbar = Page.objects.get_navbar(for_page=for_page) return { 'page': page, 'navbar': navbar, } @register.inclusion_tag('pages/breadcrumbs.html', takes_context=True) def breadcrumbs(context, for_page=None, obj=None): if not for_page: try: for_page = context['page'] except KeyError: return {} breadcrumbs = Page.objects.get_breadcrumbs(for_page=for_page) if obj: breadcrumbs.append({ 'title': obj.title, 'url': obj.get_absolute_url(), }) return { "breadcrumbs": breadcrumbs, "for_page": for_page } @register.assignment_tag
MIT License
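A sketch of calling the assignment tag above directly from Python; it must run inside a configured Django project with the ostinato pages app installed, and the slug lookup below is an assumption about the Page model (any valid Page.objects filter keyword works).

    from ostinato.pages.templatetags.pages_tags import get_page

    home = get_page(slug='home')                              # first matching page, or None
    any_site_home = get_page(ignore_sites=True, slug='home')  # bypass the site tree filter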
rlworkgroup/gym-sawyer
sawyer/garage/misc/overrides.py
overrides
python
def overrides(method): return method
Decorator to indicate that the decorated method overrides a method in a superclass. The decorator code is executed while the class is being loaded, so using this decorator should have minimal runtime performance implications. This is based on my idea of how to do this and fwc's highly improved algorithm for the implementation. fwc's algorithm: http://stackoverflow.com/a/14631397/308189 My answer: http://stackoverflow.com/a/8313042/308189 How to use: from overrides import overrides class SuperClass: def method(self): return 2 class SubClass(SuperClass): @overrides def method(self): return 1 :raises: AssertionError if no method with a matching name is found in the superclasses :return: the method, with the superclass docstring added if the method does not have one of its own
https://github.com/rlworkgroup/gym-sawyer/blob/90d706cb0594c27045162bc9a00d56389f17615f/sawyer/garage/misc/overrides.py#L25-L63
import dis import sys __VERSION__ = '0.5' if sys.version > '3': long = int
MIT License
pyslackers/slack-sansio
slack/events.py
EventRouter.register
python
def register(self, event_type: str, handler: Any, **detail: Any) -> None: LOG.info("Registering %s, %s to %s", event_type, detail, handler) if len(detail) > 1: raise ValueError("Only one detail can be provided for additional routing") elif not detail: detail_key, detail_value = "*", "*" else: detail_key, detail_value = detail.popitem() if detail_key not in self._routes[event_type]: self._routes[event_type][detail_key] = {} if detail_value not in self._routes[event_type][detail_key]: self._routes[event_type][detail_key][detail_value] = [] self._routes[event_type][detail_key][detail_value].append(handler)
Register a new handler for a specific :class:`slack.events.Event` `type` (See `slack event types documentation <https://api.slack.com/events>`_ for a list of event types). The arbitrary keyword argument is used as a key/value pair to compare against what is in the incoming :class:`slack.events.Event` Args: event_type: Event type the handler is interested in handler: Callback **detail: Additional key for routing
https://github.com/pyslackers/slack-sansio/blob/3ff7bbcb55f8b5b26b0300c224e4d305b5caeff2/slack/events.py#L185-L212
import re import copy import json import logging import itertools from typing import Any, Dict, Iterator, Optional from collections import defaultdict from collections.abc import MutableMapping from . import exceptions LOG = logging.getLogger(__name__) class Event(MutableMapping): def __init__( self, raw_event: MutableMapping, metadata: Optional[MutableMapping] = None ) -> None: self.event = raw_event self.metadata = metadata def __getitem__(self, item): return self.event[item] def __setitem__(self, key, value): self.event[key] = value def __delitem__(self, key): del self.event[key] def __iter__(self): return iter(self.event) def __len__(self): return len(self.event) def __repr__(self): return "Slack Event: " + str(self.event) def clone(self) -> "Event": return self.__class__(copy.deepcopy(self.event), copy.deepcopy(self.metadata)) @classmethod def from_rtm(cls, raw_event: MutableMapping) -> "Event": if raw_event["type"].startswith("message"): return Message(raw_event) else: return Event(raw_event) @classmethod def from_http( cls, raw_body: MutableMapping, verification_token: Optional[str] = None, team_id: Optional[str] = None, ) -> "Event": if verification_token and raw_body["token"] != verification_token: raise exceptions.FailedVerification(raw_body["token"], raw_body["team_id"]) if team_id and raw_body["team_id"] != team_id: raise exceptions.FailedVerification(raw_body["token"], raw_body["team_id"]) if raw_body["event"]["type"].startswith("message"): return Message(raw_body["event"], metadata=raw_body) else: return Event(raw_body["event"], metadata=raw_body) class Message(Event): def __init__( self, msg: Optional[MutableMapping] = None, metadata: Optional[MutableMapping] = None, ) -> None: if not msg: msg = {} super().__init__(msg, metadata) def __repr__(self) -> str: return "Slack Message: " + str(self.event) def response(self, in_thread: Optional[bool] = None) -> "Message": data = {"channel": self["channel"]} if in_thread: if "message" in self: data["thread_ts"] = ( self["message"].get("thread_ts") or self["message"]["ts"] ) else: data["thread_ts"] = self.get("thread_ts") or self["ts"] elif in_thread is None: if "message" in self and "thread_ts" in self["message"]: data["thread_ts"] = self["message"]["thread_ts"] elif "thread_ts" in self: data["thread_ts"] = self["thread_ts"] return Message(data) def serialize(self) -> dict: data = {**self} if "attachments" in self: data["attachments"] = json.dumps(self["attachments"]) return data def to_json(self) -> str: return json.dumps({**self}) class EventRouter: def __init__(self): self._routes: Dict[str, Dict] = defaultdict(dict)
MIT License
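A usage sketch for EventRouter.register above; the handler functions and the channel ID are illustrative.

    from slack.events import EventRouter

    def store_reaction(event):
        print("reaction added:", event["reaction"])

    def log_channel_message(event):
        print("message seen in channel:", event["channel"])

    router = EventRouter()
    router.register("reaction_added", store_reaction)                   # no detail -> '*' / '*'
    router.register("message", log_channel_message, channel="C123456")  # one detail key allowed
    # router.register("message", log_channel_message, channel="C1", user="U1")  # ValueError: only one detail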
openstack/senlin
senlin/engine/cluster.py
Cluster.do_check
python
def do_check(self, context, **kwargs): self.set_status(context, consts.CS_CHECKING, 'Check in progress') return True
Additional logic at the beginning of the cluster checking process. Sets the cluster status to CHECKING.
https://github.com/openstack/senlin/blob/390779ca1e08f819683e79993696f945f1c0393e/senlin/engine/cluster.py#L279-L285
from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from senlin.common import consts from senlin.common import exception from senlin.engine import cluster_policy as cpm from senlin.engine import health_manager from senlin.engine import node as node_mod from senlin.objects import cluster as co from senlin.objects import cluster_policy as cpo from senlin.objects import node as no from senlin.policies import base as pcb from senlin.profiles import base as pfb LOG = logging.getLogger(__name__) CONF = cfg.CONF class Cluster(object): def __init__(self, name, desired_capacity, profile_id, context=None, **kwargs): self.id = kwargs.get('id', None) self.name = name self.profile_id = profile_id self.user = kwargs.get('user', '') self.project = kwargs.get('project', '') self.domain = kwargs.get('domain', '') self.init_at = kwargs.get('init_at', None) self.created_at = kwargs.get('created_at', None) self.updated_at = kwargs.get('updated_at', None) self.min_size = (kwargs.get('min_size') or consts.CLUSTER_DEFAULT_MIN_SIZE) self.max_size = (kwargs.get('max_size') or consts.CLUSTER_DEFAULT_MAX_SIZE) self.desired_capacity = desired_capacity self.next_index = kwargs.get('next_index', 1) self.timeout = (kwargs.get('timeout') or cfg.CONF.default_action_timeout) self.status = kwargs.get('status', consts.CS_INIT) self.status_reason = kwargs.get('status_reason', 'Initializing') self.data = kwargs.get('data', {}) self.metadata = kwargs.get('metadata') or {} self.dependents = kwargs.get('dependents') or {} self.config = kwargs.get('config') or {} self.rt = { 'profile': None, 'nodes': [], 'policies': [] } if context is not None: self._load_runtime_data(context) def _load_runtime_data(self, context): if self.id is None: return policies = [] bindings = cpo.ClusterPolicy.get_all(context, self.id) for b in bindings: policy = pcb.Policy.load(context, b.policy_id, project_safe=False) policies.append(policy) self.rt = { 'profile': pfb.Profile.load(context, profile_id=self.profile_id, project_safe=False), 'nodes': no.Node.get_all_by_cluster(context, self.id), 'policies': policies } def store(self, context): values = { 'name': self.name, 'profile_id': self.profile_id, 'user': self.user, 'project': self.project, 'domain': self.domain, 'init_at': self.init_at, 'created_at': self.created_at, 'updated_at': self.updated_at, 'min_size': self.min_size, 'max_size': self.max_size, 'desired_capacity': self.desired_capacity, 'next_index': self.next_index, 'timeout': self.timeout, 'status': self.status, 'status_reason': self.status_reason, 'meta_data': self.metadata, 'data': self.data, 'dependents': self.dependents, 'config': self.config, } timestamp = timeutils.utcnow(True) if self.id: values['updated_at'] = timestamp co.Cluster.update(context, self.id, values) else: self.init_at = timestamp values['init_at'] = timestamp cluster = co.Cluster.create(context, values) self.id = cluster.id self._load_runtime_data(context) return self.id @classmethod def _from_object(cls, context, obj): kwargs = { 'id': obj.id, 'user': obj.user, 'project': obj.project, 'domain': obj.domain, 'init_at': obj.init_at, 'created_at': obj.created_at, 'updated_at': obj.updated_at, 'min_size': obj.min_size, 'max_size': obj.max_size, 'next_index': obj.next_index, 'timeout': obj.timeout, 'status': obj.status, 'status_reason': obj.status_reason, 'data': obj.data, 'metadata': obj.metadata, 'dependents': obj.dependents, 'config': obj.config, } return cls(obj.name, obj.desired_capacity, obj.profile_id, context=context, 
**kwargs) @classmethod def load(cls, context, cluster_id=None, dbcluster=None, project_safe=True): if dbcluster is None: dbcluster = co.Cluster.get(context, cluster_id, project_safe=project_safe) if dbcluster is None: raise exception.ResourceNotFound(type='cluster', id=cluster_id) return cls._from_object(context, dbcluster) @classmethod def load_all(cls, context, limit=None, marker=None, sort=None, filters=None, project_safe=True): objs = co.Cluster.get_all(context, limit=limit, marker=marker, sort=sort, filters=filters, project_safe=project_safe) for obj in objs: cluster = cls._from_object(context, obj) yield cluster def set_status(self, context, status, reason=None, **kwargs): values = {} now = timeutils.utcnow(True) if status == consts.CS_ACTIVE and self.status == consts.CS_CREATING: self.created_at = now values['created_at'] = now elif (status == consts.CS_ACTIVE and self.status in (consts.CS_UPDATING, consts.CS_RESIZING)): self.updated_at = now values['updated_at'] = now self.status = status values['status'] = status if reason: self.status_reason = reason values['status_reason'] = reason for k, v in kwargs.items(): if hasattr(self, k): setattr(self, k, v) values[k] = v if 'profile_id' in values: profile = pfb.Profile.load(context, profile_id=self.profile_id) self.rt['profile'] = profile co.Cluster.update(context, self.id, values) return def do_create(self, context, **kwargs): if self.status != consts.CS_INIT: LOG.error('Cluster is in status "%s"', self.status) return False self.set_status(context, consts.CS_CREATING, 'Creation in progress') try: pfb.Profile.create_cluster_object(context, self) except exception.EResourceCreation as ex: self.set_status(context, consts.CS_ERROR, str(ex)) return False return True def do_delete(self, context, **kwargs): self.set_status(context, consts.CS_DELETING, 'Deletion in progress') try: pfb.Profile.delete_cluster_object(context, self) except exception.EResourceDeletion as ex: self.set_status(context, consts.CS_ERROR, str(ex)) return False co.Cluster.delete(context, self.id) return True def do_update(self, context, **kwargs): self.set_status(context, consts.CS_UPDATING, 'Update in progress') return True
Apache License 2.0
tmarenko/mff_auto
lib/game/battle_bot.py
BattleBot.skip_tap_the_screen
python
def skip_tap_the_screen(self): if self.emulator.is_ui_element_on_screen(ui_element=ui.SKIP_CUTSCENE) or self.emulator.is_ui_element_on_screen(ui_element=self._skip_tap_screen_high) or self.emulator.is_ui_element_on_screen(ui_element=self._skip_tap_screen_low): logger.debug("Skipping TAP THE SCREEN.") self.emulator.click_button(ui.SKIP_TAP_THE_SCREEN)
Skips the TAP THE SCREEN battle cutscene.
https://github.com/tmarenko/mff_auto/blob/e5d150c4a76d13f05652bbde811c4c5cd0f2246e/lib/game/battle_bot.py#L65-L71
import re from itertools import cycle import lib.logger as logging from lib.functions import wait_until, r_sleep, confirm_condition_by_time from lib.game import ui logger = logging.get_logger(__name__) t3_percentage_regexp = re.compile(r"([0-9][0-9]?\.?[0-9]? ?%?)") class BattleBot: def __init__(self, game, battle_over_conditions, disconnect_conditions=None): self.game = game self.emulator = game.emulator self._is_battle_cached = None self._disconnected = False self._battle_over_conditions = battle_over_conditions if battle_over_conditions else [] self._disconnect_conditions = disconnect_conditions if disconnect_conditions else [] self._skip_tap_screen_high = ui.SKIP_TAP_THE_SCREEN.copy() self._skip_tap_screen_high.text_threshold += 20 self._skip_tap_screen_low = ui.SKIP_TAP_THE_SCREEN.copy() self._skip_tap_screen_low.text_threshold -= 20 def is_battle(self): is_battle = self.emulator.is_image_on_screen(ui.MELEE_BUTTON) self._is_battle_cached = is_battle return is_battle def is_battle_over(self): for condition in self._disconnect_conditions: if condition(): self._disconnected = True return True is_battle = self.is_battle() if not self._is_battle_cached else self._is_battle_cached if not is_battle: for condition in self._battle_over_conditions: if condition(): return True self._is_battle_cached = None return False def skip_cutscene(self): if self.emulator.is_ui_element_on_screen(ui_element=ui.SKIP_CUTSCENE): logger.debug("Skipping cutscene.") self.emulator.click_button(ui.SKIP_CUTSCENE) self.skip_tap_the_screen() self.skip_frost_beast()
Apache License 2.0
wildmeorg/wildbook-ia
wbia/unstable/scorenorm.py
compare_score_pdfs
python
def compare_score_pdfs(testres): import utool as ut ut.ensureqt() testres.draw_annot_scoresep(f='fail=False') encoder = testres.draw_feat_scoresep(f='fail=False', disttype=None) encoder = testres.draw_feat_scoresep(f='fail=False', disttype=['lnbnn', 'fg']) return encoder
CommandLine: python -m wbia.expt.test_result --exec-compare_score_pdfs --show --present python -m wbia.expt.test_result --exec-compare_score_pdfs --show --present --nocache python -m wbia.expt.test_result --exec-compare_score_pdfs --show --present -a timectrl:qindex=0:50 Example: >>> # DISABLE_DOCTEST >>> from wbia.expt.test_result import * # NOQA >>> import wbia >>> defaultdb = 'PZ_MTEST' >>> defaultdb = 'PZ_Master1' >>> ibs, testres = wbia.testdata_expts( >>> defaultdb=defaultdb, a=['timectrl'], t=['best']) >>> testres.compare_score_pdfs() >>> ut.quit_if_noshow() >>> import wbia.plottool as pt >>> ut.show_if_requested()
https://github.com/wildmeorg/wildbook-ia/blob/017057cfd3a2a7ea22f575842c9473e121c66ea4/wbia/unstable/scorenorm.py#L46-L88
import logging import re from wbia import dtool import numpy as np import utool as ut import vtool as vt from functools import partial from os.path import join from wbia import constants as const from wbia.init import sysres print, rrr, profile = ut.inject2(__name__) logger = logging.getLogger('wbia')
Apache License 2.0
pelioniot/mbed-cloud-sdk-python
src/mbed_cloud/_backends/device_directory/models/device_data_post_request.py
DeviceDataPostRequest.description
python
def description(self, description): if description is not None and len(description) > 2000: raise ValueError("Invalid value for `description`, length must be less than or equal to `2000`") self._description = description
Sets the description of this DeviceDataPostRequest. The description of the device. :param description: The description of this DeviceDataPostRequest. :type: str
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/_backends/device_directory/models/device_data_post_request.py#L293-L304
from pprint import pformat from six import iteritems import re class DeviceDataPostRequest(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'auto_update': 'bool', 'bootstrap_expiration_date': 'datetime', 'bootstrapped_timestamp': 'datetime', 'ca_id': 'str', 'connector_expiration_date': 'datetime', 'custom_attributes': 'dict(str, str)', 'deployment': 'str', 'description': 'str', 'device_class': 'str', 'device_execution_mode': 'int', 'device_key': 'str', 'endpoint_name': 'str', 'endpoint_type': 'str', 'firmware_checksum': 'str', 'groups': 'list[str]', 'host_gateway': 'str', 'manifest': 'str', 'mechanism': 'str', 'mechanism_url': 'str', 'name': 'str', 'object': 'str', 'serial_number': 'str', 'state': 'str', 'vendor_id': 'str' } attribute_map = { 'auto_update': 'auto_update', 'bootstrap_expiration_date': 'bootstrap_expiration_date', 'bootstrapped_timestamp': 'bootstrapped_timestamp', 'ca_id': 'ca_id', 'connector_expiration_date': 'connector_expiration_date', 'custom_attributes': 'custom_attributes', 'deployment': 'deployment', 'description': 'description', 'device_class': 'device_class', 'device_execution_mode': 'device_execution_mode', 'device_key': 'device_key', 'endpoint_name': 'endpoint_name', 'endpoint_type': 'endpoint_type', 'firmware_checksum': 'firmware_checksum', 'groups': 'groups', 'host_gateway': 'host_gateway', 'manifest': 'manifest', 'mechanism': 'mechanism', 'mechanism_url': 'mechanism_url', 'name': 'name', 'object': 'object', 'serial_number': 'serial_number', 'state': 'state', 'vendor_id': 'vendor_id' } def __init__(self, auto_update=None, bootstrap_expiration_date=None, bootstrapped_timestamp=None, ca_id=None, connector_expiration_date=None, custom_attributes=None, deployment=None, description=None, device_class=None, device_execution_mode=None, device_key=None, endpoint_name=None, endpoint_type=None, firmware_checksum=None, groups=None, host_gateway=None, manifest=None, mechanism=None, mechanism_url=None, name=None, object=None, serial_number=None, state=None, vendor_id=None): self._auto_update = auto_update self._bootstrap_expiration_date = bootstrap_expiration_date self._bootstrapped_timestamp = bootstrapped_timestamp self._ca_id = ca_id self._connector_expiration_date = connector_expiration_date self._custom_attributes = custom_attributes self._deployment = deployment self._description = description self._device_class = device_class self._device_execution_mode = device_execution_mode self._device_key = device_key self._endpoint_name = endpoint_name self._endpoint_type = endpoint_type self._firmware_checksum = firmware_checksum self._groups = groups self._host_gateway = host_gateway self._manifest = manifest self._mechanism = mechanism self._mechanism_url = mechanism_url self._name = name self._object = object self._serial_number = serial_number self._state = state self._vendor_id = vendor_id self.discriminator = None @property def auto_update(self): return self._auto_update @auto_update.setter def auto_update(self, auto_update): self._auto_update = auto_update @property def bootstrap_expiration_date(self): return self._bootstrap_expiration_date @bootstrap_expiration_date.setter def bootstrap_expiration_date(self, bootstrap_expiration_date): self._bootstrap_expiration_date = bootstrap_expiration_date @property def bootstrapped_timestamp(self): return self._bootstrapped_timestamp @bootstrapped_timestamp.setter 
def bootstrapped_timestamp(self, bootstrapped_timestamp): self._bootstrapped_timestamp = bootstrapped_timestamp @property def ca_id(self): return self._ca_id @ca_id.setter def ca_id(self, ca_id): if ca_id is not None and len(ca_id) > 500: raise ValueError("Invalid value for `ca_id`, length must be less than or equal to `500`") self._ca_id = ca_id @property def connector_expiration_date(self): return self._connector_expiration_date @connector_expiration_date.setter def connector_expiration_date(self, connector_expiration_date): self._connector_expiration_date = connector_expiration_date @property def custom_attributes(self): return self._custom_attributes @custom_attributes.setter def custom_attributes(self, custom_attributes): self._custom_attributes = custom_attributes @property def deployment(self): return self._deployment @deployment.setter def deployment(self, deployment): self._deployment = deployment @property def description(self): return self._description @description.setter
Apache License 2.0
eugeniy/pytest-tornado
test/create_cert.py
randomBytes
python
def randomBytes(length):
    # Use the secrets module when available (Python 3.6+), otherwise os.urandom,
    # and honour the requested number of bytes.
    if secrets:
        return secrets.token_bytes(length)
    return os.urandom(length)
Return _length_ random bytes. :rtype: bytes
https://github.com/eugeniy/pytest-tornado/blob/fb163a5952fcc2bc6e9fce211e72dbdc81bb1d0e/test/create_cert.py#L73-L82
import argparse import os import random import socket from tempfile import NamedTemporaryFile from OpenSSL import crypto try: import secrets except ImportError: secrets = None def createCertificate(path): cert = crypto.X509() cert.get_subject().C = "DE" cert.get_subject().ST = "HE" cert.get_subject().L = "Wiesbaden" cert.get_subject().O = "pytest-tornado" cert.get_subject().OU = "Testing Department" cert.get_subject().CN = socket.getfqdn() cert.set_serial_number(random.randint(0, pow(2, 16))) cert.gmtime_adj_notBefore(0) cert.gmtime_adj_notAfter(60 * 60) k = crypto.PKey() k.generate_key(crypto.TYPE_RSA, 2048) cert.set_issuer(cert.get_subject()) cert.set_pubkey(k) cert.set_version(2) cert.sign(k, 'sha512') certcontext = b"".join( ( crypto.dump_certificate(crypto.FILETYPE_PEM, cert), crypto.dump_privatekey(crypto.FILETYPE_PEM, k) ) ) with open(path, "wt") as certfile: certfile.write(certcontext.decode()) try: with NamedTemporaryFile(mode="wb", delete=False) as randfile: randfile.write(randomBytes(512)) command = u"openssl dhparam -rand {tempfile} 512 >> {target}".format( tempfile=randfile.name, target=path ) os.system(command) finally: os.remove(randfile.name)
Apache License 2.0
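A quick usage sketch for randomBytes, modelled on how createCertificate in the same file feeds the random bytes into a temporary file for `openssl dhparam`; it assumes randomBytes and its module-level imports are in scope.

from tempfile import NamedTemporaryFile
import os

seed = randomBytes(512)            # bytes suitable for seeding openssl dhparam
assert isinstance(seed, bytes)

with NamedTemporaryFile(mode="wb", delete=False) as randfile:
    randfile.write(seed)           # same pattern as createCertificate() above
print("wrote %d random bytes to %s" % (len(seed), randfile.name))
os.remove(randfile.name)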
kklmn/xrt
xrt/backends/shadow.py
read_input
python
def read_input(fileName, vtype, *getlines):
    lines = []
    f = None
    try:
        f = open(fileName, 'rU')
        for line in f:
            lines.append(line.split())
    except IOError:
        print("The file ", fileName, " does not exist or corrupt!")
        return -1
    finally:
        if f:
            f.close()
    results = []
    for el in getlines:
        for line in lines:
            if line[0].lower() == el.lower():
                results.append(vtype(line[2]))
                break
    if len(results) == 0:
        raise Exception(
            "The parameter(s) %s cannot be found in %s" % (getlines, fileName))
    return results
Reads a shadow text input file (like ``start.NN``) which consists of lines ``field = value``.

Parameters:
    *fileName*: str
    *vtype*: {int|str|float}
        Type of the returned value.
    *getlines*: list of strings

Returns:
    *results*: list
        A list of values which correspond to the list *getlines* if successful, otherwise -1.

Example:
    >>> fPolar = read_input('start.00', int, 'f_polar')[0]
https://github.com/kklmn/xrt/blob/b8a9d4a06e1d0c35cf6d5055d6229ae55346856f/xrt/backends/shadow.py#L77-L120
__author__ = "Konstantin Klementiev, Roman Chernikov"
__date__ = "10 Apr 2015"

import os
import time
import numpy as np
import subprocess

_sourceAsciiFile = 'start.00'
MIT License
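A small, self-contained sketch of read_input against a made-up ``start.00``; the file contents below are illustrative only.

# Write a tiny shadow-style input file: one "NAME = VALUE" per line.
with open('start.00', 'w') as f:
    f.write("F_POLAR = 1\n")
    f.write("NPOINT = 5000\n")

# Names are matched case-insensitively; the third token is cast with vtype.
f_polar, npoint = read_input('start.00', int, 'f_polar', 'npoint')
print(f_polar, npoint)   # -> 1 5000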
pykeen/pykeen
src/pykeen/models/unimodal/conv_e.py
ConvE.__init__
python
def __init__(
    self,
    triples_factory: CoreTriplesFactory,
    input_channels: Optional[int] = None,
    output_channels: int = 32,
    embedding_height: Optional[int] = None,
    embedding_width: Optional[int] = None,
    kernel_height: int = 3,
    kernel_width: int = 3,
    input_dropout: float = 0.2,
    output_dropout: float = 0.3,
    feature_map_dropout: float = 0.2,
    embedding_dim: int = 200,
    apply_batch_normalization: bool = True,
    entity_initializer: Hint[Initializer] = xavier_normal_,
    relation_initializer: Hint[Initializer] = xavier_normal_,
    **kwargs,
) -> None:
    if not triples_factory.create_inverse_triples:
        logger.warning(
            "\nThe ConvE model should be trained with inverse triples.\n"
            "This can be done by defining the TriplesFactory class with the _create_inverse_triples_ parameter set "
            "to true.",
        )
    super().__init__(
        triples_factory=triples_factory,
        entity_representations=EmbeddingSpecification(
            embedding_dim=embedding_dim,
            initializer=entity_initializer,
        ),
        relation_representations=EmbeddingSpecification(
            embedding_dim=embedding_dim,
            initializer=relation_initializer,
        ),
        **kwargs,
    )
    self.bias_term = Embedding.init_with_device(
        num_embeddings=self.num_entities,
        embedding_dim=1,
        device=self.device,
        initializer=nn.init.zeros_,
    )
    logger.info(f"Resolving {input_channels} * {embedding_width} * {embedding_height} = {embedding_dim}.")
    if embedding_dim is None:
        embedding_dim = input_channels * embedding_width * embedding_height
    input_channels, embedding_width, embedding_height = _calculate_missing_shape_information(
        embedding_dim=embedding_dim,
        input_channels=input_channels,
        width=embedding_width,
        height=embedding_height,
    )
    logger.info(f"Resolved to {input_channels} * {embedding_width} * {embedding_height} = {embedding_dim}.")
    self.embedding_height = embedding_height
    self.embedding_width = embedding_width
    self.input_channels = input_channels
    if self.input_channels * self.embedding_height * self.embedding_width != self.embedding_dim:
        raise ValueError(
            f"Product of input channels ({self.input_channels}), height ({self.embedding_height}), and width "
            f"({self.embedding_width}) does not equal target embedding dimension ({self.embedding_dim})",
        )
    self.inp_drop = nn.Dropout(input_dropout)
    self.hidden_drop = nn.Dropout(output_dropout)
    self.feature_map_drop = nn.Dropout2d(feature_map_dropout)
    self.conv1 = torch.nn.Conv2d(
        in_channels=self.input_channels,
        out_channels=output_channels,
        kernel_size=(kernel_height, kernel_width),
        stride=1,
        padding=0,
        bias=True,
    )
    self.apply_batch_normalization = apply_batch_normalization
    if self.apply_batch_normalization:
        self.bn0 = nn.BatchNorm2d(self.input_channels)
        self.bn1 = nn.BatchNorm2d(output_channels)
        self.bn2 = nn.BatchNorm1d(self.embedding_dim)
    else:
        self.bn0 = None
        self.bn1 = None
        self.bn2 = None
    num_in_features = (
        output_channels
        * (2 * self.embedding_height - kernel_height + 1)
        * (self.embedding_width - kernel_width + 1)
    )
    self.fc = nn.Linear(num_in_features, self.embedding_dim)
Initialize the model.
https://github.com/pykeen/pykeen/blob/7ee2a6f9bc43200d44c874bfc3de0a747bd3632d/src/pykeen/models/unimodal/conv_e.py#L127-L226
import logging import sys from typing import Any, ClassVar, Mapping, Optional, Type import torch from torch import nn from torch.nn import functional as F from ..base import EntityRelationEmbeddingModel from ...constants import DEFAULT_DROPOUT_HPO_RANGE from ...losses import BCEAfterSigmoidLoss, Loss from ...nn.emb import Embedding, EmbeddingSpecification from ...nn.init import xavier_normal_ from ...nn.modules import _calculate_missing_shape_information from ...triples import CoreTriplesFactory from ...typing import Hint, Initializer from ...utils import is_cudnn_error __all__ = [ "ConvE", ] logger = logging.getLogger(__name__) class ConvE(EntityRelationEmbeddingModel): hpo_default: ClassVar[Mapping[str, Any]] = dict( output_channels=dict(type=int, low=4, high=6, scale="power_two"), input_dropout=DEFAULT_DROPOUT_HPO_RANGE, output_dropout=DEFAULT_DROPOUT_HPO_RANGE, feature_map_dropout=DEFAULT_DROPOUT_HPO_RANGE, ) loss_default: ClassVar[Type[Loss]] = BCEAfterSigmoidLoss loss_default_kwargs: ClassVar[Mapping[str, Any]] = {} bn0: Optional[torch.nn.BatchNorm2d] bn1: Optional[torch.nn.BatchNorm2d] bn2: Optional[torch.nn.BatchNorm1d]
MIT License
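For context, a hedged sketch of training ConvE through PyKEEN's pipeline helper rather than instantiating the class directly; the dataset choice and hyperparameters are illustrative, and keyword names may vary between PyKEEN releases.

from pykeen.pipeline import pipeline

result = pipeline(
    dataset="Nations",
    dataset_kwargs=dict(create_inverse_triples=True),  # ConvE expects inverse triples
    model="ConvE",
    model_kwargs=dict(
        embedding_dim=200,        # must equal input_channels * height * width
        output_channels=32,
        embedding_height=10,
        embedding_width=20,
    ),
    training_kwargs=dict(num_epochs=5),
)
print(result.metric_results.get_metric("hits@10"))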
martinthoma/lidtk
lidtk/classifiers/char_features.py
FeatureExtractor.transform
python
def transform(self, xs):
    dist = None
    if isinstance(xs, (list, np.ndarray, np.generic)):
        dist = self.transform_multiple(xs)
    else:
        dist = self.transform_single(xs)
    return dist
Get distribution of characters in sample.
https://github.com/martinthoma/lidtk/blob/061f17b0dc0e4a759f883138672a535a22900f6e/lidtk/classifiers/char_features.py#L71-L78
import logging import os import pickle from collections import Counter, defaultdict from typing import Any, Dict, List, Optional, Set import numpy as np import progressbar import lidtk.utils logger = logging.getLogger(__name__) class FeatureExtractor: def __init__(self, xs, ys, coverage: float = 0.8): self.chars = [] self.char2index = {} self.coverage = coverage self.fit(xs, ys) def fit(self, xs: List[str], ys: List[str]): logger.info("count characters") char_counter_by_lang: Dict[str, Counter] = defaultdict(Counter) for x, y in zip(xs, ys): char_counter_by_lang[y] += Counter(x) logger.info(f"get common characters to get coverage of {self.coverage}") common_chars_by_lang = {} for key, character_counter in char_counter_by_lang.items(): common_chars_by_lang[key] = self._get_common_characters( character_counter, coverage=self.coverage ) logger.info("unify set of common characters") common_chars = set() for _lang, char_list in common_chars_by_lang.items(): common_chars = common_chars.union(char_list) common_chars.add("other") self.chars = list(common_chars) for index, char in enumerate(self.chars): self.char2index[char] = index return self
MIT License
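A minimal sketch of fitting the extractor and transforming both a single string and a list; it assumes the full FeatureExtractor class is importable (transform_single and transform_multiple are not shown in the excerpt) and uses toy data.

xs = ["hello world", "hallo welt", "bonjour le monde", "hola mundo"]
ys = ["eng", "deu", "fra", "spa"]

extractor = FeatureExtractor(xs, ys, coverage=0.8)

single_dist = extractor.transform("hello there")        # -> transform_single
batch_dist = extractor.transform(["hello", "hallo"])    # -> transform_multiple
print(len(extractor.chars), "characters kept")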
mvlearn/mvlearn
tests/embed/test_select_dimension.py
generate_data
python
def generate_data(n=10, elbows=3, seed=1):
    np.random.seed(seed)
    x = np.random.binomial(1, 0.6, (n ** 2)).reshape(n, n)
    xorth = orth(x)
    d = np.zeros(xorth.shape[0])
    for i in range(0, len(d), int(len(d) / (elbows + 1))):
        d[:i] += 10
    A = xorth.T.dot(np.diag(d)).dot(xorth)
    return A, d
Generate data matrix with a specific number of elbows on scree plot
https://github.com/mvlearn/mvlearn/blob/56a84e073bf8e5e9b0c4c0d9ad72019af1f8cf29/tests/embed/test_select_dimension.py#L9-L20
import numpy as np
import pytest
from numpy.testing import assert_equal
from scipy.linalg import orth

from mvlearn.embed.utils import select_dimension
MIT License
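A short sketch showing the step pattern in the returned spectrum, which is what produces the elbows on the scree plot; nothing beyond numpy and the function above is assumed.

import numpy as np

A, d = generate_data(n=10, elbows=3, seed=1)
print(A.shape, d.shape)
print(np.unique(d))   # plateau values: each jump in d is one elbow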
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_5/models/host_response.py
HostResponse.__init__
python
def __init__(
    self,
    items=None,
):
    if items is not None:
        self.items = items
Keyword args: items (list[Host]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_5/models/host_response.py#L43-L52
import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flasharray.FA_2_5 import models class HostResponse(object): swagger_types = { 'items': 'list[Host]' } attribute_map = { 'items': 'items' } required_args = { }
BSD 2-Clause Simplified License
joeyhendricks/quickpotato
QuickPotato/database/operations.py
ContextManager.spawn_connection
python
def spawn_connection(self, database_name):
    try:
        engine = self.spawn_engine(database_name)
        return engine, engine.connect()
    except Exception:
        raise DatabaseConnectionCannotBeSpawned()
Spawn a SQLAlchemy engine for the given database and open a connection on it. :param database_name: name of the database to connect to :return: a (engine, connection) tuple; raises DatabaseConnectionCannotBeSpawned if the connection cannot be established
https://github.com/joeyhendricks/quickpotato/blob/5e33e64d77997b00a43f5573353138436b1f1a34/QuickPotato/database/operations.py#L30-L41
from QuickPotato.configuration.management import options from QuickPotato.database.schemas import RawStatisticsSchemas, UnitPerformanceTestResultSchemas from sqlalchemy import create_engine from sqlalchemy.exc import ProgrammingError from QuickPotato.utilities.exceptions import DatabaseConnectionCannotBeSpawned, DatabaseSchemaCannotBeSpawned from sqlalchemy_utils import database_exists, create_database, drop_database import tempfile class ContextManager(RawStatisticsSchemas, UnitPerformanceTestResultSchemas): URL = options.connection_url def __init__(self): RawStatisticsSchemas.__init__(self) UnitPerformanceTestResultSchemas.__init__(self) def spawn_engine(self, database_name): try: url = self._validate_connection_url(database_name=database_name) engine = create_engine(url, echo=options.enable_database_echo) return engine except Exception: raise DatabaseConnectionCannotBeSpawned()
MIT License
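A hedged usage sketch; the database name is hypothetical and a reachable database configured through options.connection_url is assumed.

manager = ContextManager()
engine, connection = manager.spawn_connection("quick_potato_results")
try:
    print(connection.closed)   # False while the connection is open
finally:
    connection.close()
    engine.dispose()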
funcwj/aps
aps/metric/sse.py
aps_pesq
python
def aps_pesq(ref: np.ndarray, est: np.ndarray, fs: int = 16000) -> float: return pesq(ref, est, fs=fs)
Wrapper for pypesq.pesq
https://github.com/funcwj/aps/blob/d7208bca3a2f04e751fcc6e2d3c56964eeb179a5/aps/metric/sse.py#L43-L47
import numpy as np from itertools import permutations from typing import Optional, Callable, Union, Tuple from pypesq import pesq from pystoi import stoi from museval.metrics import bss_eval_images def aps_sisnr(s: np.ndarray, x: np.ndarray, eps: float = 1e-8, remove_dc: bool = True, fs: Optional[int] = None) -> float: def vec_l2norm(x): return np.linalg.norm(x, 2) if remove_dc: x_zm = x - np.mean(x) s_zm = s - np.mean(s) t = np.inner(x_zm, s_zm) * s_zm / (vec_l2norm(s_zm)**2 + eps) n = x_zm - t else: t = np.inner(x, s) * s / (vec_l2norm(s)**2 + eps) n = x - t return 20 * np.log10(vec_l2norm(t) / (vec_l2norm(n) + eps) + eps)
Apache License 2.0
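A sketch exercising aps_pesq on synthetic 16 kHz signals; real use would load a clean reference and a degraded estimate from audio files, and PESQ values on random noise are not meaningful.

import numpy as np

rng = np.random.RandomState(0)
ref = rng.randn(16000).astype(np.float32)               # 1 s "reference"
est = ref + 0.05 * rng.randn(16000).astype(np.float32)  # lightly corrupted copy

print("PESQ: %.2f" % aps_pesq(ref, est, fs=16000))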
casebeer/python-hkdf
tests.py
check_fun_tv
python
def check_fun_tv(tv):
    test_prk = hkdf.hkdf_extract(tv["salt"], tv["IKM"], tv["hash"])
    test_okm = hkdf.hkdf_expand(test_prk, tv["info"], tv["L"], tv["hash"])
    print("%s" % tv)
    print("PRK: %s" % ("match" if test_prk == tv["PRK"] else "FAIL"))
    print("OKM: %s" % ("match" if test_okm == tv["OKM"] else "FAIL"))
    print()
    assert_equals(test_prk, tv["PRK"])
    assert_equals(test_okm, tv["OKM"])
Generate and check HKDF pseudorandom key and output key material for a specific test vector PRK = HKDF-Extract([test vector values]) OKM = HKDF-Expand(PRK, [test vector values])
https://github.com/casebeer/python-hkdf/blob/cc3c9dbf0a271b27a7ac5cd04cc1485bbc3b4307/tests.py#L181-L197
from __future__ import print_function import hkdf import codecs import hashlib from binascii import hexlify, unhexlify import sys if sys.version_info[0] == 2: from UserDict import IterableUserDict as UserDict else: from collections import UserDict try: from nose.tools import assert_equals except ImportError as e: def assert_equals(a, b): try: assert a == b except AssertionError: print("AssertionError: {a} != {b}".format(a=a.encode("hex"), b=b.encode("hex"))) raise class TestCase(UserDict): def __str__(self): def format_(bytes_, max_len=4): if bytes_ is None: return u"None" else: return u'"{prefix}{rest}"'.format( prefix=hexlify(bytes_[:max_len]), rest=u"..." if len(bytes_) > max_len else u"" ) return u"""{name} ({algo}, IKM={ikm_start}, salt={salt_start})""".format( name=self.get("name", u"Unnamed test case"), algo=self["hash"]().name, ikm_start=format_(self["IKM"]), salt_start=format_(self["salt"]) ) __repr__ = __str__ def decode_hex(s): return codecs.decode(s, "hex_codec") test_vectors = {} test_vectors[1] = TestCase({ "name" : "A.1 Test Case 1", "hash" : hashlib.sha256, "IKM" : decode_hex("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"), "salt" : decode_hex("000102030405060708090a0b0c"), "info" : decode_hex("f0f1f2f3f4f5f6f7f8f9"), "L" : 42, "PRK" : decode_hex("077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"), "OKM" : decode_hex("3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865"), }) test_vectors[2] = TestCase({ "name" : "A.2 Test Case 2", "hash" : hashlib.sha256, "IKM" : decode_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f"), "salt" : decode_hex("606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf"), "info" : decode_hex("b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"), "L" : 82, "PRK" : decode_hex("06a6b88c5853361a06104c9ceb35b45cef760014904671014a193f40c15fc244"), "OKM" : decode_hex("b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87"), }) test_vectors[3] = TestCase({ "name" : "A.3 Test Case 3", "hash" : hashlib.sha256, "IKM" : decode_hex("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"), "salt" : b"", "info" : b"", "L" : 42, "PRK" : decode_hex("19ef24a32c717b167f33a91d6f648bdf96596776afdb6377ac434c1c293ccb04"), "OKM" : decode_hex("8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8"), }) test_vectors[4] = TestCase({ "name" : "A.4 Test Case 4", "hash" : hashlib.sha1, "IKM" : decode_hex("0b0b0b0b0b0b0b0b0b0b0b"), "salt" : decode_hex("000102030405060708090a0b0c"), "info" : decode_hex("f0f1f2f3f4f5f6f7f8f9"), "L" : 42, "PRK" : decode_hex("9b6c18c432a7bf8f0e71c8eb88f4b30baa2ba243"), "OKM" : decode_hex("085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896"), }) test_vectors[5] = TestCase({ "name" : "A.5 Test Case 5", "hash" : hashlib.sha1, "IKM" : decode_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f"), "salt" : 
decode_hex("606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf"), "info" : decode_hex("b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"), "L" : 82, "PRK" : decode_hex("8adae09a2a307059478d309b26c4115a224cfaf6"), "OKM" : decode_hex("0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4"), }) test_vectors[6] = TestCase({ "name" : "A.6 Test Case 6", "hash" : hashlib.sha1, "IKM" : decode_hex("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"), "salt" : b"", "info" : b"", "L" : 42, "PRK" : decode_hex("da8c8a73c7fa77288ec6f5e7c297786aa0d32d01"), "OKM" : decode_hex("0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918"), }) test_vectors[7] = TestCase({ "name" : "A.7 Test Case 7", "hash" : hashlib.sha1, "IKM" : decode_hex("0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c"), "salt" : None, "info" : b"", "L" : 42, "PRK" : decode_hex("2adccada18779e7c2077ad2eb19d3f3e731385dd"), "OKM" : decode_hex("2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48"), }) def tv_extract(tv_number): tv = test_vectors[tv_number] return hkdf.hkdf_extract(tv["salt"], tv["IKM"], tv["hash"]) def tv_expand(tv_number): tv = test_vectors[tv_number] test_prk = hkdf.hkdf_extract(tv["salt"], tv["IKM"], tv["hash"]) return hkdf.hkdf_expand(test_prk, tv["info"], tv["L"], tv["hash"]) def test_functional_interface(): for tv in test_vectors.values(): yield check_fun_tv, tv def test_wrapper_class(): for tv in test_vectors.values(): yield check_class_tv, tv
BSD 2-Clause Simplified License
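The same extract/expand calls can be used directly outside the test harness; this sketch re-derives the output key material of test case 1 (RFC 5869 A.1) from the values shown above.

import hashlib
import hkdf
from binascii import unhexlify

ikm = unhexlify("0b" * 22)
salt = unhexlify("000102030405060708090a0b0c")
info = unhexlify("f0f1f2f3f4f5f6f7f8f9")

prk = hkdf.hkdf_extract(salt, ikm, hashlib.sha256)
okm = hkdf.hkdf_expand(prk, info, 42, hashlib.sha256)
print(okm.hex())   # should match test_vectors[1]["OKM"]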
codeforamerica/pittsburgh-purchasing-suite
purchasing/opportunities/models.py
Opportunity.estimate_submission_end
python
def estimate_submission_end(self): return pytz.UTC.localize(self.planned_submission_end).astimezone( current_app.config['DISPLAY_TIMEZONE'] ).strftime('%B %d, %Y at %I:%M%p %Z')
Returns the localized date and time based on submission end date
https://github.com/codeforamerica/pittsburgh-purchasing-suite/blob/9552eda6df396746feedc9ce45f35842a716de6a/purchasing/opportunities/models.py#L312-L317
import pytz import datetime from flask import current_app from purchasing.database import Column, Model, db, ReferenceCol from purchasing.utils import localize_today, localize_now from sqlalchemy.schema import Table from sqlalchemy.orm import backref from sqlalchemy.dialects.postgres import ARRAY from sqlalchemy.dialects.postgresql import TSVECTOR from purchasing.notifications import Notification from purchasing.utils import build_downloadable_groups, random_id from purchasing.users.models import User, Role category_vendor_association_table = Table( 'category_vendor_association', Model.metadata, Column('category_id', db.Integer, db.ForeignKey('category.id', ondelete='SET NULL'), index=True), Column('vendor_id', db.Integer, db.ForeignKey('vendor.id', ondelete='SET NULL'), index=True) ) category_opportunity_association_table = Table( 'category_opportunity_association', Model.metadata, Column('category_id', db.Integer, db.ForeignKey('category.id', ondelete='SET NULL'), index=True), Column('opportunity_id', db.Integer, db.ForeignKey('opportunity.id', ondelete='SET NULL'), index=True) ) opportunity_vendor_association_table = Table( 'opportunity_vendor_association_table', Model.metadata, Column('opportunity_id', db.Integer, db.ForeignKey('opportunity.id', ondelete='SET NULL'), index=True), Column('vendor_id', db.Integer, db.ForeignKey('vendor.id', ondelete='SET NULL'), index=True) ) class Category(Model): __tablename__ = 'category' id = Column(db.Integer, primary_key=True, index=True) nigp_codes = Column(ARRAY(db.Integer())) category = Column(db.String(255)) subcategory = Column(db.String(255)) category_friendly_name = Column(db.Text) examples = Column(db.Text) examples_tsv = Column(TSVECTOR) def __unicode__(self): return '{sub} (in {main})'.format(sub=self.category_friendly_name, main=self.category) @classmethod def parent_category_query_factory(cls): return db.session.query(db.distinct(cls.category).label('category')).order_by('category') @classmethod def query_factory(cls): return cls.query class Opportunity(Model): __tablename__ = 'opportunity' id = Column(db.Integer, primary_key=True) title = Column(db.String(255)) description = Column(db.Text) planned_publish = Column(db.DateTime, nullable=False) planned_submission_start = Column(db.DateTime, nullable=False) planned_submission_end = Column(db.DateTime, nullable=False) vendor_documents_needed = Column(ARRAY(db.Integer())) is_public = Column(db.Boolean(), default=False) is_archived = Column(db.Boolean(), default=False, nullable=False) published_at = Column(db.DateTime, nullable=True) publish_notification_sent = Column(db.Boolean, default=False, nullable=False) department_id = ReferenceCol('department', ondelete='SET NULL', nullable=True) department = db.relationship( 'Department', backref=backref('opportunities', lazy='dynamic') ) contact_id = ReferenceCol('users', ondelete='SET NULL') contact = db.relationship( 'User', backref=backref('opportunities', lazy='dynamic'), foreign_keys='Opportunity.contact_id' ) categories = db.relationship( 'Category', secondary=category_opportunity_association_table, backref='opportunities', collection_class=set ) created_from_id = ReferenceCol('contract', ondelete='cascade', nullable=True) opportunity_type_id = ReferenceCol('contract_type', ondelete='SET NULL', nullable=True) opportunity_type = db.relationship( 'ContractType', backref=backref('opportunities', lazy='dynamic'), ) @classmethod def create(cls, data, user, documents, publish=False): opportunity = Opportunity(**data) current_app.logger.info( 
.format( opportunity.id, opportunity.department.name if opportunity.department else '', opportunity.title.encode('ascii', 'ignore'), str(opportunity.planned_publish), str(opportunity.planned_submission_start), str(opportunity.planned_submission_end) ) ) if not (user.is_conductor() or publish): opportunity.notify_approvals(user) opportunity._handle_uploads(documents) opportunity._publish(publish) return opportunity def raw_update(self, **kwargs): super(Opportunity, self).update(**kwargs) def update(self, data, user, documents, publish=False): data.pop('publish_notification_sent', None) for attr, value in data.iteritems(): setattr(self, attr, value) current_app.logger.info( .format( self.id, self.title.encode('ascii', 'ignore'), str(self.planned_publish), str(self.planned_submission_start), str(self.planned_submission_end) ) ) self._handle_uploads(documents) self._publish(publish) @property def is_published(self): return self.coerce_to_date(self.planned_publish) <= localize_today() and self.is_public @property def is_upcoming(self): return self.coerce_to_date(self.planned_publish) <= localize_today() and not self.is_submission_start and not self.is_submission_end and self.is_public @property def is_submission_start(self): return self.coerce_to_date(self.planned_submission_start) <= localize_today() and self.coerce_to_date(self.planned_publish) <= localize_today() and not self.is_submission_end and self.is_public @property def is_submission_end(self): return pytz.UTC.localize(self.planned_submission_end).astimezone( current_app.config['DISPLAY_TIMEZONE'] ) <= localize_now() and self.is_public @property def has_docs(self): return self.opportunity_documents.count() > 0 def estimate_submission_start(self): return self.planned_submission_start.strftime('%B %d, %Y')
BSD 3-Clause New or Revised License
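The localization pattern can be exercised outside Flask; in this sketch 'US/Eastern' stands in for current_app.config['DISPLAY_TIMEZONE'] and the datetime is a made-up naive UTC value as stored in the column.

import datetime
import pytz

display_tz = pytz.timezone('US/Eastern')
planned_submission_end = datetime.datetime(2015, 7, 1, 17, 0)   # naive UTC

localized = pytz.UTC.localize(planned_submission_end).astimezone(display_tz)
print(localized.strftime('%B %d, %Y at %I:%M%p %Z'))
# -> July 01, 2015 at 01:00PM EDT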
onitu/onitu
onitu/utils.py
get_available_drivers
python
def get_available_drivers():
    entry_points = pkg_resources.iter_entry_points('onitu.drivers')
    return {e.name: e for e in entry_points}
Return a dict mapping the name of each installed driver to its entry point. You can use it like this:

```
drivers = get_available_drivers()

if 'local_storage' in drivers:
    local_storage = drivers['local_storage'].load()
```
https://github.com/onitu/onitu/blob/33c928a55de8f79098aa5931efb8d1ee17a6fbb6/onitu/utils.py#L181-L194
import os import sys import uuid import string import signal import socket import random import tempfile import mimetypes import traceback import pkg_resources PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 IS_WINDOWS = os.name == 'nt' TMPDIR = tempfile.gettempdir() NAMESPACE_ONITU = uuid.UUID('bcd336f2-d023-4856-bc92-e79dd24b64d7') UNICODE = unicode if PY2 else str def b(chars): if type(chars) == UNICODE: return chars.encode('utf-8') return chars def u(chars): if type(chars) == bytes: return chars.decode('utf-8') return chars def n(string): return (b if PY2 else u)(string) def at_exit(callback, *args, **kwargs): if IS_WINDOWS: signals = (signal.SIGILL, signal.SIGABRT, signal.SIGINT, signal.SIGTERM) else: signals = (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT) for s in signals: signal.signal(s, lambda *_, **__: callback(*args, **kwargs)) def get_fid(folder, filename): folder = n(folder) filename = n(filename) return str(uuid.uuid5(NAMESPACE_ONITU, "{}:{}".format(folder, filename))) def get_mimetype(filename): mimetype = mimetypes.guess_type(filename)[0] if not mimetype: mimetype = 'application/octet-stream' return mimetype def get_random_string(length): return ''.join( random.sample(string.ascii_letters + string.digits, length) ) if IS_WINDOWS: def _get_uri(session, name): sock_file = os.path.join( TMPDIR, u'onitu-{}-{}.txt' ).format(session, name) if os.path.exists(sock_file): with open(sock_file) as f: return f.read() tmpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) tmpsock.bind(('127.0.0.1', 0)) uri = 'tcp://{}:{}'.format(*tmpsock.getsockname()) tmpsock.close() with open(sock_file, 'w+') as f: f.write(uri) return uri def delete_sock_files(): import glob for sock_file in glob.glob(os.path.join(TMPDIR, 'onitu-*.txt')): os.unlink(sock_file) else: def _get_uri(session, name): return u'ipc://{}/onitu-{}-{}.sock'.format(TMPDIR, session, name) def get_escalator_uri(session): return _get_uri(session, 'escalator') def get_events_uri(session, name, suffix=None): if suffix: name = u"{}:{}".format(name, suffix) return _get_uri(session, name) def get_brocker_uri(session): return _get_uri(session, 'brocker') def get_logs_uri(session): return _get_uri(session, 'logs') def get_circusctl_endpoint(session): return _get_uri(session, 'circusctl') def get_pubsub_endpoint(session): return _get_uri(session, 'pubsub') def get_stats_endpoint(session): return _get_uri(session, 'stats')
MIT License
meejah/txtorcon
txtorcon/socks.py
_TorSocksFactory._get_address
python
def _get_address(self): return self._when_connected.when_fired()
Returns a Deferred that fires with the transport's getHost() when this SOCKS protocol becomes connected.
https://github.com/meejah/txtorcon/blob/7da6ad6f91c395951be1b4e7e1011baa2f7a689f/txtorcon/socks.py#L562-L567
from __future__ import print_function import six import struct from socket import inet_pton, inet_ntoa, inet_aton, AF_INET6, AF_INET from twisted.internet.defer import inlineCallbacks, returnValue, Deferred from twisted.internet.protocol import Protocol, Factory from twisted.internet.address import IPv4Address, IPv6Address, HostnameAddress from twisted.python.failure import Failure from twisted.protocols import portforward from twisted.protocols import tls from twisted.internet.interfaces import IStreamClientEndpoint from zope.interface import implementer import ipaddress import automat from txtorcon import util __all__ = ( 'resolve', 'resolve_ptr', 'SocksError', 'GeneralServerFailureError', 'ConnectionNotAllowedError', 'NetworkUnreachableError', 'HostUnreachableError', 'ConnectionRefusedError', 'TtlExpiredError', 'CommandNotSupportedError', 'AddressTypeNotSupportedError', 'TorSocksEndpoint', ) def _create_ip_address(host, port): if not isinstance(host, six.text_type): raise ValueError( "'host' must be {}, not {}".format(six.text_type, type(host)) ) try: a = ipaddress.ip_address(host) except ValueError: a = None if isinstance(a, ipaddress.IPv4Address): return IPv4Address('TCP', host, port) if isinstance(a, ipaddress.IPv6Address): return IPv6Address('TCP', host, port) addr = HostnameAddress(host, port) addr.host = host return addr class _SocksMachine(object): _machine = automat.MethodicalMachine() SUCCEEDED = 0x00 REPLY_IPV4 = 0x01 REPLY_HOST = 0x03 REPLY_IPV6 = 0x04 def __init__(self, req_type, host, port=0, on_disconnect=None, on_data=None, create_connection=None): if req_type not in self._dispatch: raise ValueError( "Unknown request type '{}'".format(req_type) ) if req_type == 'CONNECT' and create_connection is None: raise ValueError( "create_connection function required for '{}'".format( req_type ) ) if not isinstance(host, (bytes, str, six.text_type)): raise ValueError( "'host' must be text (not {})".format(type(host)) ) self._req_type = req_type self._addr = _create_ip_address(six.text_type(host), port) self._data = b'' self._on_disconnect = on_disconnect self._create_connection = create_connection self._on_data = on_data self._outgoing_data = [] self._sender = None self._when_done = util.SingleObserver() def when_done(self): return self._when_done.when_fired() def _data_to_send(self, data): if self._on_data: self._on_data(data) else: self._outgoing_data.append(data) def send_data(self, callback): while len(self._outgoing_data): data = self._outgoing_data.pop(0) callback(data) def feed_data(self, data): self._data += data self.got_data() @_machine.output() def _parse_version_reply(self): if len(self._data) >= 2: reply = self._data[:2] self._data = self._data[2:] (version, method) = struct.unpack('BB', reply) if version == 5 and method in [0x00, 0x02]: self.version_reply(method) else: if version != 5: self.version_error(SocksError( "Expected version 5, got {}".format(version))) else: self.version_error(SocksError( "Wanted method 0 or 2, got {}".format(method))) def _parse_ipv4_reply(self): if len(self._data) >= 10: addr = inet_ntoa(self._data[4:8]) port = struct.unpack('H', self._data[8:10])[0] self._data = self._data[10:] if self._req_type == 'CONNECT': self.reply_ipv4(addr, port) else: self.reply_domain_name(addr) def _parse_ipv6_reply(self): if len(self._data) >= 22: addr = self._data[4:20] port = struct.unpack('H', self._data[20:22])[0] self._data = self._data[22:] self.reply_ipv6(addr, port) def _parse_domain_name_reply(self): assert len(self._data) >= 8 addrlen = 
struct.unpack('B', self._data[4:5])[0] if len(self._data) < (5 + addrlen + 2): return addr = self._data[5:5 + addrlen] self._data = self._data[5 + addrlen + 2:] self.reply_domain_name(addr) @_machine.output() def _parse_request_reply(self): if len(self._data) < 8: return msg = self._data[:4] (version, reply, _, typ) = struct.unpack('BBBB', msg) if version != 5: self.reply_error(SocksError( "Expected version 5, got {}".format(version))) return if reply != self.SUCCEEDED: self.reply_error(_create_socks_error(reply)) return reply_dispatcher = { self.REPLY_IPV4: self._parse_ipv4_reply, self.REPLY_HOST: self._parse_domain_name_reply, self.REPLY_IPV6: self._parse_ipv6_reply, } try: method = reply_dispatcher[typ] except KeyError: self.reply_error(SocksError( "Unexpected response type {}".format(typ))) return method() @_machine.output() def _make_connection(self, addr, port): sender = self._create_connection(addr, port) self._sender = sender self._when_done.fire(sender) @_machine.output() def _domain_name_resolved(self, domain): self._when_done.fire(domain) @_machine.input() def connection(self): @_machine.input() def disconnected(self, error): @_machine.input() def got_data(self): @_machine.input() def version_reply(self, auth_method): @_machine.input() def version_error(self, error): @_machine.input() def reply_error(self, error): @_machine.input() def reply_ipv4(self, addr, port): @_machine.input() def reply_ipv6(self, addr, port): @_machine.input() def reply_domain_name(self, domain): @_machine.input() def answer(self): @_machine.output() def _send_version(self): self._data_to_send( struct.pack('BBB', 5, 1, 0) ) @_machine.output() def _disconnect(self, error): if self._on_disconnect: self._on_disconnect(str(error)) if self._sender: self._sender.connectionLost(Failure(error)) self._when_done.fire(Failure(error)) @_machine.output() def _send_request(self, auth_method): assert auth_method == 0x00 return self._dispatch[self._req_type](self) @_machine.output() def _relay_data(self): if self._data: d = self._data self._data = b'' self._sender.dataReceived(d) def _send_connect_request(self): host = self._addr.host port = self._addr.port if isinstance(self._addr, (IPv4Address, IPv6Address)): is_v6 = isinstance(self._addr, IPv6Address) self._data_to_send( struct.pack( '!BBBB4sH', 5, 0x01, 0x00, 0x04 if is_v6 else 0x01, inet_pton(AF_INET6 if is_v6 else AF_INET, host), port, ) ) else: host = host.encode('ascii') self._data_to_send( struct.pack( '!BBBBB{}sH'.format(len(host)), 5, 0x01, 0x00, 0x03, len(host), host, port, ) ) @_machine.output() def _send_resolve_request(self): host = self._addr.host.encode() self._data_to_send( struct.pack( '!BBBBB{}sH'.format(len(host)), 5, 0xF0, 0x00, 0x03, len(host), host, 0, ) ) @_machine.output() def _send_resolve_ptr_request(self): addr_type = 0x04 if isinstance(self._addr, ipaddress.IPv4Address) else 0x01 encoded_host = inet_aton(self._addr.host) self._data_to_send( struct.pack( '!BBBB4sH', 5, 0xF1, 0x00, addr_type, encoded_host, 0, ) ) @_machine.state(initial=True) def unconnected(self): @_machine.state() def sent_version(self): @_machine.state() def sent_request(self): @_machine.state() def relaying(self): @_machine.state() def abort(self, error_message): @_machine.state() def done(self): unconnected.upon( connection, enter=sent_version, outputs=[_send_version], ) sent_version.upon( got_data, enter=sent_version, outputs=[_parse_version_reply], ) sent_version.upon( version_error, enter=abort, outputs=[_disconnect], ) sent_version.upon( version_reply, 
enter=sent_request, outputs=[_send_request], ) sent_version.upon( disconnected, enter=unconnected, outputs=[_disconnect] ) sent_request.upon( got_data, enter=sent_request, outputs=[_parse_request_reply], ) sent_request.upon( reply_ipv4, enter=relaying, outputs=[_make_connection], ) sent_request.upon( reply_ipv6, enter=relaying, outputs=[_make_connection], ) sent_request.upon( reply_domain_name, enter=done, outputs=[_domain_name_resolved], ) sent_request.upon( reply_error, enter=abort, outputs=[_disconnect], ) sent_request.upon( disconnected, enter=abort, outputs=[_disconnect], ) relaying.upon( got_data, enter=relaying, outputs=[_relay_data], ) relaying.upon( disconnected, enter=done, outputs=[_disconnect], ) abort.upon( got_data, enter=abort, outputs=[], ) abort.upon( disconnected, enter=abort, outputs=[], ) done.upon( disconnected, enter=done, outputs=[], ) _dispatch = { 'CONNECT': _send_connect_request, 'RESOLVE': _send_resolve_request, 'RESOLVE_PTR': _send_resolve_ptr_request, } class _TorSocksProtocol(Protocol): def __init__(self, host, port, socks_method, factory): self._machine = _SocksMachine( req_type=socks_method, host=host, port=port, on_disconnect=self._on_disconnect, on_data=self._on_data, create_connection=self._create_connection, ) self._factory = factory def when_done(self): return self._machine.when_done() def connectionMade(self): self._machine.connection() self.factory._did_connect(self.transport.getHost()) def connectionLost(self, reason): self._machine.disconnected(SocksError(reason)) def dataReceived(self, data): self._machine.feed_data(data) def _on_data(self, data): self.transport.write(data) def _create_connection(self, addr, port): addr = IPv4Address('TCP', addr, port) sender = self._factory.buildProtocol(addr) client_proxy = portforward.ProxyClient() sender.makeConnection(self.transport) setattr(sender, 'setPeer', lambda _: None) client_proxy.setPeer(sender) self._sender = sender return sender def _on_disconnect(self, error_message): self.transport.loseConnection() class _TorSocksFactory(Factory): protocol = _TorSocksProtocol def __init__(self, *args, **kw): self._args = args self._kw = kw self._host = None self._when_connected = util.SingleObserver()
MIT License
rustychris/stompy
stompy/model/hydro_model.py
BC.get_inward_normal
python
def get_inward_normal(self, grid_edge=None):
    if grid_edge is None:
        grid_edge = self.grid_edge
    assert grid_edge is not None
    return self.model.grid.edges_normals(grid_edge, force_inward=True)
Query the grid based on self.grid_edge to find the unit normal vector for this velocity BC, positive pointing into the domain.
https://github.com/rustychris/stompy/blob/ef04d8b3ee9c9af827c87c72c7b50d365e5e567d/stompy/model/hydro_model.py#L99-L108
import os,shutil,glob,inspect import six import logging log=logging.getLogger('HydroModel') import copy import numpy as np import xarray as xr from shapely import geometry import stompy.model.delft.io as dio from stompy import xr_utils from stompy.io.local import noaa_coops, hycom from stompy import utils, filters, memoize from stompy.spatial import wkb2shp, proj_utils import stompy.grid.unstructured_grid as ugrid import re class BC(object): name=None standard_name=None _geom=None geom_type=[] grid_edge=None grid_cell=None grid_edges=None grid_cells=None mode='overwrite' on_insufficient_data='exception' pad=np.timedelta64(24,'h') def __init__(self,name,model=None,**kw): self.model=model self.name=name self.filters=[] utils.set_keywords(self,kw) for f in self.filters: f.setup(self) @property def geom(self): if (self._geom is None) and (self.model is not None): kw={} if self.geom_type is not None: kw['geom_type']=self.geom_type self._geom=self.model.get_geometry(name=self.name,**kw) return self._geom @geom.setter def geom(self,g): if isinstance(g,np.ndarray): if g.ndim==1: g=geometry.Point(g) elif g.ndim==2: g=geometry.LineString(g) else: raise Exception("Not sure how to convert %s to a shapely geometry"%g) self._geom=g
MIT License
kuri65536/python-for-android
python-build/python-libs/gdata/build/lib/gdata/Crypto/PublicKey/qNEW.py
qNEWobj.publickey
python
def publickey(self): return construct((self.p, self.q, self.g, self.y))
Return a new key object containing only the public information.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-build/python-libs/gdata/build/lib/gdata/Crypto/PublicKey/qNEW.py#L165-L167
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $" from Crypto.PublicKey import pubkey from Crypto.Util.number import * from Crypto.Hash import SHA class error (Exception): pass HASHBITS = 160 def generate(bits, randfunc, progress_func=None): obj=qNEWobj() if progress_func: progress_func('p,q\n') while (1): obj.q = getPrime(160, randfunc) obj.seed = S = long_to_bytes(obj.q) C, N, V = 0, 2, {} n= (bits-1) / HASHBITS b= (bits-1) % HASHBITS ; powb=2L << b powL1=pow(long(2), bits-1) while C<4096: for k in range(0, n+1): V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest()) p = V[n] % powb for k in range(n-1, -1, -1): p= (p << long(HASHBITS) )+V[k] p = p+powL1 p = p - (p % (2*obj.q)-1) if powL1<=p and isPrime(p): break C, N = C+1, N+n+1 if C<4096: break if progress_func: progress_func('4096 values of p tried\n') obj.p = p power=(p-1)/obj.q if progress_func: progress_func('h,g\n') while (1): h=bytes_to_long(randfunc(bits)) % (p-1) g=pow(h, power, p) if 1<h<p-1 and g>1: break obj.g=g if progress_func: progress_func('x,y\n') while (1): x=bytes_to_long(randfunc(20)) if 0 < x < obj.q: break obj.x, obj.y=x, pow(g, x, p) return obj def construct(tuple): obj=qNEWobj() if len(tuple) not in [4,5]: raise error, 'argument for construct() wrong length' for i in range(len(tuple)): field = obj.keydata[i] setattr(obj, field, tuple[i]) return obj class qNEWobj(pubkey.pubkey): keydata=['p', 'q', 'g', 'y', 'x'] def _sign(self, M, K=''): if (self.q<=K): raise error, 'K is greater than q' if M<0: raise error, 'Illegal value of M (<0)' if M>=pow(2,161L): raise error, 'Illegal value of M (too large)' r=pow(self.g, K, self.p) % self.q s=(K- (r*M*self.x % self.q)) % self.q return (r,s) def _verify(self, M, sig): r, s = sig if r<=0 or r>=self.q or s<=0 or s>=self.q: return 0 if M<0: raise error, 'Illegal value of M (<0)' if M<=0 or M>=pow(2,161L): return 0 v1 = pow(self.g, s, self.p) v2 = pow(self.y, M*r, self.p) v = ((v1*v2) % self.p) v = v % self.q if v==r: return 1 return 0 def size(self): return 160 def has_private(self): return hasattr(self, 'x') def can_sign(self): return 1 def can_encrypt(self): return 0
Apache License 2.0
brython-dev/brython
www/src/Lib/http/client.py
HTTPResponse.getheader
python
def getheader(self, name, default=None):
    if self.headers is None:
        raise ResponseNotReady()
    headers = self.headers.get_all(name) or default
    if isinstance(headers, str) or not hasattr(headers, '__iter__'):
        return headers
    else:
        return ', '.join(headers)
Returns the value of the header matching *name*. If there are multiple matching headers, the values are combined into a single string separated by commas and spaces. If no matching header is found, returns *default* or None if the *default* is not specified. If the headers are unknown, raises http.client.ResponseNotReady.
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/http/client.py#L696-L714
import email.parser import email.message import http import io import re import socket import collections.abc from urllib.parse import urlsplit __all__ = ["HTTPResponse", "HTTPConnection", "HTTPException", "NotConnected", "UnknownProtocol", "UnknownTransferEncoding", "UnimplementedFileMode", "IncompleteRead", "InvalidURL", "ImproperConnectionState", "CannotSendRequest", "CannotSendHeader", "ResponseNotReady", "BadStatusLine", "LineTooLong", "RemoteDisconnected", "error", "responses"] HTTP_PORT = 80 HTTPS_PORT = 443 _UNKNOWN = 'UNKNOWN' _CS_IDLE = 'Idle' _CS_REQ_STARTED = 'Request-started' _CS_REQ_SENT = 'Request-sent' globals().update(http.HTTPStatus.__members__) responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()} _MAXLINE = 65536 _MAXHEADERS = 100 _is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*').fullmatch _is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search _contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]') _contains_disallowed_method_pchar_re = re.compile('[\x00-\x1f]') _METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'} def _encode(data, name='data'): try: return data.encode("latin-1") except UnicodeEncodeError as err: raise UnicodeEncodeError( err.encoding, err.object, err.start, err.end, "%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') " "if you want to send it encoded in UTF-8." % (name.title(), data[err.start:err.end], name)) from None class HTTPMessage(email.message.Message): def getallmatchingheaders(self, name): name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.keys(): if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(line) return lst def parse_headers(fp, _class=HTTPMessage): headers = [] while True: line = fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("header line") headers.append(line) if len(headers) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if line in (b'\r\n', b'\n', b''): break hstring = b''.join(headers).decode('iso-8859-1') return email.parser.Parser(_class=_class).parsestr(hstring) class HTTPResponse(io.BufferedIOBase): def __init__(self, sock, debuglevel=0, method=None, url=None): self.fp = sock.makefile("rb") self.debuglevel = debuglevel self._method = method self.headers = self.msg = None self.version = _UNKNOWN self.status = _UNKNOWN self.reason = _UNKNOWN self.chunked = _UNKNOWN self.chunk_left = _UNKNOWN self.length = _UNKNOWN self.will_close = _UNKNOWN def _read_status(self): line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1") if len(line) > _MAXLINE: raise LineTooLong("status line") if self.debuglevel > 0: print("reply:", repr(line)) if not line: raise RemoteDisconnected("Remote end closed connection without" " response") try: version, status, reason = line.split(None, 2) except ValueError: try: version, status = line.split(None, 1) reason = "" except ValueError: version = "" if not version.startswith("HTTP/"): self._close_conn() raise BadStatusLine(line) try: status = int(status) if status < 100 or status > 999: raise BadStatusLine(line) except ValueError: raise BadStatusLine(line) return version, status, reason def begin(self): if self.headers is not None: return while True: version, status, reason = self._read_status() if status != CONTINUE: break while True: skip = self.fp.readline(_MAXLINE + 1) if len(skip) > _MAXLINE: raise LineTooLong("header line") skip = skip.strip() if not skip: break if self.debuglevel > 0: print("header:", skip) self.code = self.status 
= status self.reason = reason.strip() if version in ("HTTP/1.0", "HTTP/0.9"): self.version = 10 elif version.startswith("HTTP/1."): self.version = 11 else: raise UnknownProtocol(version) self.headers = self.msg = parse_headers(self.fp) if self.debuglevel > 0: for hdr, val in self.headers.items(): print("header:", hdr + ":", val) tr_enc = self.headers.get("transfer-encoding") if tr_enc and tr_enc.lower() == "chunked": self.chunked = True self.chunk_left = None else: self.chunked = False self.will_close = self._check_close() self.length = None length = self.headers.get("content-length") tr_enc = self.headers.get("transfer-encoding") if length and not self.chunked: try: self.length = int(length) except ValueError: self.length = None else: if self.length < 0: self.length = None else: self.length = None if (status == NO_CONTENT or status == NOT_MODIFIED or 100 <= status < 200 or self._method == "HEAD"): self.length = 0 if (not self.will_close and not self.chunked and self.length is None): self.will_close = True def _check_close(self): conn = self.headers.get("connection") if self.version == 11: if conn and "close" in conn.lower(): return True return False if self.headers.get("keep-alive"): return False if conn and "keep-alive" in conn.lower(): return False pconn = self.headers.get("proxy-connection") if pconn and "keep-alive" in pconn.lower(): return False return True def _close_conn(self): fp = self.fp self.fp = None fp.close() def close(self): try: super().close() finally: if self.fp: self._close_conn() def flush(self): super().flush() if self.fp: self.fp.flush() def readable(self): return True def isclosed(self): return self.fp is None def read(self, amt=None): if self.fp is None: return b"" if self._method == "HEAD": self._close_conn() return b"" if amt is not None: b = bytearray(amt) n = self.readinto(b) return memoryview(b)[:n].tobytes() else: if self.chunked: return self._readall_chunked() if self.length is None: s = self.fp.read() else: try: s = self._safe_read(self.length) except IncompleteRead: self._close_conn() raise self.length = 0 self._close_conn() return s def readinto(self, b): if self.fp is None: return 0 if self._method == "HEAD": self._close_conn() return 0 if self.chunked: return self._readinto_chunked(b) if self.length is not None: if len(b) > self.length: b = memoryview(b)[0:self.length] n = self.fp.readinto(b) if not n and b: self._close_conn() elif self.length is not None: self.length -= n if not self.length: self._close_conn() return n def _read_next_chunk_size(self): line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("chunk size") i = line.find(b";") if i >= 0: line = line[:i] try: return int(line, 16) except ValueError: self._close_conn() raise def _read_and_discard_trailer(self): while True: line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("trailer line") if not line: break if line in (b'\r\n', b'\n', b''): break def _get_chunk_left(self): chunk_left = self.chunk_left if not chunk_left: if chunk_left is not None: self._safe_read(2) try: chunk_left = self._read_next_chunk_size() except ValueError: raise IncompleteRead(b'') if chunk_left == 0: self._read_and_discard_trailer() self._close_conn() chunk_left = None self.chunk_left = chunk_left return chunk_left def _readall_chunked(self): assert self.chunked != _UNKNOWN value = [] try: while True: chunk_left = self._get_chunk_left() if chunk_left is None: break value.append(self._safe_read(chunk_left)) self.chunk_left = 0 return b''.join(value) except 
IncompleteRead: raise IncompleteRead(b''.join(value)) def _readinto_chunked(self, b): assert self.chunked != _UNKNOWN total_bytes = 0 mvb = memoryview(b) try: while True: chunk_left = self._get_chunk_left() if chunk_left is None: return total_bytes if len(mvb) <= chunk_left: n = self._safe_readinto(mvb) self.chunk_left = chunk_left - n return total_bytes + n temp_mvb = mvb[:chunk_left] n = self._safe_readinto(temp_mvb) mvb = mvb[n:] total_bytes += n self.chunk_left = 0 except IncompleteRead: raise IncompleteRead(bytes(b[0:total_bytes])) def _safe_read(self, amt): data = self.fp.read(amt) if len(data) < amt: raise IncompleteRead(data, amt-len(data)) return data def _safe_readinto(self, b): amt = len(b) n = self.fp.readinto(b) if n < amt: raise IncompleteRead(bytes(b[:n]), amt-n) return n def read1(self, n=-1): if self.fp is None or self._method == "HEAD": return b"" if self.chunked: return self._read1_chunked(n) if self.length is not None and (n < 0 or n > self.length): n = self.length result = self.fp.read1(n) if not result and n: self._close_conn() elif self.length is not None: self.length -= len(result) return result def peek(self, n=-1): if self.fp is None or self._method == "HEAD": return b"" if self.chunked: return self._peek_chunked(n) return self.fp.peek(n) def readline(self, limit=-1): if self.fp is None or self._method == "HEAD": return b"" if self.chunked: return super().readline(limit) if self.length is not None and (limit < 0 or limit > self.length): limit = self.length result = self.fp.readline(limit) if not result and limit: self._close_conn() elif self.length is not None: self.length -= len(result) return result def _read1_chunked(self, n): chunk_left = self._get_chunk_left() if chunk_left is None or n == 0: return b'' if not (0 <= n <= chunk_left): n = chunk_left read = self.fp.read1(n) self.chunk_left -= len(read) if not read: raise IncompleteRead(b"") return read def _peek_chunked(self, n): try: chunk_left = self._get_chunk_left() except IncompleteRead: return b'' if chunk_left is None: return b'' return self.fp.peek(chunk_left)[:chunk_left] def fileno(self): return self.fp.fileno()
BSD 3-Clause New or Revised License
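A standard-library usage sketch for getheader; example.org is a placeholder host and the request needs network access.

import http.client

conn = http.client.HTTPSConnection("example.org")
conn.request("GET", "/")
resp = conn.getresponse()

print(resp.getheader("Content-Type"))                   # single header value
print(resp.getheader("X-Not-There", default="<none>"))  # fallback default
conn.close()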
trailbehind/deeposm
bin/update_deeposmorg.py
main
python
def main():
    naip_year = 2013
    naip_states = {
        'de': ['http://download.geofabrik.de/north-america/us/delaware-latest.osm.pbf'],
        'ia': ['http://download.geofabrik.de/north-america/us/iowa-latest.osm.pbf'],
        'me': ['http://download.geofabrik.de/north-america/us/maine-latest.osm.pbf'],
    }
    number_of_naips = 175
    extract_type = 'highway'
    bands = [1, 1, 1, 1]
    tile_size = 64
    pixels_to_fatten_roads = 3
    tile_overlap = 1
    neural_net = 'two_layer_relu_conv'
    number_of_epochs = 10
    randomize_naips = False
    for state in naip_states:
        filenames = naip_states[state]
        raster_data_paths = download_and_serialize(number_of_naips,
                                                   randomize_naips,
                                                   state,
                                                   naip_year,
                                                   extract_type,
                                                   bands,
                                                   tile_size,
                                                   pixels_to_fatten_roads,
                                                   filenames,
                                                   tile_overlap)
        model = train_on_cached_data(neural_net, number_of_epochs)
        with open(CACHE_PATH + METADATA_PATH, 'r') as infile:
            training_info = pickle.load(infile)
        post_findings_to_s3(raster_data_paths, model, training_info, training_info['bands'], False)
    requests.get('http://www.deeposm.org/refresh_findings/')
Analyze each state and publish results to deeposm.org.
https://github.com/trailbehind/deeposm/blob/4361273723ba271105e2401ecc1707556816f20c/bin/update_deeposmorg.py#L11-L48
import pickle

import requests

from src.s3_client_deeposm import post_findings_to_s3
from src.single_layer_network import train_on_cached_data
from src.training_data import CACHE_PATH, METADATA_PATH, download_and_serialize
MIT License
zulko/moviepy
moviepy/video/VideoClip.py
VideoClip.write_gif
python
def write_gif(
    self,
    filename,
    fps=None,
    program="imageio",
    opt="nq",
    fuzz=1,
    loop=0,
    dispose=False,
    colors=None,
    tempfiles=False,
    logger="bar",
    pixel_format=None,
):
    if program == "imageio":
        write_gif_with_image_io(
            self,
            filename,
            fps=fps,
            opt=opt,
            loop=loop,
            colors=colors,
            logger=logger,
        )
    elif tempfiles:
        opt = "optimizeplus" if opt == "nq" else "OptimizeTransparency"
        write_gif_with_tempfiles(
            self,
            filename,
            fps=fps,
            program=program,
            opt=opt,
            fuzz=fuzz,
            loop=loop,
            dispose=dispose,
            colors=colors,
            logger=logger,
            pixel_format=pixel_format,
        )
    else:
        opt = "optimizeplus" if opt == "nq" else "OptimizeTransparency"
        write_gif(
            self,
            filename,
            fps=fps,
            program=program,
            opt=opt,
            fuzz=fuzz,
            loop=loop,
            dispose=dispose,
            colors=colors,
            logger=logger,
            pixel_format=pixel_format,
        )
Write the VideoClip to a GIF file.

Converts a VideoClip into an animated GIF using ImageMagick or ffmpeg.

Parameters
----------

filename
  Name of the resulting gif file, as a string or a path-like object.

fps
  Number of frames per second (see note below). If it isn't provided, then
  the function will look for the clip's ``fps`` attribute (VideoFileClip,
  for instance, has one).

program
  Software to use for the conversion, either 'imageio' (this will use the
  library FreeImage through ImageIO), or 'ImageMagick', or 'ffmpeg'.

opt
  Optimization to apply. If program='imageio', opt must be either 'wu' (Wu)
  or 'nq' (Neuquant). If program='ImageMagick', either 'optimizeplus' or
  'OptimizeTransparency'.

fuzz
  (ImageMagick only) Compresses the GIF by considering that the colors that
  are less than fuzz% different are in fact the same.

tempfiles
  Writes every frame to a file instead of passing them in RAM. Useful on
  computers with little RAM. Can only be used with 'ImageMagick' or 'ffmpeg'.

progress_bar
  If True, displays a progress bar.

pixel_format
  Pixel format for the output gif file. If it is not specified, 'rgb24' will
  be used as the default format unless ``clip.mask`` exists, in which case
  'rgba' will be used. This option is only accepted if ``program=ffmpeg`` or
  when ``tempfiles=True``.

Notes
-----

The gif plays the clip in real time (you can only change the frame rate).
To make the gif play slower than the clip, slow the clip down first ::

    >>> # slow down clip 50% and make it a gif
    >>> myClip.multiply_speed(0.5).write_gif('myClip.gif')
https://github.com/zulko/moviepy/blob/576fb1ab980ea53212675a775e059e1460488672/moviepy/video/VideoClip.py#L460-L576
import copy as _copy import os import subprocess as sp import tempfile import numpy as np import proglog from imageio import imread, imsave from PIL import Image from moviepy.Clip import Clip from moviepy.config import IMAGEMAGICK_BINARY from moviepy.decorators import ( add_mask_if_none, apply_to_mask, convert_masks_to_RGB, convert_parameter_to_seconds, convert_path_to_string, outplace, requires_duration, requires_fps, use_clip_fps_by_default, ) from moviepy.tools import ( cross_platform_popen_params, extensions_dict, find_extension, subprocess_call, ) from moviepy.video.io.ffmpeg_writer import ffmpeg_write_video from moviepy.video.io.gif_writers import ( write_gif, write_gif_with_image_io, write_gif_with_tempfiles, ) from moviepy.video.tools.drawing import blit class VideoClip(Clip): def __init__( self, make_frame=None, is_mask=False, duration=None, has_constant_size=True ): super().__init__() self.mask = None self.audio = None self.pos = lambda t: (0, 0) self.relative_pos = False self.layer = 0 if make_frame: self.make_frame = make_frame self.size = self.get_frame(0).shape[:2][::-1] self.is_mask = is_mask self.has_constant_size = has_constant_size if duration is not None: self.duration = duration self.end = duration @property def w(self): return self.size[0] @property def h(self): return self.size[1] @property def aspect_ratio(self): return self.w / float(self.h) @property @requires_duration @requires_fps def n_frames(self): return int(self.duration * self.fps) def __copy__(self): cls = self.__class__ new_clip = cls.__new__(cls) for attr in self.__dict__: value = getattr(self, attr) if attr in ("mask", "audio"): value = _copy.copy(value) setattr(new_clip, attr, value) return new_clip copy = __copy__ @convert_parameter_to_seconds(["t"]) @convert_masks_to_RGB def save_frame(self, filename, t=0, with_mask=True): im = self.get_frame(t) if with_mask and self.mask is not None: mask = 255 * self.mask.get_frame(t) im = np.dstack([im, mask]).astype("uint8") else: im = im.astype("uint8") imsave(filename, im) @requires_duration @use_clip_fps_by_default @convert_masks_to_RGB @convert_path_to_string(["filename", "temp_audiofile", "temp_audiofile_path"]) def write_videofile( self, filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset="medium", audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, temp_audiofile_path="", remove_temp=True, write_logfile=False, threads=None, ffmpeg_params=None, logger="bar", pixel_format=None, ): name, ext = os.path.splitext(os.path.basename(filename)) ext = ext[1:].lower() logger = proglog.default_bar_logger(logger) if codec is None: try: codec = extensions_dict[ext]["codec"][0] except KeyError: raise ValueError( "MoviePy couldn't find the codec associated " "with the filename. Provide the 'codec' " "parameter in write_videofile." ) if audio_codec is None: if ext in ["ogv", "webm"]: audio_codec = "libvorbis" else: audio_codec = "libmp3lame" elif audio_codec == "raw16": audio_codec = "pcm_s16le" elif audio_codec == "raw32": audio_codec = "pcm_s32le" audiofile = audio if isinstance(audio, str) else None make_audio = ( (audiofile is None) and (audio is True) and (self.audio is not None) ) if make_audio and temp_audiofile: audiofile = temp_audiofile elif make_audio: audio_ext = find_extension(audio_codec) audiofile = os.path.join( temp_audiofile_path, name + Clip._TEMP_FILES_PREFIX + "wvf_snd.%s" % audio_ext, ) logger(message="Moviepy - Building video %s." 
% filename) if make_audio: self.audio.write_audiofile( audiofile, audio_fps, audio_nbytes, audio_bufsize, audio_codec, bitrate=audio_bitrate, write_logfile=write_logfile, logger=logger, ) ffmpeg_write_video( self, filename, fps, codec, bitrate=bitrate, preset=preset, write_logfile=write_logfile, audiofile=audiofile, threads=threads, ffmpeg_params=ffmpeg_params, logger=logger, pixel_format=pixel_format, ) if remove_temp and make_audio: if os.path.exists(audiofile): os.remove(audiofile) logger(message="Moviepy - video ready %s" % filename) @requires_duration @use_clip_fps_by_default @convert_masks_to_RGB def write_images_sequence( self, name_format, fps=None, with_mask=True, logger="bar" ): logger = proglog.default_bar_logger(logger) timings = np.arange(0, self.duration, 1.0 / fps) filenames = [] for i, t in logger.iter_bar(t=list(enumerate(timings))): name = name_format % i filenames.append(name) self.save_frame(name, t, with_mask=with_mask) return filenames @requires_duration @convert_masks_to_RGB @convert_path_to_string("filename")
MIT License
mrkipling/maraschino
lib/rtorrent/rpc/__init__.py
Multicall.call
python
def call(self):
    m = xmlrpclib.MultiCall(self.rt_obj._get_conn())
    for call in self.calls:
        method, args = call
        rpc_call = getattr(method, "rpc_call")
        getattr(m, rpc_call)(*args)

    results = m()
    results = tuple(results)
    results_processed = []

    for r, c in zip(results, self.calls):
        method = c[0]
        result = process_result(method, r)
        results_processed.append(result)
        exists = hasattr(self.class_obj, method.varname)
        if not exists or not inspect.ismethod(getattr(self.class_obj, method.varname)):
            setattr(self.class_obj, method.varname, result)

    return(tuple(results_processed))
Execute added multicall calls

@return: the results (post-processed), in the order they were added
@rtype: tuple
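A hedged usage sketch of this API, based only on the Multicall methods visible in this record's context (__init__, add, call): the `torrent` object, its `info_hash` attribute and the XML-RPC call names are illustrative assumptions, and a reachable rTorrent XML-RPC endpoint is required.

from rtorrent.rpc import Multicall

mc = Multicall(torrent)                    # `torrent` must expose a `_rt_obj` (or be an RTorrent instance)
mc.add("d.get_name", torrent.info_hash)    # hypothetical rpc call name and argument
mc.add("d.get_ratio", torrent.info_hash)   # hypothetical rpc call name and argument
name, ratio = mc.call()                    # one XML-RPC multicall; results come back in add() order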
https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/rtorrent/rpc/__init__.py#L156-L181
import inspect import rtorrent import re from rtorrent.common import bool_to_int, convert_version_tuple_to_str, safe_repr from rtorrent.err import MethodError from rtorrent.compat import xmlrpclib def get_varname(rpc_call): r = re.search( "([ptdf]\.|system\.|get\_|is\_|set\_)+([^=]*)", rpc_call, re.I) if r: return(r.groups()[-1]) else: return(None) def _handle_unavailable_rpc_method(method, rt_obj): msg = "Method isn't available." if rt_obj._get_client_version_tuple() < method.min_version: msg = "This method is only available in " "RTorrent version v{0} or later".format( convert_version_tuple_to_str(method.min_version)) raise MethodError(msg) class DummyClass: def __init__(self): pass class Method: def __init__(self, _class, method_name, rpc_call, docstring=None, varname=None, **kwargs): self._class = _class self.class_name = _class.__name__ self.method_name = method_name self.rpc_call = rpc_call self.docstring = docstring self.varname = varname self.min_version = kwargs.get("min_version", ( 0, 0, 0)) self.boolean = kwargs.get("boolean", False) self.post_process_func = kwargs.get( "post_process_func", None) self.aliases = kwargs.get( "aliases", []) self.required_args = [] self.method_type = self._get_method_type() if self.varname is None: self.varname = get_varname(self.rpc_call) assert self.varname is not None, "Couldn't get variable name." def __repr__(self): return safe_repr("Method(method_name='{0}', rpc_call='{1}')", self.method_name, self.rpc_call) def _get_method_type(self): if self.method_name[:4] == "set_": return('m') else: return('r') def is_modifier(self): if self.method_type == 'm': return(True) else: return(False) def is_retriever(self): if self.method_type == 'r': return(True) else: return(False) def is_available(self, rt_obj): if rt_obj._get_client_version_tuple() < self.min_version or self.rpc_call not in rt_obj._get_rpc_methods(): return(False) else: return(True) class Multicall: def __init__(self, class_obj, **kwargs): self.class_obj = class_obj if class_obj.__class__.__name__ == "RTorrent": self.rt_obj = class_obj else: self.rt_obj = class_obj._rt_obj self.calls = [] def add(self, method, *args): if isinstance(method, str): result = find_method(method) if result == -1: method = Method(DummyClass, method, method) else: method = result if not method.is_available(self.rt_obj): _handle_unavailable_rpc_method(method, self.rt_obj) self.calls.append((method, args)) def list_calls(self): for c in self.calls: print(c)
MIT License
blurstudio/cross3d
cross3d/abstract/abstractscenewrapper.py
AbstractSceneWrapper._setNativeController
python
def _setNativeController(self, name, nativeController):
    return False
\remarks set the controller for this object based on the inputted name
\param name <str>
\param <variant> nativeController || None
\return <bool> success
https://github.com/blurstudio/cross3d/blob/277968d1227de740fc87ef61005c75034420eadf/cross3d/abstract/abstractscenewrapper.py#L101-L108
import cross3d from cross3d import UserProps, abstractmethod from cross3d.constants import ControllerType, PointerTypes class AbstractSceneWrapper(object): def __eq__(self, other): if (isinstance(other, AbstractSceneWrapper)): return other._nativePointer == self._nativePointer return False def __hash__(self): return self.uniqueId() def __call__(self, retType=PointerTypes.Pointer): return self._nativePointer def __init__(self, scene, nativePointer=None): super(AbstractSceneWrapper, self).__init__() self._scene = scene self._nativePointer = nativePointer def __str__(self): return '<%s (%s)>' % (super(AbstractSceneWrapper, self).__str__().split()[0].split('.')[-1], self.displayName()) @abstractmethod def _nativeControllers(self): return [] @abstractmethod def _nativeController(self, name): return None @abstractmethod def _nativeCopy(self): return None @abstractmethod def _nativeProperty(self, key, default=None): return default @abstractmethod
MIT License
simphony/osp-core
osp/core/utils/wrapper_development.py
create_from_triples
python
def create_from_triples(triples, neighbor_triples, session, fix_neighbors=True):
    from osp.core.utils.general import uid_from_iri
    from osp.core.cuds import Cuds
    from osp.core.session.wrapper_session import WrapperSession
    triples = list(triples)
    if not triples:
        return None
    uid = uid_from_iri(triples[0][0])
    if isinstance(session, WrapperSession) and uid in session._expired:
        session._expired.remove(uid)

    if uid in session._registry:
        cuds_object = session._registry.get(uid)
        cuds_object.session._notify_read(cuds_object)
        if fix_neighbors:
            rels = set(cuds_object._neighbors.keys())
            for rel in rels:
                cuds_object.remove(rel=rel)
        session.graph.remove((cuds_object.iri, None, None))
        for triple in set(triples):
            session.graph.add(triple)
    else:
        cuds_object = Cuds(attributes={}, oclass=None, session=session,
                           uid=uid, extra_triples=set(triples))

    for triple in set(neighbor_triples):
        session.graph.add(triple)
    if isinstance(session, WrapperSession):
        session._store_checks(cuds_object)
    cuds_object.session._notify_update(cuds_object)
    return cuds_object
Create a CUDS object from triples.

Args:
    triples (List[Tuple]): The list of triples of the CUDS object to create.
    neighbor_triples (List[Tuple]): A list of important triples of neighbors,
        most importantly their types.
    session (Session): The session to create the CUDS object in.
    fix_neighbors (bool): Whether to remove the link from the old neighbors
        to this cuds object, defaults to True.
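A rough usage sketch, not taken from the library's documentation: it assumes `session` is an already-opened osp-core session, and the CUDS IRI scheme and the ontology-class IRI used in the rdf:type triple are purely illustrative assumptions.

import uuid
import rdflib
from osp.core.utils.wrapper_development import create_from_triples

# assumed IRI scheme and class IRI -- purely illustrative
cuds_iri = rdflib.URIRef("http://www.osp-core.com/cuds#" + str(uuid.uuid4()))
oclass_iri = rdflib.URIRef("http://www.osp-core.com/cuba#Entity")

triples = [(cuds_iri, rdflib.RDF.type, oclass_iri)]
neighbor_triples = []        # e.g. rdf:type triples of already-known neighbors

new_cuds = create_from_triples(triples, neighbor_triples, session, fix_neighbors=True)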
https://github.com/simphony/osp-core/blob/19f233ebe6c40e92884aa07bf498304d772f7f27/osp/core/utils/wrapper_development.py#L195-L243
import rdflib from copy import deepcopy from osp.core.ontology.datatypes import convert_to from osp.core.utils.general import get_relationships_between from osp.core.namespaces import cuba def check_arguments(types, *args): for arg in args: if not isinstance(arg, types): message = '{!r} is not a correct object of allowed types {}' raise TypeError(message.format(arg, types)) def get_neighbor_diff(cuds1, cuds2, mode="all"): allowed_modes = ["all", "active", "non-active"] if mode not in allowed_modes: raise ValueError("Illegal mode specified. Choose one of %s" % allowed_modes) if cuds1 is None: return [] result = list() for relationship in cuds1._neighbors.keys(): if (( mode == "active" and not relationship.is_subclass_of(cuba.activeRelationship) ) or ( mode == "non-active" and relationship.is_subclass_of(cuba.activeRelationship) )): continue old_neighbor_uids = set() if cuds2 is not None and relationship in cuds2._neighbors: old_neighbor_uids = cuds2._neighbors[relationship].keys() new_neighbor_uids = list( cuds1._neighbors[relationship].keys() - old_neighbor_uids) result += list(zip(new_neighbor_uids, [relationship] * len(new_neighbor_uids))) return result def clone_cuds_object(cuds_object): if cuds_object is None: return None session = cuds_object._session clone = deepcopy(cuds_object) clone._session = session return clone def create_recycle(oclass, kwargs, session, uid, fix_neighbors=True, _force=False): from osp.core.session.wrapper_session import WrapperSession from osp.core.cuds import Cuds uid = convert_to(uid, "UID") if isinstance(session, WrapperSession) and uid in session._expired: session._expired.remove(uid) if uid in session._registry: cuds_object = session._registry.get(uid) for rel in set(cuds_object._neighbors.keys()): if not fix_neighbors: del cuds_object._neighbors[rel] else: cuds_object.remove(rel=rel) change_oclass(cuds_object, oclass, kwargs, _force=_force) else: if oclass is not None: cuds_object = oclass(uid=uid, session=session, **kwargs, _force=_force) else: cuds_object = Cuds(uid=uid, session=session, **kwargs) return cuds_object def create_from_cuds_object(cuds_object, session): assert cuds_object.session is not session kwargs = {k.argname: v for k, v in cuds_object.get_attributes().items()} clone = create_recycle(oclass=cuds_object.oclass, kwargs=kwargs, session=session, uid=cuds_object.uid, fix_neighbors=False) for rel, target_dict in cuds_object._neighbors.items(): clone._neighbors[rel] = {} for uid, target_oclass in target_dict.items(): clone._neighbors[rel][uid] = target_oclass return clone def change_oclass(cuds_object, new_oclass, kwargs, _force=False): cuds_object.session._notify_read(cuds_object) if cuds_object.oclass != new_oclass: for neighbor in cuds_object.get(rel=cuba.relationship): for rel in get_relationships_between(cuds_object, neighbor): neighbor._neighbors[rel.inverse][cuds_object.uid] = [new_oclass] attributes = new_oclass._get_attributes_values(kwargs, _force=_force) cuds_object._graph.remove((cuds_object.iri, None, None)) cuds_object._graph.add(( cuds_object.iri, rdflib.RDF.type, new_oclass.iri )) for k, v in attributes.items(): cuds_object._graph.set(( cuds_object.iri, k.iri, rdflib.Literal(k.convert_to_datatype(v), datatype=k.datatype) )) cuds_object.session._notify_update(cuds_object)
BSD 3-Clause New or Revised License
dcos/dcos
release/storage/aws.py
S3StorageProvider.__init__
python
def __init__(self, bucket, object_prefix, download_url, access_key_id=None,
             secret_access_key=None, region_name=None):
    if object_prefix is not None:
        assert object_prefix and not object_prefix.startswith('/') and not object_prefix.endswith('/')
    self.__session = get_aws_session(access_key_id, secret_access_key, region_name)
    self.__bucket = self.__session.resource('s3').Bucket(bucket)
    self.__object_prefix = object_prefix
    self.__url = download_url
If access_key_id and secret_access_key are unset, boto3 will try to authenticate
by other methods. See here for other credential options:
http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials
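A construction sketch for context; the bucket name, object prefix, download URL and region below are placeholders, and leaving the keys as None defers to boto3's normal credential chain as the docstring notes.

from release.storage.aws import S3StorageProvider

provider = S3StorageProvider(
    bucket="downloads.example.com",
    object_prefix="dcos/testing",     # must not start or end with '/'
    download_url="https://downloads.example.com/dcos/testing/",
    access_key_id=None,               # fall back to boto3's credential lookup
    secret_access_key=None,
    region_name="us-west-2")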
https://github.com/dcos/dcos/blob/79b9a39b4e639dc2c9435a869918399b50bfaf24/release/storage/aws.py#L33-L45
from typing import Optional import boto3 import botocore from release.storage import AbstractStorageProvider def get_aws_session(access_key_id, secret_access_key, region_name=None): if not access_key_id: access_key_id = None if not secret_access_key: secret_access_key = None return boto3.session.Session( aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, region_name=region_name) class S3StorageProvider(AbstractStorageProvider): name = 'aws'
Apache License 2.0
galactics/beyond
beyond/orbits/statevector.py
Infos.parabolic
python
def parabolic(self):
    return self.kep.e == 1
True if the orbit is parabolic
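For context, a standalone sketch of the eccentricity classification that the surrounding Infos class uses (elliptic e < 1, parabolic e == 1, hyperbolic e > 1); this is not part of the library API, just the same rule written out.

def classify(eccentricity):
    # mirrors Infos.elliptic / .parabolic / .hyperbolic
    if eccentricity < 1:
        return "elliptic"
    if eccentricity == 1:
        return "parabolic"
    return "hyperbolic"

assert classify(0.0167) == "elliptic"    # roughly Earth's orbit
assert classify(1.0) == "parabolic"
assert classify(1.5) == "hyperbolic"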
https://github.com/galactics/beyond/blob/1326a03ef2c918244078c48f8878705510a8eb1d/beyond/orbits/statevector.py#L370-L372
import numpy as np from textwrap import indent from ..constants import c from ..dates import timedelta from ..errors import OrbitError from .forms import get_form, Form from ..frames.frames import get_frame, orbit2frame from .man import Man from .cov import Cov class StateVector(np.ndarray): def __new__(cls, coord, date, form, frame, **kwargs): if len(coord) != 6: raise OrbitError("Should be 6 in length") if isinstance(form, str): form = get_form(form) if isinstance(frame, str): frame = get_frame(frame) obj = np.ndarray.__new__( cls, (6,), buffer=np.array([float(x) for x in coord]), dtype=float ) obj._data = kwargs obj._data["date"] = date obj._data["form"] = form obj._data["frame"] = frame return obj def __array_finalize__(self, obj): if obj is None: return self._data = obj._data.copy() def __reduce__(self): reconstruct, clsinfo, state = super().__reduce__() new_state = { "basestate": state, "data": self._data, } return reconstruct, clsinfo, new_state def __setstate__(self, state): super().__setstate__(state["basestate"]) self._data = state["data"] def copy(self, *, frame=None, form=None, same=None): new_compl = {} for k, v in self._data.items(): new_compl[k] = v.copy() if hasattr(v, "copy") else v new_obj = self.__class__(self.base, **new_compl) if same is not None: if hasattr(same, "frame") and hasattr(same, "form"): frame = same.frame form = same.form else: raise TypeError("'same' does not have a frame and/or a form attribute") if frame and frame != self.frame: new_obj.frame = frame if form and form != self.form: new_obj.form = form return new_obj def __getattr__(self, name): name = Form.alt.get(name, name) if name in self.form.param_names: i = self.form.param_names.index(name) res = self[i] elif name in self._data.keys(): res = self._data[name] else: raise AttributeError(f"'{self.__class__}' object has no attribute {name!r}") return res def __getitem__(self, key): if isinstance(key, (int, slice)): return super().__getitem__(key) else: try: return self.__getattr__(key) except AttributeError as err: raise KeyError(str(err)) def __str__(self): return str(self.base) def __repr__(self): coord_str = "\n".join( [ " %s = %s" % (name, arg) for name, arg in zip(self.form.param_names, self) ] ) fmt = f""" StateVector = date = {self.date} form = {self.form} frame = {self.frame} coord = {coord_str} """ if self.cov is not None: fmt += indent(repr(self.cov), " " * 2) return fmt @property def date(self): return self._data["date"] @date.setter def date(self, value): self._data["date"] = value @property def event(self): return self._data.get("event") @event.setter def event(self, value): self._data["event"] = value @property def cov(self): if "cov" not in self._data.keys(): self._data["cov"] = None return self._data["cov"] @cov.setter def cov(self, value): if not isinstance(value, Cov): raise TypeError(f"Unknwon covariance type : {type(value)}") self._data["cov"] = value self._data["cov"].orb = self @cov.deleter def cov(self): self._data["cov"] = None @property def maneuvers(self): mans = self._data.setdefault("maneuvers", []) if isinstance(mans, Man): mans = [mans] self._data["maneuvers"] = mans return mans @maneuvers.setter def maneuvers(self, mans): if isinstance(mans, Man): mans = [mans] self._data["maneuvers"] = mans @maneuvers.deleter def maneuvers(self): del self._data["maneuvers"] @property def form(self): return self._data["form"] @form.setter def form(self, new_form): if isinstance(new_form, str): new_form = get_form(new_form) self.base.setfield(self._data["form"](self, new_form), 
dtype=float) self._data["form"] = new_form @property def frame(self): return self._data["frame"] @frame.setter def frame(self, new_frame): old_form = self.form old_frame = self.frame if isinstance(new_frame, str): new_frame = get_frame(new_frame) if new_frame != self.frame: self.form = "cartesian" try: new_coord = self.frame.transform(self, new_frame) self.base.setfield(new_coord, dtype=float) self._data["frame"] = new_frame finally: self.form = old_form if self.cov is not None and self.cov.frame == old_frame: self.cov.frame = new_frame def as_frame(self, name, **kwargs): return orbit2frame(name, self, **kwargs) def as_orbit(self, propagator): from .orbit import Orbit new_dict = self._data.copy() new_dict["propagator"] = propagator return Orbit(self.base, **new_dict) @property def infos(self): if not hasattr(self, "_infos"): self._data["infos"] = Infos(self) return self._data["infos"] class Infos: def __init__(self, orb): self.orb = orb @property def kep(self): if not hasattr(self, "_kep"): self._kep = self.orb.copy(form="keplerian") return self._kep @property def sphe(self): if not hasattr(self, "_sphe"): self._sphe = self.orb.copy(form="spherical") return self._sphe @property def mu(self): return self.orb.frame.center.body.mu @property def type(self): for t in "elliptic hyperbolic parabolic".split(): if getattr(self, t): return t @property def elliptic(self): return self.kep.e < 1 @property
MIT License
anymesh/anymesh-python
example/urwid/wimp.py
CheckBox.get_label
python
def get_label(self):
    return self._label.text
Return label text.

>>> cb = CheckBox(u"Seriously")
>>> print cb.get_label()
Seriously
>>> print cb.label
Seriously
>>> cb.set_label([('bright_attr', u"flashy"), u" normal"])
>>> print cb.label  # only text is returned
flashy normal
https://github.com/anymesh/anymesh-python/blob/017b7808f2fbdc765604488d325678c28be438c0/example/urwid/wimp.py#L182-L195
from urwid.widget import (Text, WidgetWrap, delegate_to_widget_mixin, BOX, FLOW) from urwid.canvas import CompositeCanvas from urwid.signals import connect_signal from urwid.container import Columns, Overlay from urwid.util import is_mouse_press from urwid.text_layout import calc_coords from urwid.signals import disconnect_signal from urwid.split_repr import python3_repr from urwid.decoration import WidgetDecoration from urwid.command_map import ACTIVATE class SelectableIcon(Text): _selectable = True def __init__(self, text, cursor_position=1): self.__super.__init__(text) self._cursor_position = cursor_position def render(self, size, focus=False): c = self.__super.render(size, focus) if focus: c = CompositeCanvas(c) c.cursor = self.get_cursor_coords(size) return c def get_cursor_coords(self, size): if self._cursor_position > len(self.text): return None (maxcol,) = size trans = self.get_line_translation(maxcol) x, y = calc_coords(self.text, trans, self._cursor_position) if maxcol <= x: return None return x, y def keypress(self, size, key): return key class CheckBoxError(Exception): pass class CheckBox(WidgetWrap): def sizing(self): return frozenset([FLOW]) states = { True: SelectableIcon("[X]"), False: SelectableIcon("[ ]"), 'mixed': SelectableIcon("[#]") } reserve_columns = 4 signals = ["change"] def __init__(self, label, state=False, has_mixed=False, on_state_change=None, user_data=None): self.__super.__init__(None) self._label = Text("") self.has_mixed = has_mixed self._state = None if on_state_change: connect_signal(self, 'change', on_state_change, user_data) self.set_label(label) self.set_state(state) def _repr_words(self): return self.__super._repr_words() + [ python3_repr(self.label)] def _repr_attrs(self): return dict(self.__super._repr_attrs(), state=self.state) def set_label(self, label): self._label.set_text(label)
MIT License
castagnait/plugin.video.netflix
resources/lib/services/nfsession/msl/msl_utils.py
update_play_times_duration
python
def update_play_times_duration(play_times, player_state):
    duration = player_state['elapsed_seconds'] * 1000
    play_times['total'] = duration
    play_times['audio'][0]['duration'] = duration
    play_times['video'][0]['duration'] = duration
Update the playTimes duration values
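A minimal sketch of the dictionary shape this helper expects, inferred only from the keys the function touches; the extra fields Netflix actually sends are omitted and the values are illustrative.

player_state = {'elapsed_seconds': 42}
play_times = {
    'total': 0,
    'audio': [{'duration': 0}],
    'video': [{'duration': 0}],
}
update_play_times_duration(play_times, player_state)
assert play_times['total'] == 42000                    # durations are in milliseconds
assert play_times['video'][0]['duration'] == 42000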
https://github.com/castagnait/plugin.video.netflix/blob/1c68c7d4c399603a5dcbeef1e7637de7a9036a72/resources/lib/services/nfsession/msl/msl_utils.py#L76-L81
import json import random import time from functools import wraps from urllib.parse import urlencode import xbmcgui import resources.lib.kodi.ui as ui from resources.lib import common from resources.lib.common.exceptions import MSLError from resources.lib.database.db_utils import TABLE_SESSION from resources.lib.globals import G from resources.lib.utils.esn import get_esn CHROME_BASE_URL = 'https://www.netflix.com/nq/msl_v1/cadmium/' CHROME_PLAYAPI_URL = 'https://www.netflix.com/msl/playapi/cadmium/' ENDPOINTS = { 'manifest': CHROME_BASE_URL + 'pbo_manifests/%5E1.0.0/router', 'license': CHROME_BASE_URL + 'pbo_licenses/%5E1.0.0/router', 'events': CHROME_PLAYAPI_URL + 'event/1', 'logblobs': CHROME_PLAYAPI_URL + 'logblob/1' } MSL_DATA_FILENAME = 'msl_data.json' EVENT_START = 'start' EVENT_STOP = 'stop' EVENT_KEEP_ALIVE = 'keepAlive' EVENT_ENGAGE = 'engage' EVENT_BIND = 'bind' AUDIO_CHANNELS_CONV = {1: '1.0', 2: '2.0', 6: '5.1', 8: '7.1'} def display_error_info(func): @wraps(func) def error_catching_wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as exc: message = f'{exc.__class__.__name__}: {exc}' ui.show_error_info(common.get_local_string(30028), message, unknown_error=not message, netflix_error=isinstance(exc, MSLError)) raise return error_catching_wrapper def is_media_changed(previous_player_state, player_state): if not previous_player_state: return True if player_state['currentvideostream'] != previous_player_state['currentvideostream'] or player_state['currentaudiostream'] != previous_player_state['currentaudiostream']: return True return False
MIT License
everware/everware
setup.py
get_data_files
python
def get_data_files():
    data_files = []
    ntrim = len(here) + 1

    for (d, dirs, filenames) in os.walk(static):
        data_files.append((
            d[ntrim:],
            [pjoin(d, f) for f in filenames]
        ))
    return data_files
Get data files in share/jupyter
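A sketch of how the result is typically consumed in this setup.py: each tuple maps a directory (relative to the repository root) to the files that should be installed there. The setup() keyword usage below is an assumption about intent, not copied from the file.

from distutils.core import setup

setup(
    name="everware",
    data_files=get_data_files(),   # [("share/static/...", ["/abs/path/style.css", ...]), ...]
)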
https://github.com/everware/everware/blob/aefa4a993da6ea11b22122c04ebec04d700835ad/setup.py#L165-L176
from __future__ import print_function import os import sys import shutil from subprocess import check_call from glob import glob v = sys.version_info if v[:2] < (3,3): error = "ERROR: Jupyter Hub requires Python version 3.3 or above." print(error, file=sys.stderr) sys.exit(1) def mtime(path): return os.stat(path).st_mtime if os.name in ('nt', 'dos'): error = "ERROR: Windows is not supported" print(error, file=sys.stderr) from distutils.core import setup pjoin = os.path.join here = os.path.abspath(os.path.dirname(__file__)) from distutils.cmd import Command from distutils.command.build_py import build_py from distutils.command.sdist import sdist npm_path = ':'.join([ pjoin(here, 'node_modules', '.bin'), os.environ.get("PATH", os.defpath), ]) here = os.path.abspath(os.path.dirname(__file__)) share = pjoin(here, 'share') static = pjoin(share, 'static') class BaseCommand(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def get_inputs(self): return [] def get_outputs(self): return [] class Bower(BaseCommand): description = "fetch static client-side components with bower" user_options = [] bower_dir = pjoin(static, 'components') node_modules = pjoin(here, 'node_modules') def should_run(self): if not os.path.exists(self.bower_dir): return True return mtime(self.bower_dir) < mtime(pjoin(here, 'bower.json')) def should_run_npm(self): if not shutil.which('npm'): print("npm unavailable", file=sys.stderr) return False if not os.path.exists(self.node_modules): return True return mtime(self.node_modules) < mtime(pjoin(here, 'package.json')) def run(self): if not self.should_run(): print("bower dependencies up to date") return if self.should_run_npm(): print("installing build dependencies with npm") check_call(['npm', 'install'], cwd=here) os.utime(self.node_modules) env = os.environ.copy() env['PATH'] = npm_path try: check_call( ['bower', 'install', '--allow-root', '--config.interactive=false'], cwd=here, env=env, ) except OSError as e: print("Failed to run bower: %s" % e, file=sys.stderr) print("You can install js dependencies with `npm install`", file=sys.stderr) raise os.utime(self.bower_dir) self.distribution.data_files = get_data_files() class CSS(BaseCommand): description = "compile CSS from LESS" def should_run(self): css_targets = [pjoin(static, 'css', 'style.min.css')] css_maps = [t + '.map' for t in css_targets] targets = css_targets + css_maps if not all(os.path.exists(t) for t in targets): return True earliest_target = sorted(mtime(t) for t in targets)[0] for (dirpath, dirnames, filenames) in os.walk(static): for f in filenames: if f.endswith('.less'): path = pjoin(static, dirpath, f) timestamp = mtime(path) if timestamp > earliest_target: return True return False def run(self): if not self.should_run(): print("CSS up-to-date") return self.run_command('js') style_less = pjoin(static, 'less', 'style.less') style_css = pjoin(static, 'css', 'style.min.css') sourcemap = style_css + '.map' env = os.environ.copy() env['PATH'] = npm_path try: check_call([ 'lessc', '--clean-css', '--source-map-basepath={}'.format(static), '--source-map={}'.format(sourcemap), '--source-map-rootpath=../', style_less, style_css, ], cwd=here, env=env) except OSError as e: print("Failed to run lessc: %s" % e, file=sys.stderr) print("You can install js dependencies with `npm install`", file=sys.stderr) raise self.distribution.data_files = get_data_files()
BSD 3-Clause New or Revised License
gamer-os/steam-buddy
chimera_app/shortcuts.py
SteamShortcutsFile.match_app_id
python
def match_app_id(self, app_id: str) -> dict:
    if not self.current_data:
        self.load_data()
    data = self.current_data
    for short in data:
        if 'appid' in data[short] and data[short]['appid'] == app_id:
            return data[short].copy()
    return {}
Returns a copy of the shortcut dictionary for the given app_id. If not found, returns a new empty dictionary.
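A usage sketch; the Steam user id, executable path and game name are placeholders, and the 'AppName' key is the usual shortcuts.vdf field name, assumed here rather than taken from this file.

from chimera_app.shortcuts import SteamShortcutsFile, get_compat_id

app_id = get_compat_id("/usr/bin/mygame", "My Game")   # placeholder exe and name
shortcuts = SteamShortcutsFile("12345678")             # auto_load=True reads the user's shortcuts.vdf
entry = shortcuts.match_app_id(app_id)
print(entry.get("AppName", "no shortcut with that appid"))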
https://github.com/gamer-os/steam-buddy/blob/2df83290d32c03ce71f694166570a2247684e4cd/chimera_app/shortcuts.py#L129-L142
import os from typing import List from datetime import date from zlib import crc32 import vdf import yaml import chimera_app.context as context import chimera_app.utils as utils import chimera_app.steam_config as steam_config def create_all_shortcuts(): if not os.path.isdir(context.SHORTCUT_DIRS): print(f'Shortcuts directory does not exist ({context.SHORTCUT_DIRS})') return manager = ShortcutsManager() for user_dir in context.STEAM_USER_DIRS: manager.add_steam_file_for_user(os.path.basename(user_dir)) for file in os.scandir(context.SHORTCUT_DIRS): if file.is_file(): manager.add_shortcuts_file_from_path(file.path) manager.load_shortcut_entries() manager.create_shortcuts() manager.create_images() manager.register_compat_data() def get_banner_id(exe, name): crc_input = ''.join([exe, name]) high_32 = crc32(crc_input.encode('utf-8')) | 0x80000000 full_64 = (high_32 << 32) | 0x02000000 return full_64 def get_compat_id(exe, name): crc_input = ''.join([exe, name]) return (crc32(crc_input.encode('utf-8')) & 0xffffffff) | 0x80000000 def get_shortcut_id(exe, name): return get_compat_id(exe, name) - 2**32 def get_poster_id(exe, name): return str(get_compat_id(exe, name)) + 'p' def get_background_id(exe, name): return str(get_compat_id(exe, name)) + '_hero' def get_logo_id(exe, name): return str(get_compat_id(exe, name)) + '_logo' def get_image_id(type, exe, name): if type == 'banner': return get_banner_id(exe, name) elif type == 'poster': return get_poster_id(exe, name) elif type == 'background': return get_background_id(exe, name) elif type == 'logo': return get_logo_id(exe, name) class SteamShortcutsFile(): path: str user_id: str new_data: dict current_data: dict def __init__(self, user_id: str, auto_load: bool = True): self.user_id = user_id self.path = os.path.join(context.STEAM_DIR, 'userdata', user_id, 'config/shortcuts.vdf') self.current_data = None self.new_data = {} if auto_load: self.load_data() def exists(self) -> bool: return os.path.exists(self.path) def get_current_data(self) -> dict: return self.current_data def get_new_data(self) -> dict: return self.new_data def load_data(self) -> None: if not self.exists(): self.current_data = {} return with open(self.path, 'rb') as vdf_file: data = vdf.binary_load(vdf_file) if 'shortcuts' in data: self.current_data = data['shortcuts']
MIT License
yijiangh/pybullet_planning
src/pybullet_planning/interfaces/robots/collision.py
pairwise_link_collision_info
python
def pairwise_link_collision_info(body1, link1, body2, link2=BASE_LINK, max_distance=MAX_DISTANCE,
                                 distance_threshold=0.0):
    return p.getClosestPoints(bodyA=body1, bodyB=body2, distance=max_distance,
                              linkIndexA=link1, linkIndexB=link2,
                              physicsClientId=CLIENT)
Check pairwise link collision info between two bodies.

See ``getClosestPoints`` in the `pybullet documentation
<https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit?usp=sharing>`_.
TODO: [DOC] shared reference to pybullet doc

Note: `getContactPoints` can be used here as well

Parameters
----------
body1 : pb_body
    [description]
link1 : pb_link
    [description]
body2 : pb_body
    [description]
link2 : pb_link, optional
    [description], by default BASE_LINK
max_distance : float, optional
    If the distance between objects exceeds this maximum distance, no points may be returned.
    By default MAX_DISTANCE (set in pybullet_planning.utils.shared_const).
distance_threshold : float, optional
    This argument is not used here; it exists so that `**kwargs` works without
    overhauling too many functions. See `pairwise_link_collision`'s
    `distance_threshold` argument for its real usage.

Returns
-------
list of contact points
    Each element of the list has the following fields:

    contactFlag : int
        reserved
    bodyUniqueIdA : int
        body unique id of body A
    bodyUniqueIdB : int
        body unique id of body B
    linkIndexA : int
        link index of body A, -1 for base
    linkIndexB : int
        link index of body B, -1 for base
    positionOnA : vec3, list of 3 floats
        contact position on A, in Cartesian world coordinates
    positionOnB : vec3, list of 3 floats
        contact position on B, in Cartesian world coordinates
    contactNormalOnB : vec3, list of 3 floats
        contact normal on B, pointing towards A
    contactDistance : float
        contact distance, positive for separation, negative for penetration
    normalForce : float
        normal force applied during the last 'stepSimulation'
    lateralFriction1 : float
        lateral friction force in the lateralFrictionDir1 direction
    lateralFrictionDir1 : vec3, list of 3 floats
        first lateral friction direction
    lateralFriction2 : float
        lateral friction force in the lateralFrictionDir2 direction
    lateralFrictionDir2 : vec3, list of 3 floats
        second lateral friction direction
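A usage sketch under the assumption that a pybullet simulation is already connected and `robot` and `obstacle` are loaded body ids; the contact-point tuple index used below (8 for contactDistance) follows the field order listed in the docstring.

points = pairwise_link_collision_info(robot, link1=0, body2=obstacle)  # obstacle base link
if points:
    closest = min(pt[8] for pt in points)   # contactDistance: negative means penetration
    print("closest signed distance:", closest)
else:
    print("no pairs within MAX_DISTANCE")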
https://github.com/yijiangh/pybullet_planning/blob/cc90e8620dd61acc7fc4246638d91f222b64e77a/src/pybullet_planning/interfaces/robots/collision.py#L32-L93
import warnings from collections import namedtuple from itertools import product import numpy as np import pybullet as p from pybullet_planning.utils import CLIENT, BASE_LINK, MAX_DISTANCE, UNKNOWN_FILE from pybullet_planning.utils import get_client from pybullet_planning.interfaces.env_manager.user_io import step_simulation from pybullet_planning.interfaces.env_manager.pose_transformation import get_distance from pybullet_planning.interfaces.robots.body import get_all_links, get_bodies, get_links def contact_collision(): step_simulation() return len(p.getContactPoints(physicsClientId=CLIENT)) != 0 ContactResult = namedtuple('ContactResult', ['contactFlag', 'bodyUniqueIdA', 'bodyUniqueIdB', 'linkIndexA', 'linkIndexB', 'positionOnA', 'positionOnB', 'contactNormalOnB', 'contactDistance', 'normalForce'])
MIT License
qax-a-team/luwu
src/backend/utils/redis.py
RedisPool.__getattr__
python
def __getattr__(self, name):
    return getattr(self.conn, name)
the __getattr__() method is actually a fallback method that only gets called when an attribute is not found
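A sketch of the delegation this enables, using the class above and assuming a reachable Redis instance matching the REDIS config: any attribute not defined on RedisPool is resolved on the wrapped redis.Redis connection, so standard client methods pass straight through.

pool = RedisPool()
pool.set("luwu:example", "1", ex=60)   # not defined on RedisPool -> __getattr__ -> self.conn.set(...)
value = pool.get("luwu:example")       # likewise delegates to redis.Redis.get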
https://github.com/qax-a-team/luwu/blob/c96e3b25992ff5300ff76b6bf82f3d75a15c8477/src/backend/utils/redis.py#L41-L46
import json import redis from sqlalchemy.orm import Session from core.config import CELERY_TASK_EXPIRE from core.config import PROJECT_NAME from core.config import REDIS from crud.crud_config import crud_isp from crud.crud_vps import crud_vps class RedisPool: _pool_instance = None TASK_SCHEDULED_STATUS = 'scheduled' TASK_QUEUED_STATUS = 'queued' TASK_RUNNING_STATUS = 'running' VPS_SPEC_DATA_KEY_PATTERN = 'vps_spec_{name}' VPS_SPEC_CACHE_EXPIRE_TIME = 7 * 24 * 60 * 60 def __new__(cls, *args, **kwargs): redis_host = REDIS.get('host', 'localhost') redis_port = REDIS.get('port', 6379) redis_db = REDIS.get('db', 0) redis_password = REDIS.get('password', None) if cls._pool_instance is None: cls._pool_instance = redis.ConnectionPool( host=redis_host, port=redis_port, db=redis_db, password=redis_password, *args, **kwargs ) return super().__new__(cls, *args, **kwargs) def __init__(self): self.conn = redis.Redis(connection_pool=self._pool_instance)
Apache License 2.0
zachchristensen28/ta-opnsense
bin/ta_opnsense/aob_py3/future/backports/xmlrpc/client.py
dumps
python
def dumps(params, methodname=None, methodresponse=None, encoding=None,
          allow_none=False):
    assert isinstance(params, (tuple, Fault)), "argument must be tuple or Fault instance"
    if isinstance(params, Fault):
        methodresponse = 1
    elif methodresponse and isinstance(params, tuple):
        assert len(params) == 1, "response tuple must be a singleton"

    if not encoding:
        encoding = "utf-8"

    if FastMarshaller:
        m = FastMarshaller(encoding)
    else:
        m = Marshaller(encoding, allow_none)

    data = m.dumps(params)

    if encoding != "utf-8":
        xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
    else:
        xmlheader = "<?xml version='1.0'?>\n"

    if methodname:
        if not isinstance(methodname, str):
            methodname = methodname.encode(encoding)
        data = (
            xmlheader,
            "<methodCall>\n"
            "<methodName>", methodname, "</methodName>\n",
            data,
            "</methodCall>\n"
            )
    elif methodresponse:
        data = (
            xmlheader,
            "<methodResponse>\n",
            data,
            "</methodResponse>\n"
            )
    else:
        return data
    return str("").join(data)
data [,options] -> marshalled data

Convert an argument tuple or a Fault instance to an XML-RPC
request (or response, if the methodresponse option is used).

In addition to the data object, the following options can be given
as keyword arguments:

    methodname: the method name for a methodCall packet

    methodresponse: true to create a methodResponse packet.
    If this option is used with a tuple, the tuple must be
    a singleton (i.e. it can contain only one element).

    encoding: the packet encoding (default is UTF-8)

All byte strings in the data structure are assumed to use the
packet encoding. Unicode strings are automatically converted,
where necessary.
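A short usage sketch; this backport mirrors the standard-library xmlrpc.client.dumps, so the same three call shapes apply.

from future.backports.xmlrpc.client import dumps, Fault

request_xml = dumps((1, 2), methodname="sample.add")     # a methodCall packet
response_xml = dumps((3,), methodresponse=True)          # response tuple must be a singleton
fault_xml = dumps(Fault(4, "Too many parameters."))      # a fault response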
https://github.com/zachchristensen28/ta-opnsense/blob/fc736f4c6f0fa7866b4f6d2dcf9761b6b693d6cf/bin/ta_opnsense/aob_py3/future/backports/xmlrpc/client.py#L931-L997
from __future__ import (absolute_import, division, print_function, unicode_literals) from future.builtins import bytes, dict, int, range, str import base64 base64.encodebytes = base64.encodestring base64.decodebytes = base64.decodestring import sys import time from datetime import datetime from future.backports.http import client as http_client from future.backports.urllib import parse as urllib_parse from future.utils import ensure_new_type from xml.parsers import expat import socket import errno from io import BytesIO try: import gzip except ImportError: gzip = None def escape(s): s = s.replace("&", "&amp;") s = s.replace("<", "&lt;") return s.replace(">", "&gt;",) __version__ = sys.version[:3] MAXINT = 2**31-1 MININT = -2**31 PARSE_ERROR = -32700 SERVER_ERROR = -32600 APPLICATION_ERROR = -32500 SYSTEM_ERROR = -32400 TRANSPORT_ERROR = -32300 NOT_WELLFORMED_ERROR = -32700 UNSUPPORTED_ENCODING = -32701 INVALID_ENCODING_CHAR = -32702 INVALID_XMLRPC = -32600 METHOD_NOT_FOUND = -32601 INVALID_METHOD_PARAMS = -32602 INTERNAL_ERROR = -32603 class Error(Exception): def __str__(self): return repr(self) class ProtocolError(Error): def __init__(self, url, errcode, errmsg, headers): Error.__init__(self) self.url = url self.errcode = errcode self.errmsg = errmsg self.headers = headers def __repr__(self): return ( "<ProtocolError for %s: %s %s>" % (self.url, self.errcode, self.errmsg) ) class ResponseError(Error): pass class Fault(Error): def __init__(self, faultCode, faultString, **extra): Error.__init__(self) self.faultCode = faultCode self.faultString = faultString def __repr__(self): return "<Fault %s: %r>" % (ensure_new_type(self.faultCode), ensure_new_type(self.faultString)) boolean = Boolean = bool def _iso8601_format(value): return "%04d%02d%02dT%02d:%02d:%02d" % ( value.year, value.month, value.day, value.hour, value.minute, value.second) def _strftime(value): if isinstance(value, datetime): return _iso8601_format(value) if not isinstance(value, (tuple, time.struct_time)): if value == 0: value = time.time() value = time.localtime(value) return "%04d%02d%02dT%02d:%02d:%02d" % value[:6] class DateTime(object): def __init__(self, value=0): if isinstance(value, str): self.value = value else: self.value = _strftime(value) def make_comparable(self, other): if isinstance(other, DateTime): s = self.value o = other.value elif isinstance(other, datetime): s = self.value o = _iso8601_format(other) elif isinstance(other, str): s = self.value o = other elif hasattr(other, "timetuple"): s = self.timetuple() o = other.timetuple() else: otype = (hasattr(other, "__class__") and other.__class__.__name__ or type(other)) raise TypeError("Can't compare %s and %s" % (self.__class__.__name__, otype)) return s, o def __lt__(self, other): s, o = self.make_comparable(other) return s < o def __le__(self, other): s, o = self.make_comparable(other) return s <= o def __gt__(self, other): s, o = self.make_comparable(other) return s > o def __ge__(self, other): s, o = self.make_comparable(other) return s >= o def __eq__(self, other): s, o = self.make_comparable(other) return s == o def __ne__(self, other): s, o = self.make_comparable(other) return s != o def timetuple(self): return time.strptime(self.value, "%Y%m%dT%H:%M:%S") def __str__(self): return self.value def __repr__(self): return "<DateTime %r at %x>" % (ensure_new_type(self.value), id(self)) def decode(self, data): self.value = str(data).strip() def encode(self, out): out.write("<value><dateTime.iso8601>") out.write(self.value) 
out.write("</dateTime.iso8601></value>\n") def _datetime(data): value = DateTime() value.decode(data) return value def _datetime_type(data): return datetime.strptime(data, "%Y%m%dT%H:%M:%S") class Binary(object): def __init__(self, data=None): if data is None: data = b"" else: if not isinstance(data, (bytes, bytearray)): raise TypeError("expected bytes or bytearray, not %s" % data.__class__.__name__) data = bytes(data) self.data = data def __str__(self): return str(self.data, "latin-1") def __eq__(self, other): if isinstance(other, Binary): other = other.data return self.data == other def __ne__(self, other): if isinstance(other, Binary): other = other.data return self.data != other def decode(self, data): self.data = base64.decodebytes(data) def encode(self, out): out.write("<value><base64>\n") encoded = base64.encodebytes(self.data) out.write(encoded.decode('ascii')) out.write("</base64></value>\n") def _binary(data): value = Binary() value.decode(data) return value WRAPPERS = (DateTime, Binary) class ExpatParser(object): def __init__(self, target): self._parser = parser = expat.ParserCreate(None, None) self._target = target parser.StartElementHandler = target.start parser.EndElementHandler = target.end parser.CharacterDataHandler = target.data encoding = None target.xml(encoding, None) def feed(self, data): self._parser.Parse(data, 0) def close(self): self._parser.Parse("", 1) del self._target, self._parser class Marshaller(object): def __init__(self, encoding=None, allow_none=False): self.memo = {} self.data = None self.encoding = encoding self.allow_none = allow_none dispatch = {} def dumps(self, values): out = [] write = out.append dump = self.__dump if isinstance(values, Fault): write("<fault>\n") dump({'faultCode': values.faultCode, 'faultString': values.faultString}, write) write("</fault>\n") else: write("<params>\n") for v in values: write("<param>\n") dump(v, write) write("</param>\n") write("</params>\n") result = "".join(out) return str(result) def __dump(self, value, write): try: f = self.dispatch[type(ensure_new_type(value))] except KeyError: if not hasattr(value, '__dict__'): raise TypeError("cannot marshal %s objects" % type(value)) for type_ in type(value).__mro__: if type_ in self.dispatch.keys(): raise TypeError("cannot marshal %s objects" % type(value)) f = self.dispatch["_arbitrary_instance"] f(self, value, write) def dump_nil (self, value, write): if not self.allow_none: raise TypeError("cannot marshal None unless allow_none is enabled") write("<value><nil/></value>") dispatch[type(None)] = dump_nil def dump_bool(self, value, write): write("<value><boolean>") write(value and "1" or "0") write("</boolean></value>\n") dispatch[bool] = dump_bool def dump_long(self, value, write): if value > MAXINT or value < MININT: raise OverflowError("long int exceeds XML-RPC limits") write("<value><int>") write(str(int(value))) write("</int></value>\n") dispatch[int] = dump_long dump_int = dump_long def dump_double(self, value, write): write("<value><double>") write(repr(ensure_new_type(value))) write("</double></value>\n") dispatch[float] = dump_double def dump_unicode(self, value, write, escape=escape): write("<value><string>") write(escape(value)) write("</string></value>\n") dispatch[str] = dump_unicode def dump_bytes(self, value, write): write("<value><base64>\n") encoded = base64.encodebytes(value) write(encoded.decode('ascii')) write("</base64></value>\n") dispatch[bytes] = dump_bytes dispatch[bytearray] = dump_bytes def dump_array(self, value, write): i = id(value) if i in 
self.memo: raise TypeError("cannot marshal recursive sequences") self.memo[i] = None dump = self.__dump write("<value><array><data>\n") for v in value: dump(v, write) write("</data></array></value>\n") del self.memo[i] dispatch[tuple] = dump_array dispatch[list] = dump_array def dump_struct(self, value, write, escape=escape): i = id(value) if i in self.memo: raise TypeError("cannot marshal recursive dictionaries") self.memo[i] = None dump = self.__dump write("<value><struct>\n") for k, v in value.items(): write("<member>\n") if not isinstance(k, str): raise TypeError("dictionary key must be string") write("<name>%s</name>\n" % escape(k)) dump(v, write) write("</member>\n") write("</struct></value>\n") del self.memo[i] dispatch[dict] = dump_struct def dump_datetime(self, value, write): write("<value><dateTime.iso8601>") write(_strftime(value)) write("</dateTime.iso8601></value>\n") dispatch[datetime] = dump_datetime def dump_instance(self, value, write): if value.__class__ in WRAPPERS: self.write = write value.encode(self) del self.write else: self.dump_struct(value.__dict__, write) dispatch[DateTime] = dump_instance dispatch[Binary] = dump_instance dispatch["_arbitrary_instance"] = dump_instance class Unmarshaller(object): def __init__(self, use_datetime=False, use_builtin_types=False): self._type = None self._stack = [] self._marks = [] self._data = [] self._methodname = None self._encoding = "utf-8" self.append = self._stack.append self._use_datetime = use_builtin_types or use_datetime self._use_bytes = use_builtin_types def close(self): if self._type is None or self._marks: raise ResponseError() if self._type == "fault": raise Fault(**self._stack[0]) return tuple(self._stack) def getmethodname(self): return self._methodname def xml(self, encoding, standalone): self._encoding = encoding def start(self, tag, attrs): if tag == "array" or tag == "struct": self._marks.append(len(self._stack)) self._data = [] self._value = (tag == "value") def data(self, text): self._data.append(text) def end(self, tag): try: f = self.dispatch[tag] except KeyError: pass else: return f(self, "".join(self._data)) def end_dispatch(self, tag, data): try: f = self.dispatch[tag] except KeyError: pass else: return f(self, data) dispatch = {} def end_nil (self, data): self.append(None) self._value = 0 dispatch["nil"] = end_nil def end_boolean(self, data): if data == "0": self.append(False) elif data == "1": self.append(True) else: raise TypeError("bad boolean value") self._value = 0 dispatch["boolean"] = end_boolean def end_int(self, data): self.append(int(data)) self._value = 0 dispatch["i4"] = end_int dispatch["i8"] = end_int dispatch["int"] = end_int def end_double(self, data): self.append(float(data)) self._value = 0 dispatch["double"] = end_double def end_string(self, data): if self._encoding: data = data.decode(self._encoding) self.append(data) self._value = 0 dispatch["string"] = end_string dispatch["name"] = end_string def end_array(self, data): mark = self._marks.pop() self._stack[mark:] = [self._stack[mark:]] self._value = 0 dispatch["array"] = end_array def end_struct(self, data): mark = self._marks.pop() dict = {} items = self._stack[mark:] for i in range(0, len(items), 2): dict[items[i]] = items[i+1] self._stack[mark:] = [dict] self._value = 0 dispatch["struct"] = end_struct def end_base64(self, data): value = Binary() value.decode(data.encode("ascii")) if self._use_bytes: value = value.data self.append(value) self._value = 0 dispatch["base64"] = end_base64 def end_dateTime(self, data): value = 
DateTime() value.decode(data) if self._use_datetime: value = _datetime_type(data) self.append(value) dispatch["dateTime.iso8601"] = end_dateTime def end_value(self, data): if self._value: self.end_string(data) dispatch["value"] = end_value def end_params(self, data): self._type = "params" dispatch["params"] = end_params def end_fault(self, data): self._type = "fault" dispatch["fault"] = end_fault def end_methodName(self, data): if self._encoding: data = data.decode(self._encoding) self._methodname = data self._type = "methodName" dispatch["methodName"] = end_methodName class _MultiCallMethod(object): def __init__(self, call_list, name): self.__call_list = call_list self.__name = name def __getattr__(self, name): return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name)) def __call__(self, *args): self.__call_list.append((self.__name, args)) class MultiCallIterator(object): def __init__(self, results): self.results = results def __getitem__(self, i): item = self.results[i] if isinstance(type(item), dict): raise Fault(item['faultCode'], item['faultString']) elif type(item) == type([]): return item[0] else: raise ValueError("unexpected type in multicall result") class MultiCall(object): def __init__(self, server): self.__server = server self.__call_list = [] def __repr__(self): return "<MultiCall at %x>" % id(self) __str__ = __repr__ def __getattr__(self, name): return _MultiCallMethod(self.__call_list, name) def __call__(self): marshalled_list = [] for name, args in self.__call_list: marshalled_list.append({'methodName' : name, 'params' : args}) return MultiCallIterator(self.__server.system.multicall(marshalled_list)) FastMarshaller = FastParser = FastUnmarshaller = None def getparser(use_datetime=False, use_builtin_types=False): if FastParser and FastUnmarshaller: if use_builtin_types: mkdatetime = _datetime_type mkbytes = base64.decodebytes elif use_datetime: mkdatetime = _datetime_type mkbytes = _binary else: mkdatetime = _datetime mkbytes = _binary target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault) parser = FastParser(target) else: target = Unmarshaller(use_datetime=use_datetime, use_builtin_types=use_builtin_types) if FastParser: parser = FastParser(target) else: parser = ExpatParser(target) return parser, target
MIT License
pytorchlightning/lightning-bolts
pl_bolts/models/detection/yolo/yolo_module.py
YOLO.load_darknet_weights
python
def load_darknet_weights(self, weight_file):
    version = np.fromfile(weight_file, count=3, dtype=np.int32)
    images_seen = np.fromfile(weight_file, count=1, dtype=np.int64)
    rank_zero_info(
        f"Loading weights from Darknet model version {version[0]}.{version[1]}.{version[2]} "
        f"that has been trained on {images_seen[0]} images."
    )

    def read(tensor):
        x = np.fromfile(weight_file, count=tensor.numel(), dtype=np.float32)
        if x.shape[0] == 0:
            return
        x = torch.from_numpy(x).view_as(tensor)
        with torch.no_grad():
            tensor.copy_(x)

    for module in self.network:
        if not isinstance(module, nn.Sequential):
            continue

        conv = module[0]
        assert isinstance(conv, nn.Conv2d)

        if len(module) > 1 and isinstance(module[1], nn.BatchNorm2d):
            bn = module[1]
            read(bn.bias)
            read(bn.weight)
            read(bn.running_mean)
            read(bn.running_var)
        else:
            read(conv.bias)

        read(conv.weight)
Loads weights to layer modules from a pretrained Darknet model.

One may want to continue training from the pretrained weights, on a dataset with a
different number of object categories. The number of kernels in the convolutional layers
just before each detection layer depends on the number of output classes. The Darknet
solution is to truncate the weight file and stop reading weights at the first incompatible
layer. For this reason the function silently leaves the rest of the layers unchanged, when
the weight file ends.

Args:
    weight_file: A file object containing model weights in the Darknet binary format.
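A usage sketch; the weight file name is a placeholder, and `network` stands for the nn.ModuleList the model was constructed with (built elsewhere, e.g. from a Darknet configuration). The file is opened in binary mode because the loader reads raw arrays with np.fromfile.

from pl_bolts.models.detection.yolo.yolo_module import YOLO

model = YOLO(network=network)                            # `network` is an assumed, pre-built nn.ModuleList
with open("yolov4-tiny.weights", "rb") as weight_file:   # placeholder path
    model.load_darknet_weights(weight_file)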
https://github.com/pytorchlightning/lightning-bolts/blob/f4f6d53a039c521f3441750fa5297c7694320119/pl_bolts/models/detection/yolo/yolo_module.py#L279-L330
import logging from typing import Any, Dict, List, Optional, Tuple, Type import numpy as np import torch import torch.nn as nn from pytorch_lightning import LightningModule from pytorch_lightning.utilities import rank_zero_info from torch import Tensor, optim from pl_bolts.models.detection.yolo.yolo_layers import DetectionLayer, RouteLayer, ShortcutLayer from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR from pl_bolts.utils import _TORCHVISION_AVAILABLE from pl_bolts.utils.warnings import warn_missing_pkg if _TORCHVISION_AVAILABLE: from torchvision.ops import nms from torchvision.transforms import functional as F else: warn_missing_pkg("torchvision") log = logging.getLogger(__name__) class YOLO(LightningModule): def __init__( self, network: nn.ModuleList, optimizer: Type[optim.Optimizer] = optim.SGD, optimizer_params: Dict[str, Any] = {"lr": 0.001, "momentum": 0.9, "weight_decay": 0.0005}, lr_scheduler: Type[optim.lr_scheduler._LRScheduler] = LinearWarmupCosineAnnealingLR, lr_scheduler_params: Dict[str, Any] = {"warmup_epochs": 1, "max_epochs": 300, "warmup_start_lr": 0.0}, confidence_threshold: float = 0.2, nms_threshold: float = 0.45, max_predictions_per_image: int = -1, ) -> None: super().__init__() if not _TORCHVISION_AVAILABLE: raise ModuleNotFoundError("YOLO model uses `torchvision`, which is not installed yet.") self.network = network self.optimizer_class = optimizer self.optimizer_params = optimizer_params self.lr_scheduler_class = lr_scheduler self.lr_scheduler_params = lr_scheduler_params self.confidence_threshold = confidence_threshold self.nms_threshold = nms_threshold self.max_predictions_per_image = max_predictions_per_image def forward( self, images: Tensor, targets: Optional[List[Dict[str, Tensor]]] = None ) -> Tuple[Tensor, Dict[str, Tensor]]: outputs = [] detections = [] losses = [] hits = [] image_height = images.shape[2] image_width = images.shape[3] image_size = torch.tensor([image_width, image_height], device=images.device) x = images for module in self.network: if isinstance(module, (RouteLayer, ShortcutLayer)): x = module(x, outputs) elif isinstance(module, DetectionLayer): if targets is None: x = module(x, image_size) detections.append(x) else: x, layer_losses, layer_hits = module(x, image_size, targets) detections.append(x) losses.append(layer_losses) hits.append(layer_hits) else: x = module(x) outputs.append(x) detections = torch.cat(detections, 1) if targets is None: return detections total_hits = sum(hits) num_targets = sum(len(image_targets["boxes"]) for image_targets in targets) if total_hits != num_targets: log.warning( f"{num_targets} training targets were matched a total of {total_hits} times by detection layers. " "Anchors may have been configured incorrectly." 
) for layer_idx, layer_hits in enumerate(hits): hit_rate = torch.true_divide(layer_hits, total_hits) if total_hits > 0 else 1.0 self.log(f"layer_{layer_idx}_hit_rate", hit_rate, sync_dist=False) def total_loss(loss_name): loss_tuple = tuple(layer_losses[loss_name] for layer_losses in losses) return torch.stack(loss_tuple).sum() losses = {loss_name: total_loss(loss_name) for loss_name in losses[0].keys()} return detections, losses def configure_optimizers(self) -> Tuple[List, List]: optimizer = self.optimizer_class(self.parameters(), **self.optimizer_params) lr_scheduler = self.lr_scheduler_class(optimizer, **self.lr_scheduler_params) return [optimizer], [lr_scheduler] def training_step(self, batch: Tuple[List[Tensor], List[Dict[str, Tensor]]], batch_idx: int) -> Dict[str, Tensor]: images, targets = self._validate_batch(batch) _, losses = self(images, targets) total_loss = torch.stack(tuple(losses.values())).sum() for name, value in losses.items(): self.log(f"train/{name}_loss", value, prog_bar=True, sync_dist=False) self.log("train/total_loss", total_loss, sync_dist=False) return {"loss": total_loss} def validation_step(self, batch: Tuple[List[Tensor], List[Dict[str, Tensor]]], batch_idx: int): images, targets = self._validate_batch(batch) detections, losses = self(images, targets) detections = self._split_detections(detections) detections = self._filter_detections(detections) total_loss = torch.stack(tuple(losses.values())).sum() for name, value in losses.items(): self.log(f"val/{name}_loss", value, sync_dist=True) self.log("val/total_loss", total_loss, sync_dist=True) def test_step(self, batch: Tuple[List[Tensor], List[Dict[str, Tensor]]], batch_idx: int): images, targets = self._validate_batch(batch) detections, losses = self(images, targets) detections = self._split_detections(detections) detections = self._filter_detections(detections) total_loss = torch.stack(tuple(losses.values())).sum() for name, value in losses.items(): self.log(f"test/{name}_loss", value, sync_dist=True) self.log("test/total_loss", total_loss, sync_dist=True) def infer(self, image: Tensor) -> Tuple[Tensor, Tensor, Tensor]: if not isinstance(image, torch.Tensor): image = F.to_tensor(image) self.eval() detections = self(image.unsqueeze(0)) detections = self._split_detections(detections) detections = self._filter_detections(detections) boxes = detections["boxes"][0] scores = detections["scores"][0] labels = detections["labels"][0] return boxes, scores, labels
Apache License 2.0
pysmt/pysmt
pysmt/shortcuts.py
Not
python
def Not(formula):
    return get_env().formula_manager.Not(formula)
.. math:: \lnot \varphi
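A quick sketch with pysmt.shortcuts (Symbol defaults to the BOOL type); running the satisfiability check assumes at least one SMT solver is installed.

from pysmt.shortcuts import Symbol, Not, is_sat

a = Symbol("a")       # Boolean symbol
f = Not(a)            # \lnot a
assert is_sat(f)      # satisfiable, e.g. with a = False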
https://github.com/pysmt/pysmt/blob/ade4dc2a825727615033a96d31c71e9f53ce4764/pysmt/shortcuts.py#L167-L169
import warnings warnings.filterwarnings('default', module='pysmt') import pysmt.configuration as config import pysmt.environment import pysmt.typing as types import pysmt.smtlib.parser import pysmt.smtlib.script import pysmt.smtlib.printers from pysmt.typing import INT, BOOL, REAL, BVType, FunctionType, ArrayType, Type assert INT or BOOL or REAL or BVType or FunctionType or ArrayType or Type def get_env(): return pysmt.environment.get_env() def reset_env(): return pysmt.environment.reset_env() get_env().enable_infix_notation = True def get_type(formula): return get_env().stc.get_type(formula) def simplify(formula): return get_env().simplifier.simplify(formula) def substitute(formula, subs): return get_env().substituter.substitute(formula, subs) def serialize(formula, threshold=None): return get_env().serializer.serialize(formula, threshold=threshold) def get_free_variables(formula): return get_env().fvo.get_free_variables(formula) def get_atoms(formula): return get_env().ao.get_atoms(formula) def get_formula_size(formula, measure=None): return get_env().sizeo.get_size(formula, measure) def ForAll(variables, formula): return get_env().formula_manager.ForAll(variables, formula) def Exists(variables, formula): return get_env().formula_manager.Exists(variables, formula) def Function(vname, params): return get_env().formula_manager.Function(vname, params)
Apache License 2.0
openstack/debtcollector
debtcollector/_utils.py
get_class_name
python
def get_class_name(obj, fully_qualified=True):
    if not isinstance(obj, six.class_types):
        obj = type(obj)
    try:
        built_in = obj.__module__ in _BUILTIN_MODULES
    except AttributeError:
        pass
    else:
        if built_in:
            return obj.__name__

    if fully_qualified and hasattr(obj, '__module__'):
        return '%s.%s' % (obj.__module__, obj.__name__)
    else:
        return obj.__name__
Get class name for object. If object is a type, fully qualified name of the type is returned. Else, fully qualified name of the type of the object is returned. For builtin types, just name is returned.
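An illustrative behaviour sketch; note that _utils is a private module, so the direct import below is for demonstration only.

from debtcollector._utils import get_class_name  # private helper; illustrative import

class Widget(object):
    pass

print(get_class_name(Widget()))                         # '__main__.Widget' (instance -> its type)
print(get_class_name(Widget))                           # '__main__.Widget' (type passed directly)
print(get_class_name(42))                               # 'int' -- builtins drop the module prefix
print(get_class_name(Widget(), fully_qualified=False))  # 'Widget'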
https://github.com/openstack/debtcollector/blob/aa426abafebec6f36e7aa4484d6acc039f67589f/debtcollector/_utils.py#L104-L124
import functools import inspect import types import warnings import six try: _TYPE_TYPE = types.TypeType except AttributeError: _TYPE_TYPE = type _BUILTIN_MODULES = ('builtins', '__builtin__', '__builtins__', 'exceptions') _enabled = True def deprecation(message, stacklevel=None, category=None): if not _enabled: return if category is None: category = DeprecationWarning if stacklevel is None: warnings.warn(message, category=category) else: warnings.warn(message, category=category, stacklevel=stacklevel) def get_qualified_name(obj): try: return (True, obj.__qualname__) except AttributeError: return (False, obj.__name__) def generate_message(prefix, postfix=None, message=None, version=None, removal_version=None): message_components = [prefix] if version: message_components.append(" in version '%s'" % version) if removal_version: if removal_version == "?": message_components.append(" and will be removed in a future" " version") else: message_components.append(" and will be removed in version '%s'" % removal_version) if postfix: message_components.append(postfix) if message: message_components.append(": %s" % message) return ''.join(message_components) def get_assigned(decorator): if six.PY3: return functools.WRAPPER_ASSIGNMENTS else: assigned = [] for attr_name in functools.WRAPPER_ASSIGNMENTS: if hasattr(decorator, attr_name): assigned.append(attr_name) return tuple(assigned)
Apache License 2.0
googlecloudplatform/appengine-python-standard
src/google/appengine/datastore/datastore_stub_util.py
ParseKindQuery
python
def ParseKindQuery(query, filters, orders):
    Check(not query.HasField('ancestor'),
          'ancestor queries on __kind__ not allowed')
    key_range = ParseKeyFilteredQuery(filters, orders)
    key_range.Remap(_KindKeyToString)
    return key_range
Parse __kind__ (schema) queries.

Raises exceptions for illegal queries.

Args:
    query: A Query PB.
    filters: the normalized filters from query.
    orders: the normalized orders from query.

Returns:
    The kind range (a ValueRange over string) requested in the query.
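A hedged sketch of what the returned range behaves like, using the ValueRange helper shown in the context below; the kind names are hypothetical and the range is built by hand rather than by ParseKindQuery itself.

kr = ValueRange()
kr.Update(datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL, 'Account')
kr.Update(datastore_pb.Query.Filter.LESS_THAN, 'Person')
print(kr.Contains('Book'))    # True  -- inside ['Account', 'Person')
print(kr.Contains('Widget'))  # False -- past the exclusive upper bound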
https://github.com/googlecloudplatform/appengine-python-standard/blob/42c99c7a83f4ed50c724ecdde119a606a3ca58f3/src/google/appengine/datastore/datastore_stub_util.py#L895-L913
import atexit import collections import functools import hashlib import itertools import json import logging import os import random import struct import threading import time import weakref import six from six.moves import filter from six.moves import range from six.moves import zip from six.moves import zip_longest import six.moves.http_client from google.appengine.api import api_base_pb2 from google.appengine.api import apiproxy_stub_map from google.appengine.api import cmp_compat from google.appengine.api import datastore_admin from google.appengine.api import datastore_errors from google.appengine.api import datastore_types from google.appengine.api import yaml_errors from google.appengine.api.taskqueue import taskqueue_service_bytes_pb2 as taskqueue_service_pb2 from google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pbs from google.appengine.datastore import datastore_query from google.appengine.datastore import datastore_stub_index from google.appengine.datastore import datastore_v4_pb2 from google.appengine.runtime import apiproxy_errors from google.protobuf import message from google.appengine.datastore import entity_bytes_pb2 as entity_pb2 if six.PY3: long = int if datastore_pbs._CLOUD_DATASTORE_ENABLED: from google.appengine.datastore.datastore_pbs import googledatastore _MAXIMUM_RESULTS = 300 _MAXIMUM_QUERY_RESULT_BYTES = 2000000 _MAX_QUERY_OFFSET = 1000 _PROPERTY_TYPE_NAMES = { 0: 'NULL', entity_pb2.PropertyValue.INT64VALUE_FIELD_NUMBER: 'INT64', entity_pb2.PropertyValue.BOOLEANVALUE_FIELD_NUMBER: 'BOOLEAN', entity_pb2.PropertyValue.STRINGVALUE_FIELD_NUMBER: 'STRING', entity_pb2.PropertyValue.DOUBLEVALUE_FIELD_NUMBER: 'DOUBLE', entity_pb2.PropertyValue.POINTVALUE_FIELD_NUMBER: 'POINT', entity_pb2.PropertyValue.USERVALUE_FIELD_NUMBER: 'USER', entity_pb2.PropertyValue.REFERENCEVALUE_FIELD_NUMBER: 'REFERENCE' } _SCATTER_PROPORTION = 32768 _MAX_EG_PER_TXN = 25 _BLOB_MEANINGS = frozenset( (entity_pb2.Property.BLOB, entity_pb2.Property.ENTITY_PROTO, entity_pb2.Property.TEXT)) _RETRIES = 3 _INITIAL_RETRY_DELAY_MS = 100 _RETRY_DELAY_MULTIPLIER = 2 _MAX_RETRY_DELAY_MS = 120000 MINIMUM_VERSION = 1 SEQUENTIAL = 'sequential' SCATTERED = 'scattered' _MAX_SEQUENTIAL_BIT = 52 _MAX_SEQUENTIAL_COUNTER = (1 << _MAX_SEQUENTIAL_BIT) - 1 _MAX_SEQUENTIAL_ID = _MAX_SEQUENTIAL_COUNTER _MAX_SCATTERED_COUNTER = (1 << (_MAX_SEQUENTIAL_BIT - 1)) - 1 _MAX_SCATTERED_ID = _MAX_SEQUENTIAL_ID + 1 + _MAX_SCATTERED_COUNTER _SCATTER_SHIFT = 64 - _MAX_SEQUENTIAL_BIT + 1 _EMULATOR_CONFIG_CACHE = None logger = logging.getLogger('google.appengine.api.stubs.datastore') def _GetScatterProperty(entity_proto): hash_obj = hashlib.md5() for element in entity_proto.key.path.element: if element.HasField('name'): hash_obj.update(element.name.encode('utf-8')) elif element.HasField('id'): hash_obj.update(six.ensure_binary(str(element.id))) hash_bytes = hash_obj.digest()[0:2] (hash_int,) = struct.unpack('H', hash_bytes) if hash_int >= _SCATTER_PROPORTION: return None scatter_property = entity_pb2.Property() scatter_property.name = datastore_types.SCATTER_SPECIAL_PROPERTY scatter_property.meaning = entity_pb2.Property.BYTESTRING scatter_property.multiple = False property_value = scatter_property.value property_value.stringValue = hash_bytes return scatter_property _SPECIAL_PROPERTY_MAP = { datastore_types.SCATTER_SPECIAL_PROPERTY: (False, True, _GetScatterProperty) } def GetInvisibleSpecialPropertyNames(): invisible_names = [] for 
name, value in _SPECIAL_PROPERTY_MAP.items(): is_visible, _, _ = value if not is_visible: invisible_names.append(name) return invisible_names def _PrepareSpecialProperties(entity_proto, is_load): for i in range(len(entity_proto.property) - 1, -1, -1): if entity_proto.property[i].name in _SPECIAL_PROPERTY_MAP: del entity_proto.property[i] for is_visible, is_stored, property_func in _SPECIAL_PROPERTY_MAP.values(): if is_load: should_process = is_visible else: should_process = is_stored if should_process: special_property = property_func(entity_proto) if special_property: entity_proto.property.append(special_property) _METADATA_PROPERTY_NAME = '__metadata__' def _FromStorageEntity(entity): clone = entity_pb2.EntityProto() clone.CopyFrom(entity) metadata = entity_pb2.EntityMetadata() for i in range(len(clone.property) - 1, -1, -1): prop = clone.property[i] if _METADATA_PROPERTY_NAME == prop.name: del clone.property[i] metadata = entity_pb2.EntityMetadata.FromString(prop.value.stringValue) return EntityRecord(clone, metadata) def _ToStorageEntity(record): if record: clone = entity_pb2.EntityProto() clone.CopyFrom(record.entity) serialized_metadata = record.metadata.SerializeToString() metadata_property = clone.property.add() metadata_property.name = _METADATA_PROPERTY_NAME metadata_property.meaning = entity_pb2.Property.BLOB metadata_property.multiple = False metadata_property.value.stringValue = serialized_metadata return clone def _GetGroupByKey(entity, property_names): return frozenset((prop.name, prop.value.SerializeToString()) for prop in entity.property if prop.name in property_names) def PrepareSpecialPropertiesForStore(entity_proto): _PrepareSpecialProperties(entity_proto, False) def LoadEntity(entity, keys_only=False, property_names=None): if entity: clone = entity_pb2.EntityProto() if property_names: clone.key.CopyFrom(entity.key) clone.entity_group.SetInParent() seen = set() for prop in entity.property: if prop.name in property_names: Check(prop.name not in seen, 'datastore dev stub produced bad result', datastore_pb.Error.INTERNAL_ERROR) seen.add(prop.name) new_prop = clone.property.add() new_prop.name = prop.name new_prop.meaning = entity_pb2.Property.INDEX_VALUE new_prop.value.CopyFrom(prop.value) new_prop.multiple = False elif keys_only: clone.key.CopyFrom(entity.key) clone.entity_group.SetInParent() else: clone.CopyFrom(entity) PrepareSpecialPropertiesForLoad(clone) return clone def LoadRecord(record, keys_only=False, property_names=None): if record: metadata = record.metadata if keys_only or property_names: metadata = entity_pb2.EntityMetadata() return EntityRecord(LoadEntity(record.entity, keys_only, property_names), metadata) def StoreRecord(record): clone = entity_pb2.EntityProto() clone.CopyFrom(record.entity) PrepareSpecialPropertiesForStore(clone) return EntityRecord(clone, record.metadata) def PrepareSpecialPropertiesForLoad(entity_proto): _PrepareSpecialProperties(entity_proto, True) def Check(test, msg='', error_code=datastore_pb.Error.BAD_REQUEST): if not test: raise apiproxy_errors.ApplicationError(error_code, msg) def CheckValidUTF8(string, desc): if isinstance(string, six.text_type): return True try: string.decode('utf-8') except UnicodeDecodeError: Check(False, '%s is not valid UTF-8.' 
% desc) def CheckAppId(request_trusted, request_app_id, app_id): assert app_id CheckValidUTF8(app_id, 'app id') Check(request_trusted or app_id == request_app_id, 'app "%s" cannot access app "%s"\'s data' % (request_app_id, app_id)) def CheckReference(request_trusted, request_app_id, key, require_id_or_name=True): assert isinstance(key, entity_pb2.Reference) CheckAppId(request_trusted, request_app_id, key.app) Check(key.path.element, 'key\'s path cannot be empty') if require_id_or_name: Check(datastore_pbs.is_complete_v3_key(key), 'missing key id/name') for elem in key.path.element: Check(not elem.HasField('id') or not elem.HasField('name'), 'each key path element should have id or name but not both: %r' % key) CheckValidUTF8(elem.type, 'key path element type') if elem.HasField('name'): CheckValidUTF8(elem.name, 'key path element name') def CheckEntity(request_trusted, request_app_id, entity): CheckReference(request_trusted, request_app_id, entity.key, False) for prop in entity.property: CheckProperty(request_trusted, request_app_id, prop) for prop in entity.raw_property: CheckProperty(request_trusted, request_app_id, prop, indexed=False) def CheckProperty(request_trusted, request_app_id, prop, indexed=True): name = prop.name value = prop.value meaning = prop.meaning CheckValidUTF8(name, 'property name') Check(request_trusted or not datastore_types.RESERVED_PROPERTY_NAME.match(name), 'cannot store entity with reserved property name \'%s\'' % name) Check(prop.meaning != entity_pb2.Property.INDEX_VALUE, 'Entities with incomplete properties cannot be written.') is_blob = meaning in _BLOB_MEANINGS if indexed: Check(not is_blob, 'BLOB, ENITY_PROTO or TEXT property ' + name + ' must be in a raw_property field') max_length = datastore_types._MAX_STRING_LENGTH else: if is_blob: Check( value.HasField('stringValue'), 'BLOB / ENTITY_PROTO / TEXT raw property ' + name + 'must have a string value') max_length = datastore_types._MAX_RAW_PROPERTY_BYTES if meaning == entity_pb2.Property.ATOM_LINK: max_length = datastore_types._MAX_LINK_PROPERTY_LENGTH CheckPropertyValue(name, value, max_length, meaning) def CheckPropertyValue(name, value, max_length, meaning): num_values = ( value.HasField('int64Value') + value.HasField('stringValue') + value.HasField('booleanValue') + value.HasField('doubleValue') + value.HasField('pointvalue') + value.HasField('uservalue') + value.HasField('referencevalue')) Check(num_values <= 1, 'PropertyValue for ' + name + ' has multiple value fields set') if value.HasField('stringValue'): s = value.stringValue if isinstance(s, six.text_type): s = s.encode('utf-8') Check(len(s) <= max_length, 'Property %s is too long. Maximum length is %d.' 
% (name, max_length)) if (meaning not in _BLOB_MEANINGS and meaning != entity_pb2.Property.BYTESTRING): CheckValidUTF8(value.stringValue, 'String property "%s" value' % name) def CheckTransaction(request_trusted, request_app_id, transaction): assert isinstance(transaction, datastore_pb.Transaction) CheckAppId(request_trusted, request_app_id, transaction.app) def CheckQuery(query, filters, orders, max_query_components): Check(not query.property_name or not query.keys_only, 'projection and keys_only cannot both be set') projected_properties = set(query.property_name) for prop_name in query.property_name: Check(not datastore_types.RESERVED_PROPERTY_NAME.match(prop_name), 'projections are not supported for the property: ' + prop_name) Check( len(projected_properties) == len(query.property_name), 'cannot project a property multiple times') key_prop_name = datastore_types.KEY_SPECIAL_PROPERTY unapplied_log_timestamp_us_name = ( datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY) if query.HasField('transaction'): Check( query.HasField('ancestor'), 'Only ancestor queries are allowed inside transactions.') num_components = len(filters) + len(orders) if query.HasField('ancestor'): num_components += 1 Check(num_components <= max_query_components, 'query is too large. may not have more than %s filters' ' + sort orders ancestor total' % max_query_components) if query.HasField('ancestor'): ancestor = query.ancestor Check(query.app == ancestor.app, 'query app is %s but ancestor app is %s' % (query.app, ancestor.app)) Check( query.name_space == ancestor.name_space, 'query namespace is %s but ancestor namespace is %s' % (query.name_space, ancestor.name_space)) if query.group_by_property_name: group_by_set = set(query.group_by_property_name) for order in orders: if not group_by_set: break Check( order.property in group_by_set, 'items in the group by clause must be specified first ' 'in the ordering') group_by_set.remove(order.property) ineq_prop_name = None for filter in filters: Check( len(filter.property) == 1, 'Filter has %d properties, expected 1' % len(filter.property)) prop = filter.property[0] prop_name = prop.name if prop_name == key_prop_name: Check( prop.value.HasField('referencevalue'), '%s filter value must be a Key' % key_prop_name) ref_val = prop.value.referencevalue Check( ref_val.app == query.app, '%s filter app is %s but query app is %s' % (key_prop_name, ref_val.app, query.app)) Check( ref_val.name_space == query.name_space, '%s filter namespace is %s but query namespace is %s' % (key_prop_name, ref_val.name_space, query.name_space)) if filter.op in datastore_index.EQUALITY_OPERATORS: Check(prop_name not in projected_properties, 'cannot use projection on a property with an equality filter') if (filter.op in datastore_index.INEQUALITY_OPERATORS and prop_name != unapplied_log_timestamp_us_name): if ineq_prop_name is None: ineq_prop_name = prop_name else: Check(ineq_prop_name == prop_name, 'Only one inequality filter per query is supported. ' 'Encountered both %s and %s' % (ineq_prop_name, prop_name)) if ineq_prop_name is not None and query.group_by_property_name and not orders: Check(ineq_prop_name in group_by_set, 'Inequality filter on %s must also be a group by ' 'property when group by properties are set.' % (ineq_prop_name)) if ineq_prop_name is not None and orders: first_order_prop = _Decode(orders[0].property) Check(first_order_prop == ineq_prop_name, 'The first sort property must be the same as the property ' 'to which the inequality filter is applied. 
In your query ' 'the first sort property is %s but the inequality filter ' 'is on %s' % (first_order_prop, ineq_prop_name)) if not query.HasField('kind'): for filter in filters: prop_name = _Decode(filter.property[0].name) Check(prop_name == key_prop_name or prop_name == unapplied_log_timestamp_us_name, 'kind is required for non-__key__ filters') for order in orders: prop_name = _Decode(order.property) Check( prop_name == key_prop_name and order.direction is datastore_pb.Query.Order.ASCENDING, 'kind is required for all orders except __key__ ascending') def _Decode(string): if isinstance(string, six.text_type): return string return string.decode('utf-8') class ValueRange(object): def __init__(self): self.__start = self.__end = None self.__start_inclusive = self.__end_inclusive = False def Update(self, rel_op, limit): if rel_op == datastore_pb.Query.Filter.LESS_THAN: if self.__end is None or limit <= self.__end: self.__end = limit self.__end_inclusive = False elif (rel_op == datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL or rel_op == datastore_pb.Query.Filter.EQUAL): if self.__end is None or limit < self.__end: self.__end = limit self.__end_inclusive = True if rel_op == datastore_pb.Query.Filter.GREATER_THAN: if self.__start is None or limit >= self.__start: self.__start = limit self.__start_inclusive = False elif (rel_op == datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL or rel_op == datastore_pb.Query.Filter.EQUAL): if self.__start is None or limit > self.__start: self.__start = limit self.__start_inclusive = True def Contains(self, value): if self.__start is not None: if self.__start_inclusive and value < self.__start: return False if not self.__start_inclusive and value <= self.__start: return False if self.__end is not None: if self.__end_inclusive and value > self.__end: return False if not self.__end_inclusive and value >= self.__end: return False return True def Remap(self, mapper): self.__start = self.__start and mapper(self.__start) self.__end = self.__end and mapper(self.__end) def MapExtremes(self, mapper): return ( self.__start and mapper(self.__start, self.__start_inclusive, False), self.__end and mapper(self.__end, self.__end_inclusive, True)) def ParseKeyFilteredQuery(filters, orders): remaining_filters = [] key_range = ValueRange() key_prop = datastore_types.KEY_SPECIAL_PROPERTY for f in filters: op = f.op if not (len(f.property) == 1 and f.property[0].name == key_prop and not (op == datastore_pb.Query.Filter.IN or op == datastore_pb.Query.Filter.EXISTS)): remaining_filters.append(f) continue val = f.property[0].value Check(val.HasField('referencevalue'), '__key__ kind must be compared to a key') limit = datastore_types.FromReferenceProperty(val) key_range.Update(op, limit) remaining_orders = [] for o in orders: if not (o.direction == datastore_pb.Query.Order.ASCENDING and o.property == datastore_types.KEY_SPECIAL_PROPERTY): remaining_orders.append(o) else: break Check(not remaining_filters, 'Only comparison filters on ' + key_prop + ' supported') Check(not remaining_orders, 'Only ascending order on ' + key_prop + ' supported') return key_range
Apache License 2.0
philgyford/django-ditto
ditto/twitter/migrations/0040_auto_20160603_1302.py
make_many_tweets
python
def make_many_tweets(apps, schema_editor):
    Media = apps.get_model("twitter", "Media")
    for media in Media.objects.all():
        media.tweets.add(media.tweet)
Adds the Tweet object in Media.tweet to the many-to-many relationship in Media.tweets
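A hedged sketch of how a data-migration function like this is normally wired into the migration via RunPython; the dependency name is a placeholder, not the real one from the repository.

class Migration(migrations.Migration):

    dependencies = [
        ("twitter", "0039_previous_migration"),   # placeholder dependency
    ]

    operations = [
        # reverse_code=noop lets the migration be unapplied without undoing the data change
        migrations.RunPython(make_many_tweets, migrations.RunPython.noop),
    ]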
https://github.com/philgyford/django-ditto/blob/bc8b68a839a8f82a0484f3006137a9ca718aa69e/ditto/twitter/migrations/0040_auto_20160603_1302.py#L13-L21
from __future__ import unicode_literals from django.db import migrations
MIT License
catap/namebench
nb_third_party/graphy/line_chart.py
LineStyle.__init__
python
def __init__(self, width, on, off, color=None):
    self.width = width
    self.on = on
    self.off = off
    self.color = color
Construct a LineStyle. See class docstring for details on args.
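An illustrative construction using the class constants visible in the context below; the hex colour string format is an assumption.

from graphy import line_chart

# A thick dashed line: DASHED is the (on, off) pixel pair (8, 4).
on, off = line_chart.LineStyle.DASHED
style = line_chart.LineStyle(width=line_chart.LineStyle.THICK,
                             on=on, off=off, color='0000ff')   # colour format assumed (hex RGB)
print(style.width, style.on, style.off, style.color)           # 2 8 4 0000ff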
https://github.com/catap/namebench/blob/9913a7a1a7955a3759eb18cbe73b421441a7a00f/nb_third_party/graphy/line_chart.py#L56-L61
import copy import warnings from graphy import common class LineStyle(object): THIN = 1 THICK = 2 SOLID = (1, 0) DASHED = (8, 4) DOTTED = (2, 4)
Apache License 2.0
jiaweisheng/faan
matcher.py
EntityEncoder.forward
python
def forward(self, entity, entity_meta=None):
    if entity_meta is not None:
        entity = self.symbol_emb(entity)
        entity_left_connections, entity_left_degrees, \
            entity_right_connections, entity_right_degrees = entity_meta
        entity_left, entity_right = torch.split(entity, 1, dim=1)
        entity_left = entity_left.squeeze(1)
        entity_right = entity_right.squeeze(1)
        entity_left, entity_right = self.neighbor_encoder_soft_select(
            entity_left_connections, entity_right_connections,
            entity_left, entity_right)
    else:
        entity = self.symbol_emb(entity)
        entity_left, entity_right = torch.split(entity, 1, dim=1)
        entity_left = entity_left.squeeze(1)
        entity_right = entity_right.squeeze(1)
    return entity_left, entity_right
query: (batch_size, 2) entity: (few, 2) return: (batch_size, )
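A hedged call sketch for the entity_meta=None branch; the dimensions and ids are arbitrary, and use_pretrain=False avoids needing a pretrained embedding matrix.

import torch

encoder = EntityEncoder(embed_dim=100, num_symbols=5000, use_pretrain=False)
pairs = torch.randint(0, 5000, (8, 2))     # 8 (left, right) entity-id pairs
left, right = encoder(pairs)               # entity_meta defaults to None
print(left.shape, right.shape)             # torch.Size([8, 100]) twice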
https://github.com/jiaweisheng/faan/blob/b439b829506c4e2e9044a6b2ab7f3d844f445a95/matcher.py#L78-L100
import logging from modules import * class EntityEncoder(nn.Module): def __init__(self, embed_dim, num_symbols, use_pretrain=True, embed=None, dropout_input=0.3, finetune=False, dropout_neighbors=0.0, device=torch.device("cpu")): super(EntityEncoder, self).__init__() self.embed_dim = embed_dim self.pad_idx = num_symbols self.symbol_emb = nn.Embedding(num_symbols + 1, embed_dim, padding_idx=self.pad_idx) self.num_symbols = num_symbols self.gcn_w = nn.Linear(2 * self.embed_dim, self.embed_dim) self.gcn_b = nn.Parameter(torch.FloatTensor(self.embed_dim)) self.dropout = nn.Dropout(dropout_input) init.xavier_normal_(self.gcn_w.weight) init.constant_(self.gcn_b, 0) self.pad_tensor = torch.tensor([self.pad_idx], requires_grad=False).to(device) if use_pretrain: logging.info('LOADING KB EMBEDDINGS') self.symbol_emb.weight.data.copy_(torch.from_numpy(embed)) if not finetune: logging.info('FIX KB EMBEDDING') self.symbol_emb.weight.requires_grad = False self.NeighborAggregator = AttentionSelectContext(dim=embed_dim, dropout=dropout_neighbors) def neighbor_encoder_mean(self, connections, num_neighbors): num_neighbors = num_neighbors.unsqueeze(1) relations = connections[:, :, 0].squeeze(-1) entities = connections[:, :, 1].squeeze(-1) rel_embeds = self.dropout(self.symbol_emb(relations)) ent_embeds = self.dropout(self.symbol_emb(entities)) concat_embeds = torch.cat((rel_embeds, ent_embeds), dim=-1) out = self.gcn_w(concat_embeds) out = torch.sum(out, dim=1) out = out / num_neighbors return out.tanh() def neighbor_encoder_soft_select(self, connections_left, connections_right, head_left, head_right): relations_left = connections_left[:, :, 0].squeeze(-1) entities_left = connections_left[:, :, 1].squeeze(-1) rel_embeds_left = self.dropout(self.symbol_emb(relations_left)) ent_embeds_left = self.dropout(self.symbol_emb(entities_left)) pad_matrix_left = self.pad_tensor.expand_as(relations_left) mask_matrix_left = torch.eq(relations_left, pad_matrix_left).squeeze(-1) relations_right = connections_right[:, :, 0].squeeze(-1) entities_right = connections_right[:, :, 1].squeeze(-1) rel_embeds_right = self.dropout(self.symbol_emb(relations_right)) ent_embeds_right = self.dropout(self.symbol_emb(entities_right)) pad_matrix_right = self.pad_tensor.expand_as(relations_right) mask_matrix_right = torch.eq(relations_right, pad_matrix_right).squeeze(-1) left = [head_left, rel_embeds_left, ent_embeds_left] right = [head_right, rel_embeds_right, ent_embeds_right] output = self.NeighborAggregator(left, right, mask_matrix_left, mask_matrix_right) return output
MIT License
netflix/pygenie
pygenie/jobs/core.py
GenieJob.genie_setup_file
python
def genie_setup_file(self, setup_file):
    assert setup_file is not None and is_file(setup_file), \
        "setup file '{}' does not exist".format(setup_file)
    self._setup_file = setup_file
    return self
Sets a Bash file to source before the job is executed.

Genie will source the Bash file before executing the job. This can be used
to set environment variables before the job is run, etc. This should not be
used to set job configuration properties via a file (jobs should have
separate interfaces for setting property files). The file must be stored
externally, or available on the Genie nodes.

Example:
    >>> job = GenieJob() \\
    ...     .genie_setup_file('/Users/jdoe/my_setup_file.sh')

Args:
    setup_file (str): The local path to the Bash file to use for setup.

Returns:
    :py:class:`GenieJob`: self
https://github.com/netflix/pygenie/blob/96058a7586e001478048d84d5e8c7a6415c66f81/pygenie/jobs/core.py#L638-L665
from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import re from collections import defaultdict, OrderedDict from six import text_type from ..conf import GenieConf from ..utils import (convert_to_unicode, is_str, normalize_list, str_to_list, unicodify, uuid_str) from .utils import (add_to_repr, arg_list, arg_string, generate_job_id, is_file, reattach_job) from ..exceptions import (GenieJobError, GenieJobNotFoundError) logger = logging.getLogger('com.netflix.genie.jobs.core') class Repr(object): def __init__(self, class_name=None): self.__class_name = class_name[:-2] if class_name.endswith('()') else class_name self.__repr_list = list() def __repr__(self): return self.__unicode__() def __str__(self): return self.__unicode__() def __unicode__(self): return '.'.join(self.repr_list) @staticmethod def __quote(val): if '\n' in val or ('"' in val and "'" in val): return '"""' elif '"' in val: return "'" return '"' def append(self, func_name=None, args=None, kwargs=None): args_str = self.args_to_str(args) kwargs_str = self.kwargs_to_str(kwargs) call_str = '{func}({args}{comma}{kwargs})' .format(func=func_name, args=args_str if args_str else '', comma=', ' if kwargs_str and args_str else '', kwargs=kwargs_str if kwargs_str else '') self.remove(call_str) self.__repr_list.append(call_str) def args_to_str(self, args): if args is not None: results = list() redact_hint=None if len(args) > 0 and len(args) % 2 is 0 and is_str(args[0]): redact_hint = args[0] for i, arg in enumerate([convert_to_unicode(a) for a in args]): value = arg if isinstance(arg, list): value = normalize_list(arg) if i > 0 and is_str(value): value = convert_to_unicode(value, redact_hint) results.append('{qu}{val}{qu}'.format( val=value, qu=self.__quote(value) if is_str(arg) else '' )) return ', '.join(results) return '' def kwargs_to_str(self, kwargs): return ', '.join([ '{key}={qu}{val}{qu}'.format( key=key, val=convert_to_unicode(val, key) if is_str(val) else val, qu=self.__quote(val) if is_str(val) else '' ) for key, val in kwargs.items() ]) if kwargs is not None else '' def pop(self): self.__repr_list.pop() def remove(self, regex_filter, flags=0): assert regex_filter is not None, 'must specify a regular expression filter' regex_filter = re.escape(regex_filter) self.__repr_list = [i for i in self.__repr_list if not re.search(regex_filter, i, flags=flags) and regex_filter != i] return self @property def repr_list(self): return ['{}()'.format(self.__class_name)] + sorted(self.__repr_list) class GenieJob(object): DEFAULT_CLUSTER_TAG = 99999 def __init__(self, conf=None): assert conf is None or isinstance(conf, GenieConf), "invalid conf '{}', should be None or GenieConf".format(conf) cls = self.__class__.__name__ job_type = cls.rsplit('Job', 1)[0].lower() if cls.endswith('Job') else cls.lower() self._conf = conf or GenieConf() self.default_command_tags = str_to_list( self._conf.get('{}.default_command_tags'.format(cls), ['type:{}'.format(job_type)]) ) self.default_cluster_tags = str_to_list( self._conf.get('{}.default_cluster_tags'.format(cls), ['type:{}'.format(job_type)]) ) self.repr_obj = Repr(self.__class__.__name__) self._application_ids = list() self._archive = True self._cluster_tag_mapping = defaultdict(list) self._command_arguments = None self._command_options = defaultdict(OrderedDict) self._command_tags = list() self._configs = list() self._dependencies = list() self._description = None self._email = None self._genie_cpu = None self._genie_grouping = None 
self._genie_grouping_instance = None self._genie_memory = None self._group = None self._job_id = uuid_str() self._job_name = None self._job_version = 'NA' self._metadata = None self._parameters = OrderedDict() self._post_cmd_args = list() self._setup_file = None self._tags = list() self._timeout = None self._username = self._conf.get('genie.username') self.repr_obj.append('job_id', (self._job_id,)) self.repr_obj.append('genie_username', (self._username,)) self._cluster_tag_mapping[GenieJob.DEFAULT_CLUSTER_TAG] = self.default_cluster_tags def __repr__(self): return self.__unicode__() def __str__(self): return self.__unicode__() def __unicode__(self): return text_type(self.repr_obj) def _add_dependency(self, dep): if dep not in self._dependencies: self._dependencies.append(dep) def _add_config(self, config): if config not in self._configs: self._configs.append(config) @unicodify def _add_cluster_tag(self, tags, priority=1): assert priority >= 0, "cluster tag priorty must be >= 1" assert isinstance(tags, list), 'tags should be a list' self._cluster_tag_mapping[priority].extend(tags) return self @unicodify def _set_command_option(self, flag, name, value=None): self._command_options[flag][name] = value @unicodify @arg_list @add_to_repr('append') def applications(self, _application_ids): @add_to_repr('overwrite') def archive(self, archive): assert isinstance(archive, bool), "archive must be a boolean" self._archive = archive return self @unicodify @add_to_repr('append') def cluster_tags(self, cluster_tags): self._add_cluster_tag(str_to_list(cluster_tags)) return self @property def cmd_args(self): if self._command_arguments is not None: return self._command_arguments raise GenieJobError('should not try to access core GenieJob ' 'constructed command arguments') @unicodify @arg_string @add_to_repr('overwrite') def command_arguments(self, _command_arguments): @unicodify @arg_list @add_to_repr('append') def command_tags(self, _command_tags): @arg_list @add_to_repr('append') def configs(self, _configs): @arg_list @add_to_repr('append') def dependencies(self, _dependencies): @unicodify @arg_string @add_to_repr('overwrite') def description(self, _description): def disable_archive(self): return self.archive(False) def execute(self, retry=False, force=False, catch_signal=False, **kwargs): if catch_signal: import signal import sys def sig_handler(signum, frame): logger.warning("caught signal %s", signum) try: if running_job.job_id: logger.warning("killing job id %s", running_job.job_id) response = running_job.kill() response.raise_for_status() except Exception: pass finally: sys.exit(1) signal.signal(signal.SIGINT, sig_handler) signal.signal(signal.SIGTERM, sig_handler) signal.signal(signal.SIGABRT, sig_handler) if retry or force: uid = self._job_id try: uid = generate_job_id(uid, return_success=not force, conf=self._conf) running_job = reattach_job(uid, conf=self._conf) return running_job except GenieJobNotFoundError: self.job_id(uid) global execute_job running_job = execute_job(self, **kwargs) return running_job @add_to_repr('overwrite') def genie_cpu(self, cpu): assert int(cpu) > 0, 'number of CPUs cannot be less than 1' self._genie_cpu = int(cpu) return self @unicodify @arg_string @add_to_repr('overwrite') def genie_email(self, _email): def email(self, email): logger.warning("Use .genie_email('%s') to set Genie email.", email) return self.genie_email(email) @unicodify @arg_string @add_to_repr('overwrite') def genie_grouping(self, _genie_grouping): @unicodify @arg_string @add_to_repr('overwrite') def 
genie_grouping_instance(self, _genie_grouping_instance): @add_to_repr('overwrite') def genie_memory(self, memory): assert int(memory) > 0, 'memory amount (MB) cannot be less than 1' self._genie_memory = int(memory) return self @unicodify @add_to_repr('overwrite')
Apache License 2.0
demisto/demisto-py
demisto_client/demisto_api/models/run_status.py
RunStatus.__init__
python
def __init__(self):
    self.discriminator = None
RunStatus - a model defined in Swagger
https://github.com/demisto/demisto-py/blob/95d29e07693d27c133f7fe6ef9da13e4b6dbf542/demisto_client/demisto_api/models/run_status.py#L39-L41
import pprint import re import six class RunStatus(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { }
Apache License 2.0
sfdo-tooling/snowfakery
tools/faker_docs_utils/summarize_fakers.py
summarize_all_fakers
python
def summarize_all_fakers(faker) -> T.Sequence[FakerInfo]:
    from snowfakery.utils.collections import CaseInsensitiveDict

    with (Path(__file__).parent / "docs_config.yml").open() as f:
        yaml_data = yaml.safe_load(f)
    common_fakes = yaml_data["common_fakes"]
    uncommon_fakes = yaml_data["uncommon_fakes"]

    faker_infos = CaseInsensitiveDict()
    for name, meth in faker.fake_names.items():
        if not isinstance(meth, types.MethodType):
            continue
        friendly = _to_camel_case(name)
        func = meth.__func__
        doc = func.__doc__
        filename = func.__code__.co_filename
        cls = meth.__self__.__class__
        fullname = cls.__module__ + "." + cls.__name__ + "." + meth.__name__
        overrides = common_fakes.get(meth.__name__) or uncommon_fakes.get(meth.__name__)
        is_common = meth.__name__ in common_fakes

        if "/faker/" in filename:
            source = "faker"
            idx = filename.find("/faker/")
            url = "https://github.com/joke2k/faker/tree/master" + filename[idx:]
            parts = filename.split("/")
            while parts[-1] in ("__init__.py", "en_US"):
                del parts[-1]
            category = parts[-1]
        else:
            source = "snowfakery"
            idx = filename.find("/snowfakery/")
            url = (
                "https://github.com/SFDO-Tooling/Snowfakery/tree/main"
                + filename[idx:]
            )
            category = "Salesforce"

        faker_info = faker_infos.setdefault(
            friendly,
            FakerInfo(
                friendly,
                fullname,
                [],
                url,
                source,
                category,
                doc or "",
                is_common,
                overrides.get("example") if overrides else None,
            ),
        )
        faker_info.aliases.append(name)
    return faker_infos.values()
Summarize information about all fakers
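A hedged iteration sketch over the result; the faker argument is whatever snowfakery object exposes a fake_names mapping of bound methods (not a stock faker.Faker instance), so it is left abstract here.

# 'snowfakery_faker' is assumed to be built elsewhere with a .fake_names dict.
for info in summarize_all_fakers(snowfakery_faker):
    # FakerInfo fields per the NamedTuple in the context below
    print(f"{info.name:30} {info.category:12} common={info.common} aliases={info.aliases[:3]}")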
https://github.com/sfdo-tooling/snowfakery/blob/98d04862f2e7638486cf38eb9e9fdb12b38e391e/tools/faker_docs_utils/summarize_fakers.py#L20-L77
from pathlib import Path import types import typing as T import yaml class FakerInfo(T.NamedTuple): name: str fullname: str aliases: T.List[str] url: str source: str category: str doc: str common: bool sample: str
BSD 3-Clause New or Revised License
seagate/cortx-hare
hax/hax/util.py
KVAdapter.kv_put
python
def kv_put(self, key: str, data: str, kv_cache=None, **kwargs) -> bool:
    assert key
    try:
        return self.cns.kv.put(key, data, **kwargs)
    except (ConsulException, HTTPError, RequestException) as e:
        raise HAConsistencyException('Failed to put value to KV') from e
Helper method that should be used by default in this class whenever we want to invoke Consul.kv.put()
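A hedged usage sketch; it assumes a Consul agent is reachable on the default address used by python-consul's Consul().

kv = KVAdapter()                 # wraps consul.Consul() by default
ok = kv.kv_put('epoch', '42')    # extra kwargs (e.g. cas=...) pass through to Consul.kv.put
print(ok)                        # True on success
print(kv.kv_get('epoch'))        # kv_get (shown in the context) returns Consul's value dict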
https://github.com/seagate/cortx-hare/blob/8b2592500f770d665b5b7d0497679c80ce3be574/hax/hax/util.py#L194-L203
import json import logging import os import re from base64 import b64encode from functools import wraps from typing import Any, Dict, List, NamedTuple, Optional, Tuple from hax.log import TRACE from threading import Event from time import sleep import simplejson from consul import Consul, ConsulException from consul.base import ClientError from requests.exceptions import RequestException from urllib3.exceptions import HTTPError from hax.exception import HAConsistencyException, InterruptedException from hax.types import (ConfHaProcess, Fid, FsStatsWithTime, ObjT, ServiceHealth, Profile, m0HaProcessEvent, m0HaProcessType, KeyDelete, HaNoteStruct, m0HaObjState) from hax.consul.cache import (uses_consul_cache, invalidates_consul_cache, supports_consul_cache) __all__ = ['ConsulUtil', 'create_process_fid', 'create_service_fid', 'create_sdev_fid', 'create_drive_fid'] LOG = logging.getLogger('hax') ServiceData = NamedTuple('ServiceData', [('node', str), ('fid', Fid), ('ip_addr', str), ('address', str)]) FidWithType = NamedTuple('FidWithType', [('fid', Fid), ('service_type', str)]) MotrConsulProcInfo = NamedTuple('MotrConsulProcInfo', [('proc_status', str), ('proc_type', str)]) MotrConsulProcStatus = NamedTuple('MotrConsulProcStatus', [( 'consul_svc_status', str), ('consul_motr_proc_status', str)]) MotrProcStatusLocalRemote = NamedTuple('MotrProcStatusLocalRemote', [( 'motr_proc_status_local', ServiceHealth), ('motr_proc_status_remote', ServiceHealth)]) def mkServiceData(service: Dict[str, Any]) -> ServiceData: return ServiceData( node=service['Node'], fid=mk_fid( ObjT.PROCESS, int(service['ServiceID'])), ip_addr=service['Address'], address='{}:{}'.format(service['ServiceAddress'], service['ServicePort'])) def mk_fid(obj_t: ObjT, key: int) -> Fid: return Fid(obj_t.value, key) def create_process_fid(key: int) -> Fid: return mk_fid(ObjT.PROCESS, key) def create_service_fid(key: int) -> Fid: return mk_fid(ObjT.SERVICE, key) def create_sdev_fid(key: int) -> Fid: return mk_fid(ObjT.SDEV, key) def create_drive_fid(key: int) -> Fid: return mk_fid(ObjT.DRIVE, key) def create_profile_fid(key: int) -> Fid: return mk_fid(ObjT.PROFILE, key) ha_process_events = ('M0_CONF_HA_PROCESS_STARTING', 'M0_CONF_HA_PROCESS_STARTED', 'M0_CONF_HA_PROCESS_STOPPING', 'M0_CONF_HA_PROCESS_STOPPED') ha_conf_obj_states = ('M0_NC_UNKNOWN', 'M0_NC_ONLINE', 'M0_NC_FAILED', 'M0_NC_TRANSIENT', 'M0_NC_REPAIR', 'M0_NC_REPAIRED', 'M0_NC_REBALANCE') def repeat_if_fails(wait_seconds=5, max_retries=-1): def callable(f): @wraps(f) def wrapper(*args, **kwds): attempt_count = 0 while (True): try: return f(*args, **kwds) except HAConsistencyException as e: attempt_count += 1 if max_retries >= 0 and attempt_count > max_retries: LOG.warn( 'Function %s: Too many errors happened in a row ' '(max_retries = %d)', f.__name__, max_retries) raise e LOG.warn(f'Got HAConsistencyException: {e.message} while ' f'invoking function {f.__name__} ' f'(attempt {attempt_count}). 
The attempt will be ' f'repeated in {wait_seconds} seconds') sleep(wait_seconds) return wrapper return callable TxPutKV = NamedTuple('TxPutKV', [('key', str), ('value', str), ('cas', Optional[Any])]) def wait_for_event(event: Event, interval_sec) -> None: interrupted = event.wait(timeout=interval_sec) if interrupted: raise InterruptedException() class KVAdapter: def __init__(self, cns: Optional[Consul] = None): self.cns = cns or Consul() def kv_get_raw(self, key: str, **kwargs) -> Tuple[int, Any]: assert key try: return self.cns.kv.get(key, **kwargs) except (ConsulException, HTTPError, RequestException) as e: raise HAConsistencyException('Could not access Consul KV') from e @uses_consul_cache def kv_get(self, key: str, kv_cache=None, **kwargs) -> Any: LOG.debug('KVGET key=%s, kwargs=%s', key, kwargs) return self.kv_get_raw(key, **kwargs)[1] @invalidates_consul_cache
Apache License 2.0
docusign/docusign-python-client
docusign_esign/models/recipient_identity_verification.py
RecipientIdentityVerification.workflow_id_metadata
python
def workflow_id_metadata(self):
    return self._workflow_id_metadata
Gets the workflow_id_metadata of this RecipientIdentityVerification.  # noqa: E501

:return: The workflow_id_metadata of this RecipientIdentityVerification.  # noqa: E501
:rtype: PropertyMetadata
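An illustrative access sketch; the import path assumes the usual swagger-generated models package, and the property simply echoes whatever was supplied at construction time.

from docusign_esign.models import RecipientIdentityVerification

riv = RecipientIdentityVerification(workflow_id='idv-workflow')   # metadata not supplied
print(riv.workflow_id_metadata)    # None until a PropertyMetadata value is provided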
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/recipient_identity_verification.py#L109-L116
import pprint import re import six from docusign_esign.client.configuration import Configuration class RecipientIdentityVerification(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'input_options': 'list[RecipientIdentityInputOption]', 'workflow_id': 'str', 'workflow_id_metadata': 'PropertyMetadata' } attribute_map = { 'input_options': 'inputOptions', 'workflow_id': 'workflowId', 'workflow_id_metadata': 'workflowIdMetadata' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._input_options = None self._workflow_id = None self._workflow_id_metadata = None self.discriminator = None setattr(self, "_{}".format('input_options'), kwargs.get('input_options', None)) setattr(self, "_{}".format('workflow_id'), kwargs.get('workflow_id', None)) setattr(self, "_{}".format('workflow_id_metadata'), kwargs.get('workflow_id_metadata', None)) @property def input_options(self): return self._input_options @input_options.setter def input_options(self, input_options): self._input_options = input_options @property def workflow_id(self): return self._workflow_id @workflow_id.setter def workflow_id(self, workflow_id): self._workflow_id = workflow_id @property
MIT License
diana-hep/madminer
madminer/lhe/lhe_reader.py
LHEReader.reset_efficiencies
python
def reset_efficiencies(self):
    logger.debug("Resetting efficiencies")
    self.efficiencies = []
Resets all efficiencies.
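A hedged usage sketch; 'madminer_setup.h5' is a placeholder for a previously created MadMiner settings file.

from madminer.lhe import LHEReader

reader = LHEReader('madminer_setup.h5')   # placeholder settings file
reader.add_efficiency('0.9')              # constant 90% efficiency expression (see add_efficiency above)
reader.reset_efficiencies()               # drops everything added above
print(reader.efficiencies)                # []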
https://github.com/diana-hep/madminer/blob/ce741d7558dee56ae6b3258f55f4032149388be7/madminer/lhe/lhe_reader.py#L548-L552
import logging import numpy as np from collections import OrderedDict from madminer.models import Cut from madminer.models import Efficiency from madminer.models import Observable from madminer.models import NuisanceParameter from madminer.utils.interfaces.hdf5 import load_madminer_settings from madminer.utils.interfaces.hdf5 import save_events from madminer.utils.interfaces.hdf5 import save_nuisance_setup from madminer.utils.interfaces.lhe import ( parse_lhe_file, extract_nuisance_parameters_from_lhe_file, get_elementary_pdg_ids, ) from madminer.sampling import combine_and_shuffle logger = logging.getLogger(__name__) class LHEReader: def __init__(self, filename): self.lhe_sample_filenames = [] self.sample_k_factors = [] self.sample_is_backgrounds = [] self.sampling_benchmarks = [] self.sample_systematics = [] self.observables = OrderedDict() self.cuts = [] self.efficiencies = [] self.energy_resolution = {} self.pt_resolution = {} self.eta_resolution = {} self.phi_resolution = {} for pdgid in get_elementary_pdg_ids(): self.energy_resolution[pdgid] = (0.0, 0.0) self.pt_resolution[pdgid] = (0.0, 0.0) self.eta_resolution[pdgid] = (0.0, 0.0) self.phi_resolution[pdgid] = (0.0, 0.0) self.pt_resolution["met"] = (0.0, 0.0) self.reference_benchmark = None self.observations = None self.weights = None self.events_sampling_benchmark_ids = [] self.signal_events_per_benchmark = None self.background_events = None self.filename = filename ( _, benchmarks, _, _, _, _, _, self.systematics, _, _, _, _, _, _, ) = load_madminer_settings(filename, include_nuisance_benchmarks=False) self.benchmark_names_phys = list(benchmarks.keys()) self.n_benchmarks_phys = len(benchmarks) self.nuisance_parameters = OrderedDict() @staticmethod def _check_sample_elements(this_elements, n_events=None): for key, elems in this_elements.items(): this_n_events = len(elems) if n_events is None: n_events = this_n_events logger.debug(f"Found {n_events} events") if this_n_events != n_events: raise RuntimeError( f"Mismatching number of events for {key}: "f"{n_events} vs {this_n_events}" ) if not np.issubdtype(elems.dtype, np.number): logger.warning(f"For key {key} have non-numeric dtype {elems.dtype}.") return n_events def add_sample( self, lhe_filename, sampled_from_benchmark, is_background=False, k_factor=1.0, systematics=None, ): logger.debug("Adding event sample %s", lhe_filename) self.sampling_benchmarks.append(sampled_from_benchmark) self.sample_is_backgrounds.append(is_background) self.sample_k_factors.append(k_factor) self.lhe_sample_filenames.append(lhe_filename) self.sample_systematics.append(systematics) def set_smearing( self, pdgids=None, energy_resolution_abs=0.0, energy_resolution_rel=0.0, pt_resolution_abs=0.0, pt_resolution_rel=0.0, eta_resolution_abs=0.0, eta_resolution_rel=0.0, phi_resolution_abs=0.0, phi_resolution_rel=0.0, ): if pdgids is None: pdgids = get_elementary_pdg_ids() for pdgid in pdgids: self.energy_resolution[pdgid] = (energy_resolution_abs, energy_resolution_rel) self.pt_resolution[pdgid] = (pt_resolution_abs, pt_resolution_rel) self.eta_resolution[pdgid] = (eta_resolution_abs, eta_resolution_rel) self.phi_resolution[pdgid] = (phi_resolution_abs, phi_resolution_rel) def set_met_noise(self, abs_=0.0, rel=0.0): self.pt_resolution["met"] = (abs_, rel) def add_observable(self, name, definition, required=False, default=None): if required: logger.debug("Adding required observable %s = %s", name, definition) else: logger.debug("Adding optional observable %s = %s with default %s", name, definition, default) 
self.observables[name] = Observable( name=name, val_expression=definition, val_default=default, is_required=required, ) def add_observable_from_function(self, name, fn, required=False, default=None): if required: logger.debug("Adding required observable %s defined through external function", name) else: logger.debug( "Adding optional observable %s defined through external function with default %s", name, default ) self.observables[name] = Observable( name=name, val_expression=fn, val_default=default, is_required=required, ) def add_default_observables( self, n_leptons_max=2, n_photons_max=2, n_jets_max=2, include_met=True, include_visible_sum=True, include_numbers=True, include_charge=True, ): logger.debug("Adding default observables") if include_met: self.add_observable("et_miss", "met.pt", required=True) self.add_observable("phi_miss", "met.phi", required=True) if include_visible_sum: self.add_observable("e_visible", "visible.e", required=True) self.add_observable("eta_visible", "visible.eta", required=True) for n, symbol, include_this_charge in zip( [n_leptons_max, n_photons_max, n_jets_max], ["l", "a", "j"], [False, False, include_charge] ): if include_numbers: self.add_observable(f"n_{symbol}s", f"len({symbol})", required=True) for i in range(n): self.add_observable( f"e_{symbol}{i+1}", f"{symbol}[{i}].e", required=False, default=0.0 ) self.add_observable( f"pt_{symbol}{i+1}", f"{symbol}[{i}].pt", required=False, default=0.0 ) self.add_observable( f"eta_{symbol}{i+1}", f"{symbol}[{i}].eta", required=False, default=0.0 ) self.add_observable( f"phi_{symbol}{i+1}", f"{symbol}[{i}].phi", required=False, default=0.0 ) if include_this_charge and symbol == "l": self.add_observable( f"charge_{symbol}{i+1}", f"{symbol}[{i}].charge", required=False, default=0.0, ) def add_cut(self, definition, required=False): logger.debug("Adding cut %s", definition) self.cuts.append(Cut( name="CUT", val_expression=definition, is_required=required, )) def add_efficiency(self, definition, default=1.0): logger.debug("Adding efficiency %s", definition) self.efficiencies.append(Efficiency( name="EFFICIENCY", val_expression=definition, val_default=default, )) def reset_observables(self): logger.debug("Resetting observables") self.observables = OrderedDict() def reset_cuts(self): logger.debug("Resetting cuts") self.cuts = []
MIT License
mattvonrocketstein/smash
smashlib/ipy3x/qt/console/ipython_widget.py
IPythonWidget._make_out_prompt
python
def _make_out_prompt(self, number):
    try:
        body = self.out_prompt % number
    except TypeError:
        from xml.sax.saxutils import escape
        body = escape(self.out_prompt)
    return '<span class="out-prompt">%s</span>' % body
Given a prompt number, returns an HTML Out prompt.
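A standalone illustration of the formatting logic, using the default_out_prompt template shown in the context below, without constructing a Qt widget.

out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
number = 7
print('<span class="out-prompt">%s</span>' % (out_prompt % number))
# -> <span class="out-prompt">Out[<span class="out-prompt-number">7</span>]: </span>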
https://github.com/mattvonrocketstein/smash/blob/98acdc27ab72ca80d9a7f63a54c0d52f126a8009/smashlib/ipy3x/qt/console/ipython_widget.py#L537-L546
from collections import namedtuple import os.path import re from subprocess import Popen import sys import time from textwrap import dedent from IPython.external.qt import QtCore, QtGui from IPython.core.inputsplitter import IPythonInputSplitter from IPython.core.release import version from IPython.core.inputtransformer import ipy_prompt from IPython.utils.traitlets import Bool, Unicode from .frontend_widget import FrontendWidget from . import styles default_in_prompt = 'In [<span class="in-prompt-number">%i</span>]: ' default_out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: ' default_input_sep = '\n' default_output_sep = '' default_output_sep2 = '' zmq_shell_source = 'IPython.kernel.zmq.zmqshell.ZMQInteractiveShell' if sys.platform.startswith('win'): default_editor = 'notepad' else: default_editor = '' class IPythonWidget(FrontendWidget): custom_edit = Bool(False) custom_edit_requested = QtCore.Signal(object, object) editor = Unicode(default_editor, config=True, help=""" A command for invoking a system text editor. If the string contains a {filename} format specifier, it will be used. Otherwise, the filename will be appended to the end the command. """) editor_line = Unicode(config=True, help=""" The editor command to use when a specific line number is requested. The string should contain two format specifiers: {line} and {filename}. If this parameter is not specified, the line number option to the %edit magic will be ignored. """) style_sheet = Unicode(config=True, help=""" A CSS stylesheet. The stylesheet can contain classes for: 1. Qt: QPlainTextEdit, QFrame, QWidget, etc 2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter) 3. IPython: .error, .in-prompt, .out-prompt, etc """) syntax_style = Unicode(config=True, help=""" If not empty, use this Pygments style for syntax highlighting. Otherwise, the style sheet is queried for Pygments style information. 
""") in_prompt = Unicode(default_in_prompt, config=True) out_prompt = Unicode(default_out_prompt, config=True) input_sep = Unicode(default_input_sep, config=True) output_sep = Unicode(default_output_sep, config=True) output_sep2 = Unicode(default_output_sep2, config=True) _input_splitter_class = IPythonInputSplitter _prompt_transformer = IPythonInputSplitter(physical_line_transforms=[ipy_prompt()], logical_line_transforms=[], python_line_transforms=[], ) _PromptBlock = namedtuple('_PromptBlock', ['block', 'length', 'number']) _payload_source_edit = 'edit' _payload_source_exit = 'ask_exit' _payload_source_next_input = 'set_next_input' _payload_source_page = 'page' _retrying_history_request = False _starting = False def __init__(self, *args, **kw): super(IPythonWidget, self).__init__(*args, **kw) self._payload_handlers = { self._payload_source_edit: self._handle_payload_edit, self._payload_source_exit: self._handle_payload_exit, self._payload_source_page: self._handle_payload_page, self._payload_source_next_input: self._handle_payload_next_input} self._previous_prompt_obj = None self._keep_kernel_on_exit = None if self.style_sheet: self._style_sheet_changed() self._syntax_style_changed() else: self.set_default_style() self._guiref_loaded = False def _handle_complete_reply(self, rep): self.log.debug("complete: %s", rep.get('content', '')) cursor = self._get_cursor() info = self._request_info.get('complete') if info and info.id == rep['parent_header']['msg_id'] and info.pos == cursor.position(): content = rep['content'] matches = content['matches'] start = content['cursor_start'] end = content['cursor_end'] start = max(start, 0) end = max(end, start) cursor_pos = self._get_input_buffer_cursor_pos() if end < cursor_pos: cursor.movePosition(QtGui.QTextCursor.Left, n=(cursor_pos - end)) elif end > cursor_pos: cursor.movePosition(QtGui.QTextCursor.Right, n=(end - cursor_pos)) self._control.setTextCursor(cursor) offset = end - start cursor.movePosition(QtGui.QTextCursor.Left, n=offset) self._complete_with_items(cursor, matches) def _handle_execute_reply(self, msg): msg_id = msg['parent_header'].get('msg_id') info = self._request_info['execute'].get(msg_id) if info and info.kind == 'prompt': content = msg['content'] if content['status'] == 'aborted': self._show_interpreter_prompt() else: number = content['execution_count'] + 1 self._show_interpreter_prompt(number) self._request_info['execute'].pop(msg_id) else: super(IPythonWidget, self)._handle_execute_reply(msg) def _handle_history_reply(self, msg): content = msg['content'] if 'history' not in content: self.log.error("History request failed: %r" % content) if content.get('status', '') == 'aborted' and not self._retrying_history_request: self.log.error("Retrying aborted history request") self._retrying_history_request = True time.sleep(0.25) self.kernel_client.shell_channel.history( hist_access_type='tail', n=1000) else: self._retrying_history_request = False return self._retrying_history_request = False history_items = content['history'] self.log.debug( "Received history reply with %i entries", len(history_items)) items = [] last_cell = u"" for _, _, cell in history_items: cell = cell.rstrip() if cell != last_cell: items.append(cell) last_cell = cell self._set_history(items) def _insert_other_input(self, cursor, content): cursor.beginEditBlock() start = cursor.position() n = content.get('execution_count', 0) cursor.insertText('\n') self._insert_html(cursor, self._make_in_prompt(n)) cursor.insertText(content['code']) 
self._highlighter.rehighlightBlock(cursor.block()) cursor.endEditBlock() def _handle_execute_input(self, msg): self.log.debug("execute_input: %s", msg.get('content', '')) if self.include_output(msg): self._append_custom( self._insert_other_input, msg['content'], before_prompt=True) def _handle_execute_result(self, msg): self.log.debug("execute_result: %s", msg.get('content', '')) if self.include_output(msg): self.flush_clearoutput() content = msg['content'] prompt_number = content.get('execution_count', 0) data = content['data'] if 'text/plain' in data: self._append_plain_text(self.output_sep, True) self._append_html(self._make_out_prompt(prompt_number), True) text = data['text/plain'] if "\n" in text and not self.output_sep.endswith("\n"): self._append_plain_text('\n', True) self._append_plain_text(text + self.output_sep2, True) def _handle_display_data(self, msg): self.log.debug("display: %s", msg.get('content', '')) if self.include_output(msg): self.flush_clearoutput() data = msg['content']['data'] metadata = msg['content']['metadata'] if 'text/plain' in data: text = data['text/plain'] self._append_plain_text(text, True) self._append_plain_text(u'\n', True) def _handle_kernel_info_reply(self, rep): content = rep['content'] if not self._guiref_loaded: if content.get('language') == 'python': self._load_guiref_magic() self._guiref_loaded = True self.kernel_banner = content.get('banner', '') if self._starting: self._starting = False super(IPythonWidget, self)._started_channels() def _started_channels(self): self._starting = True self.kernel_client.kernel_info() self.kernel_client.shell_channel.history(hist_access_type='tail', n=1000) def _load_guiref_magic(self): self.kernel_client.shell_channel.execute('\n'.join([ "try:", " _usage", "except:", " from IPython.core import usage as _usage", " get_ipython().register_magic_function(_usage.page_guiref, 'line', 'guiref')", " del _usage", ]), silent=True) def execute_file(self, path, hidden=False): if sys.platform == 'win32': path = os.path.normpath(path).replace('\\', '/') if ' ' in path or "'" in path or '"' in path: path = '"%s"' % path.replace('"', '\\"') self.execute('%%run %s' % path, hidden=hidden) def _process_execute_error(self, msg): content = msg['content'] traceback = '\n'.join(content['traceback']) + '\n' if False: traceback = traceback.replace(' ', '&nbsp;') traceback = traceback.replace('\n', '<br/>') ename = content['ename'] ename_styled = '<span class="error">%s</span>' % ename traceback = traceback.replace(ename, ename_styled) self._append_html(traceback) else: self._append_plain_text(traceback) def _process_execute_payload(self, item): handler = self._payload_handlers.get(item['source']) if handler is None: return False else: handler(item) return True def _show_interpreter_prompt(self, number=None): if number is None: msg_id = self.kernel_client.shell_channel.execute('', silent=True) info = self._ExecutionRequest(msg_id, 'prompt') self._request_info['execute'][msg_id] = info return self._prompt_sep = self.input_sep self._show_prompt(self._make_in_prompt(number), html=True) block = self._control.document().lastBlock() length = len(self._prompt) self._previous_prompt_obj = self._PromptBlock(block, length, number) self._set_continuation_prompt( self._make_continuation_prompt(self._prompt), html=True) def _show_interpreter_prompt_for_reply(self, msg): content = msg['content'] if content['status'] == 'aborted': if self._previous_prompt_obj: previous_prompt_number = self._previous_prompt_obj.number else: previous_prompt_number = 0 
else: previous_prompt_number = content['execution_count'] if self._previous_prompt_obj and self._previous_prompt_obj.number != previous_prompt_number: block = self._previous_prompt_obj.block if block.isValid() and block.text(): cursor = QtGui.QTextCursor(block) cursor.movePosition(QtGui.QTextCursor.Right, QtGui.QTextCursor.KeepAnchor, self._previous_prompt_obj.length) prompt = self._make_in_prompt(previous_prompt_number) self._prompt = self._insert_html_fetching_plain_text( cursor, prompt) self._highlighter.rehighlightBlock(cursor.block()) self._previous_prompt_obj = None self._show_interpreter_prompt(previous_prompt_number + 1) def set_default_style(self, colors='lightbg'): colors = colors.lower() if colors == 'lightbg': self.style_sheet = styles.default_light_style_sheet self.syntax_style = styles.default_light_syntax_style elif colors == 'linux': self.style_sheet = styles.default_dark_style_sheet self.syntax_style = styles.default_dark_syntax_style elif colors == 'nocolor': self.style_sheet = styles.default_bw_style_sheet self.syntax_style = styles.default_bw_syntax_style else: raise KeyError("No such color scheme: %s" % colors) def _edit(self, filename, line=None): if self.custom_edit: self.custom_edit_requested.emit(filename, line) elif not self.editor: self._append_plain_text('No default editor available.\n' 'Specify a GUI text editor in the `IPythonWidget.editor` ' 'configurable to enable the %edit magic') else: try: filename = '"%s"' % filename if line and self.editor_line: command = self.editor_line.format(filename=filename, line=line) else: try: command = self.editor.format() except KeyError: command = self.editor.format(filename=filename) else: command += ' ' + filename except KeyError: self._append_plain_text('Invalid editor command.\n') else: try: Popen(command, shell=True) except OSError: msg = 'Opening editor with command "%s" failed.\n' self._append_plain_text(msg % command) def _make_in_prompt(self, number): try: body = self.in_prompt % number except TypeError: from xml.sax.saxutils import escape body = escape(self.in_prompt) return '<span class="in-prompt">%s</span>' % body def _make_continuation_prompt(self, prompt): end_chars = '...: ' space_count = len(prompt.lstrip('\n')) - len(end_chars) body = '&nbsp;' * space_count + end_chars return '<span class="in-prompt">%s</span>' % body
MIT License
memgraph/mage
python/mage/graph_coloring_module/operators/mutations/MIS_mutation.py
MISMutation.mutate
python
def mutate(
    self, graph: Graph, individual: Individual, parameters: Dict[str, Any] = None
) -> Tuple[Individual, List[int]]:
    maximal_independent_set = self._MIS(graph)
    if len(maximal_independent_set) > 0:
        color = individual[maximal_independent_set[0]]
        colors = [color for _ in range(len(maximal_independent_set))]
        mutated_individual = individual.replace_units(
            maximal_independent_set, colors
        )
        return mutated_individual, maximal_independent_set
    return individual, []
A function that mutates the given individual and returns the new individual and nodes that were changed.
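To make the operator concrete, here is a self-contained sketch of the same idea on a plain adjacency dict rather than the mage Graph/Individual classes; the helper names below are illustrative, not library API. It greedily builds a maximal independent set and recolors all of its nodes to a single color, mirroring what replace_units does above.

import random

def greedy_mis(adj):
    # Greedily build a maximal independent set from {node: set_of_neighbours}.
    nodes = list(adj)
    random.shuffle(nodes)
    mis, blocked = [], set()
    for v in nodes:
        if v not in blocked:
            mis.append(v)
            blocked.add(v)
            blocked.update(adj[v])
    return mis

def mis_mutate(adj, coloring):
    # Recolor every node of one maximal independent set with a single color.
    mis = greedy_mis(adj)
    if not mis:
        return dict(coloring), []
    color = coloring[mis[0]]
    mutated = dict(coloring)
    for v in mis:
        mutated[v] = color
    return mutated, mis

adj = {0: {1, 2}, 1: {0, 2}, 2: {0, 1, 3}, 3: {2}}
print(mis_mutate(adj, {0: 0, 1: 1, 2: 2, 3: 1}))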
https://github.com/memgraph/mage/blob/69f0242aceb47fc383d0e56077f08b2b061273b5/python/mage/graph_coloring_module/operators/mutations/MIS_mutation.py#L16-L30
import random from mage.graph_coloring_module.operators.mutations.mutation import Mutation from typing import Dict, Any, Tuple, List from mage.graph_coloring_module.graph import Graph from mage.graph_coloring_module.components.individual import Individual class MISMutation(Mutation): def __str__(self): return "MISMutation"
Apache License 2.0
mwaskom/seaborn
seaborn/palettes.py
_parse_cubehelix_args
python
def _parse_cubehelix_args(argstr):

    if argstr.startswith("ch:"):
        argstr = argstr[3:]

    if argstr.endswith("_r"):
        reverse = True
        argstr = argstr[:-2]
    else:
        reverse = False

    if not argstr:
        return [], {"reverse": reverse}

    all_args = argstr.split(",")

    args = [float(a.strip(" ")) for a in all_args if "=" not in a]

    kwargs = [a.split("=") for a in all_args if "=" in a]
    kwargs = {k.strip(" "): float(v.strip(" ")) for k, v in kwargs}

    kwarg_map = dict(
        s="start", r="rot", g="gamma",
        h="hue", l="light", d="dark",
    )

    kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}

    if reverse:
        kwargs["reverse"] = True

    return args, kwargs
Turn stringified cubehelix params into args/kwargs.
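A brief usage sketch, assuming the seaborn version shown here is installed: positional numbers become args, key=value pairs become kwargs (with the single-letter aliases expanded), and a trailing _r sets reverse.

from seaborn.palettes import _parse_cubehelix_args, color_palette

args, kwargs = _parse_cubehelix_args("ch:.5,-.75,s=.25,light=.9_r")
print(args)    # [0.5, -0.75]
print(kwargs)  # {'start': 0.25, 'light': 0.9, 'reverse': True}

# The public entry point routes any "ch:" string through this parser.
pal = color_palette("ch:start=.2,rot=-.3", 8)
print(pal.as_hex())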
https://github.com/mwaskom/seaborn/blob/59e61256a704e709007685c9840595b53221e367/seaborn/palettes.py#L945-L977
import colorsys from itertools import cycle import numpy as np import matplotlib as mpl from .external import husl from .utils import desaturate, get_color_cycle from .colors import xkcd_rgb, crayons __all__ = ["color_palette", "hls_palette", "husl_palette", "mpl_palette", "dark_palette", "light_palette", "diverging_palette", "blend_palette", "xkcd_palette", "crayon_palette", "cubehelix_palette", "set_color_codes"] SEABORN_PALETTES = dict( deep=["#4C72B0", "#DD8452", "#55A868", "#C44E52", "#8172B3", "#937860", "#DA8BC3", "#8C8C8C", "#CCB974", "#64B5CD"], deep6=["#4C72B0", "#55A868", "#C44E52", "#8172B3", "#CCB974", "#64B5CD"], muted=["#4878D0", "#EE854A", "#6ACC64", "#D65F5F", "#956CB4", "#8C613C", "#DC7EC0", "#797979", "#D5BB67", "#82C6E2"], muted6=["#4878D0", "#6ACC64", "#D65F5F", "#956CB4", "#D5BB67", "#82C6E2"], pastel=["#A1C9F4", "#FFB482", "#8DE5A1", "#FF9F9B", "#D0BBFF", "#DEBB9B", "#FAB0E4", "#CFCFCF", "#FFFEA3", "#B9F2F0"], pastel6=["#A1C9F4", "#8DE5A1", "#FF9F9B", "#D0BBFF", "#FFFEA3", "#B9F2F0"], bright=["#023EFF", "#FF7C00", "#1AC938", "#E8000B", "#8B2BE2", "#9F4800", "#F14CC1", "#A3A3A3", "#FFC400", "#00D7FF"], bright6=["#023EFF", "#1AC938", "#E8000B", "#8B2BE2", "#FFC400", "#00D7FF"], dark=["#001C7F", "#B1400D", "#12711C", "#8C0800", "#591E71", "#592F0D", "#A23582", "#3C3C3C", "#B8850A", "#006374"], dark6=["#001C7F", "#12711C", "#8C0800", "#591E71", "#B8850A", "#006374"], colorblind=["#0173B2", "#DE8F05", "#029E73", "#D55E00", "#CC78BC", "#CA9161", "#FBAFE4", "#949494", "#ECE133", "#56B4E9"], colorblind6=["#0173B2", "#029E73", "#D55E00", "#CC78BC", "#ECE133", "#56B4E9"] ) MPL_QUAL_PALS = { "tab10": 10, "tab20": 20, "tab20b": 20, "tab20c": 20, "Set1": 9, "Set2": 8, "Set3": 12, "Accent": 8, "Paired": 12, "Pastel1": 9, "Pastel2": 8, "Dark2": 8, } QUAL_PALETTE_SIZES = MPL_QUAL_PALS.copy() QUAL_PALETTE_SIZES.update({k: len(v) for k, v in SEABORN_PALETTES.items()}) QUAL_PALETTES = list(QUAL_PALETTE_SIZES.keys()) class _ColorPalette(list): def __enter__(self): from .rcmod import set_palette self._orig_palette = color_palette() set_palette(self) return self def __exit__(self, *args): from .rcmod import set_palette set_palette(self._orig_palette) def as_hex(self): hex = [mpl.colors.rgb2hex(rgb) for rgb in self] return _ColorPalette(hex) def _repr_html_(self): s = 55 n = len(self) html = f'<svg width="{n * s}" height="{s}">' for i, c in enumerate(self.as_hex()): html += ( f'<rect x="{i * s}" y="0" width="{s}" height="{s}" style="fill:{c};' 'stroke-width:2;stroke:rgb(255,255,255)"/>' ) html += '</svg>' return html def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False): if palette is None: palette = get_color_cycle() if n_colors is None: n_colors = len(palette) elif not isinstance(palette, str): palette = palette if n_colors is None: n_colors = len(palette) else: if n_colors is None: n_colors = QUAL_PALETTE_SIZES.get(palette, 6) if palette in SEABORN_PALETTES: palette = SEABORN_PALETTES[palette] elif palette == "hls": palette = hls_palette(n_colors, as_cmap=as_cmap) elif palette == "husl": palette = husl_palette(n_colors, as_cmap=as_cmap) elif palette.lower() == "jet": raise ValueError("No.") elif palette.startswith("ch:"): args, kwargs = _parse_cubehelix_args(palette) palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap) elif palette.startswith("light:"): _, color = palette.split(":") reverse = color.endswith("_r") if reverse: color = color[:-2] palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap) elif 
palette.startswith("dark:"): _, color = palette.split(":") reverse = color.endswith("_r") if reverse: color = color[:-2] palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap) elif palette.startswith("blend:"): _, colors = palette.split(":") colors = colors.split(",") palette = blend_palette(colors, n_colors, as_cmap=as_cmap) else: try: palette = mpl_palette(palette, n_colors, as_cmap=as_cmap) except ValueError: raise ValueError("%s is not a valid palette name" % palette) if desat is not None: palette = [desaturate(c, desat) for c in palette] if not as_cmap: pal_cycle = cycle(palette) palette = [next(pal_cycle) for _ in range(n_colors)] try: palette = map(mpl.colors.colorConverter.to_rgb, palette) palette = _ColorPalette(palette) except ValueError: raise ValueError(f"Could not generate a palette for {palette}") return palette def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False): if as_cmap: n_colors = 256 hues = np.linspace(0, 1, int(n_colors) + 1)[:-1] hues += h hues %= 1 hues -= hues.astype(int) palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues] if as_cmap: return mpl.colors.ListedColormap(palette, "hls") else: return _ColorPalette(palette) def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): if as_cmap: n_colors = 256 hues = np.linspace(0, 1, int(n_colors) + 1)[:-1] hues += h hues %= 1 hues *= 359 s *= 99 l *= 99 palette = [_color_to_rgb((h_i, s, l), input="husl") for h_i in hues] if as_cmap: return mpl.colors.ListedColormap(palette, "hsl") else: return _ColorPalette(palette) def mpl_palette(name, n_colors=6, as_cmap=False): if name.endswith("_d"): sub_name = name[:-2] if sub_name.endswith("_r"): reverse = True sub_name = sub_name[:-2] else: reverse = False pal = color_palette(sub_name, 2) + ["#333333"] if reverse: pal = pal[::-1] cmap = blend_palette(pal, n_colors, as_cmap=True) else: cmap = mpl.cm.get_cmap(name) if name in MPL_QUAL_PALS: bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors] else: bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1] palette = list(map(tuple, cmap(bins)[:, :3])) if as_cmap: return cmap else: return _ColorPalette(palette) def _color_to_rgb(color, input): if input == "hls": color = colorsys.hls_to_rgb(*color) elif input == "husl": color = husl.husl_to_rgb(*color) color = tuple(np.clip(color, 0, 1)) elif input == "xkcd": color = xkcd_rgb[color] return mpl.colors.to_rgb(color) def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): rgb = _color_to_rgb(color, input) h, s, l = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * s, 15 gray = _color_to_rgb((h, gray_s, gray_l), input="husl") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap) def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): rgb = _color_to_rgb(color, input) h, s, l = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * s, 95 gray = _color_to_rgb((h, gray_s, gray_l), input="husl") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap) def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, center="light", as_cmap=False): palfunc = dict(dark=dark_palette, light=light_palette)[center] n_half = int(128 - (sep // 2)) neg = palfunc((h_neg, s, l), n_half, reverse=True, input="husl") pos = palfunc((h_pos, s, l), n_half, input="husl") midpoint = dict(light=[(.95, .95, .95)], dark=[(.133, .133, .133)])[center] mid = midpoint * sep pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap) return pal def 
blend_palette(colors, n_colors=6, as_cmap=False, input="rgb"): colors = [_color_to_rgb(color, input) for color in colors] name = "blend" pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors) if not as_cmap: rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] pal = _ColorPalette(map(tuple, rgb_array)) return pal def xkcd_palette(colors): palette = [xkcd_rgb[name] for name in colors] return color_palette(palette, len(palette)) def crayon_palette(colors): palette = [crayons[name] for name in colors] return color_palette(palette, len(palette)) def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8, light=.85, dark=.15, reverse=False, as_cmap=False): def get_color_function(p0, p1): def color(x): xg = x ** gamma a = hue * xg * (1 - xg) / 2 phi = 2 * np.pi * (start / 3 + rot * x) return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi)) return color cdict = { "red": get_color_function(-0.14861, 1.78277), "green": get_color_function(-0.29227, -0.90649), "blue": get_color_function(1.97294, 0.0), } cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict) x = np.linspace(light, dark, int(n_colors)) pal = cmap(x)[:, :3].tolist() if reverse: pal = pal[::-1] if as_cmap: x_256 = np.linspace(light, dark, 256) if reverse: x_256 = x_256[::-1] pal_256 = cmap(x_256) cmap = mpl.colors.ListedColormap(pal_256, "seaborn_cubehelix") return cmap else: return _ColorPalette(pal)
BSD 3-Clause New or Revised License
ooici/pyon
pyon/util/file_sys.py
FileSystem.secure_file
python
def secure_file():
    f = FileSystem.mktemp()
    FileSystem.unlink(f.name)
    return f
A method for secure file I/O; the file is immediately unlinked after creation.
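Because the pyon FileSystem needs container configuration to initialise, here is a standalone sketch of the same create-then-unlink pattern using only the standard library (POSIX semantics; Windows refuses to unlink an open file).

import os
import tempfile

def secure_scratch_file():
    # Create a named temp file, then unlink the name immediately;
    # the open handle keeps the data accessible until it is closed.
    f = tempfile.NamedTemporaryFile(delete=False)
    os.unlink(f.name)
    return f

f = secure_scratch_file()
f.write(b"ephemeral data")
f.seek(0)
print(f.read())  # b'ephemeral data'
f.close()        # storage is reclaimed once the last handle is gone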
https://github.com/ooici/pyon/blob/122c629290d27f32f2f41dafd5c12469295e8acf/pyon/util/file_sys.py#L263-L269
import errno import StringIO import tempfile import shutil import os import re import random import string from pyon.util.log import log from pyon.util.containers import DotDict from pyon.core.bootstrap import CFG as bootcfg, get_sys_name class FileSystemError(Exception): status_code = 411 def get_status_code(self): return self.status_code def get_error_message(self): return self.message def __str__(self): return str(self.get_status_code()) + " - " + str(self.get_error_message()) class FileSystem(object): FS_DIRECTORY_LIST = ['RESOURCE','TEMP','LIBRARY','CACHE','RUN','USERS','LOG','FILESTORE'] FS_DIRECTORY = DotDict(zip(FS_DIRECTORY_LIST,FS_DIRECTORY_LIST)) FS = DotDict(zip(FS_DIRECTORY_LIST, FS_DIRECTORY_LIST)) root = '' _instance = None def __new__(cls, *args, **kwargs): if not cls._instance: cls._instance = super(FileSystem, cls).__new__(cls, *args, **kwargs) return cls._instance @classmethod def _clean(cls, config): if not cls.root: cls.root = os.path.join(config.get_safe('container.filesystem.root', '/tmp/ion'), get_sys_name()) log.info('Removing %s', cls.root) if os.path.exists(cls.root): shutil.rmtree(cls.root) def __init__(self, CFG): if not FileSystem.root: FileSystem.root = os.path.join(CFG.get_safe('container.filesystem.root', '/tmp/ion'), get_sys_name()) for k,v in FileSystem.FS_DIRECTORY.iteritems(): s = v.lower() conf = CFG.get_safe('container.filesystem.%s' % s, None) if conf: FileSystem.FS_DIRECTORY[k] = conf else: FileSystem.FS_DIRECTORY[k] = os.path.join(FileSystem.root, s) if not FileSystem._sandbox(FileSystem.FS_DIRECTORY[k]): raise OSError('You are attempting to perform an operation beyond the scope of your permission. (%s is set to \'%s\')' % (k,FileSystem.FS_DIRECTORY[k])) if not os.path.exists(FS_DIRECTORY[k]): log.debug('Making path: %s', FS_DIRECTORY[k]) self.__makedirs(FS_DIRECTORY[k]) @classmethod def __makedirs(cls,path): try: os.makedirs(path) except OSError as ose: if ose.errno != errno.EEXIST: raise @staticmethod def get(path): if path.startswith('/'): path = path[1:] tree = path.split('/') if tree[0].upper() not in FS: return None root = FileSystem.FS_DIRECTORY[tree.pop(0).upper()] fullpath = '/'.join([root] + tree) return fullpath @staticmethod def is_safe(path): for root in FileSystem.FS_DIRECTORY.itervalues(): if path.startswith(root): return True return False @staticmethod def _sandbox(path): black_list = [ '/', '/bin', '/sbin', '/usr/bin', '/usr/sbin' '/usr/local/sbin', '/etc', '/usr/etc', '/home', '/var', '/lib', '/usr/lib', '/lost+found', '/boot', '/dev', '/media', '/proc', '/sys', '/root', '/selinux', '/srv', '/mnt' '/Application', '/Developer', '/Library', '/Network', '/System', '/Users', '/Volumes', '/include' '/private', '/cores' ] if path in black_list: return False return True @staticmethod def _parse_filename(file): ret = re.sub(r'\s', '_', file) ret = re.sub(r'[~!@#$%^&*()-+,/\'\";:`<>?\\\]\[\}\{=]+', '', ret) return ret[:64] @classmethod def get_url(cls,fs, filename, ext=''): path = os.path.join(FS_DIRECTORY[fs], '%s%s' % (FileSystem._parse_filename(filename), ext)) cls.__makedirs(path) return path @classmethod def get_hierarchical_url(cls,fs, filename, ext=''): clean_name = FileSystem._parse_filename(filename) if len(clean_name) < 6: return os.path.join(FS_DIRECTORY[fs], '%s%s' % (clean_name, ext)) else: path = os.path.join(FS_DIRECTORY[fs], "%s/%s" % (clean_name[0:2], clean_name[2:4])) cls.__makedirs(path) return os.path.join(path, '%s%s' % (clean_name[4:], ext)) @classmethod def get_extended_url(cls,path): if ':' in path: s = 
path.split(':') base = FileSystem.FS_DIRECTORY[s[0]] path = os.path.join(base, s[1]) cls.__makedirs(path) return path @staticmethod def mktemp(filename='', ext=''): if filename: return open(FileSystem.get_url(fs=FS.TEMP,filename=filename,ext=ext),'w+b') else: rand_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(24)) return open(FileSystem.get_url(fs=FS.TEMP,filename=rand_str), 'w+b') @staticmethod def mkstemp(): return tempfile.TemporaryFile(mode='w+b', dir=FS_DIRECTORY[FS.TEMP]) @staticmethod def unlink(filepath): if not FileSystem.is_safe(filepath): raise FileSystemError('It is not safe to remove %s, it is outside the scope of your permission.' % filepath) if not os.path.exists(filepath): raise FileSystemError('%s does not exist.' % filepath) try: os.unlink(filepath) except OSError as e: raise OSError('%s: %s' % (filepath, e.message)) @staticmethod def memory_file(): return StringIO.StringIO() @staticmethod
BSD 2-Clause Simplified License
dariah-de/topicsexplorer
topicsexplorer/views.py
overview_topics
python
def overview_topics():
    logging.debug("Calling topics overview page endpoint...")
    logging.info("Get document-topic distributions...")
    response = get_document_topic_distributions()
    document_topic = pd.read_json(response, orient="index")
    logging.info("Get token frequencies...")
    response = get_token_frequencies()
    token_freqs = json.loads(response)
    logging.info("Add frequencies to weights...")
    document_topic = document_topic.multiply(token_freqs, axis=0)
    logging.info("Sum the weights...")
    dominance = document_topic.sum(axis=0)
    logging.info("Scale weights...")
    proportions = utils.scale(dominance)
    proportions = pd.Series(proportions, index=dominance.index)
    proportions = proportions.sort_values(ascending=False)
    proportions = list(utils.series2array(proportions))
    corpus_size = get_corpus_size()
    number_topics = get_number_of_topics()
    logging.debug("Rendering topics overview template...")
    return flask.render_template(
        "overview-topics.html",
        current="topics",
        help=True,
        reset=True,
        topics=True,
        documents=True,
        document_topic_distributions=True,
        parameters=True,
        export_data=True,
        proportions=proportions,
        corpus_size=corpus_size,
        number_topics=number_topics,
    )
Topics overview page.
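A minimal pandas sketch of the aggregation this view performs; the data is invented and a plain sum-normalisation stands in for utils.scale, whose exact behaviour is not shown here.

import pandas as pd

# Toy document-topic weights (rows: documents, columns: topics).
document_topic = pd.DataFrame(
    {"topic0": [0.2, 0.6], "topic1": [0.8, 0.4]},
    index=["doc_a", "doc_b"],
)
token_freqs = pd.Series({"doc_a": 100, "doc_b": 500})

# Weight each document's topic distribution by its token count, then sum per topic.
dominance = document_topic.multiply(token_freqs, axis=0).sum(axis=0)

# Stand-in for utils.scale: express dominance as a proportion of the total.
proportions = (dominance / dominance.sum()).sort_values(ascending=False)
print(proportions)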
https://github.com/dariah-de/topicsexplorer/blob/d0fd35dc62e9751a956456392b2d10f609156167/topicsexplorer/views.py#L65-L106
import datetime import json import logging import multiprocessing from pathlib import Path import time import flask import pandas as pd import werkzeug from topicsexplorer import database from topicsexplorer import utils from topicsexplorer import workflow utils.init_logging(logging.INFO) web = utils.init_app("topicsexplorer") @web.route("/") def index(): logging.debug("Rendering home page template...") utils.init_db(web) return flask.render_template("index.html", help=True) @web.route("/help") def help(): logging.debug("Rendering help page template...") return flask.render_template("help.html", go_back=True) @web.route("/error") def error(): with utils.LOGFILE.open("r", encoding="utf-8") as logfile: log = logfile.read().split("\n")[-20:] return flask.render_template( "error.html", reset=True, log="\n".join(log), tempdir=utils.TEMPDIR ) @web.route("/modeling", methods=["POST"]) def modeling(): logging.debug("Calling modeling page endpoint...") global start global process start = time.time() logging.info("Initializing topic modeling process...") logging.info("Started topic modeling process.") workflow.wrapper() logging.debug("Rendering modeling page template...") return flask.render_template("modeling.html", abort=True) @web.route("/overview-topics")
Apache License 2.0
hyperledger/sawtooth-core
integration/sawtooth_integration/tests/test_network_permissioning.py
wait_for_consensus
python
def wait_for_consensus(clients, tolerance=1, amount=4, timeout=100):
    initial_time = time.time()
    message = "Timed out waiting for consensus"
    block_height = len(clients[0].block_list())
    while block_height <= amount:
        block_height = len(clients[0].block_list())
        assert timeout > time.time() - initial_time, message
        for c in clients:
            c.send()
    not_in_consensus = deque(clients[1:])
    initial_time = time.time()
    while not_in_consensus:
        client = not_in_consensus.popleft()
        if not within_tolerance(client.block_list(),
                                clients[0].block_list(),
                                tolerance=tolerance):
            not_in_consensus.append(client)
        assert timeout > time.time() - initial_time, message
        for c in clients:
            c.send()
    LOGGER.warning("All validators in consensus")
Loop until validators are within tolerance blocks of consensus. This happens in two phases: first there must be amount blocks in the chain, then each other validator can have at most tolerance different blocks from the genesis validator.

Args:
    clients (list of Client): The clients of each REST API.
    tolerance (int): The number of blocks that can differ between validators.
    amount (int): The number of blocks needed before a consensus check is valid.
    timeout (int): The number of seconds that each phase of the wait can take.
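A stripped-down, self-contained sketch of the poll-until-condition-or-timeout pattern used in both phases; the names and the stub chain below are illustrative, not from the test suite.

import time

def wait_until(condition, on_tick, timeout=10.0, interval=0.1, message="Timed out"):
    # Poll `condition()` until it holds, driving progress via `on_tick()` each round.
    deadline = time.time() + timeout
    while not condition():
        assert time.time() < deadline, message
        on_tick()
        time.sleep(interval)

# Wait until a fake chain reaches 4 blocks, "sending" a batch on every tick.
chain = []
wait_until(
    condition=lambda: len(chain) >= 4,
    on_tick=lambda: chain.append("block-%d" % len(chain)),
    timeout=5.0,
)
print(chain)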
https://github.com/hyperledger/sawtooth-core/blob/2ab9b8fb8383887dc33c67a72e194c30f7b3f7dc/integration/sawtooth_integration/tests/test_network_permissioning.py#L400-L435
from collections import deque import hashlib import logging import os import subprocess from tempfile import mkdtemp import time import unittest from uuid import uuid4 import cbor import toml from sawtooth_signing import create_context from sawtooth_signing import CryptoFactory from sawtooth_processor_test.message_factory import MessageFactory from sawtooth_integration.tests.integration_tools import SetSawtoothHome from sawtooth_integration.tests import node_controller as NodeController from sawtooth_integration.tests.integration_tools import RestClient LOGGER = logging.getLogger(__name__) class TestNetworkPermissioning(unittest.TestCase): def setUp(self): self.clients = [] self.sawtooth_home = {} self.processes = [] def tearDown(self): NodeController.stop_node(self.processes) def test_network_trust_permissioning(self): walter = Admin("http://127.0.0.1:{}".format(8008 + 0)) sawtooth_home0 = mkdtemp() self.sawtooth_home[0] = sawtooth_home0 sawtooth_home1 = mkdtemp() self.sawtooth_home[1] = sawtooth_home1 with SetSawtoothHome(sawtooth_home0): write_validator_config( sawtooth_home0, roles={"network": "trust"}, endpoint="tcp://127.0.0.1:{}".format(8800 + 0), bind=[ "network:tcp://127.0.0.1:{}".format(8800 + 0), "component:tcp://127.0.0.1:{}".format(4004 + 0), "consensus:tcp://127.0.0.1:{}".format(5050 + 0) ], seeds=["tcp://127.0.0.1:{}".format(8800 + 1)], peering="dynamic", scheduler='parallel') validator_non_genesis_init(sawtooth_home1) validator_genesis_init( sawtooth_home0, sawtooth_home1, identity_pub_key=walter.pub_key, role="network") self.processes.extend(start_validator(0, sawtooth_home0)) self.clients.append(Client(NodeController.http_address(0))) with SetSawtoothHome(sawtooth_home1): write_validator_config( sawtooth_home1, roles={"network": "trust"}, endpoint="tcp://127.0.0.1:{}".format(8800 + 1), bind=[ "network:tcp://127.0.0.1:{}".format(8800 + 1), "component:tcp://127.0.0.1:{}".format(4004 + 1), "consensus:tcp://127.0.0.1:{}".format(5050 + 1) ], peering="dynamic", seeds=["tcp://127.0.0.1:{}".format(8800 + 0)], scheduler='parallel') self.processes.extend(start_validator(1, sawtooth_home1)) self.clients.append(Client(NodeController.http_address(1))) with open( os.path.join(self.sawtooth_home[1], 'keys', 'validator.pub'), 'r') as infile: non_genesis_key = infile.read().strip('\n') with open( os.path.join(self.sawtooth_home[0], 'keys', 'validator.pub'), 'r') as infile: genesis_key = infile.read().strip('\n') wait_for_consensus(self.clients, amount=2) walter.set_public_key_for_role( "non_genesis_out_of_network", "network", permit_keys=[genesis_key], deny_keys=[non_genesis_key]) wait_for_out_of_consensus(self.clients, tolerance=2) show_blocks(self.clients[0].block_list()) show_blocks(self.clients[1].block_list()) walter.set_public_key_for_role( "allow_all", "network", permit_keys=[genesis_key, non_genesis_key], deny_keys=[]) wait_for_consensus(self.clients) show_blocks(self.clients[0].block_list()) show_blocks(self.clients[1].block_list()) walter.set_public_key_for_role( "non_genesis_out_of_network", "network.consensus", permit_keys=[genesis_key], deny_keys=[non_genesis_key]) wait_for_out_of_consensus(self.clients, tolerance=2) show_blocks(self.clients[0].block_list()) show_blocks(self.clients[1].block_list()) walter.set_public_key_for_role( "allow_all_for_consensus", "network.consensus", permit_keys=[genesis_key, non_genesis_key], deny_keys=[]) wait_for_consensus(self.clients) show_blocks(self.clients[0].block_list()) show_blocks(self.clients[1].block_list()) def 
test_network_challenge_permissioning(self): walter = Admin("http://127.0.0.1:{}".format(8008 + 2)) processes = [] sawtooth_home0 = mkdtemp() self.sawtooth_home[0] = sawtooth_home0 sawtooth_home1 = mkdtemp() self.sawtooth_home[1] = sawtooth_home1 with SetSawtoothHome(sawtooth_home0): write_validator_config( sawtooth_home0, roles={"network": "challenge"}, endpoint="tcp://127.0.0.1:{}".format(8800 + 2), bind=[ "network:tcp://127.0.0.1:{}".format(8800 + 2), "component:tcp://127.0.0.1:{}".format(4004 + 2), "consensus:tcp://127.0.0.1:{}".format(5050 + 2) ], seeds=["tcp://127.0.0.1:{}".format(8800 + 3)], peering="dynamic", scheduler='parallel') validator_non_genesis_init(sawtooth_home1) validator_genesis_init( sawtooth_home0, sawtooth_home1, identity_pub_key=walter.pub_key, role="network") processes.extend(start_validator(2, sawtooth_home0)) self.clients.append(Client(NodeController.http_address(2))) with SetSawtoothHome(sawtooth_home1): write_validator_config( sawtooth_home1, roles={"network": "challenge"}, endpoint="tcp://127.0.0.1:{}".format(8800 + 3), bind=[ "network:tcp://127.0.0.1:{}".format(8800 + 3), "component:tcp://127.0.0.1:{}".format(4004 + 3), "consensus:tcp://127.0.0.1:{}".format(5050 + 3) ], peering="dynamic", seeds=["tcp://127.0.0.1:{}".format(8800 + 2)], scheduler='parallel') processes.extend(start_validator(3, sawtooth_home1)) self.clients.append(Client(NodeController.http_address(3))) with open(os.path.join(self.sawtooth_home[1], 'keys', 'validator.pub'), 'r') as infile: non_genesis_key = infile.read().strip('\n') with open(os.path.join(self.sawtooth_home[0], 'keys', 'validator.pub'), 'r') as infile: genesis_key = infile.read().strip('\n') wait_for_consensus(self.clients, amount=2) walter.set_public_key_for_role( "non_genesis_out_of_network", "network", permit_keys=[genesis_key], deny_keys=[non_genesis_key]) wait_for_out_of_consensus(self.clients, tolerance=2) show_blocks(self.clients[0].block_list()) show_blocks(self.clients[1].block_list()) walter.set_public_key_for_role( "allow_all_for_consensus", "network", permit_keys=[genesis_key, non_genesis_key], deny_keys=[]) wait_for_consensus(self.clients) show_blocks(self.clients[0].block_list()) show_blocks(self.clients[1].block_list()) walter.set_public_key_for_role( "non_genesis_out_of_network", "network.consensus", permit_keys=[genesis_key], deny_keys=[non_genesis_key]) wait_for_out_of_consensus(self.clients, tolerance=2) show_blocks(self.clients[0].block_list()) show_blocks(self.clients[1].block_list()) walter.set_public_key_for_role( "allow_all_for_consensus", "network.consensus", permit_keys=[genesis_key, non_genesis_key], deny_keys=[]) wait_for_consensus(self.clients) show_blocks(self.clients[0].block_list()) show_blocks(self.clients[1].block_list()) def write_validator_config(sawtooth_home, **kwargs): with open(os.path.join(sawtooth_home, 'etc', 'validator.toml'), mode='w') as out: toml.dump(kwargs, out) def start_validator(num, sawtooth_home): return NodeController.start_node( num, NodeController.intkey_config_identity, NodeController.everyone_peers_with_everyone, NodeController.even_parallel_odd_serial, sawtooth_home, NodeController.simple_validator_cmds) def show_blocks(block_list): blocks = [("Block Num", "Block ID", "Signer Key")] + block_list output = "\n" + "\n".join([ "{:^5} {:^21} {:^21}".format(item[0], item[1][:10], item[2][:10]) for item in blocks ]) LOGGER.warning(output) class Client: def __init__(self, rest_endpoint): context = create_context('secp256k1') private_key = context.new_random_private_key() 
self.priv_key = private_key.as_hex() self.pub_key = context.get_public_key(private_key).as_hex() self.signer = CryptoFactory(context).new_signer(private_key) self._namespace = hashlib.sha512('intkey'.encode()).hexdigest()[:6] self._factory = MessageFactory( 'intkey', '1.0', self._namespace, signer=self.signer) self._rest = RestClient(rest_endpoint) def send(self): name = uuid4().hex[:20] txns = [ self._factory.create_transaction( cbor.dumps({ 'Name': name, 'Verb': 'set', 'Value': 1000 }), inputs=[ self._namespace + self._factory.sha512(name.encode())[-64:] ], outputs=[ self._namespace + self._factory.sha512(name.encode())[-64:] ], deps=[]) ] self._rest.send_batches(self._factory.create_batch(txns)) def block_list(self): return [(item['header']['block_num'], item['header_signature'], item['header']['signer_public_key']) for item in self._rest.block_list()['data']] class Admin: def __init__(self, rest_endpoint): context = create_context('secp256k1') private_key = context.new_random_private_key() self.priv_key = private_key.as_hex() self.pub_key = context.get_public_key(private_key).as_hex() self._priv_key_file = os.path.join("/tmp", uuid4().hex[:20]) with open(self._priv_key_file, mode='w') as out: out.write(self.priv_key) self._rest_endpoint = rest_endpoint def set_public_key_for_role(self, policy, role, permit_keys, deny_keys): permits = ["PERMIT_KEY {}".format(key) for key in permit_keys] denies = ["DENY_KEY {}".format(key) for key in deny_keys] self._run_identity_commands(policy, role, denies + permits) def _run_identity_commands(self, policy, role, rules): subprocess.run( ['sawtooth', 'identity', 'policy', 'create', '-k', self._priv_key_file, '--wait', '20', '--url', self._rest_endpoint, policy, *rules], check=True) subprocess.run( ['sawtooth', 'identity', 'role', 'create', '-k', self._priv_key_file, '--wait', '45', '--url', self._rest_endpoint, role, policy], check=True)
Apache License 2.0
onshape-public/onshape-clients
python/onshape_client/oas/models/btm_sketch_curve_segment155_all_of.py
BTMSketchCurveSegment155AllOf.__init__
python
def __init__(
    self,
    _check_type=True,
    _from_server=False,
    _path_to_item=(),
    _configuration=None,
    **kwargs
):
    self._data_store = {}
    self._check_type = _check_type
    self._from_server = _from_server
    self._path_to_item = _path_to_item
    self._configuration = _configuration

    for var_name, var_value in six.iteritems(kwargs):
        if (
            var_name not in self.attribute_map
            and self._configuration is not None
            and self._configuration.discard_unknown_keys
            and self.additional_properties_type is None
        ):
            continue
        setattr(self, var_name, var_value)
btm_sketch_curve_segment155_all_of.BTMSketchCurveSegment155AllOf - a model defined in OpenAPI

Keyword Args:
    _check_type (bool): if True, values for parameters in openapi_types
        will be type checked and a TypeError will be raised if the wrong
        type is input. Defaults to True
    _path_to_item (tuple/list): This is a list of keys or values to
        drill down to the model in received_data when deserializing
        a response
    _from_server (bool): True if the data is from the server
        False if the data is from the client (default)
    _configuration (Configuration): the instance to use when
        deserializing a file_type parameter. If passed, type conversion
        is attempted If omitted no type conversion is done.
    bt_type (str): [optional]  # noqa: E501
    end_param (float): [optional]  # noqa: E501
    end_point_id (str): [optional]  # noqa: E501
    start_param (float): [optional]  # noqa: E501
    start_point_id (str): [optional]  # noqa: E501
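A compact standalone sketch of the pattern this generated constructor follows, using a made-up class: accept arbitrary keyword arguments and set only the ones declared in attribute_map, discarding unknown keys.

class KwargsModel:
    # Declared fields (python name -> wire name), mirroring a generated attribute_map.
    attribute_map = {"bt_type": "btType", "start_param": "startParam", "end_param": "endParam"}

    def __init__(self, discard_unknown_keys=True, **kwargs):
        for name, value in kwargs.items():
            if name not in self.attribute_map and discard_unknown_keys:
                continue  # drop keys the schema does not declare
            setattr(self, name, value)

seg = KwargsModel(bt_type="BTMSketchCurveSegment-155", start_param=0.0, end_param=1.0, bogus=42)
print(vars(seg))  # 'bogus' is discarded; the declared fields are kept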
https://github.com/onshape-public/onshape-clients/blob/20843a00c628e516e7219e17a23ec4ef2bf9f16f/python/onshape_client/oas/models/btm_sketch_curve_segment155_all_of.py#L109-L155
from __future__ import absolute_import import re import sys import six import nulltype from onshape_client.oas.model_utils import ( ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) class BTMSketchCurveSegment155AllOf(ModelNormal): allowed_values = {} validations = {} additional_properties_type = None @staticmethod def openapi_types(): return { "bt_type": (str,), "end_param": (float,), "end_point_id": (str,), "start_param": (float,), "start_point_id": (str,), } @staticmethod def discriminator(): return None attribute_map = { "bt_type": "btType", "end_param": "endParam", "end_point_id": "endPointId", "start_param": "startParam", "start_point_id": "startPointId", } @staticmethod def _composed_schemas(): return None required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", ] )
MIT License
dwavesystems/dimod
dimod/discrete/discrete_quadratic_model.py
DiscreteQuadraticModel.set_linear
python
def set_linear(self, v, biases):
    self._cydqm.set_linear(self.variables.index(v), np.asarray(biases))
Set the linear biases associated with `v`.

Args:
    v: A variable in the discrete quadratic model.
    biases (array-like): The linear biases in an array.
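A short usage sketch, assuming a dimod release that ships DiscreteQuadraticModel (as in the version above) is installed.

from dimod import DiscreteQuadraticModel

dqm = DiscreteQuadraticModel()
dqm.add_variable(3, label="color")         # a discrete variable with 3 cases
dqm.set_linear("color", [0.5, -1.0, 2.0])  # one linear bias per case
print(dqm.get_linear("color"))             # [ 0.5 -1.   2. ]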
https://github.com/dwavesystems/dimod/blob/af5a722f96250034a9099043927bf8ebc5294e40/dimod/discrete/discrete_quadratic_model.py#L675-L684
import collections.abc as abc import io import json import warnings from collections import defaultdict, namedtuple import numpy as np from numpy.core.shape_base import stack from dimod.discrete.cydiscrete_quadratic_model import cyDiscreteQuadraticModel from dimod.sampleset import as_samples from dimod.serialization.fileview import VariablesSection, _BytesIO, SpooledTemporaryFile from dimod.serialization.fileview import load, read_header, write_header from dimod.variables import Variables from typing import List, Tuple, Union, Generator, Iterator LinearTriplets = Union[List[Tuple], Generator[Tuple, None, None]] __all__ = ['DiscreteQuadraticModel', 'DQM', 'CaseLabelDQM'] DQM_MAGIC_PREFIX = b'DIMODDQM' DATA_MAGIC_PREFIX = b'BIAS' LegacyDQMVectors = namedtuple( 'LegacyDQMVectors', ['case_starts', 'linear_biases', 'quadratic', 'labels']) DQMVectors = namedtuple( 'DQMVectors', ['case_starts', 'linear_biases', 'quadratic', 'labels', 'offset']) QuadraticVectors = namedtuple( 'QuadraticVectors', ['row_indices', 'col_indices', 'biases']) class VariableNeighborhood(abc.Set): __slots__ = ('_dqm', '_vi') def __init__(self, dqm, v): self._dqm = dqm self._vi = dqm.variables.index(v) def __contains__(self, u): return self._dqm.variables.index(u) in self._dqm._cydqm.adj[self._vi] def __iter__(self): for ui in self._dqm._cydqm.adj[self._vi]: yield self._dqm.variables[ui] def __len__(self): return self._dqm._cydqm.degree(self._vi) def __repr__(self): return str(dict(self)) class VariableAdjacency(abc.Mapping): __slots__ = ('_dqm',) def __init__(self, dqm): self._dqm = dqm def __getitem__(self, v): return VariableNeighborhood(self._dqm, v) def __iter__(self): yield from self._dqm.variables def __len__(self): return len(self._dqm.variables) def __repr__(self): return str(dict(self)) class DiscreteQuadraticModel: def __init__(self): self.variables = Variables() self._cydqm = cyDiscreteQuadraticModel() variables = None @property def adj(self): try: return self._adj except AttributeError: pass self._adj = adj = VariableAdjacency(self) return adj @property def offset(self): return self._cydqm.offset @offset.setter def offset(self, offset: float): self._cydqm.offset = offset def add_linear_equality_constraint(self, terms: LinearTriplets, lagrange_multiplier: float, constant: float): index_terms = ((self.variables.index(v), c, x) for v, c, x in terms) self._cydqm.add_linear_equality_constraint( index_terms, lagrange_multiplier, constant) def add_linear_inequality_constraint(self, terms: LinearTriplets, lagrange_multiplier: float, label: str, constant: int = 0, lb: int = np.iinfo(np.int64).min, ub: int = 0, slack_method: str = "log2", cross_zero: bool = False) -> LinearTriplets: if slack_method not in ['log2', 'log10', 'linear']: raise ValueError( "expected slack_method to be 'log2', 'log10' or 'linear' " f"but got {slack_method!r}") if isinstance(terms, Iterator): terms = list(terms) if int(constant) != constant or int(lb) != lb or int(ub) != ub or any( int(bias) != bias for _, _, bias in terms): warnings.warn("For constraints with fractional coefficients, " "multiply both sides of the inequality by an " "appropriate factor of ten to attain or " "approximate integer coefficients. ") terms_upper_bound = sum(v for _, _, v in terms if v > 0) terms_lower_bound = sum(v for _, _, v in terms if v < 0) ub_c = min(terms_upper_bound, ub - constant) lb_c = max(terms_lower_bound, lb - constant) if terms_upper_bound <= ub_c and terms_lower_bound >= lb_c: warnings.warn( f'Did not add constraint {label}.' 
' This constraint is feasible' ' with any value for state variables.') return [] if ub_c < lb_c: raise ValueError( f'The given constraint ({label}) is infeasible with any value' ' for state variables.') slack_upper_bound = int(ub_c - lb_c) if slack_upper_bound == 0: self.add_linear_equality_constraint(terms, lagrange_multiplier, -ub_c) return [] else: slack_terms = [] zero_constraint = False if cross_zero: if lb_c > 0 or ub_c < 0: zero_constraint = True if slack_method == "log2": num_slack = int(np.floor(np.log2(slack_upper_bound))) slack_coefficients = [2 ** j for j in range(num_slack)] if slack_upper_bound - 2 ** num_slack >= 0: slack_coefficients.append( slack_upper_bound - 2 ** num_slack + 1) for j, s in enumerate(slack_coefficients): sv = self.add_variable(2, f'slack_{label}_{j}') slack_terms.append((sv, 1, s)) if zero_constraint: sv = self.add_variable(2, f'slack_{label}_{num_slack + 1}') slack_terms.append((sv, 1, ub_c)) elif slack_method == "log10": num_dqm_vars = int(np.ceil(np.log10(slack_upper_bound+1))) for j in range(num_dqm_vars): slack_term = list(range(0, min(slack_upper_bound + 1, 10 ** (j + 1)), 10 ** j))[1:] if j < num_dqm_vars - 1 or not zero_constraint: sv = self.add_variable(len(slack_term) + 1, f'slack_{label}_{j}') else: sv = self.add_variable(len(slack_term) + 2, f'slack_{label}_{j}') for i, val in enumerate(slack_term): slack_terms.append((sv, i + 1, val)) if zero_constraint: slack_terms.append((sv, len(slack_term) + 1, ub_c)) elif slack_method == 'linear': slack_term = list(range(1, slack_upper_bound + 1)) if not zero_constraint: sv = self.add_variable(len(slack_term) + 1, f'slack_{label}') else: sv = self.add_variable(len(slack_term) + 2, f'slack_{label}') for i, val in enumerate(slack_term): slack_terms.append((sv, i + 1, val)) if zero_constraint: slack_terms.append((sv, len(slack_term) + 1, ub_c)) self.add_linear_equality_constraint(terms + slack_terms, lagrange_multiplier, -ub_c) return slack_terms def add_variable(self, num_cases, label=None): self.variables._append(label) variable_index = self._cydqm.add_variable(num_cases) assert variable_index + 1 == len(self.variables) return self.variables[-1] def copy(self): new = type(self)() new._cydqm = self._cydqm.copy() for v in self.variables: new.variables._append(v) return new def degree(self, v): return self._cydqm.degree(self.variables.index(v)) def energy(self, sample): energy, = self.energies(sample) return energy def energies(self, samples): samples, labels = as_samples(samples, dtype=self._cydqm.case_dtype) if len(labels) != self.num_variables(): raise ValueError( "Given sample(s) have incorrect number of variables") if self.variables != labels: label_to_idx = dict((v, i) for i, v in enumerate(labels)) try: order = [label_to_idx[v] for v in self.variables] except KeyError: raise ValueError("given samples-like does not match labels") samples = samples[:, order] return np.asarray(self._cydqm.energies(samples)) @classmethod def _from_file_numpy(cls, file_like): magic = file_like.read(len(DATA_MAGIC_PREFIX)) if magic != DATA_MAGIC_PREFIX: raise ValueError("unknown file type, expected magic string {} but " "got {}".format(DATA_MAGIC_PREFIX, magic)) length = np.frombuffer(file_like.read(4), '<u4')[0] start = file_like.tell() data = np.load(file_like) obj = cls.from_numpy_vectors(data['case_starts'], data['linear_biases'], (data['quadratic_row_indices'], data['quadratic_col_indices'], data['quadratic_biases'], ), offset=data.get('offset', 0), ) file_like.seek(start+length, io.SEEK_SET) return obj @classmethod 
def from_file(cls, file_like): if isinstance(file_like, (bytes, bytearray, memoryview)): file_like = _BytesIO(file_like) header_info = read_header(file_like, DQM_MAGIC_PREFIX) version = header_info.version header_data = header_info.data if version >= (2, 0): raise ValueError("cannot load a DQM serialized with version " f"{version!r}, try upgrading your dimod version") obj = cls._from_file_numpy(file_like) if header_data['variables']: obj.variables = Variables() for v in VariablesSection.load(file_like): obj.variables._append(v) if len(obj.variables) != obj.num_variables(): raise ValueError("mismatched labels to BQM in given file") return obj @classmethod def from_numpy_vectors(cls, case_starts, linear_biases, quadratic, labels=None, offset=0): obj = cls() obj._cydqm = cyDiscreteQuadraticModel.from_numpy_vectors( case_starts, linear_biases, quadratic, offset) if labels is not None: if len(labels) != obj._cydqm.num_variables(): raise ValueError( "labels does not match the length of the DQM" ) for v in labels: obj.variables._append(v) else: for v in range(obj._cydqm.num_variables()): obj.variables._append() return obj def get_cases(self, v): return range(self.num_cases(v)) def get_linear(self, v): return self._cydqm.get_linear(self.variables.index(v)) def get_linear_case(self, v, case): return self._cydqm.get_linear_case(self.variables.index(v), case) def get_quadratic(self, u, v, array=False): return self._cydqm.get_quadratic( self.variables.index(u), self.variables.index(v), array=array) def get_quadratic_case(self, u, u_case, v, v_case): return self._cydqm.get_quadratic_case( self.variables.index(u), u_case, self.variables.index(v), v_case) def num_cases(self, v=None): if v is None: return self._cydqm.num_cases() return self._cydqm.num_cases(self.variables.index(v)) def num_case_interactions(self): return self._cydqm.num_case_interactions() def num_variable_interactions(self): return self._cydqm.num_variable_interactions() def num_variables(self): return self._cydqm.num_variables() def relabel_variables(self, mapping, inplace=True): if not inplace: return self.copy().relabel_variables(mapping, inplace=True) self.variables._relabel(mapping) return self def relabel_variables_as_integers(self, inplace=True): if not inplace: return self.copy().relabel_variables_as_integers(inplace=True) return self, self.variables._relabel_as_integers()
Apache License 2.0
open-catalyst-project/ocp
ocpmodels/models/gemnet/layers/efficient.py
EfficientInteractionDownProjection.forward
python
def forward(self, rbf, sph, id_ca, id_ragged_idx):
    num_edges = rbf.shape[1]

    rbf_W1 = torch.matmul(rbf, self.weight)
    rbf_W1 = rbf_W1.permute(1, 2, 0)

    if sph.shape[0] == 0:
        Kmax = 0
    else:
        Kmax = torch.max(
            torch.max(id_ragged_idx + 1),
            torch.tensor(0).to(id_ragged_idx.device),
        )

    sph2 = sph.new_zeros(num_edges, Kmax, self.num_spherical)
    sph2[id_ca, id_ragged_idx] = sph
    sph2 = torch.transpose(sph2, 1, 2)

    return rbf_W1, sph2
Arguments
---------
rbf: torch.Tensor, shape=(1, nEdges, num_radial)
sph: torch.Tensor, shape=(nEdges, Kmax, num_spherical)
id_ca
id_ragged_idx

Returns
-------
rbf_W1: torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical)
sph: torch.Tensor, shape=(nEdges, Kmax, num_spherical)
    Kmax = maximum number of neighbors of the edges
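A self-contained sketch of the ragged-to-padded scatter applied to sph above, assuming PyTorch is available; the projection itself is just a matmul with the learned weight, and all index values below are toy data.

import torch

num_edges, num_spherical = 4, 2
# Each ragged row belongs to an edge (id_ca) and a slot within that edge (id_ragged_idx).
id_ca = torch.tensor([0, 0, 2, 2, 2])
id_ragged_idx = torch.tensor([0, 1, 0, 1, 2])
sph = torch.arange(5 * num_spherical, dtype=torch.float32).reshape(5, num_spherical)

# Pad to (num_edges, Kmax, num_spherical), then swap the last two dimensions.
Kmax = int((id_ragged_idx + 1).max()) if sph.shape[0] > 0 else 0
sph2 = sph.new_zeros(num_edges, Kmax, num_spherical)
sph2[id_ca, id_ragged_idx] = sph
sph2 = sph2.transpose(1, 2)
print(sph2.shape)  # torch.Size([4, 2, 3])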
https://github.com/open-catalyst-project/ocp/blob/1044e311182c1120c6e6d137ce6db3f445148973/ocpmodels/models/gemnet/layers/efficient.py#L48-L88
import torch from ..initializers import he_orthogonal_init class EfficientInteractionDownProjection(torch.nn.Module): def __init__( self, num_spherical: int, num_radial: int, emb_size_interm: int, ): super().__init__() self.num_spherical = num_spherical self.num_radial = num_radial self.emb_size_interm = emb_size_interm self.reset_parameters() def reset_parameters(self): self.weight = torch.nn.Parameter( torch.empty( (self.num_spherical, self.num_radial, self.emb_size_interm) ), requires_grad=True, ) he_orthogonal_init(self.weight)
MIT License
pmorenoz/continualgp
continualgp/het_likelihood.py
HetLikelihood.negative_log_predictive
python
def negative_log_predictive(self, Ytest, mu_F_star, v_F_star, Y_metadata, num_samples):
    t_ind = Y_metadata['task_index'].flatten()
    y_ind = Y_metadata['y_index'].flatten()
    f_ind = Y_metadata['function_index'].flatten()
    p_ind = Y_metadata['pred_index'].flatten()
    tasks = np.unique(t_ind)
    logpred = 0
    for t in tasks:
        logpred += self.likelihoods_list[t].log_predictive(Ytest[t], mu_F_star[t], v_F_star[t], num_samples)
    nlogpred = -logpred
    return nlogpred
Returns the negative log-predictive density (NLPD) of the model over test data Ytest.
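A minimal numpy illustration of the quantity being accumulated, for the simplest case of a Gaussian predictive per task; the heterogeneous likelihood list above generalises this to one term per output type, and the numbers below are invented.

import numpy as np

def gaussian_log_predictive(y, mu, var):
    # log N(y | mu, var), summed over the test points of one task.
    return np.sum(-0.5 * np.log(2 * np.pi * var) - 0.5 * (y - mu) ** 2 / var)

y_test = np.array([0.1, -0.3, 0.7])
mu_star = np.array([0.0, -0.2, 0.5])
v_star = np.array([0.05, 0.04, 0.10])

nlpd = -gaussian_log_predictive(y_test, mu_star, v_star)  # negate the summed log-density
print(nlpd)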
https://github.com/pmorenoz/continualgp/blob/65c5cc03765da7737d7af49c3f9bfc7350309ea3/continualgp/het_likelihood.py#L149-L163
import numpy as np from GPy.likelihoods import link_functions from GPy.likelihoods import Likelihood from itertools import compress class HetLikelihood(Likelihood): def __init__(self, likelihoods_list, gp_link=None ,name='heterogeneous_likelihood'): if gp_link is None: gp_link = link_functions.Identity() super(HetLikelihood, self).__init__(gp_link=gp_link, name=name) self.likelihoods_list = likelihoods_list def generate_metadata(self): t_index = np.arange(len(self.likelihoods_list)) y_index = np.empty((1,1)) f_index = np.empty((1,1)) d_index = np.empty((1,1)) p_index = np.empty((1,1)) for t, lik in enumerate(self.likelihoods_list): dim_y, dim_f, dim_pred = lik.get_metadata() y_index = np.hstack(( y_index, t*np.ones((1,dim_y)) )) f_index = np.hstack(( f_index, t*np.ones((1,dim_f)) )) d_index = np.hstack(( d_index, np.arange(0,dim_f)[None,:] )) p_index = np.hstack((p_index, t * np.ones((1, dim_pred)))) metadata = {'task_index': t_index, 'y_index': np.int_(y_index[0,1:]), 'function_index': np.int_(f_index[0,1:]), 'd_index': np.int_(d_index[0,1:]),'pred_index': np.int_(p_index[0,1:])} return metadata def pdf(self, f, Y, Y_metadata): t_ind = Y_metadata['task_index'].flatten() y_ind = Y_metadata['y_index'].flatten() f_ind = Y_metadata['function_index'].flatten() tasks = np.unique(t_ind) pdf = np.zeros((Y.shape[0], t_ind.shape[0])) for t in tasks: pdf[:, t_ind == t] = self.likelihoods_list[t].pdf(f[:, f_ind == t], Y[:, y_ind == t], Y_metadata=None) return pdf def logpdf(self, f, Y, Y_metadata): t_ind = Y_metadata['task_index'].flatten() y_ind = Y_metadata['y_index'].flatten() f_ind = Y_metadata['function_index'].flatten() tasks = np.unique(t_ind) logpdf = np.zeros((Y.shape[0], t_ind.shape[0])) for t in tasks: logpdf[:, t_ind == t] = self.likelihoods_list[t].logpdf(f[:, f_ind == t], Y[:, y_ind == t], Y_metadata=None) return logpdf def samples(self, F, Y_metadata): t_ind = Y_metadata['task_index'].flatten() y_ind = Y_metadata['y_index'].flatten() f_ind = Y_metadata['function_index'].flatten() tasks = np.unique(t_ind) samples = [] for t in tasks: samples.append(self.likelihoods_list[t].samples(F[t], num_samples=1, Y_metadata=None)) return samples def num_output_functions(self, Y_metadata): f_ind = Y_metadata['function_index'].flatten() return f_ind.shape[0] def num_latent_functions(self): pass def ismulti(self, task): return self.likelihoods_list[task].ismulti() def var_exp(self, Y, mu_F, v_F, Y_metadata): t_ind = Y_metadata['task_index'].flatten() y_ind = Y_metadata['y_index'].flatten() f_ind = Y_metadata['function_index'].flatten() d_ind = Y_metadata['d_index'].flatten() tasks = np.unique(t_ind) var_exp = [] for t in tasks: ve_task = self.likelihoods_list[t].var_exp(Y[t], mu_F[t], v_F[t], Y_metadata=None) var_exp.append(ve_task) return var_exp def var_exp_derivatives(self, Y, mu_F, v_F, Y_metadata): t_ind = Y_metadata['task_index'].flatten() y_ind = Y_metadata['y_index'].flatten() f_ind = Y_metadata['function_index'].flatten() tasks = np.unique(t_ind) var_exp_dm = [] var_exp_dv = [] for t in tasks: ve_task_dm, ve_task_dv = self.likelihoods_list[t].var_exp_derivatives(Y[t], mu_F[t], v_F[t], Y_metadata=None) var_exp_dm.append(ve_task_dm) var_exp_dv.append(ve_task_dv) return var_exp_dm, var_exp_dv def predictive(self, mu_F_pred, v_F_pred, Y_metadata): t_ind = Y_metadata['task_index'].flatten() y_ind = Y_metadata['y_index'].flatten() f_ind = Y_metadata['function_index'].flatten() p_ind = Y_metadata['pred_index'].flatten() tasks = np.unique(t_ind) m_pred = [] v_pred = [] for t in tasks: m_pred_task, 
v_pred_task = self.likelihoods_list[t].predictive(mu_F_pred[t], v_F_pred[t], Y_metadata=None) m_pred.append(m_pred_task) v_pred.append(v_pred_task) return m_pred, v_pred
MIT License
biolink/kgx
kgx/graph/nx_graph.py
NxGraph.has_node
python
def has_node(self, node: str) -> bool:
    return self.graph.has_node(node)
Check whether a given node exists in the graph.

Parameters
----------
node: str
    The node identifier

Returns
-------
bool
    Whether or not the given node exists
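A short usage sketch, assuming the kgx package shown above is installed; NxGraph delegates directly to the underlying networkx.MultiDiGraph.

from kgx.graph.nx_graph import NxGraph

g = NxGraph()
g.add_node("HGNC:11603", name="TBX4", category=["biolink:Gene"])
print(g.has_node("HGNC:11603"))  # True
print(g.has_node("HGNC:0001"))   # False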
https://github.com/biolink/kgx/blob/247d113d5b593f078afce1951c63eee2a8cc1248/kgx/graph/nx_graph.py#L368-L383
from typing import Dict, Any, Optional, List, Generator from kgx.graph.base_graph import BaseGraph from networkx import ( MultiDiGraph, set_node_attributes, relabel_nodes, set_edge_attributes, get_node_attributes, get_edge_attributes, ) from kgx.utils.kgx_utils import prepare_data_dict class NxGraph(BaseGraph): def __init__(self): super().__init__() self.graph = MultiDiGraph() self.name = None def add_node(self, node: str, **kwargs: Any) -> None: if "data" in kwargs: data = kwargs["data"] else: data = kwargs self.graph.add_node(node, **data) def add_edge( self, subject_node: str, object_node: str, edge_key: str = None, **kwargs: Any ) -> None: if "data" in kwargs: data = kwargs["data"] else: data = kwargs return self.graph.add_edge(subject_node, object_node, key=edge_key, **data) def add_node_attribute(self, node: str, attr_key: str, attr_value: Any) -> None: self.graph.add_node(node, **{attr_key: attr_value}) def add_edge_attribute( self, subject_node: str, object_node: str, edge_key: Optional[str], attr_key: str, attr_value: Any, ) -> None: self.graph.add_edge( subject_node, object_node, key=edge_key, **{attr_key: attr_value} ) def update_node_attribute( self, node: str, attr_key: str, attr_value: Any, preserve: bool = False ) -> Dict: node_data = self.graph.nodes[node] updated = prepare_data_dict( node_data, {attr_key: attr_value}, preserve=preserve ) self.graph.add_node(node, **updated) return updated def update_edge_attribute( self, subject_node: str, object_node: str, edge_key: Optional[str], attr_key: str, attr_value: Any, preserve: bool = False, ) -> Dict: e = self.graph.edges( (subject_node, object_node, edge_key), keys=True, data=True ) edge_data = list(e)[0][3] updated = prepare_data_dict(edge_data, {attr_key: attr_value}, preserve) self.graph.add_edge(subject_node, object_node, key=edge_key, **updated) return updated def get_node(self, node: str) -> Dict: n = {} if self.graph.has_node(node): n = self.graph.nodes[node] return n def get_edge( self, subject_node: str, object_node: str, edge_key: Optional[str] = None ) -> Dict: e = {} if self.graph.has_edge(subject_node, object_node, edge_key): e = self.graph.get_edge_data(subject_node, object_node, edge_key) return e def nodes(self, data: bool = True) -> Dict: return self.graph.nodes(data) def edges(self, keys: bool = False, data: bool = True) -> Dict: return self.graph.edges(keys=keys, data=data) def in_edges(self, node: str, keys: bool = False, data: bool = False) -> List: return self.graph.in_edges(node, keys=keys, data=data) def out_edges(self, node: str, keys: bool = False, data: bool = False) -> List: return self.graph.out_edges(node, keys=keys, data=data) def nodes_iter(self) -> Generator: for n in self.graph.nodes(data=True): yield n def edges_iter(self) -> Generator: for u, v, k, data in self.graph.edges(keys=True, data=True): yield u, v, k, data def remove_node(self, node: str) -> None: self.graph.remove_node(node) def remove_edge( self, subject_node: str, object_node: str, edge_key: Optional[str] = None ) -> None: self.graph.remove_edge(subject_node, object_node, edge_key)
BSD 3-Clause New or Revised License
idospringer/ergo
ergo_data_loader.py
negative_examples
python
def negative_examples(pairs, all_pairs, size, _protein=False):
    examples = []
    i = 0
    tcrs = [tcr for (tcr, pep_data) in pairs]
    peps = [pep_data for (tcr, pep_data) in pairs]
    while i < size:
        pep_data = random.choice(peps)
        for j in range(5):
            tcr = random.choice(tcrs)
            if _protein:
                tcr_pos_pairs = [pair for pair in all_pairs if pair[0] == tcr]
                tcr_proteins = [pep[1] for (tcr, pep) in tcr_pos_pairs]
                protein = pep_data[1]
                attach = protein in tcr_proteins
            else:
                attach = (tcr, pep_data) in all_pairs
            if attach is False:
                if (tcr, pep_data, 'n') not in examples:
                    examples.append((tcr, pep_data, 'n'))
                    i += 1
    return examples
Randomly create intentional negative examples from the same pairs dataset.
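A simplified, self-contained sketch of the same negative-sampling idea on toy sequence pairs (no protein handling): keep only shuffled TCR-peptide combinations that never occur as positives.

import random

def sample_negatives(pairs, size, max_tries=1000):
    positives = set(pairs)
    tcrs = [tcr for tcr, _ in pairs]
    peps = [pep for _, pep in pairs]
    negatives = set()
    tries = 0
    while len(negatives) < size and tries < max_tries:
        tries += 1
        candidate = (random.choice(tcrs), random.choice(peps))
        if candidate not in positives:
            negatives.add(candidate + ('n',))
    return list(negatives)

pairs = [("CASSLGQAYEQYF", "GILGFVFTL"),
         ("CASSIRSSYEQYF", "NLVPMVATV"),
         ("CASSPGTGGNEQFF", "GLCTLVAML")]
print(sample_negatives(pairs, size=3))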
https://github.com/idospringer/ergo/blob/8edf1f2f48b0f6447dc7c1696676b95c28f2a2e0/ergo_data_loader.py#L99-L123
import random import numpy as np import csv import os import sklearn.model_selection as skl def read_data(csv_file, file_key, _protein=False, _hla=False): with open(csv_file, 'r', encoding='unicode_escape') as file: file.readline() if file_key == 'mcpas': reader = csv.reader(file) elif file_key == 'vdjdb': reader = csv.reader(file, delimiter='\t') elif file_key == 'tumor': reader = csv.reader(file, delimiter='\t') elif file_key == 'nettcr': reader = csv.reader(file, delimiter='\t') tcrs = set() peps = set() all_pairs = [] for line in reader: if file_key == 'mcpas': if _protein: protein = line[9] if protein == 'NA': continue if _hla: hla = line[13] if hla == 'NA': continue if line[2] != 'Human': continue tcr, pep = line[1], line[11] elif file_key == 'vdjdb': if _protein: protein = line[10] if protein == 'NA': continue if _hla: hla = line[6] if hla == 'NA': continue if line[5] != 'HomoSapiens': continue tcr, pep = line[2], line[9] if line[1] != 'TRB': continue elif file_key == 'tumor': tcr, pep = line elif file_key == 'nettcr': tcr, pep = line[1], line[0] tcr = 'C' + tcr + 'F' if any(att == 'NA' or att == "" for att in [tcr, pep]): continue if any(key in tcr + pep for key in ['#', '*', 'b', 'f', 'y', '~', 'O', '/', '1', 'X', '_', 'B', '7']): continue tcrs.add(tcr) pep_data = [pep] if _protein: pep_data.append(protein) if _hla: pep_data.append(hla) peps.add(tuple(pep_data)) all_pairs.append((tcr, tuple(pep_data))) train_pairs, test_pairs = train_test_split(all_pairs) return all_pairs, train_pairs, test_pairs def train_test_split(all_pairs): train_pairs = [] test_pairs = [] for pair in all_pairs: p = np.random.binomial(1, 0.8) if p == 1: train_pairs.append(pair) else: test_pairs.append(pair) return train_pairs, test_pairs def positive_examples(pairs): examples = [] for pair in pairs: tcr, pep_data = pair examples.append((tcr, pep_data, 'p')) return examples
MIT License
docusign/docusign-python-client
docusign_esign/models/comment.py
Comment.text
python
def text(self):
    return self._text
Gets the text of this Comment.  # noqa: E501

# noqa: E501

:return: The text of this Comment.  # noqa: E501
:rtype: str
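A tiny usage sketch, assuming the docusign_esign package shown above is installed; the generated constructor stores keyword arguments on the underscored attributes that this property reads back.

from docusign_esign.models.comment import Comment

comment = Comment(text="Please initial page 3.", subject="Contract question")
print(comment.text)     # Please initial page 3.
print(comment.subject)  # Contract question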
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/comment.py#L480-L488
import pprint
import re

import six

from docusign_esign.client.configuration import Configuration


class Comment(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'envelope_id': 'str',
        'hmac': 'str',
        'id': 'str',
        'mentions': 'list[str]',
        'read': 'bool',
        'sent_by_email': 'str',
        'sent_by_full_name': 'str',
        'sent_by_image_id': 'str',
        'sent_by_initials': 'str',
        'sent_by_recipient_id': 'str',
        'sent_by_user_id': 'str',
        'signing_group_id': 'str',
        'signing_group_name': 'str',
        'subject': 'str',
        'tab_id': 'str',
        'text': 'str',
        'thread_id': 'str',
        'thread_originator_id': 'str',
        'timestamp': 'str',
        'time_stamp_formatted': 'str',
        'visible_to': 'list[str]'
    }

    attribute_map = {
        'envelope_id': 'envelopeId',
        'hmac': 'hmac',
        'id': 'id',
        'mentions': 'mentions',
        'read': 'read',
        'sent_by_email': 'sentByEmail',
        'sent_by_full_name': 'sentByFullName',
        'sent_by_image_id': 'sentByImageId',
        'sent_by_initials': 'sentByInitials',
        'sent_by_recipient_id': 'sentByRecipientId',
        'sent_by_user_id': 'sentByUserId',
        'signing_group_id': 'signingGroupId',
        'signing_group_name': 'signingGroupName',
        'subject': 'subject',
        'tab_id': 'tabId',
        'text': 'text',
        'thread_id': 'threadId',
        'thread_originator_id': 'threadOriginatorId',
        'timestamp': 'timestamp',
        'time_stamp_formatted': 'timeStampFormatted',
        'visible_to': 'visibleTo'
    }

    def __init__(self, _configuration=None, **kwargs):
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._envelope_id = None
        self._hmac = None
        self._id = None
        self._mentions = None
        self._read = None
        self._sent_by_email = None
        self._sent_by_full_name = None
        self._sent_by_image_id = None
        self._sent_by_initials = None
        self._sent_by_recipient_id = None
        self._sent_by_user_id = None
        self._signing_group_id = None
        self._signing_group_name = None
        self._subject = None
        self._tab_id = None
        self._text = None
        self._thread_id = None
        self._thread_originator_id = None
        self._timestamp = None
        self._time_stamp_formatted = None
        self._visible_to = None
        self.discriminator = None

        setattr(self, "_{}".format('envelope_id'), kwargs.get('envelope_id', None))
        setattr(self, "_{}".format('hmac'), kwargs.get('hmac', None))
        setattr(self, "_{}".format('id'), kwargs.get('id', None))
        setattr(self, "_{}".format('mentions'), kwargs.get('mentions', None))
        setattr(self, "_{}".format('read'), kwargs.get('read', None))
        setattr(self, "_{}".format('sent_by_email'), kwargs.get('sent_by_email', None))
        setattr(self, "_{}".format('sent_by_full_name'), kwargs.get('sent_by_full_name', None))
        setattr(self, "_{}".format('sent_by_image_id'), kwargs.get('sent_by_image_id', None))
        setattr(self, "_{}".format('sent_by_initials'), kwargs.get('sent_by_initials', None))
        setattr(self, "_{}".format('sent_by_recipient_id'), kwargs.get('sent_by_recipient_id', None))
        setattr(self, "_{}".format('sent_by_user_id'), kwargs.get('sent_by_user_id', None))
        setattr(self, "_{}".format('signing_group_id'), kwargs.get('signing_group_id', None))
        setattr(self, "_{}".format('signing_group_name'), kwargs.get('signing_group_name', None))
        setattr(self, "_{}".format('subject'), kwargs.get('subject', None))
        setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
        setattr(self, "_{}".format('text'), kwargs.get('text', None))
        setattr(self, "_{}".format('thread_id'), kwargs.get('thread_id', None))
        setattr(self, "_{}".format('thread_originator_id'), kwargs.get('thread_originator_id', None))
        setattr(self, "_{}".format('timestamp'), kwargs.get('timestamp', None))
        setattr(self, "_{}".format('time_stamp_formatted'), kwargs.get('time_stamp_formatted', None))
        setattr(self, "_{}".format('visible_to'), kwargs.get('visible_to', None))

    @property
    def envelope_id(self):
        return self._envelope_id

    @envelope_id.setter
    def envelope_id(self, envelope_id):
        self._envelope_id = envelope_id

    @property
    def hmac(self):
        return self._hmac

    @hmac.setter
    def hmac(self, hmac):
        self._hmac = hmac

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def mentions(self):
        return self._mentions

    @mentions.setter
    def mentions(self, mentions):
        self._mentions = mentions

    @property
    def read(self):
        return self._read

    @read.setter
    def read(self, read):
        self._read = read

    @property
    def sent_by_email(self):
        return self._sent_by_email

    @sent_by_email.setter
    def sent_by_email(self, sent_by_email):
        self._sent_by_email = sent_by_email

    @property
    def sent_by_full_name(self):
        return self._sent_by_full_name

    @sent_by_full_name.setter
    def sent_by_full_name(self, sent_by_full_name):
        self._sent_by_full_name = sent_by_full_name

    @property
    def sent_by_image_id(self):
        return self._sent_by_image_id

    @sent_by_image_id.setter
    def sent_by_image_id(self, sent_by_image_id):
        self._sent_by_image_id = sent_by_image_id

    @property
    def sent_by_initials(self):
        return self._sent_by_initials

    @sent_by_initials.setter
    def sent_by_initials(self, sent_by_initials):
        self._sent_by_initials = sent_by_initials

    @property
    def sent_by_recipient_id(self):
        return self._sent_by_recipient_id

    @sent_by_recipient_id.setter
    def sent_by_recipient_id(self, sent_by_recipient_id):
        self._sent_by_recipient_id = sent_by_recipient_id

    @property
    def sent_by_user_id(self):
        return self._sent_by_user_id

    @sent_by_user_id.setter
    def sent_by_user_id(self, sent_by_user_id):
        self._sent_by_user_id = sent_by_user_id

    @property
    def signing_group_id(self):
        return self._signing_group_id

    @signing_group_id.setter
    def signing_group_id(self, signing_group_id):
        self._signing_group_id = signing_group_id

    @property
    def signing_group_name(self):
        return self._signing_group_name

    @signing_group_name.setter
    def signing_group_name(self, signing_group_name):
        self._signing_group_name = signing_group_name

    @property
    def subject(self):
        return self._subject

    @subject.setter
    def subject(self, subject):
        self._subject = subject

    @property
    def tab_id(self):
        return self._tab_id

    @tab_id.setter
    def tab_id(self, tab_id):
        self._tab_id = tab_id

    @property
MIT License
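The attribute_map in the Comment excerpt above is what lets the generated model translate between snake_case attribute names and the camelCase keys used in DocuSign's JSON payloads. The standalone sketch below illustrates that translation; ATTRIBUTE_MAP (a subset of the real map), to_json_keys, and the sample values are hypothetical illustrations, not part of the docusign_esign SDK's own serializer.

# Standalone sketch of the attribute_map pattern used by generated models:
# translate snake_case attribute names to camelCase JSON keys, skipping
# attributes that are unset. Illustrative only -- not the SDK serializer.

ATTRIBUTE_MAP = {
    'envelope_id': 'envelopeId',
    'sent_by_email': 'sentByEmail',
    'thread_id': 'threadId',
    'text': 'text',
}


def to_json_keys(attrs, attribute_map):
    """Return a dict keyed by JSON names, dropping attributes that are None."""
    return {
        attribute_map[name]: value
        for name, value in attrs.items()
        if value is not None and name in attribute_map
    }


comment_attrs = {'envelope_id': 'abc-123', 'text': 'Please review', 'thread_id': None}
print(to_json_keys(comment_attrs, ATTRIBUTE_MAP))
# {'envelopeId': 'abc-123', 'text': 'Please review'}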
codeforamerica/pittsburgh-purchasing-suite
purchasing/opportunities/front/views.py
manage
python
def manage():
    form = init_form(UnsubscribeForm)
    form_categories = []
    form_opportunities = []
    vendor = None

    if form.validate_on_submit():
        email = form.data.get('email')
        vendor = Vendor.query.filter(Vendor.email == email).first()

        if vendor is None:
            current_app.logger.info(
                'OPPMANAGEVIEW - Unsuccessful search for email {}'.format(email)
            )
            form.email.errors = ['We could not find the email {}'.format(email)]

        if request.form.get('button', '').lower() == 'update email preferences':
            remove_categories = set([Category.query.get(i) for i in form.categories.data])
            remove_opportunities = set([Opportunity.query.get(i) for i in form.opportunities.data])
            remove_categories.discard(None)
            remove_opportunities.discard(None)

            vendor.categories = vendor.categories.difference(remove_categories)
            vendor.opportunities = vendor.opportunities.difference(remove_opportunities)

            if form.data.get('subscribed_to_newsletter'):
                vendor.subscribed_to_newsletter = False

            current_app.logger.info(
                '''OPPMANAGEVIEW - Vendor {} unsubscribed from:
                Categories: {}
                Opportunities: {}
                Subscribed from newsletter: {}
                '''.format(
                    email,
                    ', '.join([i.category_friendly_name for i in remove_categories if remove_categories and len(remove_categories) > 0]),
                    ', '.join([i.description for i in remove_opportunities if remove_opportunities and len(remove_opportunities) > 0]),
                    vendor.subscribed_to_newsletter
                )
            )

            db.session.commit()
            flash('Preferences updated!', 'alert-success')

    if vendor:
        for subscription in vendor.categories:
            form_categories.append((subscription.id, subscription.category_friendly_name))
        for subscription in vendor.opportunities:
            form_opportunities.append((subscription.id, subscription.title))

    form = init_form(UnsubscribeForm)
    form.opportunities.choices = form_opportunities
    form.categories.choices = form_categories

    return render_template(
        'opportunities/front/manage.html',
        form=form,
        vendor=vendor if vendor else Vendor()
    )
Manage a vendor's signups

:status 200: render the :py:class:`~purchasing.opportunities.forms.UnsubscribeForm`
:status 302: post the :py:class:`~purchasing.opportunities.forms.UnsubscribeForm` and
    change the user's email subscriptions and redirect them back to the management page.
https://github.com/codeforamerica/pittsburgh-purchasing-suite/blob/9552eda6df396746feedc9ce45f35842a716de6a/purchasing/opportunities/front/views.py#L134-L200
import datetime

from flask import (
    render_template, request, current_app, flash,
    redirect, url_for, session, abort
)
from flask_login import current_user

from purchasing.database import db
from purchasing.notifications import Notification
from purchasing.opportunities.forms import UnsubscribeForm, VendorSignupForm, OpportunitySignupForm
from purchasing.opportunities.models import Category, Opportunity, Vendor
from purchasing.opportunities.front import blueprint
from purchasing.opportunities.util import init_form, signup_for_opp
from purchasing.users.models import User, Role


@blueprint.route('/')
def splash():
    current_app.logger.info('BEACON FRONT SPLASH VIEW')
    return render_template(
        'opportunities/front/splash.html'
    )


@blueprint.route('/signup', methods=['GET', 'POST'])
def signup():
    session_vendor = Vendor.query.filter(
        Vendor.email == session.get('email'),
        Vendor.business_name == session.get('business_name')
    ).first()
    form = init_form(VendorSignupForm, model=session_vendor)

    if form.validate_on_submit():
        vendor = Vendor.query.filter(Vendor.email == form.data.get('email')).first()

        if vendor:
            current_app.logger.info('''
                OPPUPDATEVENDOR - Vendor updated:
                EMAIL: {old_email} -> {email} at
                BUSINESS: {old_bis} -> {bis_name} signed up for:
                CATEGORIES: {old_cats} -> {categories}'''.format(
                old_email=vendor.email, email=form.data['email'],
                old_bis=vendor.business_name, bis_name=form.data['business_name'],
                old_cats=[i.__unicode__() for i in vendor.categories],
                categories=[i.__unicode__() for i in form.data['categories']]
            ))

            vendor.update(
                **form.pop_categories(categories=False)
            )

            flash("You are already signed up! Your profile was updated with this new information", 'alert-info')
        else:
            current_app.logger.info(
                'OPPNEWVENDOR - New vendor signed up: EMAIL: {email} at BUSINESS: {bis_name} signed up for:\n' +
                'CATEGORIES: {categories}'.format(
                    email=form.data['email'],
                    bis_name=form.data['business_name'],
                    categories=[i.__unicode__() for i in form.data['categories']]
                )
            )

            vendor = Vendor.create(
                **form.pop_categories(categories=False)
            )

            confirmation_sent = Notification(
                to_email=vendor.email, subject='Thank you for signing up!',
                html_template='opportunities/emails/signup.html',
                txt_template='opportunities/emails/signup.txt',
                categories=form.data['categories']
            ).send()

            if confirmation_sent:
                admins = db.session.query(User.email).join(Role, User.role_id == Role.id).filter(
                    Role.name.in_(['admin', 'superadmin'])
                ).all()

                Notification(
                    to_email=admins, subject='A new vendor has signed up on beacon',
                    categories=form.data['categories'],
                    vendor=form.data['email'], convert_args=True,
                    business_name=form.data['business_name']
                ).send()

                flash('Thank you for signing up! Check your email for more information', 'alert-success')
            else:
                flash('Uh oh, something went wrong. We are investigating.', 'alert-danger')

        session['email'] = form.data.get('email')
        session['business_name'] = form.data.get('business_name')
        return redirect(url_for('opportunities.splash'))

    page_email = request.args.get('email', None)

    if page_email:
        current_app.logger.info(
            'OPPSIGNUPVIEW - User clicked through to signup with email {}'.format(page_email)
        )
        session['email'] = page_email
        return redirect(url_for('opportunities.signup'))

    if 'email' in session:
        if not form.email.validate(form):
            session.pop('email', None)

    form.display_cleanup()

    return render_template(
        'opportunities/front/signup.html', form=form,
        categories=form.get_categories(),
        subcategories=form.get_subcategories()
    )


@blueprint.route('/manage', methods=['GET', 'POST'])
BSD 3-Clause New or Revised License
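The manage view above boils down to a set-difference update: resolve the submitted ids, discard lookups that missed, then subtract the result from the vendor's current subscriptions. The sketch below mirrors that step in plain Python; SimpleVendor, remove_subscriptions, and the sample category names are hypothetical stand-ins, not the project's SQLAlchemy models.

# Illustrative only: plain-Python stand-in for the set-difference update in manage().
# SimpleVendor and the sample category names are hypothetical, not project models.

class SimpleVendor:
    def __init__(self, categories):
        self.categories = set(categories)


def remove_subscriptions(vendor, selected_ids, lookup):
    # Mirror the view: resolve submitted ids, drop misses, then set-difference.
    remove = {lookup.get(i) for i in selected_ids}
    remove.discard(None)
    vendor.categories = vendor.categories.difference(remove)
    return vendor


if __name__ == '__main__':
    lookup = {1: 'Construction', 2: 'IT Services', 3: 'Printing'}
    vendor = SimpleVendor(categories=lookup.values())
    remove_subscriptions(vendor, selected_ids=[2, 99], lookup=lookup)
    print(vendor.categories)  # {'Construction', 'Printing'} (order may vary); id 99 is ignored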
ibm/differential-privacy-library
diffprivlib/mechanisms/base.py
TruncationAndFoldingMixin._check_all
python
def _check_all(self, value):
    del value
    self._check_bounds(self.lower, self.upper)

    return True
Checks that all parameters of the mechanism have been initialised correctly
https://github.com/ibm/differential-privacy-library/blob/90b319a90414ebf12062887c07e1609f888e1a34/diffprivlib/mechanisms/base.py#L204-L209
import abc
from copy import copy
import inspect
from numbers import Real
import secrets


class DPMachine(abc.ABC):
    @abc.abstractmethod
    def randomise(self, value):
        ...  # abstract; body (docstring) elided in this excerpt

    def copy(self):
        return copy(self)


class DPMechanism(DPMachine, abc.ABC):
    def __init__(self, *, epsilon, delta):
        self.epsilon, self.delta = self._check_epsilon_delta(epsilon, delta)
        self._rng = secrets.SystemRandom()

    def __repr__(self):
        attrs = inspect.getfullargspec(self.__class__).kwonlyargs
        attr_output = []

        for attr in attrs:
            attr_output.append(attr + "=" + repr(self.__getattribute__(attr)))

        return str(self.__module__) + "." + str(self.__class__.__name__) + "(" + ", ".join(attr_output) + ")"

    @abc.abstractmethod
    def randomise(self, value):
        ...  # abstract; body (docstring) elided in this excerpt

    def bias(self, value):
        raise NotImplementedError

    def variance(self, value):
        raise NotImplementedError

    def mse(self, value):
        return self.variance(value) + (self.bias(value)) ** 2

    @classmethod
    def _check_epsilon_delta(cls, epsilon, delta):
        if not isinstance(epsilon, Real) or not isinstance(delta, Real):
            raise TypeError("Epsilon and delta must be numeric")

        if epsilon < 0:
            raise ValueError("Epsilon must be non-negative")

        if not 0 <= delta <= 1:
            raise ValueError("Delta must be in [0, 1]")

        if epsilon + delta == 0:
            raise ValueError("Epsilon and Delta cannot both be zero")

        return float(epsilon), float(delta)

    def _check_all(self, value):
        del value
        self._check_epsilon_delta(self.epsilon, self.delta)

        return True


class TruncationAndFoldingMixin:
    def __init__(self, *, lower, upper):
        if not isinstance(self, DPMechanism):
            raise TypeError("TruncationAndFoldingMachine must be implemented alongside a :class:`.DPMechanism`")

        self.lower, self.upper = self._check_bounds(lower, upper)

    @classmethod
    def _check_bounds(cls, lower, upper):
        if not isinstance(lower, Real) or not isinstance(upper, Real):
            raise TypeError("Bounds must be numeric")

        if lower > upper:
            raise ValueError("Lower bound must not be greater than upper bound")

        return lower, upper
MIT License
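A concrete diffprivlib mechanism typically chains DPMechanism's epsilon/delta validation with TruncationAndFoldingMixin's bounds validation before randomising. The sketch below shows that wiring, assuming an installed diffprivlib whose base classes match the excerpt above; ToyTruncatedMechanism is a hypothetical illustration, not a library class, and its clamp-only randomise adds no privacy guarantee on its own.

# Illustrative sketch: a toy mechanism that runs both _check_all validations
# before randomising. Assumes diffprivlib's base module matches the excerpt
# above. ToyTruncatedMechanism is hypothetical and provides no real privacy.
from diffprivlib.mechanisms.base import DPMechanism, TruncationAndFoldingMixin


class ToyTruncatedMechanism(DPMechanism, TruncationAndFoldingMixin):
    def __init__(self, *, epsilon, delta=0.0, lower, upper):
        super().__init__(epsilon=epsilon, delta=delta)
        TruncationAndFoldingMixin.__init__(self, lower=lower, upper=upper)

    def _check_all(self, value):
        # Epsilon/delta check from DPMechanism, then the bounds check from the mixin.
        DPMechanism._check_all(self, value)
        TruncationAndFoldingMixin._check_all(self, value)
        return True

    def randomise(self, value):
        self._check_all(value)
        # Clamp only -- a real mechanism would also add calibrated noise here.
        return min(max(value, self.lower), self.upper)


mech = ToyTruncatedMechanism(epsilon=1.0, lower=0.0, upper=10.0)
print(mech.randomise(12.3))  # 10.0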