Dataset schema:
repository_name: string (7 to 107 characters)
function_path: string (4 to 190 characters)
function_identifier: string (1 to 236 characters)
language: string (1 class)
function: string (9 to 647k characters)
docstring: string (5 to 488k characters)
function_url: string (71 to 285 characters)
context: string (0 to 2.51M characters)
license: string (5 classes)
wdm0006/dummyrdd
dummy_spark/rdd.py
RDD.countByKey
python
def countByKey(self):
    raise NotImplementedError
NotImplemented :return:
https://github.com/wdm0006/dummyrdd/blob/d66c30495cbaa001a744128c89d41fb55741fba5/dummy_spark/rdd.py#L845-L851
import random import uuid from collections import OrderedDict from functools import reduce from dummy_spark.resultsiterable import ResultIterable __author__ = 'willmcginnis' class RDD(object): def __init__(self, jrdd, ctx, jrdd_deserializer=None): self._id = str(uuid.uuid4()) if jrdd is None: self._jrdd = [] else: if isinstance(jrdd, list): self._jrdd = jrdd elif isinstance(jrdd, set): self._jrdd = list(jrdd) else: raise AttributeError('Type %s for jrdd not supported' % (type(jrdd), )) self.ctx = ctx self.is_cached = True self._name = 'dummpy-rdd' self.is_checkpointed = False self._jrdd_deserializer = jrdd_deserializer self.partitioner = None def id(self): return self._id @property def context(self): return self.ctx def name(self): return self._name def setName(self, name): self._name = name return self def __repr__(self): return str(self._jrdd) def cache(self): return self def persist(self, storageLevel=None): return self def unpersist(self): return self def _reserialize(self, serializer=None): return self def checkpoint(self): pass def isCheckpointed(self): return True def getCheckpointFile(self): return None def map(self, f, preservesPartitioning=False): data = list(map(f, self._jrdd)) return RDD(data, self.ctx) def flatMap(self, f, preservesPartitioning=False): data = [item for sl in map(f, self._jrdd) for item in sl] return RDD(data, self.ctx) def mapPartitions(self, f, preservesPartitioning=False): return self.map(f, preservesPartitioning=preservesPartitioning) def getNumPartitions(self): return 1 def filter(self, f): data = list(filter(f, self._jrdd)) return RDD(data, self.ctx) def distinct(self, numPartitions=None): data = set(self._jrdd) return RDD(data, self.ctx) def sample(self, withReplacement, fraction, seed=None): assert fraction >= 0.0, "Negative fraction value: %s" % fraction if seed is not None: random.seed(seed) idx_list = list(range(len(self._jrdd))) if withReplacement: data = [self._jrdd[random.choice(idx_list)] for _ in list(range(int(fraction * len(self._jrdd))))] else: random.shuffle(idx_list) data = [self._jrdd[idx] for idx in idx_list[:int(fraction * len(self._jrdd))]] return RDD(data, self.ctx) def randomSplit(self, weights, seed=None): pass def takeSample(self, withReplacement, num, seed=None): assert num >= 0.0, "Negative sample num: %s" % num if seed is not None: random.seed(seed) if withReplacement: out = [self._jrdd[random.choice(list(range(len(self._jrdd))))] for _ in num] else: idx_list = list(range(len(self._jrdd))) random.shuffle(idx_list) out = [self._jrdd[idx] for idx in idx_list[:num]] return out def union(self, other): return RDD(self._jrdd + other._jrdd, self.ctx) def intersection(self, other): data = [item for item in self._jrdd if item in other._jrdd] return RDD(data, self.ctx) def __add__(self, other): if not isinstance(other, RDD): raise TypeError return self.union(other) def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=None, ascending=True, keyfunc=lambda x: x): data = sorted(self._jrdd, key=keyfunc, reverse=ascending) return RDD(data, self.ctx) def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x): data = sorted(self._jrdd, key=keyfunc, reverse=not ascending) return RDD(data, self.ctx) def sortBy(self, keyfunc, ascending=True, numPartitions=None): data = sorted(self._jrdd, key=keyfunc, reverse=not ascending) return RDD(data, self.ctx) def glom(self): return self._jrdd def cartesian(self, other): data = [(t, u) for t in self._jrdd for u in other._jrdd] return RDD(data, self.ctx) def 
groupBy(self, f, numPartitions=None): return self.map(lambda x: (f(x), x)).groupByKey(numPartitions) def foreach(self, f): return self.map(f) def foreachPartition(self, f): return f(self._jrdd) def collect(self): return self._jrdd def sum(self): return sum(self._jrdd) def count(self): return len(self._jrdd) def mean(self): return float(sum(self._jrdd)) / len(self._jrdd) def take(self, num): return self._jrdd[:num] def first(self): return self._jrdd[0] def isEmpty(self): return len(self._jrdd) == 0 def reduceByKey(self, func, numPartitions=None): keys = {kv[0] for kv in self._jrdd} data = [(key, reduce(func, [kv[1] for kv in self._jrdd if kv[0] == key])) for key in keys] return RDD(data, self.ctx) def groupByKey(self, numPartitions=None): keys = {x[0] for x in self._jrdd} out = {k: ResultIterable([x[1] for x in self._jrdd if x[0] == k]) for k in keys} data = list(out.items()) return RDD(data, self.ctx) def flatMapValues(self, f): flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1])) return self.flatMap(flat_map_fn, preservesPartitioning=True) def mapValues(self, f): map_values_fn = lambda kv: (kv[0], f(kv[1])) return self.map(map_values_fn, preservesPartitioning=True) def cogroup(self, other, numPartitions=None): vs = {x[0] for x in self._jrdd} us = {x[0] for x in other._jrdd} keys = vs.union(us) data = [ ( k, ([v[1] for v in self._jrdd if v[0] == k]), ([u[1] for u in other._jrdd if u[0] == k]) ) for k in keys ] return RDD(data, self.ctx) def zip(self, other): data = list(zip(other, self._jrdd)) return RDD(data, self.ctx) def zipWithIndex(self): data = [(b, a) for a, b in list(enumerate(self._jrdd))] return RDD(data, self.ctx) def _defaultReducePartitions(self): return 1 def lookup(self, key): return [x for x in self._jrdd if x[0] == key] def countApprox(self, timeout, confidence=0.95): return len(self._jrdd) def sumApprox(self, timeout, confidence=0.95): return sum(self._jrdd) def meanApprox(self, timeout, confidence=0.95): return float(sum(self._jrdd)) / len(self._jrdd) def countApproxDistinct(self, relativeSD=0.05): return len(set(self._jrdd)) def toLocalIterator(self): for row in self._jrdd: yield row def max(self, key=None): if key is None: return max(self._jrdd) else: raise NotImplementedError def min(self, key=None): if key is None: return min(self._jrdd) else: raise NotImplementedError def _pickled(self): raise NotImplementedError def mapPartitionsWithIndex(self, f, preservesPartitioning=False): raise NotImplementedError @staticmethod def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement): raise NotImplementedError def pipe(self, command, env=None): raise NotImplementedError def reduce(self, f): raise NotImplementedError def treeReduce(self, f, depth=2): raise NotImplementedError def fold(self, zeroValue, op): raise NotImplementedError def aggregate(self, zeroValue, seqOp, combOp): raise NotImplementedError def treeAggregate(self, zeroValue, seqOp, combOp, depth=2): raise NotImplementedError def stats(self): raise NotImplementedError def histogram(self, buckets): raise NotImplementedError def variance(self): raise NotImplementedError def stdev(self): raise NotImplementedError def sampleStdev(self): raise NotImplementedError def sampleVariance(self): raise NotImplementedError def countByValue(self): raise NotImplementedError def top(self, num, key=None): raise NotImplementedError def takeOrdered(self, num, key=None): raise NotImplementedError def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None): raise NotImplementedError def 
saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None, keyConverter=None, valueConverter=None, conf=None): raise NotImplementedError def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None): raise NotImplementedError def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None, keyConverter=None, valueConverter=None, conf=None, compressionCodecClass=None): raise NotImplementedError def saveAsSequenceFile(self, path, compressionCodecClass=None): raise NotImplementedError def saveAsPickleFile(self, path, batchSize=10): raise NotImplementedError def saveAsTextFile(self, path, compressionCodecClass=None): raise NotImplementedError def collectAsMap(self): raise NotImplementedError def keys(self): return RDD(list(OrderedDict(self._jrdd).keys()), self.ctx) def values(self): return RDD(list(OrderedDict(self._jrdd).values()), self.ctx) def reduceByKeyLocally(self, func): raise NotImplementedError
BSD 3-Clause New or Revised License
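For reference, countByKey is left unimplemented in dummyrdd. A minimal standalone sketch of what it could look like for this list-backed RDD, assuming the usual Spark semantics (count the occurrences of each key and return a plain dict); the helper below is illustrative only and not part of the library:

    from collections import Counter

    def count_by_key(pairs):
        # Count occurrences of each key in an iterable of (key, value) pairs,
        # mirroring what RDD.countByKey would return as a plain dict.
        return dict(Counter(kv[0] for kv in pairs))

    # Example: count_by_key([('a', 1), ('b', 2), ('a', 3)]) -> {'a': 2, 'b': 1}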
lmjohns3/kohonen
kohonen/kohonen.py
weighted_euclidean_metric
python
def weighted_euclidean_metric(weights):
    def calculate(x, y):
        d = x - y
        return np.sqrt(np.sum(d * d * weights, axis=-1))
    return calculate
Implements a standard euclidean distance with weighted dimensions.
https://github.com/lmjohns3/kohonen/blob/2155cf9e99dbf5e7082c4294d561ffd70e96341c/kohonen/kohonen.py#L70-L75
import numpy as np def cosine_metric(x, y): nx = np.sqrt(np.sum(x * x, axis=-1)) ny = np.sqrt(np.sum(y * y, axis=-1)) return 1 - np.sum(x * y, axis=-1) / nx / ny def euclidean_metric(x, y): d = x - y return np.sqrt(np.sum(d * d, axis=-1)) def manhattan_metric(x, y): d = x - y return np.sum(np.abs(d), axis=-1)
MIT License
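A small usage sketch for weighted_euclidean_metric, assuming NumPy is available; the sample vectors and weights below are illustrative values, not taken from the library:

    import numpy as np

    def weighted_euclidean_metric(weights):
        # Same factory as in kohonen.py: returns a distance function that scales
        # each squared-difference dimension by the given weights.
        def calculate(x, y):
            d = x - y
            return np.sqrt(np.sum(d * d * weights, axis=-1))
        return calculate

    metric = weighted_euclidean_metric(np.array([1.0, 0.5, 0.0]))
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([4.0, 0.0, 9.0])
    print(metric(x, y))  # sqrt(1*9 + 0.5*4 + 0*36) = sqrt(11.0)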
jborean93/ntlm-auth
ntlm_auth/session_security.py
SessionSecurity.get_signature
python
def get_signature(self, message):
    signature = calc_signature(message, self.negotiate_flags,
                               self.outgoing_signing_key,
                               self.outgoing_seq_num,
                               self.outgoing_handle)
    self.outgoing_seq_num += 1
    return signature.get_data()
[MS-NLMP] v28.0 2016-07-14

3.4.4 Message Signature Functions

Will create the signature based on the message to send to the server. Depending on
the negotiate_flags set this could either be an NTLMv1 signature or NTLMv2 with
Extended Session Security signature.

:param message: The message data that will be signed
:return signature: Either _NtlmMessageSignature1 or _NtlmMessageSignature2
    depending on the flags set
https://github.com/jborean93/ntlm-auth/blob/417db8b6fb0f9b5f86f314e257b8a4893f74c057/ntlm_auth/session_security.py#L209-L227
import binascii import hashlib import hmac import struct import ntlm_auth.compute_keys as compkeys from ntlm_auth.constants import NegotiateFlags, SignSealConstants from ntlm_auth.rc4 import ARC4 class _NtlmMessageSignature1(object): EXPECTED_BODY_LENGTH = 16 def __init__(self, random_pad, checksum, seq_num): self.version = b"\x01\x00\x00\x00" self.random_pad = random_pad self.checksum = checksum self.seq_num = seq_num def get_data(self): signature = self.version signature += self.random_pad signature += self.checksum signature += self.seq_num assert self.EXPECTED_BODY_LENGTH == len(signature), "BODY_LENGTH: %d != signature: %d" % (self.EXPECTED_BODY_LENGTH, len(signature)) return signature class _NtlmMessageSignature2(object): EXPECTED_BODY_LENGTH = 16 def __init__(self, checksum, seq_num): self.version = b"\x01\x00\x00\x00" self.checksum = checksum self.seq_num = seq_num def get_data(self): signature = self.version signature += self.checksum signature += self.seq_num assert self.EXPECTED_BODY_LENGTH == len(signature), "BODY_LENGTH: %d != signature: %d" % (self.EXPECTED_BODY_LENGTH, len(signature)) return signature class SessionSecurity(object): def __init__(self, negotiate_flags, exported_session_key, source="client"): self.negotiate_flags = negotiate_flags self.exported_session_key = exported_session_key self.outgoing_seq_num = 0 self.incoming_seq_num = 0 self._source = source self._client_sealing_key = compkeys.get_seal_key(self.negotiate_flags, exported_session_key, SignSealConstants.CLIENT_SEALING) self._server_sealing_key = compkeys.get_seal_key(self.negotiate_flags, exported_session_key, SignSealConstants.SERVER_SEALING) self.outgoing_handle = None self.incoming_handle = None self.reset_rc4_state(True) self.reset_rc4_state(False) if source == "client": self.outgoing_signing_key = compkeys.get_sign_key(exported_session_key, SignSealConstants.CLIENT_SIGNING) self.incoming_signing_key = compkeys.get_sign_key(exported_session_key, SignSealConstants.SERVER_SIGNING) elif source == "server": self.outgoing_signing_key = compkeys.get_sign_key(exported_session_key, SignSealConstants.SERVER_SIGNING) self.incoming_signing_key = compkeys.get_sign_key(exported_session_key, SignSealConstants.CLIENT_SIGNING) else: raise ValueError("Invalid source parameter %s, must be client " "or server" % source) def reset_rc4_state(self, outgoing=True): csk = self._client_sealing_key ssk = self._server_sealing_key if outgoing: self.outgoing_handle = ARC4(csk if self._source == 'client' else ssk) else: self.incoming_handle = ARC4(ssk if self._source == 'client' else csk) def wrap(self, message): if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL: encrypted_message = self._seal_message(message) signature = self.get_signature(message) message = encrypted_message elif self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN: signature = self.get_signature(message) else: signature = None return message, signature def unwrap(self, message, signature): if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL: message = self._unseal_message(message) self.verify_signature(message, signature) elif self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN: self.verify_signature(message, signature) return message def _seal_message(self, message): encrypted_message = self.outgoing_handle.update(message) return encrypted_message def _unseal_message(self, message): decrypted_message = self.incoming_handle.update(message) return decrypted_message
MIT License
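The value returned by get_signature is a fixed 16-byte structure (version marker, checksum, sequence number), as the _NtlmMessageSignature classes in the context show. A standalone sketch of assembling the Extended Session Security variant from already-computed parts; the checksum is a placeholder rather than a real HMAC, and the little-endian packing of the sequence number is an assumption for illustration:

    import struct

    def build_ntlm2_signature(checksum8, seq_num):
        # Mirror _NtlmMessageSignature2.get_data(): 4-byte version marker,
        # 8-byte checksum, 4-byte sequence number = 16 bytes total.
        version = b"\x01\x00\x00\x00"
        signature = version + checksum8 + struct.pack("<I", seq_num)
        assert len(signature) == 16
        return signature

    # Placeholder checksum; in the real flow this comes from calc_signature().
    print(build_ntlm2_signature(b"\x00" * 8, 1).hex())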
qin2dim/campusdailyautosign
src/BusinessCentralLayer/scaffold.py
_ScaffoldGuider._scaffold_refresh_cookie
python
def _scaffold_refresh_cookie():
    from src.BusinessLogicLayer.apis.manager_cookie import check_admin_cookie
    check_admin_cookie()
Check whether the cookie is still valid, and automatically pull a fresh cookie if it has expired. :return:
https://github.com/qin2dim/campusdailyautosign/blob/1123bf9c54d2bbfc7d22de58987c558ddea2b1c3/src/BusinessCentralLayer/scaffold.py#L231-L238
from gevent import monkey monkey.patch_all() import gevent from sys import argv from typing import List from src.BusinessCentralLayer.setting import * _command_set = { 'deploy': "部署项目(定时任务/Flask 开启与否取决于yaml配置文件)", "ping": "测试接口可用性", "sign [StuNumbers]": "为某个/一系列账号签到(立即执行)", "group": "为config.yaml group中的账号越权签到(General)", "refresh_cookie": "检测并刷新越权cookie", "example": "调用示例,如 python main.py ping" } class _ConfigQuarantine(object): def __init__(self): self.root = [ SERVER_DIR_DATABASE, SERVER_DIR_SCREENSHOT, SERVER_DIR_CACHE, SERVER_DIR_CACHE_FOR_TIMER ] self.flag = False def set_up_file_tree(self, root): for child_ in root: if not os.path.exists(child_): self.flag = True try: if os.path.isdir(child_) or not os.path.splitext(child_)[-1]: os.mkdir(child_) logger.success(f"系统文件链接成功->{child_}") else: if child_ == SERVER_PATH_COOKIES: try: with open(child_, 'w', encoding='utf-8', newline='') as fpx: fpx.write("") logger.success(f"系统文件链接成功->{child_}") except Exception as ep: logger.exception(f"Exception{child_}{ep}") except Exception as ep: logger.exception(ep) @staticmethod def check_config(): if not all(SMTP_SCKEY.values()): logger.warning('<ConfigQuarantine> 您未正确配置<通信邮箱>信息(SMTP_SCKEY)') if not all([MySQL_SETTING.get("host"), MySQL_SETTING.get("password")]): logger.error('<ConfigQuarantine> 您未正确配置<MySQL_SETTING> 请配置后重启项目!') exit() if not all([SDK_OSS_SCKEY.get("id"), SDK_OSS_SCKEY.get("bucket_name")]): logger.warning("您未正确配置<SDK_OSS_SCKEY> 本项目将无法正常启用截图上传功能") def run(self): try: if [cq for cq in reversed(self.root) if not os.path.exists(cq)]: logger.warning('系统文件残缺!') logger.debug("启动<工程重构>模块...") self.set_up_file_tree(self.root) self.check_config() finally: if self.flag: logger.success(">>> 运行环境链接完成,请重启项目") exec("if self.flag:\n\texit()") _ConfigQuarantine().run() class _ScaffoldGuider(object): def __init__(self): self.scaffold_ruler = [i for i in self.__dir__() if i.startswith('_scaffold_')] @logger.catch() def startup(self, driver_command_set: List[str]): driver_command: List[str] = [] if len(driver_command_set) == 1: print("\n".join([f">>> {menu[0].ljust(30, '-')}|| {menu[-1]}" for menu in _command_set.items()])) return True if len(driver_command_set) == 2: driver_command = [driver_command_set[-1].lower(), ] elif len(driver_command_set) > 2: driver_command = list(set([command.lower() for command in driver_command_set[1:]])) if not isinstance(driver_command, list): return True if "group" in driver_command: driver_command.remove('group') group_ = set(config_['group']) group_ = [{'username': j} for j in group_] self._scaffold_group(stu_numbers=group_) elif "sign" in driver_command: driver_command.remove('sign') student_numbers: list = driver_command.copy() for pc_ in reversed(student_numbers): try: int(pc_) except ValueError: student_numbers.remove(pc_) if student_numbers.__len__() == 0: logger.error(f"<ScaffoldGuider> 参数缺失 'sign [StuNumber] or sign group' but not {driver_command}") return False else: group_ = [{'username': j} for j in student_numbers] return self._scaffold_group(stu_numbers=group_) task_list = [] while driver_command.__len__() > 0: _pending_command = driver_command.pop() try: task_list.append(gevent.spawn(eval(f"self._scaffold_{_pending_command}"))) except AttributeError: pass except Exception as e: logger.warning(f'未知错误 <{_pending_command}> {e}') else: gevent.joinall(task_list) if 'deploy' in driver_command: self._scaffold_deploy() @staticmethod def _scaffold_deploy(): from src.BusinessCentralLayer.middleware.interface_io import sei sei.startup_system() @staticmethod def 
_scaffold_group(stu_numbers: List[dict]): logger.info(f"<ScaffoldGuider> StartupSignInGroup-debug || {stu_numbers}") from src.BusinessLogicLayer.apis.vulcan_ash import SignInSpeedup SignInSpeedup(task_docker=stu_numbers).interface() @staticmethod def _scaffold_ping(): import pycurl test_url = [ 'https://ehall.hainanu.edu.cn/jsonp/ywtb/searchServiceItem?flag=0', 'https://ehall.hainanu.edu.cn/qljfwapp/sys/lwHainanuStuTempReport/*default/index.do#/stuTempReport' ] for url in test_url: c = pycurl.Curl() c.setopt(pycurl.URL, url) c.setopt(pycurl.CONNECTTIMEOUT, 5) c.setopt(pycurl.TIMEOUT, 5) c.setopt(pycurl.NOPROGRESS, 1) c.setopt(pycurl.FORBID_REUSE, 1) c.setopt(pycurl.MAXREDIRS, 1) c.setopt(pycurl.DNS_CACHE_TIMEOUT, 30) index_file = open(os.path.dirname(os.path.realpath(__file__)) + "/content.txt", "wb") c.setopt(pycurl.WRITEHEADER, index_file) c.setopt(pycurl.WRITEDATA, index_file) c.perform() print("\n测试网站: ", url) print("HTTP状态码: {}".format(c.getinfo(c.HTTP_CODE))) print("DNS解析时间:{} ms".format(round(c.getinfo(c.NAMELOOKUP_TIME) * 1000), 2)) print("建立连接时间:{} ms".format(round(c.getinfo(c.CONNECT_TIME) * 1000), 2)) print("准备传输时间:{} ms".format(round(c.getinfo(c.PRETRANSFER_TIME) * 1000), 2)) print("传输开始时间:{} ms".format(round(c.getinfo(c.STARTTRANSFER_TIME) * 1000), 2)) print("传输结束总时间:{} ms".format(round(c.getinfo(c.TOTAL_TIME) * 1000), 2)) c.close() @staticmethod
MIT License
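_scaffold_refresh_cookie is reached through _ScaffoldGuider's name-based dispatch: CLI commands are mapped onto methods prefixed with _scaffold_. A simplified, self-contained sketch of that dispatch pattern; the class and commands below are hypothetical stand-ins, and getattr is used here in place of the project's eval-based lookup:

    class MiniScaffold:
        # Commands are discovered by naming convention: "_scaffold_<command>".
        def _scaffold_ping(self):
            print("ping")

        def _scaffold_refresh_cookie(self):
            print("refresh cookie")

        def startup(self, command):
            handler = getattr(self, f"_scaffold_{command}", None)
            if handler is None:
                print(f"unknown command: {command}")
                return
            handler()

    MiniScaffold().startup("refresh_cookie")  # -> refresh cookie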
hasgeek/coaster
tests/test_sqlalchemy_registry.py
PropertyRegistry
python
def PropertyRegistry():
    class PropertyRegistry:
        registry = Registry(property=True)

    return PropertyRegistry
Registry with property and a positional parameter.
https://github.com/hasgeek/coaster/blob/3ffbc9d33c981284593445299aaee0c3cc0cdb0b/tests/test_sqlalchemy_registry.py#L25-L31
from types import SimpleNamespace import pytest from coaster.db import db from coaster.sqlalchemy import BaseMixin from coaster.sqlalchemy.registry import Registry @pytest.fixture() def CallableRegistry(): class CallableRegistry: registry = Registry() return CallableRegistry @pytest.fixture()
BSD 2-Clause Simplified License
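A usage sketch of the fixture pattern above, assuming pytest-style injection as in the rest of test_sqlalchemy_registry.py; the fixture body here uses a plain dict as a placeholder for Registry(property=True), so it is a self-contained illustration rather than coaster's actual test:

    import pytest

    @pytest.fixture()
    def PropertyRegistry():
        # Hypothetical stand-in mirroring the fixture above: builds a fresh class
        # holding a registry object so each test gets an isolated namespace.
        class PropertyRegistry:
            registry = {"property": True}  # placeholder for Registry(property=True)

        return PropertyRegistry

    def test_uses_fixture(PropertyRegistry):
        # pytest injects the class returned by the fixture under the same name.
        assert PropertyRegistry.registry["property"] is True

    # Run with: pytest <this file>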
testproject-io/python-sdk
src/testproject/sdk/internal/agent/agent_client.py
AgentClient.execute_proxy
python
def execute_proxy(self, action: ActionProxy) -> AddonExecutionResponse:
    operation_result = self.send_request(
        "POST",
        urljoin(self._remote_address, Endpoint.AddonExecution.value),
        self._create_action_proxy_payload(action),
        {"skipReporting": "true"},
    )
    if operation_result.status_code == HTTPStatus.NOT_FOUND:
        logging.error(
            f"Action [{action.proxydescriptor.classname}] in addon [{action.proxydescriptor.guid}]"
            f" is not installed in your account."
        )
        raise AddonNotInstalledException
    return AddonExecutionResponse(
        execution_result_type=(
            ExecutionResultType.Passed
            if operation_result.data["resultType"] == "Passed"
            else ExecutionResultType.Failed
        ),
        message=operation_result.data["message"],
        fields=(
            []
            if not operation_result.data["fields"]
            else [ResultField(**field) for field in operation_result.data["fields"]]
        ),
    )
Sends a custom action to the Agent

Args:
    action (ActionProxy): The custom action to be executed

Returns:
    AddonExecutionResponse: object containing the result of the action execution
https://github.com/testproject-io/python-sdk/blob/efae0de04936bacc5358e3cdc3b86fe6c14434fb/src/testproject/sdk/internal/agent/agent_client.py#L417-L452
import logging import uuid from distutils.util import strtobool from enum import Enum, unique from http import HTTPStatus from urllib.parse import urljoin, urlparse, ParseResult import requests import os from requests import HTTPError from src.testproject.classes import ActionExecutionResponse from src.testproject.classes.resultfield import ResultField from src.testproject.enums import ExecutionResultType from src.testproject.enums.report_type import ReportType from src.testproject.executionresults import OperationResult from src.testproject.helpers import ConfigHelper, SeleniumHelper from src.testproject.rest import ReportSettings from src.testproject.rest.messages import ( SessionRequest, SessionResponse, DriverCommandReport, StepReport, CustomTestReport, AddonExecutionResponse, ) from src.testproject.rest.messages.agentstatusresponse import AgentStatusResponse from src.testproject.sdk.addons import ActionProxy from src.testproject.sdk.exceptions import ( SdkException, AgentConnectException, InvalidTokenException, ObsoleteVersionException, ) from src.testproject.sdk.exceptions.addonnotinstalled import AddonNotInstalledException from src.testproject.sdk.internal.agent.agent_client_singleton import AgentClientSingleton from src.testproject.sdk.internal.agent.reports_queue import ReportsQueue from src.testproject.sdk.internal.agent.reports_queue_batch import ReportsQueueBatch from src.testproject.sdk.internal.session import AgentSession from src.testproject.tcp import SocketManager from packaging import version class AgentClient(metaclass=AgentClientSingleton): MIN_SESSION_REUSE_CAPABLE_VERSION = "0.64.20" MIN_LOCAL_REPORT_SUPPORTED_VERSION = "2.1.0" MIN_BATCH_REPORT_SUPPORTED_VERSION = "3.1.0" NEW_SESSION_SOCKET_TIMEOUT_MS = 120 * 1000 __agent_version: str = None def __init__( self, token: str, capabilities: dict, agent_url: str, report_settings: ReportSettings, socket_session_timeout: int, ): self.agent_url = agent_url self._is_local_execution = True self._agent_session = None self._agent_response = None self._remote_address = agent_url if agent_url is not None else ConfigHelper.get_agent_service_address() self.__check_local_execution() self._report_settings = report_settings self._capabilities = capabilities self._token = token self._session_socket_timeout = socket_session_timeout self.__start_session() self.__verify_local_reports_supported(report_settings.report_type) if version.parse(self.__agent_version) >= version.parse(self.MIN_BATCH_REPORT_SUPPORTED_VERSION): url = urljoin(self._remote_address, Endpoint.ReportBatch.value) self._reports_queue = ReportsQueueBatch(token=token, url=url) else: self._reports_queue = ReportsQueue(token) @property def agent_session(self): return self._agent_session @property def report_settings(self) -> ReportSettings: return self._report_settings def __verify_local_reports_supported(self, report_type: ReportType): if report_type is ReportType.LOCAL and version.parse(self.__agent_version) < version.parse( self.MIN_LOCAL_REPORT_SUPPORTED_VERSION ): raise AgentConnectException( f"Target Agent version [{self.__agent_version}] doesn't support local reports." f" Upgrade the Agent to the latest version and try again." 
) def __start_session(self): sdk_version = ConfigHelper.get_sdk_version() logging.info(f"SDK version: {sdk_version}") self._request_session_from_agent() self.log_warnings() AgentClient.__agent_version = self._agent_response.agent_version if self._agent_response.local_report_url is not None and self._agent_response.local_report_url: parsed_report_url = urlparse(self._agent_response.local_report_url) report_url = ParseResult( scheme=parsed_report_url.scheme, netloc=f"{urlparse(self._remote_address).hostname}:{parsed_report_url.port}", path=parsed_report_url.path, params=parsed_report_url.params, query=parsed_report_url.query, fragment=parsed_report_url.fragment, ).geturl() logging.info("Report URL: " + report_url) self._agent_session = AgentSession( self._agent_response.server_address, self._agent_response.session_id, self._agent_response.dialect, self._agent_response.capabilities, ) SocketManager.instance().open_socket( urlparse(self._remote_address).hostname, self._agent_response.dev_socket_port, self._agent_response.uuid, ) logging.info("Development session started...") def log_warnings(self): if self._agent_response.warnings is not None: for warning in self._agent_response.warnings: logging.warning(warning) @staticmethod def can_reuse_session() -> bool: if AgentClient.__agent_version is None: return False return version.parse(AgentClient.__agent_version) >= version.parse( AgentClient.MIN_SESSION_REUSE_CAPABLE_VERSION ) def _request_session_from_agent(self): session_request = SessionRequest(self._capabilities, self._report_settings) logging.info(f"Session request: {session_request.to_json()}") try: response = self.send_request( "POST", urljoin(self._remote_address, Endpoint.DevelopmentSession.value), session_request.to_json(), timeout=self._session_socket_timeout, ) except requests.exceptions.ConnectionError: logging.error(f"Could not start new session on {self._remote_address}. 
Is your Agent running?") logging.error("You can download the TestProject Agent from https://app.testproject.io/#/agents") raise AgentConnectException(f"Connection error trying to connect to Agent on {self._remote_address}") if not response.passed: self.__handle_new_session_error(response) self._agent_response = SessionResponse( dev_socket_port=response.data["devSocketPort"], server_address=response.data.get("serverAddress"), session_id=response.data.get("sessionId", uuid.uuid4()), dialect=response.data.get("dialect"), capabilities=response.data.get("capabilities", {}), agent_version=response.data.get("version"), local_report=response.data.get("localReport"), local_report_url=response.data.get("localReportUrl"), uuid=response.data.get("uuid"), warnings=response.data.get("warnings"), ) def update_job_name(self, job_name): if strtobool(os.getenv("TP_UPDATE_JOB_NAME")): logging.info(f"Updating job name to: {job_name}") try: response = self.send_request( "PUT", urljoin(self._remote_address, Endpoint.DevelopmentSession.value), {"jobName": job_name}, ) if not response.passed: logging.error("Failed to update job name") except requests.exceptions.RequestException: logging.error("Failed to update job name") def send_request(self, method, path, body=None, params=None, timeout=None) -> OperationResult: with requests.Session() as session: if params: session.params = params if method == "GET": response = session.get(path, headers={"Authorization": self._token}) elif method == "POST": response = session.post(path, headers={"Authorization": self._token}, json=body, timeout=timeout) elif method == "DELETE": response = session.delete(path, headers={"Authorization": self._token}) elif method == "PUT": response = session.put(path, headers={"Authorization": self._token}, json=body) else: raise SdkException(f"Unsupported HTTP method {method} in send_request()") response_json = {} try: response_json = response.json() except ValueError: pass try: response.raise_for_status() return OperationResult(True, response.status_code, "", response_json) except HTTPError as http_error: return OperationResult( False, response.status_code, response_json.get("message", str(http_error)), response_json if response_json else None, ) def send_action_execution_request(self, codeblock_guid: str, body: dict) -> ActionExecutionResponse: response = self.send_request( "POST", urljoin( urljoin(self._remote_address, Endpoint.ActionExecution.value), codeblock_guid, ), body, ) if not response.passed: result = ExecutionResultType.Failed else: result = ( ExecutionResultType.Passed if response.data["resultType"] == "Passed" else ExecutionResultType.Failed ) result_data = response.data["outputs"] if response.passed else None return ActionExecutionResponse(result, response.message, result_data) @staticmethod def get_agent_version(token: str): with requests.Session() as session: response = session.get( urljoin(ConfigHelper.get_agent_service_address(), Endpoint.GetStatus.value), headers={"Authorization": token}, ) try: response.raise_for_status() try: response_json = response.json() agent_version = response_json["tag"] except ValueError: raise SdkException("Could not parse Agent status response: no JSON response body present") except KeyError: raise SdkException( "Could not parse Agent status response: element 'tag' not found in JSON response body" ) except HTTPError: raise AgentConnectException( f"Agent returned HTTP {response.status_code} when trying to retrieve Agent status" ) return AgentStatusResponse(agent_version) def 
report_driver_command(self, driver_command_report: DriverCommandReport): self._reports_queue.submit( report_as_json=driver_command_report.to_json(), url=urljoin(self._remote_address, Endpoint.ReportDriverCommand.value), block=False, ) def report_step(self, step_report: StepReport): self._reports_queue.submit( report_as_json=step_report.to_json(), url=urljoin(self._remote_address, Endpoint.ReportStep.value), block=False, ) def report_test(self, test_report: CustomTestReport): self._reports_queue.submit( report_as_json=test_report.to_json(), url=urljoin(self._remote_address, Endpoint.ReportTest.value), block=False, )
Apache License 2.0
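Most of execute_proxy is a translation of the Agent's JSON payload into an AddonExecutionResponse. A standalone sketch of that translation over a plain dict, using the field names visible in the method body (resultType, message, fields); the dataclass is a simplified stand-in for the SDK's ResultField, not its real definition:

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class ResultFieldStub:
        # Simplified stand-in for src.testproject.classes.resultfield.ResultField.
        name: str = ""
        value: Optional[str] = None

    def parse_addon_response(data: dict):
        # Map the Agent payload to (passed, message, fields), as execute_proxy does.
        passed = data["resultType"] == "Passed"
        fields: List[ResultFieldStub] = (
            [ResultFieldStub(**f) for f in data["fields"]] if data["fields"] else []
        )
        return passed, data["message"], fields

    print(parse_addon_response({"resultType": "Passed", "message": "ok", "fields": []}))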
nasa/bingo
bingo/symbolic_regression/agraph/component_generator.py
ComponentGenerator.random_terminal_parameter
python
def random_terminal_parameter(self, terminal_number):
    if terminal_number == 0:
        param = np.random.randint(self.input_x_dimension)
    else:
        param = -1
    return param
Get random terminal parameter

Parameters
----------
terminal_number : int
    terminal number for which random parameter should be generated

Returns
-------
int
    parameter to be used in a terminal command
https://github.com/nasa/bingo/blob/fad1547105d66fe91c58fb1c771af57cb26126c1/bingo/symbolic_regression/agraph/component_generator.py#L191-L209
import logging import numpy as np from .maps import OPERATOR_NAMES from ...util.probability_mass_function import ProbabilityMassFunction from ...util.argument_validation import argument_validation LOGGER = logging.getLogger(__name__) class ComponentGenerator: @argument_validation(input_x_dimension={">=": 0}, num_initial_load_statements={">=": 1}, terminal_probability={">=": 0.0, "<=": 1.0}, constant_probability={">=": 0.0, "<=": 1.0}) def __init__(self, input_x_dimension, num_initial_load_statements=1, terminal_probability=0.1, constant_probability=None, automatic_constant_optimization=True, numerical_constant_range=100, numerical_constant_std=None): self.input_x_dimension = input_x_dimension self._num_initial_load_statements = num_initial_load_statements self._terminal_pmf = self._make_terminal_pdf(constant_probability) self._operator_pmf = ProbabilityMassFunction() self._random_command_function_pmf = self._make_random_command_pmf(terminal_probability) self.automatic_constant_optimization = automatic_constant_optimization self._numerical_constant_range = numerical_constant_range if numerical_constant_std is None: numerical_constant_std = numerical_constant_range / 100 self._numerical_constant_std = numerical_constant_std def _make_terminal_pdf(self, constant_probability): if constant_probability is None: terminal_weight = [1, self.input_x_dimension] else: terminal_weight = [constant_probability, 1.0 - constant_probability] return ProbabilityMassFunction(items=[1, 0], weights=terminal_weight) def _make_random_command_pmf(self, terminal_probability): command_weights = [terminal_probability, 1.0 - terminal_probability] return ProbabilityMassFunction(items=[self._random_terminal_command, self._random_operator_command], weights=command_weights) def add_operator(self, operator_to_add, operator_weight=None): if isinstance(operator_to_add, str): operator_number = self._get_operator_number_from_string( operator_to_add) else: operator_number = operator_to_add self._operator_pmf.add_item(operator_number, operator_weight) @staticmethod def _get_operator_number_from_string(operator_string): for operator_number, operator_names in OPERATOR_NAMES.items(): if operator_string in operator_names: return operator_number raise ValueError("Could not find operator %s. " % operator_string) def random_command(self, stack_location): if stack_location < self._num_initial_load_statements: return self._random_terminal_command(stack_location) return self._random_command_function_pmf.draw_sample()(stack_location) def _random_operator_command(self, stack_location): return np.array([self.random_operator(), self.random_operator_parameter(stack_location), self.random_operator_parameter(stack_location)], dtype=int) def random_operator(self): return self._operator_pmf.draw_sample() @staticmethod def random_operator_parameter(stack_location): return np.random.randint(stack_location) def _random_terminal_command(self, _=None): terminal = self.random_terminal() return np.array([terminal, self.random_terminal_parameter(terminal), self.random_terminal_parameter(terminal)], dtype=int) def random_terminal(self): return self._terminal_pmf.draw_sample()
Apache License 2.0
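A short sketch of the convention random_terminal_parameter encodes: terminal 0 (a load of an X variable, per _make_terminal_pdf in the context) gets a random column index in [0, input_x_dimension), while any other terminal (a constant) gets the sentinel -1. The standalone function below reproduces that logic outside the class for illustration:

    import numpy as np

    def random_terminal_parameter(terminal_number, input_x_dimension):
        # Terminal 0 loads a column of X, so pick a random column index;
        # any other terminal (a constant) uses -1 as a "no parameter" sentinel.
        if terminal_number == 0:
            return np.random.randint(input_x_dimension)
        return -1

    np.random.seed(0)
    print(random_terminal_parameter(0, input_x_dimension=3))  # 0, 1, or 2
    print(random_terminal_parameter(1, input_x_dimension=3))  # always -1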
mindspore-ai/mindinsight
mindinsight/datavisual/data_transform/graph/msgraph.py
MSGraph._get_data_type_by_parse_type_proto
python
def _get_data_type_by_parse_type_proto(self, type_proto, node):
    data_type_name = self._get_data_type_name_by_value(
        type_proto, type_proto.data_type, field_name='data_type')
    if type_proto.data_type == DataType.DT_TENSOR:
        tensor_type_proto = type_proto.tensor_type
        value = type_proto.tensor_type.elem_type
        elem_type_name = self._get_data_type_name_by_value(
            tensor_type_proto, value, field_name='elem_type')
        node.elem_types.append(elem_type_name)
        return f'{data_type_name}[{elem_type_name}]'
    if type_proto.data_type == DataType.DT_TUPLE:
        data_types = []
        for elem_type in type_proto.sequence_type.elem_types:
            data_types.append(self._get_data_type_by_parse_type_proto(elem_type, node))
        return f'{data_type_name}{str(data_types)}'
    node.elem_types.append(data_type_name)
    return data_type_name
Get the data type by parsing the type proto object.

The data type name refers to the `anf_ir_pb2.DataType` enum. If the data type is a
tensor or tuple, the name returned is `data_type[element_type, element_type]`.

Args:
    type_proto (anf_ir_pb2.TypeProto): Refer to anf_ir_pb2.TypeProto.

Returns:
    str, the data type.
https://github.com/mindspore-ai/mindinsight/blob/253a210719dbb1e55b826f2e489322f402d66676/mindinsight/datavisual/data_transform/graph/msgraph.py#L181-L211
from mindinsight.datavisual.common.enums import PluginNameEnum from mindinsight.datavisual.common.log import logger from mindinsight.datavisual.data_transform.graph.graph import EdgeTypeEnum, Graph, check_invalid_character from mindinsight.datavisual.data_transform.graph.node import Node, NodeTypeEnum from mindinsight.datavisual.data_transform.graph.node_tree import NodeTree from mindinsight.datavisual.proto_files.mindinsight_anf_ir_pb2 import DataType from mindinsight.debugger.stream_cache.source import DebuggerSource class MSGraph(Graph): def _parse_data(self, proto_data): logger.info("Start to parse graph proto data.") self._parse_op_nodes(proto_data.node) self._parse_parameters(proto_data.parameters) self._parse_consts(proto_data.const_vals) self._update_input_after_create_node() self._update_output_after_create_node() logger.info("Parse proto data end, normal node count(only contain op node, " "parameter, const): %s.", self.normal_node_count) def _parse_op_nodes(self, node_protos): logger.debug("Start to parse op nodes from proto.") for topological_index, node_proto in enumerate(node_protos): if not node_proto.name: logger.warning("Finding a node with an empty name will not save it.") continue if node_proto.op_type == "Load": node_name = Node.create_node_name(scope=node_proto.scope, base_name=f'{node_proto.op_type}-op{node_proto.name}') node_proto.full_name = node_name elif not node_proto.full_name or any( node_proto.full_name.lower().endswith(f'[:{plugin.value.lower()}]') for plugin in PluginNameEnum): node_name = Node.create_node_name(scope=node_proto.scope, base_name=f'{node_proto.op_type}{node_proto.name}') else: node_name = node_proto.full_name check_invalid_character(node_name) node = Node(name=node_name, node_id=node_proto.name, topological_index=topological_index) node.full_name = node_proto.full_name node.type = node_proto.op_type if getattr(node_proto, 'source_address', None): node.stack = DebuggerSource.build_stack_from_source_address(node_proto.source_address) self._parse_attributes(node_proto.attribute, node) self._parse_inputs(node_proto.input, node) node.output_i = node_proto.output_i node.scope = node_proto.scope node.output_shape = self._get_shape_by_parse_type_proto(node_proto.output_type) node.output_nums = len(node.output_shape) node.output_data_type = self._get_data_type_by_parse_type_proto(node_proto.output_type, node) self._cache_node(node) def _parse_parameters(self, parameter_protos): logger.debug("Start to parse parameters from proto.") for parameter in parameter_protos: if not parameter.name: logger.warning("Finding a parameter with an empty name will not save it.") continue check_invalid_character(parameter.name) node = Node(name=parameter.name, node_id=parameter.name) node.type = NodeTypeEnum.PARAMETER.value node.output_shape = self._get_shape_by_parse_type_proto(parameter.type) node.output_nums = len(node.output_shape) node.output_data_type = self._get_data_type_by_parse_type_proto(parameter.type, node) attr = dict( type=self._get_data_type_by_parse_type_proto(parameter.type, node), shape=str(self._get_shape_by_parse_type_proto(parameter.type)) ) node.add_attr(attr) self._cache_node(node) logger.debug("Foreach graph proto parameters, node id: %s, node name: %s, " "node def name: %s", node.node_id, node.name, parameter.name) def _parse_consts(self, consts): logger.debug("Start to parse consts from proto.") for const in consts: if not const.key: logger.warning("Finding a const with an empty key will not save it.") continue check_invalid_character(const.key) 
node = Node(name=const.key, node_id=const.key) node.type = NodeTypeEnum.CONST.value if const.value.ByteSize() > self.MAX_NODE_ATTRIBUTE_VALUE_BYTES: node.add_attr({const.key: 'dtype: ' + DataType.Name(const.value.dtype)}) else: node.add_attr({const.key: str(const.value)}) if const.value.dtype == DataType.DT_TENSOR: shape = list(const.value.tensor_val.dims) node.output_shape.append(shape) if const.value.tensor_val.HasField('data_type'): node.elem_types.append(DataType.Name(const.value.tensor_val.data_type)) else: node.elem_types.append(DataType.Name(const.value.dtype)) node.output_shape.append([]) node.output_nums = len(node.output_shape) self._cache_node(node) def _get_shape_by_parse_type_proto(self, type_proto): shapes = [] if type_proto.HasField('data_type'): if type_proto.data_type != DataType.DT_TENSOR and type_proto.data_type != DataType.DT_TUPLE: shapes.append([]) return shapes if type_proto.HasField('tensor_type'): tensor_type = type_proto.tensor_type tensor_shape_proto = tensor_type.shape shape = [dim.size for dim in tensor_shape_proto.dim] shapes.append(shape) if type_proto.HasField('sequence_type'): for elem_type in type_proto.sequence_type.elem_types: shapes.extend(self._get_shape_by_parse_type_proto(elem_type)) return shapes
Apache License 2.0
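The recursion in _get_data_type_by_parse_type_proto produces names like tensor[float32] or tuple[...] from nested type descriptions. A simplified, self-contained sketch of the same naming scheme over plain dicts; the dict layout is a hypothetical stand-in for anf_ir_pb2.TypeProto, not the real protobuf API:

    def type_name(t):
        # t is a dict such as {'kind': 'tensor', 'elem': 'float32'} or
        # {'kind': 'tuple', 'elems': [...]}; other kinds are returned as-is.
        if t['kind'] == 'tensor':
            return f"tensor[{t['elem']}]"
        if t['kind'] == 'tuple':
            return f"tuple{[type_name(e) for e in t['elems']]}"
        return t['kind']

    print(type_name({'kind': 'tuple', 'elems': [
        {'kind': 'tensor', 'elem': 'float32'},
        {'kind': 'int32'},
    ]}))  # -> tuple['tensor[float32]', 'int32']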
paddlepaddle/paddle
python/paddle/distributed/fleet/base/distributed_strategy.py
DistributedStrategy.hierarchical_allreduce_inter_nranks
python
def hierarchical_allreduce_inter_nranks(self):
    return self.strategy.hierarchical_allreduce_inter_nranks
Number of ranks for low level node groups in hierarchical allreduce
Default value: number of GPU cards on each single GPU machine

Example:

    .. code-block:: python

        import paddle.distributed.fleet as fleet
        strategy = fleet.DistributedStrategy()
        strategy.hierarchical_allreduce_inter_nranks = 8
https://github.com/paddlepaddle/paddle/blob/056b87414880e0520bb4560fc40d5b62db9c5175/python/paddle/distributed/fleet/base/distributed_strategy.py#L584-L597
import paddle from paddle.distributed.fleet.proto import distributed_strategy_pb2 from paddle.fluid.framework import Variable, set_flags, core, _global_flags from paddle.fluid.wrapped_decorator import wrap_decorator import google.protobuf.text_format import google.protobuf __all__ = [] non_auto_func_called = True def __non_auto_func_called__(func): def __impl__(*args, **kwargs): global non_auto_func_called non_auto_func_called = False return func(*args, **kwargs) return __impl__ is_strict_auto = wrap_decorator(__non_auto_func_called__) def get_msg_dict(msg): res_dict = {} fields = msg.DESCRIPTOR.fields for f in fields: res_dict[f.name] = getattr(msg, f.name) return res_dict def assign_configs_value(msg, config): fields = msg.DESCRIPTOR.fields for key in config: for f in fields: if key == f.name: if f.label == 3: getattr(msg, f.name).extend(config[f.name]) elif f.label == 1 or f.label == 2: setattr(msg, f.name, config[f.name]) def check_configs_key(msg, config, field_name): key_list = msg.DESCRIPTOR.fields_by_name.keys() for key in config: assert key in key_list, "key:{} not in {}".format(key, field_name) class DistributedJobInfo(object): def __init__(self): self.job_info = distributed_strategy_pb2.DistributedJobInfo() def _set_worker_num(self, worker_num): self.job_info.worker_num = worker_num def _set_server_num(self, server_num): self.job_info.server_num = server_num def _set_worker_ips(self, worker_ips): self.job_info.worker_ips.extend(worker_ips) def _set_server_endpoints(self, server_endpoints): self.job_info.server_endpoints.extend(server_endpoints) def _set_origin_startup(self, origin_startup_prog): self.job_info.origin_startup = str(origin_startup_prog) def _set_origin_main(self, origin_main_prog): self.job_info.origin_main = str(origin_main_prog) def _distributed_main(self, distributed_main_prog): self.job_info.distributed_main = str(distributed_main_prog) def _optimizer_name(self, optimizer_name): self.job_info.optimizer_name = optimizer_name def _set_distributed_strategy(self, dist_strategy): self.job_info.strategy = dist_strategy class DistributedStrategy(object): __lock_attr = False def __init__(self): self.strategy = distributed_strategy_pb2.DistributedStrategy() key = 'FLAGS_cudnn_batchnorm_spatial_persistent' if _global_flags().is_public(key): self.strategy.cudnn_batchnorm_spatial_persistent = bool( _global_flags()[key]) key = 'FLAGS_conv_workspace_size_limit' if _global_flags().is_public(key): self.strategy.conv_workspace_size_limit = int(_global_flags()[key]) key = 'FLAGS_cudnn_exhaustive_search' if _global_flags().is_public(key): self.strategy.cudnn_exhaustive_search = bool(_global_flags()[key]) key = 'FLAGS_sync_nccl_allreduce' if _global_flags().is_public(key): self.strategy.sync_nccl_allreduce = bool(_global_flags()[key]) self.__lock_attr = True def __setattr__(self, key, value): if self.__lock_attr and not hasattr(self, key): raise TypeError("%s is not a attribute of %s" % (key, self.__class__.__name__)) object.__setattr__(self, key, value) def save_to_prototxt(self, output): with open(output, "w") as fout: fout.write(str(self.strategy)) def load_from_prototxt(self, pb_file): with open(pb_file, 'r') as f: self.strategy = google.protobuf.text_format.Merge( str(f.read()), self.strategy) @property def execution_strategy(self): execution_strategy = paddle.fluid.ExecutionStrategy() fields = self.strategy.execution_strategy.DESCRIPTOR.fields for f in fields: setattr(execution_strategy, f.name, getattr(self.strategy.execution_strategy, f.name)) return execution_strategy 
@execution_strategy.setter @is_strict_auto def execution_strategy(self, strategy): fields = self.strategy.execution_strategy.DESCRIPTOR.fields for f in fields: setattr(self.strategy.execution_strategy, f.name, getattr(strategy, f.name)) @property def build_strategy(self): build_strategy = paddle.fluid.BuildStrategy() fields = self.strategy.build_strategy.DESCRIPTOR.fields for f in fields: setattr(build_strategy, f.name, getattr(self.strategy.build_strategy, f.name)) return build_strategy @build_strategy.setter @is_strict_auto def build_strategy(self, strategy): fields = self.strategy.build_strategy.DESCRIPTOR.fields for f in fields: if f.label == 1 or f.label == 2: setattr(self.strategy.build_strategy, f.name, getattr(strategy, f.name)) elif f.label == 3: getattr(self.strategy.build_strategy, f.name).extend(getattr(strategy, f.name)) @property def gradient_scale_configs(self): return get_msg_dict(self.strategy.gradient_scale_configs) @gradient_scale_configs.setter @is_strict_auto def gradient_scale_configs(self, config): check_configs_key(self.strategy.gradient_scale_configs, config, 'gradient_scale_configs') assign_configs_value(self.strategy.gradient_scale_configs, config) @property def a_sync(self): return self.strategy.a_sync @a_sync.setter @is_strict_auto def a_sync(self, flag): if isinstance(flag, bool): self.strategy.a_sync = flag self.a_sync_configs = {"k_steps": 0} else: raise ValueError( "The type of `flag` is invalid, expected type is bool, but received {}". format(type(flag))) @property def a_sync_configs(self): return get_msg_dict(self.strategy.a_sync_configs) @a_sync_configs.setter @is_strict_auto def a_sync_configs(self, configs): check_configs_key(self.strategy.a_sync_configs, configs, "a_sync_configs") assign_configs_value(self.strategy.a_sync_configs, configs) @property def trainer_desc_configs(self): return get_msg_dict(self.strategy.trainer_desc_configs) @trainer_desc_configs.setter @is_strict_auto def trainer_desc_configs(self, configs): check_configs_key(self.strategy.trainer_desc_configs, configs, "trainer_desc_configs") assign_configs_value(self.strategy.trainer_desc_configs, configs) @property def amp(self): return self.strategy.amp @amp.setter @is_strict_auto def amp(self, flag): if isinstance(flag, bool): self.strategy.amp = flag else: print("WARNING: amp should have value of bool type") @property def amp_configs(self): return get_msg_dict(self.strategy.amp_configs) @amp_configs.setter @is_strict_auto def amp_configs(self, configs): check_configs_key(self.strategy.amp_configs, configs, "amp_configs") assign_configs_value(self.strategy.amp_configs, configs) @property def asp(self): return self.strategy.asp @asp.setter @is_strict_auto def asp(self, flag): if isinstance(flag, bool): self.strategy.asp = flag else: print("WARNING: asp should have value of bool type") @property def recompute(self): return self.strategy.recompute @property def sync_nccl_allreduce(self): return self.strategy.sync_nccl_allreduce @sync_nccl_allreduce.setter @is_strict_auto def sync_nccl_allreduce(self, flag): if isinstance(flag, bool): self.strategy.sync_nccl_allreduce = flag else: print("WARNING: sync_nccl_allreduce should have value of bool type") @property def use_hierarchical_allreduce(self): return self.strategy.use_hierarchical_allreduce @use_hierarchical_allreduce.setter @is_strict_auto def use_hierarchical_allreduce(self, flag): if isinstance(flag, bool): self.strategy.use_hierarchical_allreduce = flag else: print( "WARNING: use_hierarchical_allreduce should have value of bool 
type" ) @property
Apache License 2.0
mozilla/make.mozilla.org
vendor-local/lib/python/celery/app/base.py
BaseApp.backend
python
def backend(self):
    return self._get_backend()
Storing/retrieving task state. See :class:`~celery.backend.base.BaseBackend`.
https://github.com/mozilla/make.mozilla.org/blob/98b87c517b463a5bae09f29284b1dabca97bb376/vendor-local/lib/python/celery/app/base.py#L357-L360
from __future__ import absolute_import from __future__ import with_statement import os import warnings import platform as _platform from contextlib import contextmanager from copy import deepcopy from functools import wraps from kombu.clocks import LamportClock from .. import datastructures from .. import platforms from ..exceptions import AlwaysEagerIgnored from ..utils import cached_property, instantiate, lpmerge from .defaults import DEFAULTS, find_deprecated_settings, find import kombu if kombu.VERSION < (2, 0): raise ImportError("Celery requires Kombu version 1.1.0 or higher.") BUGREPORT_INFO = """ platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s settings -> transport:%(transport)s results:%(results)s """ class Settings(datastructures.ConfigurationView): @property def CELERY_RESULT_BACKEND(self): return self.get("CELERY_RESULT_BACKEND") or self.get("CELERY_BACKEND") @property def BROKER_TRANSPORT(self): return (self.get("BROKER_TRANSPORT") or self.get("BROKER_BACKEND") or self.get("CARROT_BACKEND")) @property def BROKER_BACKEND(self): return self.BROKER_TRANSPORT @property def BROKER_HOST(self): return (os.environ.get("CELERY_BROKER_URL") or self.get("BROKER_URL") or self.get("BROKER_HOST")) def find_option(self, name, namespace="celery"): return find(name, namespace) def get_by_parts(self, *parts): return self["_".join(filter(None, parts))] def find_value_for_key(self, name, namespace="celery"): ns, key, _ = self.find_option(name, namespace=namespace) return self.get_by_parts(ns, key) class BaseApp(object): SYSTEM = platforms.SYSTEM IS_OSX = platforms.IS_OSX IS_WINDOWS = platforms.IS_WINDOWS amqp_cls = "celery.app.amqp:AMQP" backend_cls = None events_cls = "celery.events:Events" loader_cls = "celery.loaders.app:AppLoader" log_cls = "celery.log:Logging" control_cls = "celery.task.control:Control" _pool = None def __init__(self, main=None, loader=None, backend=None, amqp=None, events=None, log=None, control=None, set_as_current=True, accept_magic_kwargs=False, **kwargs): self.main = main self.amqp_cls = amqp or self.amqp_cls self.backend_cls = backend or self.backend_cls self.events_cls = events or self.events_cls self.loader_cls = loader or self.loader_cls self.log_cls = log or self.log_cls self.control_cls = control or self.control_cls self.set_as_current = set_as_current self.accept_magic_kwargs = accept_magic_kwargs self.clock = LamportClock() self.on_init() def on_init(self): pass def config_from_object(self, obj, silent=False): del(self.conf) return self.loader.config_from_object(obj, silent=silent) def config_from_envvar(self, variable_name, silent=False): del(self.conf) return self.loader.config_from_envvar(variable_name, silent=silent) def config_from_cmdline(self, argv, namespace="celery"): self.conf.update(self.loader.cmdline_config_parser(argv, namespace)) def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, publisher=None, connection=None, connect_timeout=None, result_cls=None, expires=None, queues=None, **options): if self.conf.CELERY_ALWAYS_EAGER: warnings.warn(AlwaysEagerIgnored( "CELERY_ALWAYS_EAGER has no effect on send_task")) router = self.amqp.Router(queues) result_cls = result_cls or self.AsyncResult options.setdefault("compression", self.conf.CELERY_MESSAGE_COMPRESSION) options = router.route(options, name, args, kwargs) exchange = options.get("exchange") exchange_type = options.get("exchange_type") with self.default_connection(connection, connect_timeout) 
as conn: publish = publisher or self.amqp.TaskPublisher(conn, exchange=exchange, exchange_type=exchange_type) try: new_id = publish.delay_task(name, args, kwargs, task_id=task_id, countdown=countdown, eta=eta, expires=expires, **options) finally: publisher or publish.close() return result_cls(new_id) def AsyncResult(self, task_id, backend=None, task_name=None): from ..result import BaseAsyncResult return BaseAsyncResult(task_id, app=self, task_name=task_name, backend=backend or self.backend) def TaskSetResult(self, taskset_id, results, **kwargs): from ..result import TaskSetResult return TaskSetResult(taskset_id, results, app=self) def broker_connection(self, hostname=None, userid=None, password=None, virtual_host=None, port=None, ssl=None, insist=None, connect_timeout=None, transport=None, transport_options=None, **kwargs): conf = self.conf return self.amqp.BrokerConnection( hostname or conf.BROKER_HOST, userid or conf.BROKER_USER, password or conf.BROKER_PASSWORD, virtual_host or conf.BROKER_VHOST, port or conf.BROKER_PORT, transport=transport or conf.BROKER_TRANSPORT, insist=self.either("BROKER_INSIST", insist), ssl=self.either("BROKER_USE_SSL", ssl), connect_timeout=self.either( "BROKER_CONNECTION_TIMEOUT", connect_timeout), transport_options=dict(conf.BROKER_TRANSPORT_OPTIONS, **transport_options or {})) @contextmanager def default_connection(self, connection=None, connect_timeout=None): if connection: yield connection else: with self.pool.acquire(block=True) as connection: yield connection def with_default_connection(self, fun): @wraps(fun) def _inner(*args, **kwargs): connection = kwargs.pop("connection", None) with self.default_connection(connection) as c: return fun(*args, **dict(kwargs, connection=c)) return _inner def prepare_config(self, c): find_deprecated_settings(c) return c def now(self): return self.loader.now(utc=self.conf.CELERY_ENABLE_UTC) def mail_admins(self, subject, body, fail_silently=False): if self.conf.ADMINS: to = [admin_email for _, admin_email in self.conf.ADMINS] return self.loader.mail_admins(subject, body, fail_silently, to=to, sender=self.conf.SERVER_EMAIL, host=self.conf.EMAIL_HOST, port=self.conf.EMAIL_PORT, user=self.conf.EMAIL_HOST_USER, password=self.conf.EMAIL_HOST_PASSWORD, timeout=self.conf.EMAIL_TIMEOUT, use_ssl=self.conf.EMAIL_USE_SSL, use_tls=self.conf.EMAIL_USE_TLS) def select_queues(self, queues=None): if queues: return self.amqp.queues.select_subset(queues, self.conf.CELERY_CREATE_MISSING_QUEUES) def either(self, default_key, *values): for value in values: if value is not None: return value return self.conf.get(default_key) def merge(self, l, r): return lpmerge(l, r) def _get_backend(self): from ..backends import get_backend_cls return get_backend_cls( self.backend_cls or self.conf.CELERY_RESULT_BACKEND, loader=self.loader)(app=self) def _get_config(self): return Settings({}, [self.prepare_config(self.loader.conf), deepcopy(DEFAULTS)]) def _after_fork(self, obj_): if self._pool: self._pool.force_close_all() self._pool = None def bugreport(self): import celery import kombu return BUGREPORT_INFO % {"system": _platform.system(), "arch": _platform.architecture(), "py_i": platforms.pyimplementation(), "celery_v": celery.__version__, "kombu_v": kombu.__version__, "py_v": _platform.python_version(), "transport": self.conf.BROKER_TRANSPORT, "results": self.conf.CELERY_RESULT_BACKEND} @property def pool(self): if self._pool is None: try: from multiprocessing.util import register_after_fork register_after_fork(self, self._after_fork) except 
ImportError: pass self._pool = self.broker_connection().Pool( limit=self.conf.BROKER_POOL_LIMIT) return self._pool @cached_property def amqp(self): return instantiate(self.amqp_cls, app=self) @cached_property
BSD 3-Clause New or Revised License
bids-standard/pybids
bids/layout/models.py
Entity.match_file
python
def match_file(self, f):
    if self.regex is None:
        return None
    m = self.regex.search(f.path)
    val = m.group(1) if m is not None else None
    return self._astype(val)
Determine whether the passed file matches the Entity.

Parameters
----------
f : BIDSFile
    The BIDSFile instance to match against.

Returns
-------
the matched value if a match was found, otherwise None.
https://github.com/bids-standard/pybids/blob/bafd148900d12f115d0e1133a731a706f370d8ad/bids/layout/models.py#L561-L579
import re import os from pathlib import Path import warnings import json from copy import deepcopy from itertools import chain from functools import lru_cache from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy import (Column, String, Boolean, ForeignKey, Table) from sqlalchemy.orm import reconstructor, relationship, backref, object_session from ..utils import listify from .writing import build_path, write_to_file from ..config import get_option from .utils import BIDSMetadata Base = declarative_base() class LayoutInfo(Base): __tablename__ = 'layout_info' root = Column(String, primary_key=True) absolute_paths = Column(Boolean) _derivatives = Column(String) _config = Column(String) def __init__(self, **kwargs): init_args = self._sanitize_init_args(kwargs) raw_cols = ['root', 'absolute_paths'] json_cols = ['derivatives', 'config'] all_cols = raw_cols + json_cols missing_cols = set(all_cols) - set(init_args.keys()) if missing_cols: raise ValueError("Missing mandatory initialization args: {}" .format(missing_cols)) for col in all_cols: setattr(self, col, init_args[col]) if col in json_cols: json_data = json.dumps(init_args[col]) setattr(self, '_' + col, json_data) @reconstructor def _init_on_load(self): for col in ['derivatives', 'config']: db_val = getattr(self, '_' + col) setattr(self, col, json.loads(db_val)) def _sanitize_init_args(self, kwargs): if 'root' in kwargs: kwargs['root'] = str(Path(kwargs['root']).absolute()) if kwargs.get('derivatives') not in (None, True, False): kwargs['derivatives'] = [ str(Path(der).absolute()) for der in listify(kwargs['derivatives']) ] return kwargs class Config(Base): __tablename__ = 'configs' name = Column(String, primary_key=True) _default_path_patterns = Column(String) entities = relationship( "Entity", secondary="config_to_entity_map", collection_class=attribute_mapped_collection('name')) def __init__(self, name, entities=None, default_path_patterns=None, session=None): self.name = name self.default_path_patterns = default_path_patterns self._default_path_patterns = json.dumps(default_path_patterns) if entities: for ent in entities: if session is not None: existing = (session.query(Config) .filter_by(name=ent['name']).first()) else: existing = None ent = existing or Entity(**ent) self.entities[ent.name] = ent if session is not None: session.add_all(list(self.entities.values())) session.commit() @reconstructor def _init_on_load(self): self.default_path_patterns = json.loads(self._default_path_patterns) @classmethod def load(self, config, session=None): if isinstance(config, str): config_paths = get_option('config_paths') if config in config_paths: config = config_paths[config] if not Path(config).exists(): raise ValueError("{} is not a valid path.".format(config)) else: with open(config, 'r') as f: config = json.load(f) if session is not None: result = session.query(Config).filter_by(name=config['name']).first() if result: return result return Config(session=session, **config) class BIDSFile(Base): __tablename__ = 'files' path = Column(String, primary_key=True) filename = Column(String) dirname = Column(String) entities = association_proxy("tags", "value") is_dir = Column(Boolean, index=True) class_ = Column(String(20)) _associations = relationship('BIDSFile', secondary='associations', primaryjoin='FileAssociation.dst == BIDSFile.path', secondaryjoin='FileAssociation.src == BIDSFile.path') __mapper_args__ 
= { 'polymorphic_on': class_, 'polymorphic_identity': 'file' } def __init__(self, filename): self.path = str(filename) self.filename = self._path.name self.dirname = str(self._path.parent) self.is_dir = not self.filename @property def _path(self): return Path(self.path) @property def _dirname(self): return Path(self.dirname) def __getattr__(self, attr): if not attr.startswith('_') and attr in self.entities: warnings.warn("Accessing entities as attributes is deprecated as " "of 0.7. Please use the .entities dictionary instead" " (i.e., .entities['%s'] instead of .%s." % (attr, attr)) return self.entities[attr] raise AttributeError("%s object has no attribute named %r" % (self.__class__.__name__, attr)) def __repr__(self): return "<{} filename='{}'>".format(self.__class__.__name__, self.path) def __fspath__(self): return self.path @property @lru_cache() def relpath(self): root = object_session(self).query(LayoutInfo).first().root return str(Path(self.path).relative_to(root)) def get_associations(self, kind=None, include_parents=False): if kind is None and not include_parents: return self._associations session = object_session(self) q = (session.query(BIDSFile) .join(FileAssociation, BIDSFile.path == FileAssociation.dst) .filter_by(src=self.path)) if kind is not None: q = q.filter_by(kind=kind) associations = q.all() if not include_parents: return associations def collect_associations(results, bidsfile): results.append(bidsfile) for p in bidsfile.get_associations('Child'): results = collect_associations(results, p) return results return list(chain(*[collect_associations([], bf) for bf in associations])) def get_metadata(self): md = BIDSMetadata(self.path) md.update(self.get_entities(metadata=True)) return md def get_entities(self, metadata=False, values='tags'): session = object_session(self) query = (session.query(Tag) .filter_by(file_path=self.path) .join(Entity)) if metadata not in (None, 'all'): query = query.filter(Tag.is_metadata == metadata) results = query.all() if values.startswith('obj'): return {t.entity_name: t.entity for t in results} return {t.entity_name: t.value for t in results} def copy(self, path_patterns, symbolic_link=False, root=None, conflicts='fail'): new_filename = build_path(self.entities, path_patterns) if not new_filename: return None if new_filename[-1] == os.sep: new_filename += self.filename if self._path.is_absolute() or root is None: path = self._path else: path = Path(root) / self._path if not path.exists(): raise ValueError("Target filename to copy/symlink (%s) doesn't " "exist." 
% path) kwargs = dict(path=new_filename, root=root, conflicts=conflicts) if symbolic_link: kwargs['link_to'] = path else: kwargs['copy_from'] = path write_to_file(**kwargs) class BIDSDataFile(BIDSFile): __mapper_args__ = { 'polymorphic_identity': 'data_file' } def get_df(self, include_timing=True, adjust_onset=False, enforce_dtypes=True, **pd_args): import pandas as pd import numpy as np if enforce_dtypes: dtype = { 'subject_id': str, 'session_id': str, 'participant_id': str } else: dtype = None suffix = self.entities['suffix'] header = None if suffix in {'physio', 'stim'} else 'infer' self.data = pd.read_csv(self.path, sep='\t', na_values='n/a', dtype=dtype, header=header, **pd_args) data = self.data.copy() if self.entities['extension'] == '.tsv.gz': md = self.get_metadata() data.columns = md['Columns'] if include_timing: onsets = np.arange(len(data)) / md['SamplingFrequency'] if adjust_onset: onsets += md['StartTime'] data.insert(0, 'onset', onsets) return data class BIDSImageFile(BIDSFile): __mapper_args__ = { 'polymorphic_identity': 'image_file' } def get_image(self, **kwargs): try: import nibabel as nb return nb.load(self.path, **kwargs) except Exception as e: raise ValueError("'{}' does not appear to be an image format " "NiBabel can read.".format(self.path)) from e class BIDSJSONFile(BIDSFile): __mapper_args__ = { 'polymorphic_identity': 'json_file' } def get_dict(self): d = json.loads(self.get_json()) if not isinstance(d, dict): raise ValueError("File %s is a json containing %s, not a dict which was expected" % (self.path, type(d))) return d def get_json(self): with open(self.path, 'r') as f: return f.read() class Entity(Base): __tablename__ = 'entities' name = Column(String, primary_key=True) mandatory = Column(Boolean, default=False) pattern = Column(String) directory = Column(String, nullable=True) _dtype = Column(String, default='str') files = association_proxy("tags", "value") def __init__(self, name, pattern=None, mandatory=False, directory=None, dtype='str'): self.name = name self.pattern = pattern self.mandatory = mandatory self.directory = directory if not isinstance(dtype, str): dtype = dtype.__name__ self._dtype = dtype self._init_on_load() @reconstructor def _init_on_load(self): if self._dtype not in ('str', 'float', 'int', 'bool'): raise ValueError("Invalid dtype '{}'. Must be one of 'int', " "'float', 'bool', or 'str'.".format(self._dtype)) self.dtype = eval(self._dtype) self.regex = re.compile(self.pattern) if self.pattern is not None else None def __iter__(self): for i in self.unique(): yield i def __deepcopy__(self, memo): cls = self.__class__ result = cls.__new__(cls) result._sa_instance_state = self._sa_instance_state memo[id(self)] = result for k, v in self.__dict__.items(): if k == '_sa_instance_state': continue new_val = getattr(self, k) if k == 'regex' else deepcopy(v, memo) setattr(result, k, new_val) return result
MIT License
fdalvi/neurox
neurox/analysis/corpus.py
get_top_words
python
def get_top_words(tokens, activations, neuron, num_tokens=0): MIN_THRESHOLD = 0.1 activation_values = [ sentence_activations[:, neuron] for sentence_activations in activations ] activation_values = np.concatenate(activation_values) tokens = [token for sentence in tokens["source"] for token in sentence] mean = np.mean(activation_values) std = np.std(activation_values) token_wise_scores = np.abs((activation_values - mean) / std) type_wise_scores_aggregation = defaultdict(lambda: (0, 0)) for idx, token in enumerate(tokens): curr_sum, curr_count = type_wise_scores_aggregation[token] type_wise_scores_aggregation[token] = ( curr_sum + token_wise_scores[idx], curr_count + 1, ) type_wise_scores = [ (k, v[0] / v[1]) for k, v in type_wise_scores_aggregation.items() ] max_score = max([s for _, s in type_wise_scores]) type_wise_scores = [(k, v / max_score) for k, v in type_wise_scores] sorted_types_scores = sorted(type_wise_scores, key=lambda x: -x[1]) sorted_types_scores = [ (k, v) for (k, v) in sorted_types_scores if v > MIN_THRESHOLD ] if num_tokens > 0: sorted_types_scores = sorted_types_scores[:num_tokens] return sorted_types_scores
Get top activating words for any given neuron. This method compares the activations of the given neuron across all tokens, and extracts tokens that account for the largest variance for that given neuron. It also returns a normalized score for each token, depicting their contribution to the overall variance. Parameters ---------- tokens : dict Dictionary containing at least one list with the key ``source``. Usually returned from ``data.loader.load_data`` activations : list of numpy.ndarray List of *sentence representations*, where each *sentence representation* is a numpy matrix of shape ``[num tokens in sentence x concatenated representation size]``. Usually returned from ``data.loader.load_activations`` neuron : int Index of the neuron relative to ``X`` num_tokens: int, optional Number of top tokens to return. Defaults to 0, which returns all tokens with a non-negligible contribution to the variance Returns ------- top_neurons : list of tuples List of tuples, where each tuple is a (token, score) element
https://github.com/fdalvi/neurox/blob/fe7d981508f2ab9bef9df3f923c3f813537bdd98/neurox/analysis/corpus.py#L12-L81
from collections import defaultdict import numpy as np
BSD 3-Clause New or Revised License
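A minimal usage sketch for `get_top_words`, using small synthetic inputs instead of the NeuroX data loaders; the tokens and activation values below are made up for illustration.

```python
import numpy as np
from neurox.analysis.corpus import get_top_words

# Two sentences, and one activation matrix per sentence of shape
# [num tokens in sentence x representation size].
tokens = {"source": [["the", "cat", "sat"], ["a", "cat", "ran"]]}
activations = [np.random.rand(3, 16), np.random.rand(3, 16)]

# Top-scoring token types for neuron 5, normalized to the maximum score.
top_words = get_top_words(tokens, activations, neuron=5, num_tokens=3)
print(top_words)  # e.g. [('cat', 1.0), ('ran', 0.81), ...]
```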
ueg1990/imgur-cli
imgur_cli/cli_api.py
cmd_album_delete
python
def cmd_album_delete(client, args): delete_album = client.album_delete(args.album_id) generate_output({'delete_album': delete_album})
Delete an album with a given ID. You are required to be logged in as the user to delete the album. For anonymous albums, {album} should be the deletehash that is returned at creation
https://github.com/ueg1990/imgur-cli/blob/359508a8806d9e583849f5de9115a5bbcd5e04b4/imgur_cli/cli_api.py#L312-L319
import os import imgurpython from imgur_cli import exceptions from imgur_cli.utils import (cli_arg, cli_subparser, data_fields, generate_output, format_comment_tree) from imgur_cli.utils import cli_subparser from imgur_cli.utils import data_fields from imgur_cli.utils import generate_output SUBPARSERS = {'gallery': 'Gallery subparser', 'album': 'Album subparser', 'image': 'Image subparser', 'comment': 'Comment subparser', 'memegen': 'Memegen subparser', 'account': 'Account subparser', 'conversation': 'Conversation subparser', 'notification': 'Notification subparser', 'auth': 'Authentication subparser'} @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_user(client, args): account_user = client.get_account(args.username) data = account_user.__dict__ generate_output({'account_user': data}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_gallery_favorites(client, args): gallery_favorites = client.get_gallery_favorites(args.username) data = [item.__dict__ for item in gallery_favorites] generate_output({'gallery_favorites': data}, args.output_file) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_favorites(client, args): account_favorites = client.get_account_favorites(args.username) data = [item.__dict__ for item in account_favorites] generate_output({'account_favorites': data}, args.output_file) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='The data paging number (defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_submissions(client, args): account_submissions = client.get_account_submissions(args.username, args.page) data = [item.__dict__ for item in account_submissions] generate_output({'account_submissions': data}, args.output_file) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_settings(client, args): account_settings = client.get_account_settings(args.username) data = account_settings.__dict__ generate_output({'account_settings': data}) @cli_subparser('account') @cli_arg('user', help='Username of Account') @cli_arg('--bio', metavar='<bio>', help='The biography of the user, ' 'is displayed in the gallery profile page') @cli_arg('--public-images', metavar='<public-images>', choices=['true', 'false'], help='Set the users images to private ' 'or public by default') @cli_arg('--messaging-enabled', metavar='<messaging-enabled>', choices=['true', 'false'], help='Allows the user to enable or ' 'disable private messages') @cli_arg('--album-privacy', metavar='<album-privacy>', choices=['public', 'hidden', 'secret'], help='public | hidden | secret - ' 'Sets the default privacy level of albums the users creates') @cli_arg('--accepted-gallery-terms', metavar='<accepted-gallery-terms>', choices=['true', 'false'], help='The user agreement to the Imgur ' 'Gallery terms') @cli_arg('--username', metavar='<username>', help='A valid Imgur username (between 4 and 63 alphanumeric characters)') def cmd_account_change_settings(client, args): fields = data_fields(args, client.allowed_account_fields) account_settings = client.change_account_settings(args.user, fields) 
generate_output({'account_settings': account_settings}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_verification_status(client, args): email_verification_status = client.get_email_verification_status(args.username) generate_output({'email_verification_status': email_verification_status}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_send_verification(client, args): verification_email = client.send_verification_email(args.username) generate_output({'verification_email': verification_email}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_albums(client, args): account_albums = client.get_account_albums(args.username, args.page) data = [item.__dict__ for item in account_albums] generate_output({'account_albums': data}, args.output_file) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') def cmd_account_album_ids(client, args): account_album_ids = client.get_account_album_ids(args.username, args.page) generate_output({'account_album_ids': account_album_ids}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_album_count(client, args): account_album_count = client.get_account_album_count(args.username) generate_output({'account_album_count': account_album_count}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--sort', default='newest', metavar='<sort>', choices=['best', 'worst', 'oldest', 'newest'], help='best | worst | oldest | newest - defaults to %(default)s') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') def cmd_account_comments(client, args): account_comments = client.get_account_comments(args.username, args.sort, args.page) data = format_comment_tree(account_comments) generate_output({'account_comments': data}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--sort', default='newest', metavar='<sort>', choices=['best', 'worst', 'oldest', 'newest'], help='best | worst | oldest | newest - defaults to %(default)s') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') def cmd_account_comment_ids(client, args): account_comment_ids = client.get_account_comment_ids(args.username, args.sort, args.page) generate_output({'account_comment_ids': account_comment_ids}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_comment_count(client, args): account_comment_count = client.get_account_comment_count(args.username) generate_output({'account_comment_count': account_comment_count}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_images(client, args): account_images = client.get_account_images(args.username, args.page) data = [item.__dict__ for item in account_images] generate_output({'account_images': data}, args.output_file) 
@cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') def cmd_account_image_ids(client, args): account_image_ids = client.get_account_image_ids(args.username, args.page) generate_output({'account_image_ids': account_image_ids}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_image_count(client, args): account_image_count = client.get_account_images_count(args.username) generate_output({'account_image_count': account_image_count}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') def cmd_album_id(client, args): album = client.get_album(args.album_id) data = album.__dict__ generate_output({'album': data}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_album_images(client, args): album_images = client.get_album_images(args.album_id) data = [item.__dict__ for item in album_images] generate_output({'album_images': data}, args.output_file) @cli_subparser('album') @cli_arg('--ids', metavar='<ids>', help='Comma separated list of image ids that you ' 'want to be included in the album; you have to be logged in as the user ' 'for adding the image ids') @cli_arg('--title', metavar='<title>', help='The title of the album') @cli_arg('--description', metavar='<description>', help='The description of the album') @cli_arg('--privacy', metavar='<privacy>', choices=['public', 'hidden', 'secret'], help="Sets the privacy level of the album." "Values are : public | hidden | secret." "Defaults to user's privacy settings for logged in users") @cli_arg('--layout', metavar='<layout>', choices=['blog', 'grid', 'horizontal', 'vertical'], help='Sets the layout to display the album. ' 'Values are : blog | grid | horizontal | vertical') @cli_arg('--cover', metavar='<cover>', help='The ID of an image that you want to be the cover of the album; ' 'you have to be logged in as the user') def cmd_album_create(client, args): fields = data_fields(args, client.allowed_album_fields) album = client.create_album(fields) generate_output({'album': album}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') @cli_arg('--ids', metavar='<ids>', help='Comma separated list of image ids that you ' 'want to be included in the album; you have to be logged in as the user ' 'for adding the image ids') @cli_arg('--title', metavar='<title>', help='The title of the album') @cli_arg('--description', metavar='<description>', help='The description of the album') @cli_arg('--privacy', metavar='<privacy>', choices=['public', 'hidden', 'secret'], help="Sets the privacy level of the album." "Values are : public | hidden | secret." "Defaults to user's privacy settings for logged in users") @cli_arg('--layout', metavar='<layout>', choices=['blog', 'grid', 'horizontal', 'vertical'], help='Sets the layout to display the album. ' 'Values are : blog | grid | horizontal | vertical') @cli_arg('--cover', metavar='<cover>', help='The ID of an image that you want to be the cover of the album; ' 'you have to be logged in as the user') def cmd_album_update(client, args): fields = data_fields(args, client.allowed_album_fields) album = client.update_album(args.album_id, fields) generate_output({'album': album}) @cli_subparser('album') @cli_arg('album_id', help='Album ID')
MIT License
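A hedged sketch of calling `cmd_album_delete` outside the CLI argument parser; the client credentials and album deletehash below are placeholders.

```python
import argparse
from imgurpython import ImgurClient

client = ImgurClient("CLIENT_ID", "CLIENT_SECRET")          # placeholder credentials
args = argparse.Namespace(album_id="anonymousDeleteHash")   # placeholder deletehash
cmd_album_delete(client, args)  # prints {"delete_album": ...} via generate_output
```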
futuresharks/invokust
invokust/aws_lambda/lambda_load_test.py
LambdaLoadTest.increase_lambda_execution_time
python
def increase_lambda_execution_time(self, time): with self.lock: self.lambda_total_execution_time += time
Add Lambda execution time to the total
https://github.com/futuresharks/invokust/blob/6cf8871a811b8efa2f977ec89ad9cd1543b53bf9/invokust/aws_lambda/lambda_load_test.py#L162-L167
import json import time import logging import threading from boto3.session import Session from botocore.client import Config logger = logging.getLogger(__name__) logging.getLogger("botocore").setLevel(logging.CRITICAL) session = Session() config = Config(connect_timeout=10, read_timeout=310) client = session.client("lambda", config=config) class LambdaLoadTest(object): def __init__( self, lambda_function_name, threads, ramp_time, time_limit, lambda_payload, lambda_timeout=300000, ): self.lock = threading.Lock() self.start_time = time.time() self.logger = logging.getLogger() self.threads = threads self.ramp_time = ramp_time self.time_limit = ( time_limit ) self.lambda_function_name = lambda_function_name self.lambda_payload = lambda_payload self.lambda_invocation_errors = 0 self.lambda_invocation_count = 0 self.lambda_invocation_error_threshold = 20 self.lambda_total_execution_time = 0 self.requests_fail = 0 self.request_fail_ratio_threshold = 0.5 self.requests_total = 0 self.locust_results = [] self.thread_data = {} self.print_stats_delay = 3 self.exit_threads = False self.lambda_timeout = lambda_timeout def update_thread_data(self, thread_id, key, value): with self.lock: if thread_id not in self.thread_data: self.thread_data[thread_id] = {} self.thread_data[thread_id][key] = value def get_thread_count(self): return len([t for t in threading.enumerate() if t.getName() != "MainThread"]) def get_time_elapsed(self): return round(time.time() - self.start_time) def increase_lambda_invocation_error(self): with self.lock: self.lambda_invocation_errors += 1 def increase_lambda_invocation_count(self): with self.lock: self.lambda_invocation_count += 1 def get_invocation_error_ratio(self): try: return self.lambda_invocation_errors / float(self.lambda_invocation_count) except ZeroDivisionError: return 0 def increase_requests_total(self, requests): with self.lock: self.requests_total += requests def increase_requests_fail(self, requests): with self.lock: self.requests_fail += requests def get_request_fail_ratio(self): try: return self.requests_fail / float(self.requests_total) except ZeroDivisionError: return 0 def append_locust_results(self, results): with self.lock: self.locust_results.append(results) def get_summary_stats(self): return { "lambda_invocation_count": self.lambda_invocation_count, "total_lambda_execution_time": self.lambda_total_execution_time, "requests_total": self.requests_total, "request_fail_ratio": self.get_request_fail_ratio(), "invocation_error_ratio": self.get_invocation_error_ratio(), } def get_stats(self): return { "thread_count": self.get_thread_count(), "rpm": self.calculate_rpm(), "time_elapsed": self.get_time_elapsed(), "requests_total": self.requests_total, "request_fail_ratio": self.get_request_fail_ratio(), "invocation_error_ratio": self.get_invocation_error_ratio(), } def get_locust_results(self): return self.locust_results
MIT License
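A small sketch of how the execution-time accumulator might be used; the constructor arguments are illustrative, and the module assumes AWS credentials are available because it builds a boto3 Lambda client at import time.

```python
test = LambdaLoadTest(
    lambda_function_name="locust-runner",            # hypothetical Lambda name
    threads=2,
    ramp_time=0,
    time_limit=60,
    lambda_payload={"host": "https://example.com"},  # illustrative payload
)

# Each worker thread adds the execution time reported by its invocation.
test.increase_lambda_execution_time(1450)
test.increase_lambda_execution_time(980)
print(test.get_summary_stats()["total_lambda_execution_time"])  # 2430
```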
cleancut/green
green/output.py
GreenStream.writelines
python
def writelines(self, lines): for line in lines: self.write(line)
Just for better compatibility with real file objects
https://github.com/cleancut/green/blob/55625649869d44f8c9577f5f10626b1cbdcc48ad/green/output.py#L181-L186
from __future__ import unicode_literals from colorama import init, deinit, Fore, Style from colorama.ansi import Cursor from colorama.initialise import wrap_stream import logging import os import platform import re import sys from unidecode import unidecode global debug_level debug_level = 0 if sys.version_info[0] == 3: text_type = str unicode = None else: text_type = unicode def debug(message, level=1): if level <= debug_level: logging.debug(" " * (level - 1) * 2 + str(message)) class Colors: def __init__(self, termcolor=None): if termcolor is None: self.termcolor = sys.stdout.isatty() else: self.termcolor = termcolor def wrap(self, text, style): if self.termcolor: return "%s%s%s" % (style, text, Style.RESET_ALL) else: return text def start_of_line(self): return "\r" def up(self, lines=1): return Cursor.UP(lines) def bold(self, text): return self.wrap(text, Style.BRIGHT) def blue(self, text): if platform.system() == "Windows": return self.wrap(text, Fore.CYAN) else: return self.wrap(text, Fore.BLUE) def green(self, text): return self.wrap(text, Fore.GREEN) def red(self, text): return self.wrap(text, Fore.RED) def yellow(self, text): return self.wrap(text, Fore.YELLOW) def passing(self, text): return self.green(text) def failing(self, text): return self.red(text) def error(self, text): return self.red(text) def skipped(self, text): return self.blue(text) def unexpectedSuccess(self, text): return self.yellow(text) def expectedFailure(self, text): return self.yellow(text) def moduleName(self, text): return self.bold(text) def className(self, text): return text class GreenStream(object): indent_spaces = 2 _ascii_only_output = False coverage_pattern = re.compile(r"TOTAL\s+\d+\s+\d+\s+(?P<percent>\d+)%") def __init__( self, stream, override_appveyor=False, disable_windows=False, disable_unidecode=False, ): self.disable_unidecode = disable_unidecode self.stream = stream on_windows = platform.system() == "Windows" on_windows_ci = os.environ.get("GITHUB_ACTIONS", False) or os.environ.get( "APPVEYOR", False ) if override_appveyor or ( (on_windows and not on_windows_ci) and not disable_windows ): self.stream = wrap_stream(self.stream, None, None, None, True) self._ascii_only_output = True self.closed = False try: self.encoding = stream.encoding except: self.encoding = "UTF-8" self.coverage_percent = None def flush(self): self.stream.flush() def writeln(self, text=""): self.write(text + "\n") def write(self, text): if type(text) == bytes: text = text.decode("utf-8") if self._ascii_only_output and not self.disable_unidecode: text = text_type(unidecode(text)) match = self.coverage_pattern.search(text) if match: percent_str = match.groupdict().get("percent") if percent_str: self.coverage_percent = int(percent_str) self.stream.write(text)
MIT License
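A quick sketch of `writelines` on a `GreenStream` wrapped around `sys.stdout`.

```python
import sys

stream = GreenStream(sys.stdout)
stream.writelines(["first line\n", "second line\n"])  # each line goes through write()
stream.flush()
```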
jweyn/dlwp
DLWP/data/era5.py
ERA5Reanalysis.__init__
python
def __init__(self, root_directory=None, file_id=''): self.raw_files = [] self.dataset_variables = [] self.dataset_levels = [] self.dataset_dates = None if root_directory is None: self._root_directory = '%s/.era5' % os.path.expanduser('~') else: self._root_directory = root_directory self._file_id = file_id self._delete_temp = False self.level_coord = [1, 2, 3, 5, 7, 10, 20, 30, 50, 70, 100, 125, 150, 175, 200, 225, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750] + list(range(775, 1001, 25)) self.inverse_lat = True self.Dataset = None self.basemap = None self._lat_array = None self._lon_array = None
Initialize an instance of the ERA5Reanalysis class. :param root_directory: str: local directory where raw files are stored. If None, defaults to ~/.era5 :param file_id: str: prepended to the processed file names. Useful if files for the same dates will be created with different parameters, i.e., hours or variables or levels.
https://github.com/jweyn/dlwp/blob/3f32bfab98eacee2abe880d5bd214b6060627edd/DLWP/data/era5.py#L93-L118
import os import warnings import itertools as it import numpy as np import netCDF4 as nc import pandas as pd import xarray as xr from datetime import datetime, timedelta try: import cdsapi except ImportError: warnings.warn("module 'cdsapi' not found; retrieval of ERA5 data unavailable.") def _check_exists(file_name, path=False): if os.path.exists(file_name): exists = True local_file = file_name else: exists = False local_file = None if path: return exists, local_file else: return exists def call_fetch(args): obj = args[0] obj._fetch(*args[1:]) netcdf_file_format = '' data_start_date = datetime(1979, 1, 1) data_end_date = datetime(2018, 12, 31) reforecast_start_date = datetime(1999, 1, 1) reforecast_end_date = datetime(2009, 12, 31, 18) fill_value = np.array(nc.default_fillvals['f4']).astype(np.float32) variable_names = { 'divergence': 'd', 'fraction_of_cloud_cover': 'cc', 'geopotential': 'z', 'ozone_mass_mixing_ratio': 'o3', 'potential_vorticity': 'pv', 'relative_humidity': 'r', 'specific_cloud_ice_water_content': 'ciwc', 'specific_cloud_liquid_water_content': 'clwc', 'specific_humidity': 'q', 'specific_rain_water_content': 'crwc', 'specific_snow_water_content': 'cswc', 'temperature': 't', 'u_component_of_wind': 'u', 'v_component_of_wind': 'v', 'vertical_velocity': 'w', 'vorticity': 'vo' } class ERA5Reanalysis(object):
MIT License
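A short construction sketch, assuming the package and its dependencies (netCDF4, xarray, and so on) are installed; the data directory and file prefix are hypothetical.

```python
from DLWP.data.era5 import ERA5Reanalysis

era = ERA5Reanalysis(root_directory="/data/era5", file_id="z500_t2m")
print(era.level_coord[:5])   # [1, 2, 3, 5, 7]
```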
edwardoughton/pysim5g
src/pysim5g/generate_hex.py
find_site_locations
python
def find_site_locations(site_area, interfering_site_areas): site_area_site = Polygon( site_area[0]['geometry']['coordinates'][0] ).centroid transmitter = [] transmitter.append({ 'type': 'Feature', 'geometry': mapping(site_area_site), 'properties': { 'site_id': 'transmitter' } }) interfering_transmitters = [] for interfering_site in interfering_site_areas: interfering_transmitters.append({ 'type': 'Feature', 'geometry': mapping(interfering_site['centroid']), 'properties': { 'site_id': interfering_site['properties']['site_id'] } }) return transmitter, interfering_transmitters
Get the centroid for each site area and interfering site areas. Parameters ---------- site_area : List of dicts Contains the geojson site area for the transmitter. interfering_site_areas : List of dicts Contains the geojson interfering site areas. Returns ------- transmitter : List of dicts Contains the geojson site location for the transmitter. interfering_site_areas : List of dicts Contains the geojson site locations for interfering sites.
https://github.com/edwardoughton/pysim5g/blob/b352151edb56f4bc686b69ecce8dbae6ff202854/src/pysim5g/generate_hex.py#L218-L261
import os import configparser import math import fiona from shapely.ops import transform from shapely.geometry import Point, mapping, shape, Polygon from functools import partial from rtree import index import pyproj from collections import OrderedDict def convert_point_to_projected_crs(point, original_crs, new_crs): project = partial( pyproj.transform, pyproj.Proj(original_crs), pyproj.Proj(new_crs) ) new_geom = transform(project, Point(point)) output = { 'type': 'Feature', 'geometry': mapping(new_geom), 'properties': 'Crystal Palace Radio Tower' } return output def calculate_polygons(startx, starty, endx, endy, radius): sl = (2 * radius) * math.tan(math.pi / 6) p = sl * 0.5 b = sl * math.cos(math.radians(30)) w = b * 2 h = 2 * sl startx = startx - w starty = starty - h endx = endx + w endy = endy + h origx = startx origy = starty xoffset = b yoffset = 3 * p polygons = [] row = 1 counter = 0 while starty < endy: if row % 2 == 0: startx = origx + xoffset else: startx = origx while startx < endx: p1x = startx p1y = starty + p p2x = startx p2y = starty + (3 * p) p3x = startx + b p3y = starty + h p4x = startx + w p4y = starty + (3 * p) p5x = startx + w p5y = starty + p p6x = startx + b p6y = starty poly = [ (p1x, p1y), (p2x, p2y), (p3x, p3y), (p4x, p4y), (p5x, p5y), (p6x, p6y), (p1x, p1y)] polygons.append(poly) counter += 1 startx += w starty += yoffset row += 1 return polygons def find_closest_site_areas(hexagons, geom_shape): idx = index.Index() for site in hexagons: coords = site['centroid'] idx.insert(0, coords.bounds, site) transmitter = mapping(geom_shape.centroid) site_area = list( idx.nearest( (transmitter['coordinates'][0], transmitter['coordinates'][1], transmitter['coordinates'][0], transmitter['coordinates'][1]), 1, objects='raw') )[0] closest_site_area_centroid = Polygon( site_area['geometry']['coordinates'][0] ).centroid all_closest_sites = list( idx.nearest( closest_site_area_centroid.bounds, 7, objects='raw') ) interfering_site_areas = all_closest_sites[1:7] site_area = [] site_area.append(all_closest_sites[0]) return site_area, interfering_site_areas
MIT License
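A runnable sketch of `find_site_locations` with hand-built geojson-style dicts; the geometry is an arbitrary square rather than the hexagons the rest of the module generates.

```python
from shapely.geometry import Polygon

ring = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
site_area = [{
    'type': 'Feature',
    'geometry': {'type': 'Polygon', 'coordinates': [ring]},
    'properties': {'site_id': 0},
}]
interfering_site_areas = [{
    'type': 'Feature',
    'geometry': {'type': 'Polygon', 'coordinates': [ring]},
    'centroid': Polygon(ring).centroid,   # centroid is required by the function
    'properties': {'site_id': 1},
}]

transmitter, interfering_transmitters = find_site_locations(site_area, interfering_site_areas)
print(transmitter[0]['geometry'])   # point at (50.0, 50.0)
```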
facebookresearch/compilergym
compiler_gym/bin/manual_env.py
CompilerGymShell.complete_set_benchmark
python
def complete_set_benchmark(self, text, line, begidx, endidx): return self.simple_complete(text, self.benchmarks)
Complete the set_benchmark argument
https://github.com/facebookresearch/compilergym/blob/00ae8c0d080da4d429f95398be1df01b5d6e7b71/compiler_gym/bin/manual_env.py#L357-L359
import cmd import random import readline import sys from itertools import islice from absl import app, flags from compiler_gym.envs import CompilerEnv from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags from compiler_gym.util.flags.env_from_flags import env_from_flags from compiler_gym.util.shell_format import emph from compiler_gym.util.tabulate import tabulate from compiler_gym.util.timer import Timer FLAGS = flags.FLAGS tutorial = "**************************".join( __doc__.split("**************************")[1:] ) class ActionHistoryElement: def __init__(self, action_name, action_index, observation, reward, done, info): self.action_name = action_name self.action_index = action_index self.observation = observation self.reward = reward self.done = done self.info = info def has_no_effect(self): return self.info.get("action_had_no_effect") def has_effect(self): return not self.has_no_effect() class CompilerGymShell(cmd.Cmd): intro = """Welcome to the CompilerGym Shell! --------------------------------- Type help or ? for more information. The 'tutorial' command will give a step by step guide.""" def __init__(self, env: CompilerEnv): super().__init__() self.env = env self.benchmarks = [] for dataset in self.env.datasets: self.benchmarks += islice(dataset.benchmark_uris(), 50) self.benchmarks.sort() for i, benchmark in enumerate(self.benchmarks): if benchmark.startswith("benchmark://"): self.benchmarks[i] = benchmark[len("benchmark://") :] self.observations = sorted(self.env.observation.spaces.keys()) self.rewards = sorted(self.env.reward.spaces.keys()) self.stack = [] self.set_prompt() def __del__(self): if self.env: self.env.close() self.env = None def do_tutorial(self, arg): print(tutorial) def preloop(self): self.old_completer_delims = readline.get_completer_delims() readline.set_completer_delims(" \t\n") def postloop(self): readline.set_completer_delims(self.old_completer_delims) self.stack.clear() self.env.close() self.env = None def set_prompt(self): benchmark_name = self.env.benchmark.uri if benchmark_name.startswith("benchmark://"): benchmark_name = benchmark_name[len("benchmark://") :] prompt = f"compiler_gym:{benchmark_name}>" self.prompt = f"\n{emph(prompt)} " def simple_complete(self, text, options): if text: return [opt for opt in options if opt.startswith(text)] else: return options def get_datasets(self): return sorted([k.name for k in self.env.datasets.datasets()]) def do_list_datasets(self, arg): print(", ".join(self.get_datasets())) def do_list_benchmarks(self, arg): print(", ".join(self.benchmarks))
MIT License
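A hedged sketch of exercising the completer directly; it assumes a working CompilerGym installation so an environment (and hence the shell's benchmark list) can be built, and the benchmark prefix is illustrative.

```python
import compiler_gym
from compiler_gym.bin.manual_env import CompilerGymShell

env = compiler_gym.make("llvm-v0")
shell = CompilerGymShell(env)
# Arguments mirror cmd.Cmd's completer signature: text, full line, begin/end indices.
print(shell.complete_set_benchmark("cbench", "set_benchmark cbench", 14, 20)[:3])
```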
humancellatlas/table-testing
distributed_test/filter_merge_lambda/matrix_map_reduce.py
increment_state_field
python
def increment_state_field(request_id, field_name, increment_size): return dynamo_utils.increment_field( CFN_VARS["state_table"], {"RequestId": request_id}, field_name, increment_size)
Increment a field in the state table. This is used to keep track of how many lambda executions have completed and are expected to complete.
https://github.com/humancellatlas/table-testing/blob/66af81f0162502660e92ae94edaaa76b48fa07ca/distributed_test/filter_merge_lambda/matrix_map_reduce.py#L96-L103
import datetime import json import os import uuid import boto3 from aws_xray_sdk.core import xray_recorder from aws_xray_sdk.core import patch patch(["boto3"]) import dynamo_utils CFN_VARS = { "mapper_fn": os.environ.get("MAPPER_FN"), "work_fn": os.environ.get("WORK_FN"), "reducer_fn": os.environ.get("REDUCER_FN"), "state_table": os.environ.get("STATE_TABLE"), "timing_table": os.environ.get("TIMING_TABLE"), "result_bucket": os.environ.get("RESULT_BUCKET") } LAMBDA_CLIENT = boto3.client("lambda", region_name="us-east-1") STATE_TABLE = boto3.resource("dynamodb", region_name="us-east-1").Table(CFN_VARS["state_table"]) TIMING_TABLE = boto3.resource("dynamodb", region_name="us-east-1").Table(CFN_VARS["timing_table"]) FORMAT_HANDLERS = {} try: import parquet_impl FORMAT_HANDLERS["parquet"] = { "driver": parquet_impl.driver, "mapper": parquet_impl.mapper, "work": parquet_impl.work, "reducer": parquet_impl.reducer } except ImportError: pass try: import zarr_impl FORMAT_HANDLERS["zarr"] = { "driver": zarr_impl.driver, "mapper": zarr_impl.mapper, "work": zarr_impl.work, "reducer": zarr_impl.reducer } except ImportError: pass
MIT License
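A hedged sketch; it assumes the STATE_TABLE environment variable points at a real DynamoDB table and that AWS credentials are configured, since the module creates boto3 resources at import time. The request ID and field name are placeholders.

```python
request_id = "7f3d9c2e-demo"   # hypothetical request UUID from the driver
increment_state_field(request_id, "completed_mappers", 1)   # one more mapper finished
```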
jhl-hust/ibcln
util/util.py
tensor2im
python
def tensor2im(input_image, imtype=np.uint8): if not isinstance(input_image, np.ndarray): if isinstance(input_image, torch.Tensor): image_tensor = input_image.data else: return input_image image_numpy = image_tensor[0].cpu().float().numpy() if image_numpy.shape[0] == 1: image_numpy = np.tile(image_numpy, (3, 1, 1)) image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0 else: image_numpy = input_image return image_numpy.astype(imtype)
Converts a Tensor array into a numpy image array. Parameters: input_image (tensor) -- the input image tensor array imtype (type) -- the desired type of the converted numpy array
https://github.com/jhl-hust/ibcln/blob/66056ffd83e873536cf26b76fa9532a40cbfa7fa/util/util.py#L9-L33
from __future__ import print_function import torch import numpy as np from PIL import Image import os
BSD 2-Clause Simplified License
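A small sketch converting a random tensor; note that this version of the function simply scales by 255, so the input is assumed to already lie in [0, 1].

```python
import torch

fake = torch.rand(1, 3, 64, 64)   # batch of one RGB image with values in [0, 1]
img = tensor2im(fake)
print(img.shape, img.dtype)       # (64, 64, 3) uint8
```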
microsoft/azure-devops-python-api
azure-devops/azure/devops/v6_0/core/core_client.py
CoreClient.queue_create_project
python
def queue_create_project(self, project_to_create): content = self._serialize.body(project_to_create, 'TeamProject') response = self._send(http_method='POST', location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1', version='6.0-preview.4', content=content) return self._deserialize('OperationReference', response)
QueueCreateProject. [Preview API] Queues a project to be created. Use the [GetOperation](../../operations/operations/get) to periodically check for create project status. :param :class:`<TeamProject> <azure.devops.v6_0.core.models.TeamProject>` project_to_create: The project to create. :rtype: :class:`<OperationReference> <azure.devops.v6_0.core.models.OperationReference>`
https://github.com/microsoft/azure-devops-python-api/blob/451cade4c475482792cbe9e522c1fee32393139e/azure-devops/azure/devops/v6_0/core/core_client.py#L247-L258
 from msrest import Serializer, Deserializer from ...client import Client from . import models class CoreClient(Client): def __init__(self, base_url=None, creds=None): super(CoreClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = '79134c72-4a58-4b42-976c-04e7115f32bf' def remove_project_avatar(self, project_id): route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') self._send(http_method='DELETE', location_id='54b2a2a0-859b-4d05-827c-ec4c862f641a', version='6.0-preview.1', route_values=route_values) def set_project_avatar(self, avatar_blob, project_id): route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') content = self._serialize.body(avatar_blob, 'ProjectAvatar') self._send(http_method='PUT', location_id='54b2a2a0-859b-4d05-827c-ec4c862f641a', version='6.0-preview.1', route_values=route_values, content=content) def create_connected_service(self, connected_service_creation_data, project_id): route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') content = self._serialize.body(connected_service_creation_data, 'WebApiConnectedServiceDetails') response = self._send(http_method='POST', location_id='b4f70219-e18b-42c5-abe3-98b07d35525e', version='6.0-preview.1', route_values=route_values, content=content) return self._deserialize('WebApiConnectedService', response) def get_connected_service_details(self, project_id, name): route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') response = self._send(http_method='GET', location_id='b4f70219-e18b-42c5-abe3-98b07d35525e', version='6.0-preview.1', route_values=route_values) return self._deserialize('WebApiConnectedServiceDetails', response) def get_connected_services(self, project_id, kind=None): route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') query_parameters = {} if kind is not None: query_parameters['kind'] = self._serialize.query('kind', kind, 'str') response = self._send(http_method='GET', location_id='b4f70219-e18b-42c5-abe3-98b07d35525e', version='6.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[WebApiConnectedService]', self._unwrap_collection(response)) def get_team_members_with_extended_properties(self, project_id, team_id, top=None, skip=None): route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') if team_id is not None: route_values['teamId'] = self._serialize.url('team_id', team_id, 'str') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') response = self._send(http_method='GET', location_id='294c494c-2600-4d7e-b76c-3dd50c3c95be', version='6.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TeamMember]', self._unwrap_collection(response)) def get_process_by_id(self, process_id): 
route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') response = self._send(http_method='GET', location_id='93878975-88c5-4e6a-8abb-7ddd77a8a7d8', version='6.0-preview.1', route_values=route_values) return self._deserialize('Process', response) def get_processes(self): response = self._send(http_method='GET', location_id='93878975-88c5-4e6a-8abb-7ddd77a8a7d8', version='6.0-preview.1') return self._deserialize('[Process]', self._unwrap_collection(response)) def get_project_collection(self, collection_id): route_values = {} if collection_id is not None: route_values['collectionId'] = self._serialize.url('collection_id', collection_id, 'str') response = self._send(http_method='GET', location_id='8031090f-ef1d-4af6-85fc-698cd75d42bf', version='6.0-preview.2', route_values=route_values) return self._deserialize('TeamProjectCollection', response) def get_project_collections(self, top=None, skip=None): query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') response = self._send(http_method='GET', location_id='8031090f-ef1d-4af6-85fc-698cd75d42bf', version='6.0-preview.2', query_parameters=query_parameters) return self._deserialize('[TeamProjectCollectionReference]', self._unwrap_collection(response)) def get_project(self, project_id, include_capabilities=None, include_history=None): route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') query_parameters = {} if include_capabilities is not None: query_parameters['includeCapabilities'] = self._serialize.query('include_capabilities', include_capabilities, 'bool') if include_history is not None: query_parameters['includeHistory'] = self._serialize.query('include_history', include_history, 'bool') response = self._send(http_method='GET', location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1', version='6.0-preview.4', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TeamProject', response) def get_projects(self, state_filter=None, top=None, skip=None, continuation_token=None, get_default_team_image_url=None): query_parameters = {} if state_filter is not None: query_parameters['stateFilter'] = self._serialize.query('state_filter', state_filter, 'str') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if get_default_team_image_url is not None: query_parameters['getDefaultTeamImageUrl'] = self._serialize.query('get_default_team_image_url', get_default_team_image_url, 'bool') response = self._send(http_method='GET', location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1', version='6.0-preview.4', query_parameters=query_parameters) return self._deserialize('[TeamProjectReference]', self._unwrap_collection(response))
MIT License
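A hedged sketch of queuing a project creation; the organization URL, personal access token, and process template GUID are placeholders, not working values.

```python
from azure.devops.connection import Connection
from azure.devops.v6_0.core.models import TeamProject
from msrest.authentication import BasicAuthentication

connection = Connection(base_url="https://dev.azure.com/my-org",
                        creds=BasicAuthentication("", "MY_PAT"))
core_client = connection.clients_v6_0.get_core_client()

project = TeamProject(
    name="SampleProject",
    description="Created through the REST client",
    visibility="private",
    capabilities={
        "versioncontrol": {"sourceControlType": "Git"},
        # Placeholder GUID; use the process template ID from your organization.
        "processTemplate": {"templateTypeId": "00000000-0000-0000-0000-000000000000"},
    },
)
operation = core_client.queue_create_project(project)
print(operation.id, operation.status)   # poll the operations API until it succeeds
```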
elephantmipt/bert-distillation
src/experiment.py
Experiment.get_datasets
python
def get_datasets( self, stage: str, path_to_data: str, train_filename: str, valid_filename: str, max_sequence_length: int, text_field: str, model_name: str, **kwargs, ): datasets = OrderedDict() path_to_data = Path(path_to_data) train_df = pd.read_csv(path_to_data / train_filename) valid_df = pd.read_csv(path_to_data / valid_filename) data_params = dict(self.stages_config[stage]["data_params"]) model_name = data_params["model_name"] tokenizer = AutoTokenizer.from_pretrained(model_name) train_dataset = LanguageModelingDataset( texts=train_df[text_field], max_seq_length=max_sequence_length, tokenizer=tokenizer, sort=False, lazy=True, ) valid_dataset = LanguageModelingDataset( texts=valid_df[text_field], max_seq_length=max_sequence_length, tokenizer=tokenizer, sort=False, lazy=True, ) datasets["train"] = train_dataset datasets["valid"] = valid_dataset return datasets
@TODO: Docs. Contribution is welcome
https://github.com/elephantmipt/bert-distillation/blob/e414df9d913ff9c1ba971bb9f72b537edffdd039/src/experiment.py#L67-L111
from typing import Dict, Union from collections import OrderedDict import logging from pathlib import Path from catalyst.dl import ConfigExperiment, utils from catalyst.tools.typing import Model, Optimizer import pandas as pd from src.data import LanguageModelingDataset from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.data.data_collator import DataCollatorForLanguageModeling logging.getLogger("transformers.tokenization_utils").setLevel(logging.ERROR) class Experiment(ConfigExperiment): def __init__(self, config: Dict): super().__init__(config) self.config = config def get_transforms(self, stage: str = None, mode: str = None): return [] def get_optimizer( self, stage: str, model: Union[Model, Dict[str, Model]] ) -> Union[Optimizer, Dict[str, Optimizer]]: optimizer_params = self.stages_config[stage].get( "optimizer_params", {} ) key_value_flag = optimizer_params.pop("_key_value", False) if key_value_flag: optimizer = {} for key, params_ in optimizer_params.items(): optimizer_key = "optimizer_key" assert optimizer_key not in params_, "keyword reserved" params_[optimizer_key] = key optimizer[key] = self._get_optimizer( stage, model["student"], **params_ ) else: optimizer = self._get_optimizer( stage, model["student"], **optimizer_params ) return optimizer
MIT License
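For orientation, a hedged sketch of the keyword arguments this method expects to find under the stage's `data_params`; the file names and values are illustrative, not taken from the repository's configs.

```python
data_params = {
    "path_to_data": "data",              # directory containing the CSV files
    "train_filename": "train.csv",
    "valid_filename": "valid.csv",
    "text_field": "text",                # column holding the raw sentences
    "max_sequence_length": 128,
    "model_name": "bert-base-uncased",   # tokenizer checkpoint
}
```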
google/upvote
upvote/gae/lib/bit9/scripts/build_from_docs.py
_FindFromNext
python
def _FindFromNext(start_tag, search_tag): tag = start_tag.next_sibling while tag is not None: if isinstance(tag, bs4.element.Tag): if tag.name == search_tag: return tag, tag child_matches = tag.find_all(search_tag) if child_matches: return tag, child_matches[0] tag = tag.next_sibling if tag is None: return None, None
Finds a sibling that either is or contains a given tag. Because the REST API HTML in bedlam is non-hierarchical (i.e. blocks are flat, not nested), we need to search through all sibling nodes in the tree instead of child nodes. Args: start_tag: bs4.element.Tag, The tag from which siblings should be searched. search_tag: str, The name of the tag to look for. Returns: (parent_tag, target_tag), both bs4.element.Tags. parent_tag is the sibling tag of start_tag that encloses the tag matching search_tag. target_tag is the tag that matches the search_tag type exactly. parent_tag can be the same as target_tag if it's the one matching search_tag. If the search_tag is not found, (None, None) is returned.
https://github.com/google/upvote/blob/0b4477d40676a46ad58aaa7e14f9b13770d55c0c/upvote/gae/lib/bit9/scripts/build_from_docs.py#L57-L89
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import bs4 from upvote.gae.lib.bit9 import utils from absl import app from absl import flags FLAGS = flags.FLAGS flags.DEFINE_string( 'docs_path', None, 'The path to the HTML file containing the Bit9 REST API documentation.') flags.DEFINE_list( 'objects_to_output', [u'approvalRequest', u'fileInstance', u'fileCatalog', u'publisher', u'fileRule', u'certificate', u'policy', u'event', u'computer'], 'The list of Bit9 API object names for which models should be generated.') _TYPE_TO_PROPERTY = { 'String': 'StringProperty', 'DateTime': 'DateTimeProperty', 'Int16': 'Int16Property', 'Int32': 'Int32Property', 'Int64': 'Int64Property', 'Boolean': 'BooleanProperty', 'Decimal': 'DecimalProperty', 'Double': 'DoubleProperty', }
Apache License 2.0
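A runnable sketch of `_FindFromNext` on a tiny hand-written HTML fragment that mimics the flat documentation structure the docstring describes.

```python
import bs4

html = ("<h2>approvalRequest</h2><p>intro text</p>"
        "<div><table><tr><td>id</td></tr></table></div>")
soup = bs4.BeautifulSoup(html, "html.parser")
start = soup.find("h2")

# The table is not nested under the heading, only a later sibling contains it.
parent, table = _FindFromNext(start, "table")
print(parent.name, table.name)   # div table
```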
pyansys/pyaedt
pyaedt/modeler/PrimitivesCircuit.py
CircuitComponents.__getitem__
python
def __getitem__(self, partname): if type(partname) is int: return self.components[partname] for el in self.components: if self.components[el].name == partname or self.components[el].composed_name == partname or el == partname: return self.components[el] return None
Retrieve a part. Parameters ---------- partname : int or str Part ID or part name. Returns ------- type Part object details.
https://github.com/pyansys/pyaedt/blob/817c7d706a2d10942470ccac959645e16e9ea971/pyaedt/modeler/PrimitivesCircuit.py#L16-L35
import random import warnings from collections import defaultdict from ..generic.general_methods import aedt_exception_handler, retry_ntimes from .Object3d import CircuitComponent class CircuitComponents(object): @aedt_exception_handler
MIT License
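A hedged sketch of the two lookup styles `__getitem__` supports; `components` is assumed to be an already-populated `CircuitComponents` instance from a live design, and the part names/IDs are made up.

```python
part_by_name = components["R1"]   # string keys match name or composed_name
part_by_id = components[12]       # integer keys index self.components directly
if part_by_name is not None:
    print(part_by_name.composed_name)
```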
alttch/finac
finac/__init__.py
ls
python
def ls(account=None, asset=None, tp=None, passive=None, start=None, end=None, tag=None, pending=True, hide_empty=False, order_by=['tp', 'asset', 'account', 'balance'], group_by=None, base=None): if account and account.find('%') != -1: code = account account = None else: code = None if account: result = account_statement_summary( account=account, start=start if start else datetime.datetime.today().replace( day=1, hour=0, minute=0, second=0, microsecond=0).timestamp(), end=end, tag=tag, pending=pending, datefmt=True) stmt = result['statement'].copy() acc_info = account_info(account=account) precision = asset_precision(asset=acc_info['asset']) for i, r in enumerate(stmt): r = r.copy() del r['is_completed'] r['amount'] = format_money(r['amount'], precision) stmt[i] = r ft = rapidtables.format_table( stmt, fmt=rapidtables.FORMAT_GENERATOR, align=(rapidtables.ALIGN_LEFT, rapidtables.ALIGN_RIGHT, rapidtables.ALIGN_LEFT, rapidtables.ALIGN_LEFT, rapidtables.ALIGN_LEFT, rapidtables.ALIGN_LEFT, rapidtables.ALIGN_LEFT)) rcur = base.upper() if base else acc_info['asset'] if ft: h, tbl = ft neotermcolor.cprint(h, '@finac:title') neotermcolor.cprint('-' * len(h), '@finac:separator') for t, s in zip(tbl, result['statement']): neotermcolor.cprint( t, '@finac:credit' if s['amount'] < 0 else '@finac:debit', attrs='') neotermcolor.cprint('-' * len(h), '@finac:separator') print('Debit turnover: ', end='') neotermcolor.cprint(format_money(result['debit'], precision), style='finac:debit_sum', end=', ') print('credit turnover: ', end='') neotermcolor.cprint(format_money(result['credit'], precision), style='finac:credit_sum') print() if base: precision = asset_precision(base) print('Net profit/loss: ', end='') pl = result['debit'] - result['credit'] if base: pl = pl * asset_rate(acc_info['asset'], base, date=end) neotermcolor.cprint('{} {}'.format(format_money(pl, precision), rcur), attrs='bold', end='') print(', balance', end='') else: print('Balance', end='') balance = account_balance(account=account, date=end) if base: balance = balance * asset_rate(acc_info['asset'], base, date=end) print('{}: '.format(' to date' if end else ''), end='') neotermcolor.cprint('{} {}'.format(format_money(balance, precision), rcur), attrs='bold', end='') print() else: if not base: base = config.base_asset base = base.upper() result = account_list_summary(asset=asset, tp=tp, passive=passive, code=code, date=end, order_by=order_by, group_by=group_by, hide_empty=hide_empty, base=base) if not group_by: kf = 'accounts' rt_align = (rapidtables.ALIGN_LEFT, rapidtables.ALIGN_LEFT, rapidtables.ALIGN_CENTER, rapidtables.ALIGN_CENTER, rapidtables.ALIGN_RIGHT, rapidtables.ALIGN_RIGHT) elif group_by == 'asset': kf = 'assets' rt_align = (rapidtables.ALIGN_LEFT, rapidtables.ALIGN_RIGHT, rapidtables.ALIGN_RIGHT) else: kf = 'account_types' rt_align = (rapidtables.ALIGN_LEFT, rapidtables.ALIGN_RIGHT) res = result[kf] data = res.copy() bcp = asset_precision(asset=base) for i, r in enumerate(res): r = r.copy() if group_by not in ['type', 'tp']: r['balance'] = format_money(r['balance'], asset_precision(asset=r['asset'])) r['balance ' + base] = format_money(r['balance_bc'], bcp) del r['balance_bc'] if not group_by: del r['note'] if 'passive' in r: r['passive'] = 'P' if r['passive'] else '' res[i] = r ft = rapidtables.format_table(res, fmt=rapidtables.FORMAT_GENERATOR, align=rt_align) if not ft: return h, tbl = ft neotermcolor.cprint(h, '@finac:title') neotermcolor.cprint('-' * len(h), '@finac:separator') for t, s in zip(tbl, data): if s.get('passive'): 
style = 'finac:passive' else: style = 'finac:credit' if s['balance_bc'] < 0 else None neotermcolor.cprint(t, style=style, attrs='') neotermcolor.cprint('-' * len(h), '@finac:separator') neotermcolor.cprint('Total: ', end='') neotermcolor.cprint('{} {}'.format(format_money(result['total'], bcp), base), style='finac:sum') print()
Primary interactive function. Prints account statement if account code is specified, otherwise prints summary for all accounts. Account code may contain '%' symbol as a wildcard. Args: account: account code asset: filter by asset code tp: filter by account type (or types) passive: list passive, active or all (if None) accounts start: start date (for statement), default: first day of current month end: end date (or balance date for summary) tag: filter transactions by tag (for statement) pending: include pending transactions hide_empty: hide empty accounts (for summary) order_by: column ordering (ordering by base is not supported) base: specify base asset
https://github.com/alttch/finac/blob/956dd3bda113cb65d57d6ffa5568440a87656177/finac/__init__.py#L149-L309
__author__ = 'Altertech, https://www.altertech.com/' __copyright__ = 'Copyright (C) 2019 Altertech' __license__ = 'MIT' __version__ = '0.5.7' import rapidtables import neotermcolor import datetime, time from functools import partial from collections import OrderedDict from finac.core import init, config from finac.core import ResourceNotFound, RateNotFound from finac.core import OverdraftError, OverlimitError from finac.core import ResourceAlreadyExists from finac.core import asset_create, asset_delete from finac.core import asset_set_rate, asset_rate from finac.core import asset_delete_rate, asset_rate_range from finac.core import asset_update from finac.core import asset_precision from finac.core import asset_list, asset_list_rates from finac.core import account_create, account_delete from finac.core import account_info from finac.core import account_update from finac.core import archive_transactions from finac.core import transaction_create, transaction_complete from finac.core import transaction_move, transaction_delete from finac.core import transaction_copy from finac.core import transaction_update, transaction_apply from finac.core import account_credit, account_debit, account_balance from finac.core import account_balance_range from finac.core import account_statement, account_statement_summary from finac.core import account_list, account_list_summary from finac.core import purge, transaction_purge, cleanup from finac.core import preload, exec_query from finac.plot import account_plot as plot from finac.plot import account_pie as pie from finac.df import df from finac.core import parse_number, parse_date, get_version tr = transaction_create tc = transaction_copy mv = transaction_move rm = transaction_delete cp = transaction_copy apply = transaction_apply complete = transaction_complete rate = asset_rate stmt = account_statement_summary def query(q): t_start = time.time() result = list(exec_query(q)) t_spent = time.time() - t_start ft = rapidtables.format_table(result, fmt=rapidtables.FORMAT_GENERATOR) if ft: h, tbl = ft neotermcolor.cprint(h, '@finac:title') neotermcolor.cprint('-' * len(h), '@finac:separator') for t in tbl: print(t) neotermcolor.cprint('-' * len(h), '@finac:separator') print(f'SELECT {len(result)}') print(f'Time: {t_spent:.3f}s') def check_version(warn=False): core_version = get_version() if __version__ != core_version: if warn: print('WARNING: client version: {}, core version: {}'.format( __version__, core_version)) return False else: return True def balance(account=None, asset=None, tp=None, passive=None, base=None, date=None): if account and account.find('%') == -1: return account_balance(account, tp=tp, base=base, date=date) else: return account_list_summary(asset=asset, tp=tp, passive=passive, code=account, date=date, base=base)['total'] balance_range = partial(account_balance_range, return_timestamp=False) def format_money(amnt, precision=2): return ('{:,.' + str(precision) + 'f}').format(amnt).replace(',', ' ') neotermcolor.set_style('finac:title', color='blue') neotermcolor.set_style('finac:separator', color='grey') neotermcolor.set_style('finac:sum', attrs='bold') neotermcolor.set_style('finac:debit', color='green') neotermcolor.set_style('finac:credit', color='red') neotermcolor.set_style('finac:passive', color='magenta') neotermcolor.set_style('finac:debit_sum', color='green', attrs='bold') neotermcolor.set_style('finac:credit_sum', color='red', attrs='bold')
MIT License
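A minimal usage sketch of the format_money helper defined in the finac module context above; it only assumes the function is copied as shown (no finac installation is needed to run it).

def format_money(amnt, precision=2):
    # format with comma thousands separators, then swap commas for spaces,
    # matching the helper shown in the finac __init__ module above
    return ('{:,.' + str(precision) + 'f}').format(amnt).replace(',', ' ')

print(format_money(1234567.891))       # 1 234 567.89
print(format_money(1234567.891, 0))    # 1 234 568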
kengz/slm-lab
slm_lab/agent/__init__.py
Body.get_mean_lr
python
def get_mean_lr(self):
    if not hasattr(self.agent.algorithm, 'net_names'):
        return np.nan
    lrs = []
    for attr, obj in self.agent.algorithm.__dict__.items():
        if attr.endswith('lr_scheduler'):
            lrs.append(obj.get_lr())
    return np.mean(lrs)
Gets the average current learning rate of the algorithm's nets.
https://github.com/kengz/slm-lab/blob/faca82c00c51a993e1773e115d5528ffb7ad4ade/slm_lab/agent/__init__.py#L192-L200
from slm_lab.agent import algorithm, memory from slm_lab.agent.algorithm import policy_util from slm_lab.agent.net import net_util from slm_lab.lib import logger, util, viz from slm_lab.lib.decorator import lab_api from torch.utils.tensorboard import SummaryWriter import numpy as np import os import pandas as pd import pydash as ps import torch import warnings logger = logger.get_logger(__name__) class Agent: def __init__(self, spec, body, global_nets=None): self.spec = spec self.agent_spec = spec['agent'][0] self.name = self.agent_spec['name'] assert not ps.is_list(global_nets), f'single agent global_nets must be a dict, got {global_nets}' self.body = body body.agent = self MemoryClass = getattr(memory, ps.get(self.agent_spec, 'memory.name')) self.body.memory = MemoryClass(self.agent_spec['memory'], self.body) AlgorithmClass = getattr(algorithm, ps.get(self.agent_spec, 'algorithm.name')) self.algorithm = AlgorithmClass(self, global_nets) logger.info(util.self_desc(self)) @lab_api def act(self, state): with torch.no_grad(): action = self.algorithm.act(state) return action @lab_api def update(self, state, action, reward, next_state, done): self.body.update(state, action, reward, next_state, done) if util.in_eval_lab_mode(): return self.body.memory.update(state, action, reward, next_state, done) loss = self.algorithm.train() if not np.isnan(loss): self.body.loss = loss explore_var = self.algorithm.update() return loss, explore_var @lab_api def save(self, ckpt=None): if util.in_eval_lab_mode(): return self.algorithm.save(ckpt=ckpt) @lab_api def close(self): self.save() class Body: def __init__(self, env, spec, aeb=(0, 0, 0)): self.agent = None self.env = env self.spec = spec self.a, self.e, self.b = self.aeb = aeb self.explore_var = np.nan self.entropy_coef = np.nan self.loss = np.nan self.mean_entropy = np.nan self.mean_grad_norm = np.nan self.best_total_reward_ma = -np.inf self.total_reward_ma = np.nan self.train_df = pd.DataFrame(columns=[ 'epi', 't', 'wall_t', 'opt_step', 'frame', 'fps', 'total_reward', 'total_reward_ma', 'loss', 'lr', 'explore_var', 'entropy_coef', 'entropy', 'grad_norm']) if util.in_train_lab_mode() and self.spec['meta']['resume']: train_df_filepath = util.get_session_df_path(self.spec, 'train') if os.path.exists(train_df_filepath): self.train_df = util.read(train_df_filepath) self.env.clock.load(self.train_df) if self.spec['meta']['rigorous_eval']: self.eval_df = self.train_df.copy() else: self.eval_df = self.train_df self.observation_space = self.env.observation_space self.action_space = self.env.action_space self.observable_dim = self.env.observable_dim self.state_dim = self.observable_dim['state'] self.action_dim = self.env.action_dim self.is_discrete = self.env.is_discrete self.action_type = policy_util.get_action_type(self.action_space) self.action_pdtype = ps.get(spec, f'agent.{self.a}.algorithm.action_pdtype') if self.action_pdtype in (None, 'default'): self.action_pdtype = policy_util.ACTION_PDS[self.action_type][0] self.ActionPD = policy_util.get_action_pd_cls(self.action_pdtype, self.action_type) def update(self, state, action, reward, next_state, done): if util.get_lab_mode() == 'dev': self.track_tensorboard(action) def __str__(self): class_attr = util.get_class_attr(self) class_attr.pop('spec') return f'body: {util.to_json(class_attr)}' def calc_df_row(self, env): frame = self.env.clock.frame wall_t = self.env.clock.wall_t fps = 0 if wall_t == 0 else frame / wall_t with warnings.catch_warnings(): warnings.filterwarnings('ignore') total_reward = 
np.nanmean(env.total_reward) if net_util.to_check_train_step(): grad_norms = net_util.get_grad_norms(self.agent.algorithm) self.mean_grad_norm = np.nan if ps.is_empty(grad_norms) else np.mean(grad_norms) row = pd.Series({ 'epi': self.env.clock.epi, 't': env.clock.t, 'wall_t': wall_t, 'opt_step': self.env.clock.opt_step, 'frame': frame, 'fps': fps, 'total_reward': total_reward, 'total_reward_ma': np.nan, 'loss': self.loss, 'lr': self.get_mean_lr(), 'explore_var': self.explore_var, 'entropy_coef': self.entropy_coef if hasattr(self, 'entropy_coef') else np.nan, 'entropy': self.mean_entropy, 'grad_norm': self.mean_grad_norm, }, dtype=np.float32) assert all(col in self.train_df.columns for col in row.index), f'Mismatched row keys: {row.index} vs df columns {self.train_df.columns}' return row def ckpt(self, env, df_mode): row = self.calc_df_row(env) df = getattr(self, f'{df_mode}_df') df.loc[len(df)] = row df.iloc[-1]['total_reward_ma'] = total_reward_ma = df[-viz.PLOT_MA_WINDOW:]['total_reward'].mean() df.drop_duplicates('frame', inplace=True) self.total_reward_ma = total_reward_ma
MIT License
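A self-contained sketch of the attribute-scan pattern get_mean_lr uses above: every attribute whose name ends in 'lr_scheduler' is polled for its current learning rate and the results are averaged. FakeScheduler and FakeAlgorithm are made-up stand-ins for the real PyTorch schedulers and SLM Lab algorithm, used only so the snippet runs on its own.

import numpy as np

class FakeScheduler:
    # hypothetical stand-in exposing get_lr() like a torch lr_scheduler
    def __init__(self, lr):
        self._lr = lr
    def get_lr(self):
        return self._lr

class FakeAlgorithm:
    net_names = ['net']
    def __init__(self):
        self.net_lr_scheduler = FakeScheduler(0.01)
        self.critic_lr_scheduler = FakeScheduler(0.001)

def get_mean_lr(algorithm):
    if not hasattr(algorithm, 'net_names'):
        return np.nan
    lrs = [obj.get_lr() for attr, obj in algorithm.__dict__.items()
           if attr.endswith('lr_scheduler')]
    return np.mean(lrs)

print(get_mean_lr(FakeAlgorithm()))  # ~0.0055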
frawau/aiolifx
aiolifx/aiolifx.py
Device.get_version
python
def get_version(self, callb=None):
    if self.vendor is None:
        mypartial = partial(self.resp_set_version)
        if callb:
            mycallb = lambda x, y: (mypartial(y), callb(x, y))
        else:
            mycallb = lambda x, y: mypartial(y)
        response = self.req_with_resp(GetVersion, StateVersion, callb=mycallb)
    return (self.host_firmware_version, self.host_firmware_build_timestamp)
Convenience method to request the version from the device. This method will check whether the value has already been retrieved from the device; if so, it will simply return it. If not, it will request the information from the device and arrange for callb to be executed when a response is received. The default callback simply caches the value.
:param callb: Callable to be used when the response is received. If not set, self.resp_set_version will be used.
:type callb: callable
:returns: The cached value
:rtype: str
https://github.com/frawau/aiolifx/blob/7ba0a3775e273ac878f286e9685939f8fa19da70/aiolifx/aiolifx.py#L709-L730
import asyncio as aio from .message import BROADCAST_MAC, BROADCAST_SOURCE_ID from .msgtypes import * from .products import * from .unpack import unpack_lifx_message from functools import partial import time, random, datetime, socket, ifaddr LISTEN_IP = "0.0.0.0" UDP_BROADCAST_IP = "255.255.255.255" UDP_BROADCAST_PORT = 56700 DEFAULT_TIMEOUT = 0.5 DEFAULT_ATTEMPTS = 3 DISCOVERY_INTERVAL = 180 DISCOVERY_STEP = 5 def mac_to_ipv6_linklocal(mac, prefix="fe80::"): mac_value = int( mac.translate(str.maketrans(dict([(x, None) for x in [" ", ".", ":", "-"]]))), 16, ) high2 = mac_value >> 32 & 0xFFFF ^ 0x0200 high1 = mac_value >> 24 & 0xFF low1 = mac_value >> 16 & 0xFF low2 = mac_value & 0xFFFF return prefix + ":{:04x}:{:02x}ff:fe{:02x}:{:04x}".format(high2, high1, low1, low2) def nanosec_to_hours(ns): return ns / (1000000000.0 * 60 * 60) class Device(aio.DatagramProtocol): def __init__(self, loop, mac_addr, ip_addr, port, parent=None): self.loop = loop self.mac_addr = mac_addr self.ip_addr = ip_addr self.port = port self.parent = parent self.registered = False self.retry_count = DEFAULT_ATTEMPTS self.timeout = DEFAULT_TIMEOUT self.unregister_timeout = DEFAULT_TIMEOUT self.transport = None self.task = None self.seq = 0 self.message = {} self.source_id = random.randint(0, (2 ** 32) - 1) self.default_callb = None self.label = None self.location = None self.group = None self.power_level = None self.vendor = None self.product = None self.version = None self.host_firmware_version = None self.host_firmware_build_timestamp = None self.wifi_firmware_version = None self.wifi_firmware_build_timestamp = None self.lastmsg = datetime.datetime.now() def seq_next(self): self.seq = (self.seq + 1) % 128 return self.seq def connection_made(self, transport): self.transport = transport self.register() def datagram_received(self, data, addr): self.register() response = unpack_lifx_message(data) self.lastmsg = datetime.datetime.now() if response.seq_num in self.message: response_type, myevent, callb = self.message[response.seq_num] if type(response) == response_type: if response.source_id == self.source_id: if "State" in response.__class__.__name__: setmethod = ( "resp_set_" + response.__class__.__name__.replace("State", "").lower() ) if setmethod in dir(self) and callable( getattr(self, setmethod) ): getattr(self, setmethod)(response) if callb: callb(self, response) myevent.set() del self.message[response.seq_num] elif type(response) == Acknowledgement: pass else: del self.message[response.seq_num] elif self.default_callb: self.default_callb(response) def register(self): if not self.registered: self.registered = True if self.parent: self.parent.register(self) def unregister(self): if self.registered: if ( datetime.datetime.now() - datetime.timedelta(seconds=self.unregister_timeout) > self.lastmsg ): self.registered = False if self.parent: self.parent.unregister(self) def cleanup(self): if self.transport: self.transport.close() self.transport = None if self.task: self.task.cancel() self.task = None async def fire_sending(self, msg, num_repeats): if num_repeats is None: num_repeats = self.retry_count sent_msg_count = 0 sleep_interval = 0.05 while sent_msg_count < num_repeats: if self.transport: self.transport.sendto(msg.packed_message) sent_msg_count += 1 await aio.sleep( sleep_interval ) def fire_and_forget( self, msg_type, payload={}, timeout_secs=None, num_repeats=None ): msg = msg_type( self.mac_addr, self.source_id, seq_num=0, payload=payload, ack_requested=False, response_requested=False, ) xx = 
self.loop.create_task(self.fire_sending(msg, num_repeats)) return True async def try_sending(self, msg, timeout_secs, max_attempts): if timeout_secs is None: timeout_secs = self.timeout if max_attempts is None: max_attempts = self.retry_count attempts = 0 while attempts < max_attempts: if msg.seq_num not in self.message: return event = aio.Event() self.message[msg.seq_num][1] = event attempts += 1 if self.transport: self.transport.sendto(msg.packed_message) try: myresult = await aio.wait_for(event.wait(), timeout_secs) break except Exception as inst: if attempts >= max_attempts: if msg.seq_num in self.message: callb = self.message[msg.seq_num][2] if callb: callb(self, None) del self.message[msg.seq_num] self.unregister() def req_with_ack( self, msg_type, payload, callb=None, timeout_secs=None, max_attempts=None ): msg = msg_type( self.mac_addr, self.source_id, seq_num=self.seq_next(), payload=payload, ack_requested=True, response_requested=False, ) self.message[msg.seq_num] = [Acknowledgement, None, callb] xx = self.loop.create_task(self.try_sending(msg, timeout_secs, max_attempts)) return True def req_with_resp( self, msg_type, response_type, payload={}, callb=None, timeout_secs=None, max_attempts=None, ): msg = msg_type( self.mac_addr, self.source_id, seq_num=self.seq_next(), payload=payload, ack_requested=False, response_requested=True, ) self.message[msg.seq_num] = [response_type, None, callb] xx = self.loop.create_task(self.try_sending(msg, timeout_secs, max_attempts)) return True def req_with_ack_resp( self, msg_type, response_type, payload, callb=None, timeout_secs=None, max_attempts=None, ): msg = msg_type( self.mac_addr, self.source_id, seq_num=self.seq_next(), payload=payload, ack_requested=True, response_requested=True, ) self.message[msg.seq_num] = [response_type, None, callb] xx = self.loop.create_task(self.try_sending(msg, timeout_secs, max_attempts)) return True def get_label(self, callb=None): if self.label is None: mypartial = partial(self.resp_set_label) if callb: mycallb = lambda x, y: (mypartial(y), callb(x, y)) else: mycallb = lambda x, y: mypartial(y) response = self.req_with_resp(GetLabel, StateLabel, callb=mycallb) return self.label def set_label(self, value, callb=None): if len(value) > 32: value = value[:32] mypartial = partial(self.resp_set_label, label=value) if callb: self.req_with_ack( SetLabel, {"label": value}, lambda x, y: (mypartial(y), callb(x, y)) ) else: self.req_with_ack(SetLabel, {"label": value}, lambda x, y: mypartial(y)) def resp_set_label(self, resp, label=None): if label: self.label = label elif resp: self.label = resp.label.decode().replace("\x00", "") def get_location(self, callb=None): if self.location is None: mypartial = partial(self.resp_set_location) if callb: mycallb = lambda x, y: (mypartial(y), callb(x, y)) else: mycallb = lambda x, y: mypartial(y) response = self.req_with_resp(GetLocation, StateLocation, callb=mycallb) return self.location def resp_set_location(self, resp, location=None): if location: self.location = location elif resp: self.location = resp.label.decode().replace("\x00", "") def get_group(self, callb=None): if self.group is None: mypartial = partial(self.resp_set_group) if callb: mycallb = lambda x, y: (mypartial(y), callb(x, y)) else: mycallb = lambda x, y: mypartial(y) response = self.req_with_resp(GetGroup, StateGroup, callb=callb) return self.group def resp_set_group(self, resp, group=None): if group: self.group = group elif resp: self.group = resp.label.decode().replace("\x00", "") def get_power(self, callb=None): 
if self.power_level is None: response = self.req_with_resp(GetPower, StatePower, callb=callb) return self.power_level def set_power(self, value, callb=None, rapid=False): on = [True, 1, "on"] off = [False, 0, "off"] mypartial = partial(self.resp_set_power, power_level=value) if callb: mycallb = lambda x, y: (mypartial(y), callb(x, y)) else: mycallb = lambda x, y: mypartial(y) if value in on and not rapid: response = self.req_with_ack(SetPower, {"power_level": 65535}, mycallb) elif value in off and not rapid: response = self.req_with_ack(SetPower, {"power_level": 0}, mycallb) elif value in on and rapid: response = self.fire_and_forget(SetPower, {"power_level": 65535}) self.power_level = 65535 elif value in off and rapid: response = self.fire_and_forget(SetPower, {"power_level": 0}) self.power_level = 0 def resp_set_power(self, resp, power_level=None): if power_level is not None: self.power_level = power_level elif resp: self.power_level = resp.power_level def get_wififirmware(self, callb=None): if self.wifi_firmware_version is None: mypartial = partial(self.resp_set_wififirmware) if callb: mycallb = lambda x, y: (mypartial(y), callb(x, y)) else: mycallb = lambda x, y: mypartial(y) response = self.req_with_resp(GetWifiFirmware, StateWifiFirmware, mycallb) return (self.wifi_firmware_version, self.wifi_firmware_build_timestamp) def resp_set_wififirmware(self, resp): if resp: self.wifi_firmware_version = float( str(str(resp.version >> 16) + "." + str(resp.version & 0xFF)) ) self.wifi_firmware_build_timestamp = resp.build def get_wifiinfo(self, callb=None): response = self.req_with_resp(GetWifiInfo, StateWifiInfo, callb=callb) return None def get_hostfirmware(self, callb=None): if self.host_firmware_version is None: mypartial = partial(self.resp_set_hostfirmware) if callb: mycallb = lambda x, y: (mypartial(y), callb(x, y)) else: mycallb = lambda x, y: mypartial(y) response = self.req_with_resp(GetHostFirmware, StateHostFirmware, mycallb) return (self.host_firmware_version, self.host_firmware_build_timestamp) def resp_set_hostfirmware(self, resp): if resp: self.host_firmware_version = ( str(resp.version >> 16) + "." + str(resp.version & 0xFFFF) ) self.host_firmware_build_timestamp = resp.build def get_hostinfo(self, callb=None): response = self.req_with_resp(GetInfo, StateInfo, callb=callb) return None
MIT License
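The cache-then-callback composition used throughout these getters (a partial for the caching setter, a lambda that also invokes the user's callback) can be illustrated standalone. The names below are illustrative only, not part of aiolifx.

from functools import partial

class Cache:
    version = None
    def resp_set_version(self, resp):
        # default callback: just cache the value carried by the response
        self.version = resp

def user_callb(device, resp):
    print('user callback saw', resp)

dev = Cache()
mypartial = partial(dev.resp_set_version)
# same composition as get_version: cache first, then run the user callback
mycallb = lambda x, y: (mypartial(y), user_callb(x, y))
mycallb(dev, 'StateVersion(vendor=1, product=27)')
print(dev.version)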
academysoftwarefoundation/opencue
cuegui/cuegui/DarkPalette.py
GreyF
python
def GreyF(value):
    c = QtGui.QColor()
    c.setRgbF(value, value, value)
    return c
Creates a grey color.
https://github.com/academysoftwarefoundation/opencue/blob/da28ae905b81e7d1125db2073a369fdc0ae9acd4/cuegui/cuegui/DarkPalette.py#L104-L108
from __future__ import absolute_import from __future__ import print_function from __future__ import division import platform from PySide2 import QtGui from PySide2 import QtWidgets import cuegui.Constants def init(): QtGui.qApp.setPalette(DarkPalette()) if platform.system() in ['Darwin', 'Linux']: setDarkStyleSheet() elif platform.system() == 'Windows': QtGui.qApp.setStyle('Fusion') else: QtGui.qApp.setStyle(QtWidgets.QStyleFactory.create(cuegui.Constants.COLOR_THEME)) def setDarkStyleSheet(): QtGui.qApp.setStyleSheet(open(cuegui.Constants.DARK_STYLE_SHEET).read()) def DarkPalette(): p = QtGui.QPalette() c = GreyF(0.175) p.setColor(p.Window, c) p.setColor(p.Button, c) c = GreyF(0.79) p.setColor(p.WindowText, c) p.setColor(p.Text, c) p.setColor(p.ButtonText, c) p.setColor(p.BrightText, c) c = ColorF(0.6, 0.6, 0.8) p.setColor(p.Link, c) c = ColorF(0.8, 0.6, 0.8) p.setColor(p.LinkVisited, c) c = GreyF(0.215) p.setColor(p.Base, c) c = GreyF(0.25) p.setColor(p.AlternateBase, c) c = GreyF(0.0) p.setColor(p.Shadow, c) c = GreyF(0.13) p.setColor(p.Dark, c) c = GreyF(0.21) p.setColor(p.Mid, c) c = GreyF(0.25) p.setColor(p.Midlight, c) c = GreyF(0.40) p.setColor(p.Light, c) c = ColorF(0.31, 0.31, 0.25) p.setColor(p.Highlight, c) c = GreyF(0.46) p.setColor(QtGui.QPalette.Disabled, p.WindowText, c) p.setColor(QtGui.QPalette.Disabled, p.Text, c) p.setColor(QtGui.QPalette.Disabled, p.ButtonText, c) c = GreyF(0.55) p.setColor(QtGui.QPalette.Disabled, p.BrightText, c) return p
Apache License 2.0
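A short usage sketch, assuming PySide2 is installed; it simply exercises the setRgbF call GreyF relies on (QColor is a value type, so no QApplication is required).

from PySide2 import QtGui

def grey_f(value):
    c = QtGui.QColor()
    c.setRgbF(value, value, value)  # equal R, G and B components give a grey
    return c

window_grey = grey_f(0.175)  # the Window/Button shade used by DarkPalette above
print(window_grey.redF(), window_grey.greenF(), window_grey.blueF())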
ucam-smt/sgnmt
cam/sgnmt/predictors/structure.py
BracketPredictor.predict_next
python
def predict_next(self, words):
    if self.cur_depth == 0:
        if self.ends_with_opening:
            ret = self._no_closing_bracket()
            ret[utils.EOS_ID] = utils.NEG_INF
            return ret
        return {utils.EOS_ID: self.cur_length_scores.get(
                    self.n_terminals, utils.NEG_INF)
                if self.length_scores else 0.0}
    ret = {utils.EOS_ID: utils.NEG_INF}
    if (self.cur_depth >= self.max_depth
            or self.n_terminals >= self.max_length):
        ret.update({w: utils.NEG_INF for w in words
                    if (w > self.max_terminal_id
                        and not w in self.closing_bracket_ids)})
    if (self.length_scores
            and self.cur_depth == 1
            and self.n_terminals > 0
            and not self.n_terminals in self.cur_length_scores):
        ret.update(self._no_closing_bracket())
    return ret
If the maximum depth is reached, exclude all opening brackets. If history is not balanced, exclude EOS. If the current depth is zero, exclude closing brackets. Args: words (list): Set of words to score Returns: dict.
https://github.com/ucam-smt/sgnmt/blob/c663ec7b251552e36b6b4f992f0ac21aad87cb7b/cam/sgnmt/predictors/structure.py#L462-L495
import logging from cam.sgnmt import utils from cam.sgnmt.predictors.core import Predictor, UnboundedVocabularyPredictor OSM_EOP_ID = 4 OSM_SRC_POP_ID = 4 OSM_SET_MARKER_ID = 5 OSM_JUMP_FWD_ID = 6 OSM_JUMP_BWD_ID = 7 OSM_SRC_POP2_ID = 8 OSM_COPY_ID = 8 OSM_SRC_UNPOP_ID = 9 def load_external_lengths(path): lengths = [] with open(path) as f: for line in f: scores = {} for pair in line.strip().split(): if ':' in pair: length, score = pair.split(':') scores[int(length)] = float(score) else: scores[int(pair)] = 0.0 lengths.append(scores) return lengths def update_trg_osm_ids(wmap_path): global OSM_SRC_POP_ID, OSM_SET_MARKER_ID, OSM_JUMP_FWD_ID, OSM_JUMP_BWD_ID, OSM_SRC_POP2_ID, OSM_COPY_ID, OSM_SRC_UNPOP_ID if not wmap_path: return with open(wmap_path) as f: for line in f: word, word_id = line.strip().split() if word == "<SRC_POP>": OSM_SRC_POP_ID = int(word_id) logging.debug("OSM SRC_POP = %d" % OSM_SRC_POP_ID) elif word == "<SET_MARKER>": OSM_SET_MARKER_ID = int(word_id) logging.debug("OSM SET_MARKER = %d" % OSM_SET_MARKER_ID) elif word == "<JUMP_FWD>": OSM_JUMP_FWD_ID = int(word_id) logging.debug("OSM JUMP_FWD = %d" % OSM_JUMP_FWD_ID) elif word == "<JUMP_BWD>": OSM_JUMP_BWD_ID = int(word_id) logging.debug("OSM JUMP_BWD = %d" % OSM_JUMP_BWD_ID) elif word == "<SRC_POP2>": OSM_SRC_POP2_ID = int(word_id) logging.debug("OSM SRC_POP2 = %d" % OSM_SRC_POP2_ID) elif word == "<COPY>": OSM_COPY_ID = int(word_id) logging.debug("OSM COPY = %d" % OSM_COPY_ID) elif word == "<SRC_UNPOP>": OSM_SRC_UNPOP_ID = int(word_id) logging.debug("SRC_UNPOP = %d" % OSM_SRC_UNPOP_ID) def update_src_osm_ids(wmap_path): global OSM_EOP_ID if not wmap_path: return with open(wmap_path) as f: for line in f: word, word_id = line.strip().split() if word == "<EOP>": OSM_EOP_ID = int(word_id) logging.debug("OSM EOP = %d" % OSM_EOP_ID) class OSMPredictor(Predictor): def __init__(self, src_wmap, trg_wmap, use_jumps=True, use_auto_pop=False, use_unpop=False, use_pop2=False, use_src_eop=False, use_copy=False): super(OSMPredictor, self).__init__() update_trg_osm_ids(trg_wmap) self.use_jumps = use_jumps self.use_auto_pop = use_auto_pop self.use_unpop = use_unpop self.use_src_eop = use_src_eop if use_src_eop: update_src_osm_ids(src_wmap) self.pop_ids = set([OSM_SRC_POP_ID]) if use_pop2: self.pop_ids.add(OSM_SRC_POP2_ID) if use_copy: self.pop_ids.add(OSM_COPY_ID) self.illegal_sequences = [] if use_jumps: self.illegal_sequences.extend([ [OSM_SET_MARKER_ID, OSM_SET_MARKER_ID] ]) if use_auto_pop: self.no_auto_pop = set() if use_jumps: self.no_auto_pop.add(OSM_JUMP_FWD_ID) self.no_auto_pop.add(OSM_JUMP_BWD_ID) self.no_auto_pop.add(OSM_SET_MARKER_ID) if use_unpop: self.no_auto_pop.add(OSM_SRC_UNPOP_ID) def _is_pop(self, token): if token in self.pop_ids: return True return self.use_auto_pop and token not in self.no_auto_pop def initialize(self, src_sentence): if self.use_src_eop: self.src_len = src_sentence.count(OSM_EOP_ID) + 1 else: self.src_len = len(src_sentence) self.n_holes = 0 self.head = 0 self.n_pop = 0 self.history = [] def predict_next(self): ret = {} if self.n_pop >= self.src_len: return {utils.EOS_ID: 0.0} else: ret[utils.EOS_ID] = utils.NEG_INF if self.use_unpop and self.n_pop <= 0: ret[OSM_SRC_UNPOP_ID] = utils.NEG_INF if self.use_jumps: if self.head <= 0: ret[OSM_JUMP_BWD_ID] = utils.NEG_INF if self.head >= self.n_holes: ret[OSM_JUMP_FWD_ID] = utils.NEG_INF for seq in self.illegal_sequences: hist = seq[:-1] if self.history[-len(hist):] == hist: ret[seq[-1]] = utils.NEG_INF return ret def get_unk_probability(self, posterior): if 
self.n_pop >= self.src_len: return utils.NEG_INF return 0.0 def consume(self, word): if not self._is_pop(word): if self.use_unpop and word == OSM_SRC_UNPOP_ID: self.n_pop -= 1 else: self.history.append(word) else: self.n_pop += 1 if self.use_jumps: if word == OSM_SET_MARKER_ID: self.n_holes += 1 self.head += 1 elif word == OSM_JUMP_FWD_ID: self.head += 1 elif word == OSM_JUMP_BWD_ID: self.head -= 1 def get_state(self): return self.n_holes, self.head, self.n_pop def set_state(self, state): self.n_holes, self.head, self.n_pop = state def is_equal(self, state1, state2): return state1 == state2 class ForcedOSMPredictor(Predictor): def __init__(self, trg_wmap, trg_test_file): super(ForcedOSMPredictor, self).__init__() update_trg_osm_ids(trg_wmap) self.trg_sentences = [] with open(trg_test_file) as f: for line in f: self.trg_sentences.append([int(w) for w in line.strip().split()]) def initialize(self, src_sentence): self.compiled = ["X"] self.head = 0 self.cur_trg_sentence = self.trg_sentences[self.current_sen_id] def _is_complete(self): n_terminals = len([s for s in self.compiled if s != "X"]) return n_terminals == len(self.cur_trg_sentence) def _generate_alignments(self, align_stub=[], compiled_start_pos=0, sentence_start_pos=0): for pos in range(compiled_start_pos, len(self.compiled)): if self.compiled[pos] != 'X': word = int(self.compiled[pos]) for sen_pos in range(sentence_start_pos, len(self.cur_trg_sentence)): if self.cur_trg_sentence[sen_pos] == word: self._generate_alignments( align_stub + [(pos, sen_pos)], pos+1, sen_pos+1) return self.alignments.append(align_stub) def _align(self): possible_words = [set() for _ in range(len(self.compiled))] self.alignments = [] self._generate_alignments(align_stub=[]) for alignment in self.alignments: alignment.append((len(self.compiled), len(self.cur_trg_sentence))) prev_compiled_pos = -1 prev_sentence_pos = -1 for compiled_pos, sentence_pos in alignment: section_words = set( self.cur_trg_sentence[prev_sentence_pos+1:sentence_pos]) if section_words: seen_gap = False for section_pos in range(prev_compiled_pos+1, compiled_pos): if self.compiled[section_pos] == "X": if seen_gap: possible_words[section_pos] |= section_words else: possible_words[section_pos].add( self.cur_trg_sentence[prev_sentence_pos + section_pos - prev_compiled_pos]) seen_gap = True prev_compiled_pos = compiled_pos prev_sentence_pos = sentence_pos return possible_words def predict_next(self): ret = {OSM_SRC_POP_ID: 0.0} possible_words = self._align() if possible_words[self.head]: ret[OSM_SET_MARKER_ID] = 0.0 if any(possible_words[:self.head]): ret[OSM_JUMP_BWD_ID] = 0.0 if any(possible_words[self.head+1:]): ret[OSM_JUMP_FWD_ID] = 0.0 if self._is_complete(): ret[utils.EOS_ID] = 0.0 for word in possible_words[self.head]: ret[word] = 0.0 return ret def get_unk_probability(self, posterior): return utils.NEG_INF def _jump_op(self, step): self.head += step while self.compiled[self.head] != "X": self.head += step def _insert_op(self, op): self.compiled = self.compiled[:self.head] + [op] + self.compiled[self.head:] self.head += 1 def consume(self, word): if word == OSM_SET_MARKER_ID: self._insert_op("X") elif word == OSM_JUMP_FWD_ID: self._jump_op(1) elif word == OSM_JUMP_BWD_ID: self._jump_op(-1) elif word != OSM_SRC_POP_ID: self._insert_op(str(word)) def get_state(self): return self.compiled, self.head def set_state(self, state): self.compiled, self.head = state def is_equal(self, state1, state2): return state1 == state2 class BracketPredictor(UnboundedVocabularyPredictor): def 
__init__(self, max_terminal_id, closing_bracket_id, max_depth=-1, extlength_path=""): super(BracketPredictor, self).__init__() self.max_terminal_id = max_terminal_id try: self.closing_bracket_ids = utils.split_comma(closing_bracket_id, int) except: self.closing_bracket_ids = [int(closing_bracket_id)] self.max_depth = max_depth if max_depth >= 0 else 1000000 if extlength_path: self.length_scores = load_external_lengths(extlength_path) else: self.length_scores = None self.max_length = 1000000 def initialize(self, src_sentence): self.cur_depth = 0 self.ends_with_opening = True self.n_terminals = 0 if self.length_scores: self.cur_length_scores = self.length_scores[self.current_sen_id] self.max_length = max(self.cur_length_scores) def _no_closing_bracket(self): return {i: utils.NEG_INF for i in self.closing_bracket_ids}
Apache License 2.0
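A toy illustration of how a constraint dictionary like the one predict_next returns (token id mapped to NEG_INF) might be combined with a base posterior. The additive combination below is an assumption for demonstration, not SGNMT's actual predictor-combination logic, and the token ids and scores are made up.

NEG_INF = float('-inf')
EOS_ID = 2

base_posterior = {EOS_ID: -0.1, 10: -1.2, 11: -0.8, 12: -2.3}
# e.g. brackets still open: forbid EOS and forbid one disallowed token (id 12)
constraints = {EOS_ID: NEG_INF, 12: NEG_INF}

combined = {w: base_posterior[w] + constraints.get(w, 0.0) for w in base_posterior}
best = max(combined, key=combined.get)
print(combined)
print('best token:', best)  # 11, since EOS and 12 are masked out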
iexcloud/pyex
pyEX/studies/technicals/math.py
sinh
python
def sinh(client, symbol, timeframe="6m", col="close"):
    df = client.chartDF(symbol, timeframe)
    x = t.SINH(df[col].values.astype(float))
    return pd.DataFrame({col: df[col].values, "sinh": x})
This will return a dataframe of Vector Trigonometric Sinh for the given symbol across the given timeframe Args: client (pyEX.Client); Client symbol (string); Ticker timeframe (string); timeframe to use, for pyEX.chart col (string); column to use to calculate Returns: DataFrame: result
https://github.com/iexcloud/pyex/blob/48223a046d120703e8cc8f6c57f8a1450ee3f835/pyEX/studies/technicals/math.py#L221-L237
import pandas as pd import talib as t def acos(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.ACOS(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "acos": x}) def asin(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.ASIN(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "asin": x}) def atan(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.ATAN(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "atan": x}) def ceil(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.CEIL(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "ceil": x}) def cos(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.COS(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "cos": x}) def cosh(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.COSH(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "cosh": x}) def exp(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.EXP(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "exp": x}) def floor(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.FLOOR(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "floor": x}) def ln(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.LN(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "ln": x}) def log10(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.LOG10(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "log10": x}) def sin(client, symbol, timeframe="6m", col="close"): df = client.chartDF(symbol, timeframe) x = t.SIN(df[col].values.astype(float)) return pd.DataFrame({col: df[col].values, "sin": x})
Apache License 2.0
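A hedged sketch of the same computation using numpy's element-wise sinh in place of TA-Lib's t.SINH (they agree element-wise), so it runs without talib or an IEX client; the price values are made up.

import numpy as np
import pandas as pd

close = pd.Series([0.5, 1.0, 1.5, 2.0], name='close')
x = np.sinh(close.values.astype(float))  # element-wise hyperbolic sine
out = pd.DataFrame({'close': close.values, 'sinh': x})
print(out)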
robotichead/nearbeach
NearBeach/views/request_for_change_views.py
new_request_for_change
python
def new_request_for_change(request, *args, **kwargs):
    t = loader.get_template('NearBeach/request_for_change/new_request_for_change.html')
    group_results = group.objects.filter(
        is_deleted=False,
    )
    user_group_results = user_group.objects.filter(
        is_deleted=False,
        username=request.user,
    ).values(
        'group_id',
        'group__group_name',
    ).distinct()
    user_results = User.objects.filter(
        is_active=True,
    )
    c = {
        'group_results': serializers.serialize('json', group_results),
        'nearbeach_title': 'New RFC',
        'user_group_results': json.dumps(list(user_group_results), cls=DjangoJSONEncoder),
        'user_results': serializers.serialize('json', user_results),
    }
    return HttpResponse(t.render(c, request))
Render the new Request for Change form, serialising the available groups, the requesting user's group memberships and the active users to JSON for the template context.
:param request: the current HttpRequest
:return: HttpResponse rendering the new_request_for_change template
https://github.com/robotichead/nearbeach/blob/4f5cd541cc6093c6ff047322500e326157d1f30c/NearBeach/views/request_for_change_views.py#L50-L86
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect, Http404 from django.urls import reverse from django.template import loader from django.core.serializers.json import DjangoJSONEncoder from django.contrib.auth.decorators import login_required from django.views.decorators.http import require_http_methods from django.core import serializers from NearBeach.decorators.check_user_permissions import check_user_permissions, check_rfc_permissions from NearBeach.models import request_for_change, User, user_group, object_assignment, group, change_task, request_for_change_group_approval, RFC_STATUS from NearBeach.forms import NewRequestForChangeForm, RfcModuleForm, RfcInformationSaveForm, NewChangeTaskForm, UpdateRFCStatus import json def get_rfc_context(rfc_id): rfc_results = request_for_change.objects.get(rfc_id=rfc_id) rfc_change_lead = User.objects.get(id=rfc_results.rfc_lead.id) user_list = User.objects.filter( is_active=True, id__in=user_group.objects.filter( is_deleted=False, group_id__in=object_assignment.objects.filter( is_deleted=False, request_for_change_id=rfc_id, ).values('group_id') ).values('username_id') ) c = { 'nearbeach_title': 'RFC %s' % rfc_id, 'rfc_id': rfc_id, 'rfc_results': serializers.serialize('json', [rfc_results]), 'rfc_change_lead': serializers.serialize('json', [rfc_change_lead]), 'user_list': serializers.serialize('json', user_list), } return c @login_required(login_url='login', redirect_field_name="") @check_rfc_permissions(min_permission_level=3)
MIT License
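The view above JSON-encodes a .values() queryset with DjangoJSONEncoder because plain json.dumps cannot handle dates, Decimals or UUIDs. A small sketch of that encoder follows, assuming only that Django is installed (the encoder itself does not need project settings); the row contents are made up.

import datetime
import json
import uuid
from decimal import Decimal

from django.core.serializers.json import DjangoJSONEncoder

rows = [
    {'group_id': 1, 'group__group_name': 'Administration'},
    {'created': datetime.date(2021, 6, 1), 'fee': Decimal('12.50'), 'ref': uuid.uuid4()},
]
# DjangoJSONEncoder converts the date, Decimal and UUID to JSON-safe strings
print(json.dumps(rows, cls=DjangoJSONEncoder, indent=2))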
chaffelson/whoville
whoville/cloudbreak/apis/v1blueprints_api.py
V1blueprintsApi.delete_private_blueprint_with_http_info
python
def delete_private_blueprint_with_http_info(self, name, **kwargs):
    all_params = ['name']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_private_blueprint" % key
            )
        params[key] = val
    del params['kwargs']
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `delete_private_blueprint`")

    collection_formats = {}

    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    auth_settings = ['tokenAuth']

    return self.api_client.call_api('/v1/blueprints/user/{name}', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
delete private blueprint by name Ambari Blueprints are a declarative definition of a Hadoop cluster. With a Blueprint, you specify a stack, the component layout and the configurations to materialize a Hadoop cluster instance. Hostgroups defined in blueprints can be associated to different templates, thus you can spin up a highly available cluster running on different instance types. This will give you the option to group your Hadoop services based on resource needs (e.g. high I/O, CPU or memory) and create an infrastructure which fits your workload best. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_private_blueprint_with_http_info(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: None If the method is called asynchronously, returns the request thread.
https://github.com/chaffelson/whoville/blob/f71fda629c9fd50d0a482120165ea5abcc754522/whoville/cloudbreak/apis/v1blueprints_api.py#L175-L253
from __future__ import absolute_import import sys import os import re from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class V1blueprintsApi(object): def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def delete_blueprint(self, id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_blueprint_with_http_info(id, **kwargs) else: (data) = self.delete_blueprint_with_http_info(id, **kwargs) return data def delete_blueprint_with_http_info(self, id, **kwargs): all_params = ['id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_blueprint" % key ) params[key] = val del params['kwargs'] if ('id' not in params) or (params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `delete_blueprint`") collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client. select_header_accept(['application/json']) header_params['Content-Type'] = self.api_client. select_header_content_type(['application/json']) auth_settings = ['tokenAuth'] return self.api_client.call_api('/v1/blueprints/{id}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_private_blueprint(self, name, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_private_blueprint_with_http_info(name, **kwargs) else: (data) = self.delete_private_blueprint_with_http_info(name, **kwargs) return data
Apache License 2.0
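A standalone sketch of the keyword-argument validation idiom the generated client method above uses: a whitelist of accepted names, a TypeError for anything else, and a ValueError for a missing required parameter. delete_thing is a hypothetical name used only for illustration; the real method delegates to api_client.call_api, which needs a live Cloudbreak endpoint.

def delete_thing(name, **kwargs):
    all_params = ['name', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    # reject unknown keyword arguments up front, like the generated client does
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s' to method delete_thing" % key)
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `delete_thing`")
    return {'deleted': name, 'options': kwargs}

print(delete_thing('my-blueprint', _preload_content=False))
# delete_thing('my-blueprint', bogus=1) would raise TypeError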
plotly/dash-technical-charting
quantmod/utils.py
update
python
def update(dict1, dict2):
    for key, value in dict2.items():
        if isinstance(value, collections.Mapping):
            temp = update(dict1.get(key, {}), value)
            dict1[key] = temp
        elif isinstance(dict1, collections.Mapping):
            dict1[key] = dict2[key]
        else:
            dict1 = {key: dict2[key]}
    return dict1
Recursively update dict-like objects and return the result.

The return value is needed for the recursion to work correctly, even though dict1 is updated in place.

Parameters
----------
dict1 : dict
    Dictionary to be updated (modified in place).
dict2 : dict
    Dictionary containing the values to merge in.
https://github.com/plotly/dash-technical-charting/blob/72af2c0c19bc2c56231061bff2fbd118b71a9819/quantmod/utils.py#L15-L37
from __future__ import absolute_import

import collections
import json
import os
MIT License
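A runnable sketch of the recursive merge above. It deliberately uses collections.abc.Mapping (on Python 3.10+ collections.Mapping no longer exists), which is an assumed modernisation of the snippet, and the layout/override dicts are made up.

from collections.abc import Mapping

def update(dict1, dict2):
    for key, value in dict2.items():
        if isinstance(value, Mapping):
            # nested mapping: merge recursively instead of overwriting
            dict1[key] = update(dict1.get(key, {}), value)
        elif isinstance(dict1, Mapping):
            dict1[key] = dict2[key]
        else:
            dict1 = {key: dict2[key]}
    return dict1

layout = {'xaxis': {'type': 'date', 'showgrid': True}, 'title': 'Chart'}
overrides = {'xaxis': {'showgrid': False}, 'showlegend': False}
print(update(layout, overrides))
# {'xaxis': {'type': 'date', 'showgrid': False}, 'title': 'Chart', 'showlegend': False}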
reliaqualassociates/ramstk
src/ramstk/models/programdb/stakeholder/table.py
RAMSTKStakeholderTable.do_calculate_stakeholder
python
def do_calculate_stakeholder(self, node_id: int) -> None:
    self._do_calculate_improvement(node_id)
    pub.sendMessage(
        "succeed_calculate_stakeholder2",
        node_id=node_id,
    )
Calculate improvement factor and weight for currently selected item. :param node_id: the ID of the record to calculate. :return: None :rtype: None
https://github.com/reliaqualassociates/ramstk/blob/ffec5a107424914cf0026c6dfe26369c221f79f9/src/ramstk/models/programdb/stakeholder/table.py#L80-L92
from typing import Any, Dict, Type from pubsub import pub from ramstk.analyses import improvementfactor from ramstk.models import RAMSTKBaseTable, RAMSTKStakeholderRecord class RAMSTKStakeholderTable(RAMSTKBaseTable): _db_id_colname = "fld_stakeholder_id" _db_tablename = "ramstk_stakeholder" _select_msg = "selected_revision" _tag = "stakeholder" def __init__(self, **kwargs: Dict[Any, Any]) -> None: super().__init__(**kwargs) self._lst_id_columns = [ "revision_id", "stakeholder_id", ] self._record: Type[RAMSTKStakeholderRecord] = RAMSTKStakeholderRecord self.pkey = "stakeholder_id" pub.subscribe(self.do_calculate_stakeholder, "request_calculate_stakeholder") def do_get_new_record( self, attributes: Dict[str, Any] ) -> object: _new_record = self._record() _new_record.revision_id = attributes["revision_id"] _new_record.stakeholder_id = self.last_id + 1 _new_record.description = "New Stakeholder Input" return _new_record
BSD 3-Clause New or Revised License
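A minimal pypubsub round trip mirroring the messaging pattern above: a listener is subscribed to a topic, then sendMessage delivers the node_id keyword to it. The topic and listener names are illustrative, not RAMSTK's real ones.

from pubsub import pub

def on_calculated(node_id):
    print('stakeholder %d recalculated' % node_id)

pub.subscribe(on_calculated, 'succeed_calculate_demo')
pub.sendMessage('succeed_calculate_demo', node_id=1)  # -> stakeholder 1 recalculated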
chplushsieh/carvana-challenge
util/exp.py
load_checkpoint
python
def load_checkpoint(exp_name, ckpt_path):
    if ckpt_path is not None and os.path.isfile(ckpt_path):
        print("=> loading checkpoint '{}'".format(ckpt_path))
        checkpoint = torch.load(ckpt_path)

        assert exp_name == checkpoint['exp_name']

        model = get_network(exp_name)
        model.load_state_dict(checkpoint['state_dict'])

        optimizer = get_optimizer(model, exp_name)
        optimizer.load_state_dict(checkpoint['optimizer'])

        saved_epoch = checkpoint['epoch']
        print("=> loaded checkpoint '{}' (epoch {})"
              .format(ckpt_path, saved_epoch))
    else:
        model = get_network(exp_name)
        optimizer = get_optimizer(model, exp_name)
        saved_epoch = 0

    criterion = get_criterion(exp_name)
    return model, optimizer, criterion, saved_epoch
Load the latest saved checkpoint for the experiment, restoring model and optimizer state; if no checkpoint exists, build a fresh model and optimizer starting from epoch 0.
https://github.com/chplushsieh/carvana-challenge/blob/cba536657714df7c1c33150b92e3e152195b68db/util/exp.py#L120-L148
import torch import os from datetime import datetime import util.const as const import config import model.unet as unet import model.loss as loss def create_dir_if_not_exist(dir): if not os.path.exists(dir): os.makedirs(dir) return dir def get_network(exp_name): model_name = exp_name.split('_')[0] model_type = getattr(unet, model_name) model = model_type() return model def get_optimizer(model, exp_name): cfg = config.load_config_file(exp_name) optimizer_name = cfg['optimizer'] optimizer_method = getattr(torch.optim, optimizer_name) optimizer = optimizer_method( model.parameters(), lr=cfg['learning_rate'], momentum=cfg['momentum'], weight_decay=cfg['weight_decay'] ) return optimizer def get_criterion(exp_name): cfg = config.load_config_file(exp_name) criterion_name = cfg['criterion'] loss_method = getattr(loss, criterion_name) criterion = loss_method() return criterion def save_checkpoint(exp_name, epoch, model_state_dict, optimizer_state_dict): state = { 'exp_name': exp_name, 'epoch': epoch, 'state_dict': model_state_dict, 'optimizer' : optimizer_state_dict, } filename = str(epoch) + '.pth.tar' save_path = os.path.join(const.OUTPUT_DIR, exp_name, filename) torch.save(state, save_path) return def get_latest_ckpt(save_dir): ckpts = os.listdir(save_dir) ckpt_names = [ckpt.split('.')[0] for ckpt in ckpts if ckpt.endswith('.pth.tar')] if not ckpt_names: print("No checkpoints found. It's a new experiment. ") return None print("All checkpoints:") print(ckpt_names) ckpt_epochs = [ int(ckpt_name) for ckpt_name in ckpt_names] latest_epoch = max(ckpt_epochs) latest_path = os.path.join(save_dir, str(latest_epoch) + '.pth.tar') return latest_path def load_exp(exp_name): save_dir = os.path.join(const.OUTPUT_DIR, exp_name) if not os.path.isdir(save_dir): os.makedirs(save_dir) ckpt_path = get_latest_ckpt(save_dir) model, optimizer, criterion, saved_epoch = load_checkpoint(exp_name, ckpt_path) start_epoch = saved_epoch + 1 return model, optimizer, criterion, start_epoch
MIT License
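A compact, self-contained sketch of the checkpoint round trip load_checkpoint relies on (torch.save of a state dict, torch.load, load_state_dict); the file name and the tiny model are placeholders, and it assumes PyTorch is installed.

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

state = {'exp_name': 'demo', 'epoch': 3,
         'state_dict': model.state_dict(),
         'optimizer': optimizer.state_dict()}
torch.save(state, 'demo.pth.tar')                     # save a checkpoint dict

checkpoint = torch.load('demo.pth.tar')               # reload it later
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('resuming from epoch', checkpoint['epoch'] + 1)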
gabrielstanovsky/props
props/bottle.py
Response.set_cookie
python
def set_cookie(self, key, value, secret=None, **kargs):
    if secret:
        value = touni(cookie_encode((key, value), secret))
    elif not isinstance(value, basestring):
        raise TypeError('Secret missing for non-string Cookie.')
    self.COOKIES[key] = value
    for k, v in kargs.iteritems():
        self.COOKIES[key][k.replace('_', '-')] = v
Add a cookie. If the `secret` parameter is set, this creates a `Secure Cookie` (described below).
:param key: the name of the cookie.
:param value: the value of the cookie.
:param secret: required for secure cookies. (default: None)
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie. (default: current domain)
:param path: limits the cookie to a given path (default: /)
If neither `expires` nor `max_age` is set (default), the cookie lasts only as long as the browser is not closed.
Secure cookies may store any pickle-able object and are cryptographically signed to prevent manipulation. Keep in mind that cookies are limited to 4kb in most browsers.
Warning: Secure cookies are not encrypted (the client can still see the content) and not copy-protected (the client can restore an old cookie). The main intention is to make pickling and unpickling safe, not to store secret information at client side.
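A hedged sketch of the signing idea behind secure cookies (pickle the key/value pair, sign it with an HMAC, base64 both parts); this illustrates the concept only and is not Bottle's exact cookie_encode wire format.

import base64, hashlib, hmac, pickle

def demo_cookie_encode(data, secret):
    msg = base64.b64encode(pickle.dumps(data, -1))
    sig = base64.b64encode(hmac.new(secret.encode(), msg, hashlib.sha256).digest())
    return b'!' + sig + b'?' + msg

def demo_cookie_decode(raw, secret):
    sig, msg = raw[1:].split(b'?', 1)
    good = base64.b64encode(hmac.new(secret.encode(), msg, hashlib.sha256).digest())
    if hmac.compare_digest(sig, good):
        return pickle.loads(base64.b64decode(msg))
    return None  # signature mismatch: cookie was tampered with

raw = demo_cookie_encode(('account', {'user': 'marcel'}), 's3cret')
print(demo_cookie_decode(raw, 's3cret'))  # ('account', {'user': 'marcel'})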
https://github.com/gabrielstanovsky/props/blob/c6392016214ee582de4eaf364e518078f9bd182b/props/bottle.py#L973-L1005
from __future__ import with_statement __author__ = 'Marcel Hellkamp' __version__ = '0.9.dev' __license__ = 'MIT' import base64 import cgi import email.utils import functools import hmac import httplib import itertools import mimetypes import os import re import subprocess import sys import tempfile import thread import threading import time import warnings from Cookie import SimpleCookie from tempfile import TemporaryFile from traceback import format_exc from urllib import quote as urlquote from urlparse import urlunsplit, urljoin try: from collections import MutableMapping as DictMixin except ImportError: from UserDict import DictMixin try: from urlparse import parse_qs except ImportError: from cgi import parse_qs try: import cPickle as pickle except ImportError: import pickle try: from json import dumps as json_dumps except ImportError: try: from simplejson import dumps as json_dumps except ImportError: try: from django.utils.simplejson import dumps as json_dumps except ImportError: json_dumps = None if sys.version_info >= (3,0,0): from io import BytesIO from io import TextIOWrapper class NCTextIOWrapper(TextIOWrapper): def close(self): pass def touni(x, enc='utf8'): return str(x, encoding=enc) if isinstance(x, bytes) else str(x) else: from StringIO import StringIO as BytesIO bytes = str NCTextIOWrapper = None def touni(x, enc='utf8'): return x if isinstance(x, unicode) else unicode(str(x), encoding=enc) def tob(data, enc='utf8'): return data.encode(enc) if isinstance(data, unicode) else bytes(data) if sys.version_info >= (3,0,0): tonat = touni else: tonat = tob tonat.__doc__ = """ Convert anything to native strings """ def depr(message, critical=False): if critical: raise DeprecationWarning(message) warnings.warn(message, DeprecationWarning, stacklevel=3) def makelist(data): if isinstance(data, (tuple, list, set, dict)): return list(data) elif data: return [data] else: return [] class DictProperty(object): def __init__(self, attr, key=None, read_only=False): self.attr, self.key, self.read_only = attr, key, read_only def __call__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter, self.key = func, self.key or func.__name__ return self def __get__(self, obj, cls): if not obj: return self key, storage = self.key, getattr(obj, self.attr) if key not in storage: storage[key] = self.getter(obj) return storage[key] def __set__(self, obj, value): if self.read_only: raise ApplicationError("Read-Only property.") getattr(obj, self.attr)[self.key] = value def __delete__(self, obj): if self.read_only: raise ApplicationError("Read-Only property.") del getattr(obj, self.attr)[self.key] def cached_property(func): return DictProperty('__dict__')(func) class lazy_attribute(object): def __init__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter = func def __get__(self, obj, cls): value = self.getter(cls) setattr(cls, self.__name__, value) return value class BottleException(Exception): pass class HTTPResponse(BottleException): def __init__(self, output='', status=200, header=None): super(BottleException, self).__init__("HTTP Response %d" % status) self.status = int(status) self.output = output self.headers = HeaderDict(header) if header else None def apply(self, response): if self.headers: for key, value in self.headers.iterallitems(): response.headers[key] = value response.status = self.status class HTTPError(HTTPResponse): def __init__(self, code=500, output='Unknown Error', exception=None, traceback=None, header=None): super(HTTPError, self).__init__(output, 
code, header) self.exception = exception self.traceback = traceback def __repr__(self): return template(ERROR_PAGE_TEMPLATE, e=self) class RouteError(BottleException): class RouteSyntaxError(RouteError): class RouteBuildError(RouteError): class Router(object): default = '[^/]+' @lazy_attribute def syntax(cls): return re.compile(r'(?<!\\):([a-zA-Z_][a-zA-Z_0-9]*)?(?:#(.*?)#)?') def __init__(self): self.routes = {} self.rules = [] self.named = {} self.static = {} self.dynamic = [] def add(self, rule, method, target, name=None): if rule in self.routes: self.routes[rule][method.upper()] = target else: self.routes[rule] = {method.upper(): target} self.rules.append(rule) if self.static or self.dynamic: self.static, self.dynamic = {}, {} if name: self.named[name] = (rule, None) def delete(self, rule, method=None): if rule not in self.routes and rule in self.named: rule = self.named[rule][0] if rule in self.routes: if method: del self.routes[rule][method] else: self.routes[rule].clear() if not self.routes[rule]: del self.routes[rule] self.rules.remove(rule) def build(self, _name, *anon, **args): if _name not in self.named: raise RouteBuildError("No route with that name.", _name) rule, pairs = self.named[_name] if not pairs: token = self.syntax.split(rule) parts = [p.replace('\\:',':') for p in token[::3]] names = token[1::3] if len(parts) > len(names): names.append(None) pairs = zip(parts, names) self.named[_name] = (rule, pairs) try: anon = list(anon) url = [s if k is None else s+str(args.pop(k)) if k else s+str(anon.pop()) for s, k in pairs] except IndexError: msg = "Not enough arguments to fill out anonymous wildcards." raise RouteBuildError(msg) except KeyError, e: raise RouteBuildError(*e.args) if args: url += ['?', urlencode(args.iteritems())] return ''.join(url) def match(self, environ): targets, urlargs = self._match_path(environ) if not targets: raise HTTPError(404, "Not found: " + environ['PATH_INFO']) environ['router.url_args'] = urlargs method = environ['REQUEST_METHOD'].upper() if method in targets: return targets[method], urlargs if method == 'HEAD' and 'GET' in targets: return targets['GET'], urlargs if 'ANY' in targets: return targets['ANY'], urlargs allowed = [verb for verb in targets if verb != 'ANY'] if 'GET' in allowed and 'HEAD' not in allowed: allowed.append('HEAD') raise HTTPError(405, "Method not allowed.", header=[('Allow',",".join(allowed))]) def _match_path(self, environ): path = environ['PATH_INFO'] or '/' match = self.static.get(path) if match: return match, {} for combined, rules in self.dynamic: match = combined.match(path) if not match: continue gpat, match = rules[match.lastindex - 1] return match, gpat.match(path).groupdict() if gpat else {} if self.static or self.dynamic or not self.routes: return None, {} if not environ.get('wsgi.run_once'): self._compile() return self._match_path(environ) epath = path.replace(':','\\:') match = self.routes.get(epath) if match: return match, {} for rule in self.rules: if rule.count(':') < rule.count('\\:'): continue match = self._compile_pattern(rule).match(path) if match: return self.routes[rule], match.groupdict() return None, {} def _compile(self): self.static = {} self.dynamic = [] def fpat_sub(m): return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:' for rule in self.rules: target = self.routes[rule] if not self.syntax.search(rule): self.static[rule.replace('\\:',':')] = target continue gpat = self._compile_pattern(rule) fpat = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, gpat.pattern) gpat = gpat if 
gpat.groupindex else None try: combined = '%s|(%s)' % (self.dynamic[-1][0].pattern, fpat) self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1]) self.dynamic[-1][1].append((gpat, target)) except (AssertionError, IndexError), e: self.dynamic.append((re.compile('(^%s$)'%fpat), [(gpat, target)])) except re.error, e: raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e)) def _compile_pattern(self, rule): out = '' for i, part in enumerate(self.syntax.split(rule)): if i%3 == 0: out += re.escape(part.replace('\\:',':')) elif i%3 == 1: out += '(?P<%s>' % part if part else '(?:' else: out += '%s)' % (part or '[^/]+') return re.compile('^%s$'%out) class Bottle(object): def __init__(self, catchall=True, autojson=True, config=None): self.routes = [] self.callbacks = {} self.router = Router() self.mounts = {} self.error_handler = {} self.catchall = catchall self.config = config or {} self.serve = True self.castfilter = [] if autojson and json_dumps: self.add_filter(dict, dict2json) self.hooks = {'before_request': [], 'after_request': []} def optimize(self, *a, **ka): depr("Bottle.optimize() is obsolete.") def mount(self, app, script_path): if not isinstance(app, Bottle): raise TypeError('Only Bottle instances are supported for now.') script_path = '/'.join(filter(None, script_path.split('/'))) path_depth = script_path.count('/') + 1 if not script_path: raise TypeError('Empty script_path. Perhaps you want a merge()?') for other in self.mounts: if other.startswith(script_path): raise TypeError('Conflict with existing mount: %s' % other) @self.route('/%s/:#.*#' % script_path, method="ANY") def mountpoint(): request.path_shift(path_depth) return app.handle(request.environ) self.mounts[script_path] = app def add_filter(self, ftype, func): if not isinstance(ftype, type): raise TypeError("Expected type object, got %s" % type(ftype)) self.castfilter = [(t, f) for (t, f) in self.castfilter if t != ftype] self.castfilter.append((ftype, func)) self.castfilter.sort() def match_url(self, path, method='GET'): return self.match({'PATH_INFO': path, 'REQUEST_METHOD': method}) def match(self, environ): target, args = self.router.match(environ) try: return self.callbacks[target], args except KeyError: callback, decorators = self.routes[target] wrapped = callback for wrapper in decorators[::-1]: wrapped = wrapper(wrapped) functools.update_wrapper(wrapped, callback) self.callbacks[target] = wrapped return wrapped, args def get_url(self, routename, **kargs): scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' location = self.router.build(routename, **kargs).lstrip('/') return urljoin(urljoin('/', scriptname), location) def route(self, path=None, method='GET', no_hooks=False, decorate=None, template=None, template_opts={}, callback=None, name=None, static=False): if callable(path): path, callback = None, path decorators = makelist(decorate) if template: decorators.insert(0, view(template, **template_opts)) if not no_hooks: decorators.append(self._add_hook_wrapper) def wrapper(func): for rule in makelist(path) or yieldroutes(func): for verb in makelist(method): if static: rule = rule.replace(':','\\:') depr("Use backslash to escape ':' in routes.") self.router.add(rule, verb, len(self.routes), name=name) self.routes.append((func, decorators)) return func return wrapper(callback) if callback else wrapper def _add_hook_wrapper(self, func): @functools.wraps(func) def wrapper(*a, **ka): for hook in self.hooks['before_request']: hook() response.output = func(*a, **ka) for hook in 
self.hooks['after_request']: hook() return response.output return wrapper def get(self, path=None, method='GET', **kargs): return self.route(path, method, **kargs) def post(self, path=None, method='POST', **kargs): return self.route(path, method, **kargs) def put(self, path=None, method='PUT', **kargs): return self.route(path, method, **kargs) def delete(self, path=None, method='DELETE', **kargs): return self.route(path, method, **kargs) def error(self, code=500): def wrapper(handler): self.error_handler[int(code)] = handler return handler return wrapper def hook(self, name): def wrapper(func): self.add_hook(name, func) return func return wrapper def add_hook(self, name, func): if name not in self.hooks: raise ValueError("Unknown hook name %s" % name) if name in ('after_request'): self.hooks[name].insert(0, func) else: self.hooks[name].append(func) def remove_hook(self, name, func): if name not in self.hooks: raise ValueError("Unknown hook name %s" % name) self.hooks[name].remove(func) def handle(self, environ): if not self.serve: return HTTPError(503, "Server stopped") try: handler, args = self.match(environ) return handler(**args) except HTTPResponse, e: return e except Exception, e: if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError)) or not self.catchall: raise return HTTPError(500, 'Unhandled exception', e, format_exc(10)) def _cast(self, out, request, response, peek=None): for testtype, filterfunc in self.castfilter: if isinstance(out, testtype): return self._cast(filterfunc(out), request, response) if not out: response.headers['Content-Length'] = 0 return [] if isinstance(out, (tuple, list)) and isinstance(out[0], (bytes, unicode)): out = out[0][0:0].join(out) if isinstance(out, unicode): out = out.encode(response.charset) if isinstance(out, bytes): response.headers['Content-Length'] = str(len(out)) return [out] if isinstance(out, HTTPError): out.apply(response) return self._cast(self.error_handler.get(out.status, repr)(out), request, response) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.output, request, response) if hasattr(out, 'read'): if 'wsgi.file_wrapper' in request.environ: return request.environ['wsgi.file_wrapper'](out) elif hasattr(out, 'close') or not hasattr(out, '__iter__'): return WSGIFileWrapper(out) try: out = iter(out) first = out.next() while not first: first = out.next() except StopIteration: return self._cast('', request, response) except HTTPResponse, e: first = e except Exception, e: first = HTTPError(500, 'Unhandled exception', e, format_exc(10)) if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError)) or not self.catchall: raise if isinstance(first, HTTPResponse): return self._cast(first, request, response) if isinstance(first, bytes): return itertools.chain([first], out) if isinstance(first, unicode): return itertools.imap(lambda x: x.encode(response.charset), itertools.chain([first], out)) return self._cast(HTTPError(500, 'Unsupported response type: %s' % type(first)), request, response) def wsgi(self, environ, start_response): try: environ['bottle.app'] = self request.bind(environ) response.bind() out = self.handle(environ) out = self._cast(out, request, response) if response.status in (100, 101, 204, 304) or request.method == 'HEAD': if hasattr(out, 'close'): out.close() out = [] status = '%d %s' % (response.status, HTTP_CODES[response.status]) start_response(status, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception, e: if not self.catchall: raise err 
= '<h1>Critical error while processing request: %s</h1>' % environ.get('PATH_INFO', '/') if DEBUG: err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e) err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10) environ['wsgi.errors'].write(err) start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')]) return [tob(err)] def __call__(self, environ, start_response): return self.wsgi(environ, start_response) class Request(threading.local, DictMixin): def __init__(self, environ=None): self.bind(environ or {},) def bind(self, environ): self.environ = environ self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/') self.method = environ.get('REQUEST_METHOD', 'GET').upper() @property def _environ(self): depr("Request._environ renamed to Request.environ") return self.environ def copy(self): return Request(self.environ.copy()) def path_shift(self, shift=1): script_name = self.environ.get('SCRIPT_NAME','/') self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift) self['PATH_INFO'] = self.path def __getitem__(self, key): return self.environ[key] def __delitem__(self, key): self[key] = ""; del(self.environ[key]) def __iter__(self): return iter(self.environ) def __len__(self): return len(self.environ) def keys(self): return self.environ.keys() def __setitem__(self, key, value): self.environ[key] = value todelete = [] if key in ('PATH_INFO','REQUEST_METHOD'): self.bind(self.environ) elif key == 'wsgi.input': todelete = ('body','forms','files','params') elif key == 'QUERY_STRING': todelete = ('get','params') elif key.startswith('HTTP_'): todelete = ('headers', 'cookies') for key in todelete: if 'bottle.' + key in self.environ: del self.environ['bottle.' + key] @property def query_string(self): return self.environ.get('QUERY_STRING', '') @property def fullpath(self): return self.environ.get('SCRIPT_NAME', '').rstrip('/') + self.path @property def url(self): scheme = self.environ.get('wsgi.url_scheme', 'http') host = self.environ.get('HTTP_X_FORWARDED_HOST') host = host or self.environ.get('HTTP_HOST', None) if not host: host = self.environ.get('SERVER_NAME') port = self.environ.get('SERVER_PORT', '80') if (scheme, port) not in (('https','443'), ('http','80')): host += ':' + port parts = (scheme, host, urlquote(self.fullpath), self.query_string, '') return urlunsplit(parts) @property def content_length(self): return int(self.environ.get('CONTENT_LENGTH', '') or -1) @property def header(self): depr("The Request.header property was renamed to Request.headers") return self.headers @DictProperty('environ', 'bottle.headers', read_only=True) def headers(self): return WSGIHeaderDict(self.environ) @DictProperty('environ', 'bottle.get', read_only=True) def GET(self): data = parse_qs(self.query_string, keep_blank_values=True) get = self.environ['bottle.get'] = MultiDict() for key, values in data.iteritems(): for value in values: get[key] = value return get @DictProperty('environ', 'bottle.post', read_only=True) def POST(self): post = MultiDict() safe_env = {'QUERY_STRING':''} for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): if key in self.environ: safe_env[key] = self.environ[key] if NCTextIOWrapper: fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n') else: fb = self.body data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True) for item in data.list or []: post[item.name] = item if item.filename else item.value return post @DictProperty('environ', 'bottle.forms', read_only=True) def forms(self): forms = MultiDict() for name, 
item in self.POST.iterallitems(): if not hasattr(item, 'filename'): forms[name] = item return forms @DictProperty('environ', 'bottle.files', read_only=True) def files(self): files = MultiDict() for name, item in self.POST.iterallitems(): if hasattr(item, 'filename'): files[name] = item return files @DictProperty('environ', 'bottle.params', read_only=True) def params(self): params = MultiDict(self.GET) for key, value in self.forms.iterallitems(): params[key] = value return params @DictProperty('environ', 'bottle.body', read_only=True) def _body(self): maxread = max(0, self.content_length) stream = self.environ['wsgi.input'] body = BytesIO() if maxread < MEMFILE_MAX else TemporaryFile(mode='w+b') while maxread > 0: part = stream.read(min(maxread, MEMFILE_MAX)) if not part: break body.write(part) maxread -= len(part) self.environ['wsgi.input'] = body body.seek(0) return body @property def body(self): self._body.seek(0) return self._body @property def auth(self): return parse_auth(self.headers.get('Authorization','')) @DictProperty('environ', 'bottle.cookies', read_only=True) def COOKIES(self): raw_dict = SimpleCookie(self.headers.get('Cookie','')) cookies = {} for cookie in raw_dict.itervalues(): cookies[cookie.key] = cookie.value return cookies def get_cookie(self, key, secret=None): value = self.COOKIES.get(key) if secret and value: dec = cookie_decode(value, secret) return dec[1] if dec and dec[0] == key else None return value or None @property def is_ajax(self): return self.header.get('X-Requested-With') == 'XMLHttpRequest' class Response(threading.local): def __init__(self): self.bind() def bind(self): self._COOKIES = None self.status = 200 self.headers = HeaderDict() self.content_type = 'text/html; charset=UTF-8' @property def header(self): depr("Response.header renamed to Response.headers") return self.headers def copy(self): copy = Response() copy.status = self.status copy.headers = self.headers.copy() copy.content_type = self.content_type return copy def wsgiheader(self): for c in self.COOKIES.values(): if c.OutputString() not in self.headers.getall('Set-Cookie'): self.headers.append('Set-Cookie', c.OutputString()) if self.status in (204, 304) and 'content-type' in self.headers: del self.headers['content-type'] if self.status == 304: for h in ('allow', 'content-encoding', 'content-language', 'content-length', 'content-md5', 'content-range', 'content-type', 'last-modified'): if h in self.headers: del self.headers[h] return list(self.headers.iterallitems()) headerlist = property(wsgiheader) @property def charset(self): if 'charset=' in self.content_type: return self.content_type.split('charset=')[-1].split(';')[0].strip() return 'UTF-8' @property def COOKIES(self): if not self._COOKIES: self._COOKIES = SimpleCookie() return self._COOKIES
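A minimal usage sketch of the routing and hook API shown in the context above; the `app` instance name and the handler bodies are illustrative assumptions, not part of the original Bottle source.

# Hypothetical usage of the Bottle-style app shown above; `app` is assumed to be
# an instance of the application class from this module.
@app.get('/hello')
def hello():
    return 'Hello world'

@app.hook('after_request')
def add_server_header():
    # runs after each request handler via the 'after_request' hook list
    response.headers['X-Served-By'] = 'bottle'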
MIT License
stefan-korner/spacepylibrary
EGSE/EDENPDU.py
TC_Escoe.initAttributes
python
def initAttributes(self):
    TCscoe.initAttributes(self)
    self.pduType = PDU_TYPE_TC_E
Hook for initializing attributes; delegates to the parent class.
https://github.com/stefan-korner/spacepylibrary/blob/6a9f0827005c03cbc59557def78bbc035a97bbea/EGSE/EDENPDU.py#L303-L306
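A brief, hedged usage sketch of the class this initializer belongs to; the import path mirrors the file location shown above and the explicit re-initialization call is only for illustration.

# Hypothetical sketch: constructing a TC-E SCOE PDU and checking the type tag.
from EGSE.EDENPDU import TC_Escoe, PDU_TYPE_TC_E

pdu = TC_Escoe()
pdu.initAttributes()                   # re-initialize the header fields explicitly
print(pdu.pduType == PDU_TYPE_TC_E)    # True: the TC-E override sticks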
from UTIL.DU import BITS, BYTES, UNSIGNED, STRING, TIME, BinaryUnit PDU_HEADER_BYTE_SIZE = 42 PDU_HEADER_ATTRIBUTES = { "pduType": ( 0, 4, STRING), "subType": ( 4, 10, STRING), "field1": (14, 16, STRING), "field2": (30, 4, UNSIGNED), "field3": (34, 4, UNSIGNED), "dataFieldLength": (38, 4, UNSIGNED)} CCSDS_PDU_SEC_HEADER_BYTE_SIZE = 36 TC_SPACE_SEC_HEADER_BYTE_SIZE = CCSDS_PDU_SEC_HEADER_BYTE_SIZE TC_SPACE_SEC_HEADER_ATTRIBUTES = { "structureType": ( 0, 1, UNSIGNED), "channel": ( 1, 1, UNSIGNED), "spare": ( 2, 1, BYTES), "telecommandType": ( 3, 1, UNSIGNED), "tcIdentificationWord": ( 4, 4, UNSIGNED), "telecommandOrigin": ( 8, 1, UNSIGNED), "telecommandProtMode": ( 9, 1, UNSIGNED), "time": (10, 22, BYTES), "mapId": (32, 1, UNSIGNED), "ancillaryInformation": (33, 1, UNSIGNED), "telecommandEchoStatus": (34, 1, UNSIGNED), "sequenceFlags": (35, 1, UNSIGNED)} TC_SCOE_SEC_HEADER_BYTE_SIZE = CCSDS_PDU_SEC_HEADER_BYTE_SIZE TC_SCOE_SEC_HEADER_ATTRIBUTES = { "structureType": ( 0, 1, UNSIGNED), "spare1": ( 1, 3, BYTES), "tcIdentificationWord": ( 4, 4, UNSIGNED), "telecommandOrigin": ( 8, 1, UNSIGNED), "spare2": ( 9, 1, UNSIGNED), "time": (10, 22, BYTES), "spare3": (32, 2, BYTES), "telecommandEchoStatus": (34, 1, UNSIGNED), "spare4": (35, 1, BYTES)} TM_SPACE_SEC_HEADER_BYTE_SIZE = CCSDS_PDU_SEC_HEADER_BYTE_SIZE TM_SPACE_SEC_HEADER_ATTRIBUTES = { "structureType": ( 0, 1, UNSIGNED), "channel": ( 1, 1, UNSIGNED), "dataQuality": ( 2, 1, UNSIGNED), "spare1": ( 3, 1, BYTES), "clcw": ( 4, 4, UNSIGNED), "spare2": ( 8, 2, BYTES), "time": (10, 22, BYTES), "masterChannelFrameCount": (32, 1, UNSIGNED), "virtualChannelFrameCount": (33, 1, UNSIGNED), "spare3": (34, 2, BYTES)} TM_SCOE_SEC_HEADER_BYTE_SIZE = CCSDS_PDU_SEC_HEADER_BYTE_SIZE TM_SCOE_SEC_HEADER_ATTRIBUTES = { "structureType": ( 0, 1, UNSIGNED), "spare1": ( 1, 9, BYTES), "time": (10, 22, BYTES), "spare2": (32, 4, BYTES)} PDU_TYPE_NULL = "????" PDU_TYPE_TC = "TC " PDU_TYPE_TC_E = "TC-E" PDU_TYPE_TC_A = "TC-A" PDU_TYPE_TM = "TM " PDU_TYPE_TM_D = "TM-D" PDU_TYPE_TC_D = "TC-D" PDU_TYPE_USER = "USER" PDU_TYPE_SEQ = "SEQ " PDU_TYPE_ERR = "ERR " PDU_TYPE_CMD = "CMD " PDU_TYPE_DUMP = "DUMP" PDU_TYPE_PAR = "PAR " SUB_TYPE_NULL = "??????????" SUB_TYPE_ANSW = "ANSW " SUB_TYPE_CLTU = "CLTU " SUB_TYPE_ENVELOPE = "ENVELOPE " SUB_TYPE_ERR = "ERR " SUB_TYPE_EXEC = "EXEC " SUB_TYPE_FRAME = "FRAME " SUB_TYPE_LOG = "LOG " SUB_TYPE_PHYSICAL = "PHYSICAL " SUB_TYPE_PROTOCOL = "PROTOCOL " SUB_TYPE_SEGMENT = "SEGMENT " SUB_TYPE_SCOE = "SCOE " SUB_TYPE_SPACE = "SPACE " SUB_TYPE_SPECIF_OND = "SPECIF_OND" SUB_TYPE_STATUS = "STATUS " SUB_TYPE_STOP = "STOP " SUB_TYPE_TIMEOUT = "TIMEOUT " SUB_TYPE_UNKNOWN = "UNKNOWN " FIELD1_NULL = "????????????????" 
TC_SPACE_STRUCTURE_TYPE = 0 TC_SCOE_STRUCTURE_TYPE = 2 TC_TYPE_PACKET = 0 TC_TYPE_SEGMENT = 1 TC_TYPE_FRAME = 2 TC_TYPE_CLTU = 3 TC_TYPE_PHYSICAL = 4 TC_ORIGIN_LOCAL = 0 TC_ORIGIN_CCS = 1 TC_ORIGIN_OCC = 2 TC_ORIGIN_OCC_NDIU = 3 TC_ORIGIN_PLAYBACK = 4 TC_PROT_MODE = 255 TC_ANCILLARY_INFORMATION = 0 TC_ECHO_STATUS_OK = 0 TC_SEQUENCE_FLAGS_CONTINUATION = 0 TC_SEQUENCE_FLAGS_FIRST = 1 TC_SEQUENCE_FLAGS_LAST = 2 TC_SEQUENCE_FLAGS_UN_SEGMENTED = 3 TM_SPACE_STRUCTURE_TYPE = 1 TM_SCOE_STRUCTURE_TYPE = 3 class PDU(BinaryUnit): def __init__(self, binaryString=None, attributesSize2=0, attributeMap2=None): BinaryUnit.__init__(self, binaryString, PDU_HEADER_BYTE_SIZE, PDU_HEADER_ATTRIBUTES, attributesSize2, attributeMap2) def initAttributes(self): BinaryUnit.initAttributes(self) self.pduType = PDU_TYPE_NULL self.subType = SUB_TYPE_NULL self.field1 = FIELD1_NULL self.setDataFieldLength() def getDataField(self): headerByteSize = PDU_HEADER_BYTE_SIZE return self.getBytes(headerByteSize, self.dataFieldLength) def setDataField(self, dataField): self.setLen(PDU_HEADER_BYTE_SIZE) self.append(dataField) self.dataFieldLength = len(dataField) def setDataFieldLength(self): self.dataFieldLength = len(self) - PDU_HEADER_BYTE_SIZE class CCSDSpdu(PDU): def __init__(self, binaryString=None, attributesSize2=0, attributeMap2=None): PDU.__init__(self, binaryString, attributesSize2, attributeMap2) def getCCSDSpacket(self): headersByteSize = PDU_HEADER_BYTE_SIZE + CCSDS_PDU_SEC_HEADER_BYTE_SIZE return self.getBytes(headersByteSize, self.dataFieldLength - CCSDS_PDU_SEC_HEADER_BYTE_SIZE) def setCCSDSpacket(self, packet): self.setLen(PDU_HEADER_BYTE_SIZE + CCSDS_PDU_SEC_HEADER_BYTE_SIZE) self.append(packet) self.setDataFieldLength() class TCspace(CCSDSpdu): def __init__(self, binaryString=None): CCSDSpdu.__init__(self, binaryString, TC_SPACE_SEC_HEADER_BYTE_SIZE, TC_SPACE_SEC_HEADER_ATTRIBUTES) def initAttributes(self): CCSDSpdu.initAttributes(self) self.pduType = PDU_TYPE_TC self.subType = SUB_TYPE_SPACE self.field1 = FIELD1_NULL self.structureType = TC_SPACE_STRUCTURE_TYPE self.telecommandType = TC_TYPE_PACKET self.telecommandOrigin = TC_ORIGIN_CCS self.telecommandProtMode = TC_PROT_MODE self.ancillaryInformation = TC_ANCILLARY_INFORMATION self.telecommandEchoStatus = TC_ECHO_STATUS_OK self.sequenceFlags = TC_SEQUENCE_FLAGS_UN_SEGMENTED class TC_Espace(TCspace): def __init__(self, binaryString=None): TCspace.__init__(self, binaryString) self.pduType = PDU_TYPE_TC_E def initAttributes(self): TCspace.initAttributes(self) self.pduType = PDU_TYPE_TC_E class TCscoe(CCSDSpdu): def __init__(self, binaryString=None): CCSDSpdu.__init__(self, binaryString, TC_SCOE_SEC_HEADER_BYTE_SIZE, TC_SCOE_SEC_HEADER_ATTRIBUTES) def initAttributes(self): CCSDSpdu.initAttributes(self) self.setDataFieldLength() self.pduType = PDU_TYPE_TC self.subType = SUB_TYPE_SCOE self.field1 = FIELD1_NULL self.structureType = TC_SCOE_STRUCTURE_TYPE self.telecommandOrigin = TC_ORIGIN_CCS self.telecommandEchoStatus = TC_ECHO_STATUS_OK class TC_Escoe(TCscoe): def __init__(self, binaryString=None): TCscoe.__init__(self, binaryString) self.pduType = PDU_TYPE_TC_E
MIT License
p0cl4bs/wifipumpkin3
wifipumpkin3/modules/spoof/dns_spoof.py
ModPump.do_start
python
def do_start(self, args):
    if self._background_mode:
        print(
            display_messages(
                "there are a dnsspoof module in brackground.", error=True
            )
        )
        return
    redirectTo = self.options.get("redirectTo")[0]
    if not self.options.get("domains")[0]:
        print(
            display_messages(
                "please, select a domains to perform attack ", error=True
            )
        )
        return

    print(display_messages("DnsSpoof attack", info=True, sublime=True))
    print(
        display_messages(
            "Redirect to: {} ".format(setcolor(redirectTo, color="blue")),
            info=True,
        )
    )
    print(display_messages("Targets:", info=True, sublime=True))
    for target in self.options.get("domains")[0].split(","):
        print(
            display_messages(
                "-> [{}] ".format(setcolor(target, color="red")),
                info=True,
            )
        )

    self.handler_dnshosts = open(self.filepath_dns_hosts, "a")
    for target in self.options.get("domains")[0].split(","):
        self.handler_dnshosts.write(
            self.rules_model.format(dns=target, redirect=redirectTo)
        )
    self.handler_dnshosts.close()
    self.set_background_mode(True)
Start updating the DNS zones file.
https://github.com/p0cl4bs/wifipumpkin3/blob/47d79c3c21cd63eb2deedaf7a25e8e219abec48b/wifipumpkin3/modules/spoof/dns_spoof.py#L101-L143
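For illustration, a small standalone sketch of the zone-file lines that do_start appends for each configured domain; the domain names and redirect address below are made-up values.

# Hypothetical inputs mirroring the module options used in do_start.
rules_model = "{dns} A {redirect}\n"
redirect_to = "10.0.0.1"
domains = "example.com,login.example.com"

lines = [rules_model.format(dns=d, redirect=redirect_to) for d in domains.split(",")]
# lines == ["example.com A 10.0.0.1\n", "login.example.com A 10.0.0.1\n"]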
from wifipumpkin3.core.common.terminal import ModuleUI from wifipumpkin3.core.config.globalimport import * from wifipumpkin3.core.utility.printer import display_messages, setcolor from wifipumpkin3.core.common.threads import ProcessThread class ModPump(ModuleUI): name = "dns_spoof" default_hosts = [ "# this is an example zones file", "# each line with parts split on white space are considered thus:", "# 1: the host", "# 2: the record type", '# everything else: either a single string or json list if it starts with "["', "# lines starting with white space are striped of white space (including newline)", "# and added to the previous line", "example.com A 10.0.0.1", "example.com CNAME whatever.com", 'example.com MX ["whatever.com.", 5]', 'example.com MX ["mx2.whatever.com.", 10]', 'example.com MX ["mx3.whatever.com.", 20]', "example.com NS ns1.whatever.com.", "example.com NS ns2.whatever.com.", "example.com TXT hello this is some text", 'example.com SOA ["ns1.example.com", "dns.example.com"]', "# because the next record exceeds 255 in length dnserver will automatically", "# split it into a multipart record, the new lines here have no effect on that", "testing.com TXT one long value: IICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAg", " FWZUed1qcBziAsqZ/LzT2ASxJYuJ5sko1CzWFhFuxiluNnwKjSknSjanyYnm0vro4dhAtyiQ7O", " PVROOaNy9Iyklvu91KuhbYi6l80Rrdnuq1yjM//xjaB6DGx8+m1ENML8PEdSFbKQbh9akm2bkN", " w5DC5a8Slp7j+eEVHkgV3k3oRhkPcrKyoPVvniDNH+Ln7DnSGC+Aw5Sp+fhu5aZmoODhhX5/1m", " ANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA26JaFWZUed1qcBziAsqZ/LzTF2ASxJYuJ5sk", ] options = { "redirectTo": ["10.0.0.1", "ip address to redirect traffic"], "domains": [None, "the targets domain's name server"], } completions = list(options.keys()) domains = [] def __init__(self, parse_args=None, root=None): self.parse_args = parse_args self.root = root self.name_module = self.name self.filepath_dns_hosts = C.DNSHOSTS self.rules_model = "{dns} A {redirect}\n" super(ModPump, self).__init__(parse_args=self.parse_args, root=self.root) self.options["redirectTo"][0] = self.conf.get("dhcp", "router") def do_add(self, args): if not self.options.get("domains")[0]: self.options["domains"][0] = ",".join([args]) return targets = self.options.get("domains")[0].split(",") targets.append(args) self.options["domains"][0] = ",".join(targets) def do_rm(self, args): if not self.options.get("domains")[0]: return print(display_messages("the list of domains is empty", error=True)) targets = self.options.get("domains")[0].split(",") try: targets.remove(args) if targets != []: self.options["domains"][0] = ",".join(targets) else: self.options["domains"][0] = None except ValueError: return print( display_messages( "the value {} not in the domains list".format(args), error=True ) )
Apache License 2.0
phixion/home-assistantconfig
custom_components/sleep_as_android/device_trigger.py
async_get_triggers
python
async def async_get_triggers(hass, device_id):
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device = device_registry.async_get(device_id)

    triggers = []
    for t in TRIGGERS:
        triggers.append({
            CONF_PLATFORM: "device",
            CONF_DOMAIN: DOMAIN,
            CONF_DEVICE_ID: device_id,
            CONF_TYPE: t,
        })

    return triggers
Return a list of triggers.
https://github.com/phixion/home-assistantconfig/blob/9e042c2205477e1bf21eeca3f91d471f1a49dffc/custom_components/sleep_as_android/device_trigger.py#L52-L69
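Every entry in the returned list has the same shape; below is a hedged example of one such dictionary, written with the literal key strings for readability (the device id and the domain value are illustrative assumptions).

# One element of the list async_get_triggers returns.
example_trigger = {
    "platform": "device",           # CONF_PLATFORM
    "domain": "sleep_as_android",   # DOMAIN from .const (assumed value)
    "device_id": "abc123",          # placeholder device registry id
    "type": "sleep_tracking_started",
}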
import logging import voluptuous as vol from homeassistant.const import (CONF_TYPE, CONF_PLATFORM, CONF_DOMAIN, CONF_DEVICE_ID, ) from homeassistant.components.homeassistant.triggers import event as event_trigger from homeassistant.core import HomeAssistant from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA as HA_TRIGGER_BASE_SCHEMA from .const import DOMAIN _LOGGER = logging.getLogger(__name__) TRIGGERS = [ "sleep_tracking_started", "sleep_tracking_stopped", "sleep_tracking_paused", "sleep_tracking_resumed", "alarm_snooze_clicked", "alarm_snooze_canceled", "time_to_bed_alarm_alert", "alarm_alert_start", "alarm_alert_dismiss", "alarm_skip_next", "rem", "smart_period", "before_smart_period", "lullaby_start", "lullaby_stop", "lullaby_volume_down", "deep_sleep", "light_sleep", "awake", "not_awake", "apnea_alarm", "antisnoring", "sound_event_snore", "sound_event_talk", "sound_event_cough", "sound_event_baby", "sound_event_laugh" ] TRIGGER_SCHEMA = HA_TRIGGER_BASE_SCHEMA.extend( { vol.Required(CONF_TYPE): vol.In(TRIGGERS), } )
MIT License
rpi-distro/thonny
thonny/running.py
BackendProxy.kill_current_process
python
def kill_current_process(self): pass
Kill the backend. Called when Thonny no longer needs this backend (Thonny gets closed or a new backend gets selected).
https://github.com/rpi-distro/thonny/blob/78a289c5948310377aacfe5349cb1a43d75ed7d8/thonny/running.py#L508-L514
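The base implementation is a no-op; below is a hedged sketch of how a concrete proxy might override it. The class name and the process handle are illustrative assumptions, not Thonny's actual CPythonProxy implementation.

# Hypothetical proxy subclass that owns a real child process.
class MyBackendProxy(BackendProxy):
    def __init__(self, configuration_option):
        self._proc = None  # e.g. a subprocess.Popen handle created elsewhere

    def kill_current_process(self):
        # invoked via Runner.kill_backend() when the backend is no longer needed
        if self._proc is not None and self._proc.poll() is None:
            self._proc.kill()
        self._proc = None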
from _thread import start_new_thread from logging import debug import os.path import subprocess import sys from thonny.common import serialize_message, ToplevelCommand, InlineCommand, parse_shell_command, CommandSyntaxError, parse_message, DebuggerCommand, InputSubmission, UserError from thonny.globals import get_workbench, get_runner import shlex from thonny import THONNY_USER_DIR from thonny.misc_utils import running_on_windows, running_on_mac_os, eqfn from shutil import which import shutil import tokenize import collections import signal import logging DEFAULT_CPYTHON_INTERPRETER = "default" WINDOWS_EXE = "python.exe" class Runner: def __init__(self): get_workbench().set_default("run.working_directory", os.path.expanduser("~")) get_workbench().set_default("run.auto_cd", True) get_workbench().set_default("run.backend_configuration", "Python (%s)" % DEFAULT_CPYTHON_INTERPRETER) get_workbench().set_default("run.used_interpreters", []) get_workbench().add_backend("Python", CPythonProxy) from thonny.shell import ShellView get_workbench().add_view(ShellView, "Shell", "s", visible_by_default=True, default_position_key='A') self._init_commands() self._state = None self._proxy = None self._postponed_commands = [] self._current_toplevel_command = None self._current_command = None self._check_alloc_console() def start(self): try: self.reset_backend() finally: self._poll_vm_messages() def _init_commands(self): shell = get_workbench().get_view("ShellView") shell.add_command("Run", self.handle_execute_from_shell) shell.add_command("Reset", self._handle_reset_from_shell) shell.add_command("cd", self._handle_cd_from_shell) get_workbench().add_command('run_current_script', "run", 'Run current script', handler=self._cmd_run_current_script, default_sequence="<F5>", tester=self._cmd_run_current_script_enabled, group=10, image_filename="run.run_current_script.gif", include_in_toolbar=True) get_workbench().add_command('reset', "run", 'Interrupt/Reset', handler=self.cmd_interrupt_reset, default_sequence="<Control-F2>", tester=self._cmd_interrupt_reset_enabled, group=70, image_filename="run.stop.gif", include_in_toolbar=True) get_workbench().add_command('interrupt', "run", "Interrupt execution", handler=self._cmd_interrupt, tester=self._cmd_interrupt_enabled, default_sequence="<Control-c>", bell_when_denied=False) def get_cwd(self): if hasattr(self._proxy, "cwd"): return self._proxy.cwd else: return "" def get_state(self): return self._state def _set_state(self, state): if self._state != state: logging.debug("Runner state changed: %s ==> %s" % (self._state, state)) self._state = state if self._state == "waiting_toplevel_command": self._current_toplevel_command = None if self._state != "running": self._current_command = None def get_current_toplevel_command(self): return self._current_toplevel_command def get_current_command(self): return self._current_command def get_sys_path(self): return self._proxy.get_sys_path() def send_command(self, cmd): if self._proxy is None: return if not self._state_is_suitable(cmd): if isinstance(cmd, DebuggerCommand) and self.get_state() == "running": self._postpone_command(cmd) return elif isinstance(cmd, InlineCommand): self._postpone_command(cmd) return else: raise AssertionError("Trying to send " + str(cmd) + " in state " + self.get_state()) if cmd.command in ("Run", "Debug", "Reset"): get_workbench().event_generate("BackendRestart") accepted = self._proxy.send_command(cmd) if (accepted and isinstance(cmd, (ToplevelCommand, DebuggerCommand, InlineCommand))): 
self._set_state("running") self._current_command = cmd if isinstance(cmd, ToplevelCommand): self._current_toplevel_command = cmd def send_program_input(self, data): assert self.get_state() == "waiting_input" self._proxy.send_program_input(data) self._set_state("running") def execute_script(self, script_path, args, working_directory=None, command_name="Run"): if (working_directory is not None and self._proxy.cwd != working_directory): cmd_line = "%cd " + shlex.quote(working_directory) + "\n" next_cwd = working_directory else: cmd_line = "" next_cwd = self._proxy.cwd rel_filename = os.path.relpath(script_path, next_cwd) cmd_line += "%" + command_name + " " + shlex.quote(rel_filename) for arg in args: cmd_line += " " + shlex.quote(arg) cmd_line += "\n" get_workbench().get_view("ShellView").submit_command(cmd_line) def execute_current(self, command_name, always_change_to_script_dir=False): editor = get_workbench().get_current_editor() if not editor: return filename = editor.get_filename(True) if not filename: return if editor.is_modified(): filename = editor.save_file() if not filename: return script_dir = os.path.realpath(os.path.dirname(filename)) if (get_workbench().get_option("run.auto_cd") and command_name[0].isupper() or always_change_to_script_dir): working_directory = script_dir else: working_directory = None self.execute_script(filename, [], working_directory, command_name) def handle_execute_from_shell(self, cmd_line): command, args = parse_shell_command(cmd_line) if len(args) >= 1: get_workbench().get_editor_notebook().save_all_named_editors() cmd = ToplevelCommand(command=command, filename=args[0], args=args[1:]) if os.path.isabs(cmd.filename): cmd.full_filename = cmd.filename else: cmd.full_filename = os.path.join(self.get_cwd(), cmd.filename) if command in ["Run", "run", "Debug", "debug"]: with tokenize.open(cmd.full_filename) as fp: cmd.source = fp.read() self.send_command(cmd) else: raise CommandSyntaxError("Command '%s' takes at least one argument", command) def _handle_reset_from_shell(self, cmd_line): command, args = parse_shell_command(cmd_line) assert command == "Reset" if len(args) == 0: self.send_command(ToplevelCommand(command="Reset")) else: raise CommandSyntaxError("Command 'Reset' doesn't take arguments") def _handle_cd_from_shell(self, cmd_line): command, args = parse_shell_command(cmd_line) assert command == "cd" if len(args) == 1: self.send_command(ToplevelCommand(command="cd", path=args[0])) else: raise CommandSyntaxError("Command 'cd' takes one argument") def _cmd_run_current_script_enabled(self): return (get_workbench().get_editor_notebook().get_current_editor() is not None and get_runner().get_state() == "waiting_toplevel_command" and "run" in get_runner().supported_features()) def _cmd_run_current_script(self): self.execute_current("Run") def _cmd_interrupt(self): self.interrupt_backend() def _cmd_interrupt_enabled(self): widget = get_workbench().focus_get() if not running_on_mac_os(): if hasattr(widget, "selection_get"): try: if widget.selection_get() != "": return False except: pass return get_runner().get_state() != "waiting_toplevel_command" def cmd_interrupt_reset(self): if self.get_state() == "waiting_toplevel_command": get_workbench().get_view("ShellView").submit_command("%Reset\n") else: get_runner().interrupt_backend() def _cmd_interrupt_reset_enabled(self): return True def _postpone_command(self, cmd): if isinstance(cmd, InlineCommand): for older_cmd in self._postponed_commands: if older_cmd.command == cmd.command: 
self._postponed_commands.remove(older_cmd) if len(self._postponed_commands) > 10: else: self._postponed_commands.append(cmd) def _state_is_suitable(self, cmd): if isinstance(cmd, ToplevelCommand): return (self.get_state() == "waiting_toplevel_command" or cmd.command in ["Reset", "Run", "Debug"]) elif isinstance(cmd, DebuggerCommand): return self.get_state() == "waiting_debugger_command" elif isinstance(cmd, InlineCommand): return self.get_state() in self._proxy.allowed_states_for_inline_commands() else: raise RuntimeError("Unknown command class: " + str(type(cmd))) def _send_postponed_commands(self): remaining = [] for cmd in self._postponed_commands: if self._state_is_suitable(cmd): logging.debug("Sending postponed command", cmd) self.send_command(cmd) else: remaining.append(cmd) self._postponed_commands = remaining def _poll_vm_messages(self): try: initial_state = self.get_state() while self._proxy is not None: msg = self._proxy.fetch_next_message() if not msg: break if msg.get("SystemExit", False): self.reset_backend() return if "command_context" in msg: self._set_state(msg["command_context"]) elif msg["message_type"] == "ToplevelResult": self._set_state("waiting_toplevel_command") elif msg["message_type"] == "InputRequest": self._set_state("waiting_input") else: if msg["message_type"] == "ToplevelResult": self._current_toplevel_command = None get_workbench().event_generate(msg["message_type"], **msg) get_workbench().set_option("run.working_directory", self.get_cwd()) if self.get_state() != initial_state: self._send_postponed_commands() finally: get_workbench().after(50, self._poll_vm_messages) def reset_backend(self): self.kill_backend() configuration = get_workbench().get_option("run.backend_configuration") backend_name, configuration_option = parse_configuration(configuration) if backend_name not in get_workbench().get_backends(): raise UserError("Can't find backend '{}'. Please select another backend from options" .format(backend_name)) backend_class = get_workbench().get_backends()[backend_name] self._set_state("running") self._proxy = None self._proxy = backend_class(configuration_option) def interrupt_backend(self): if self._proxy is not None: self._proxy.interrupt() def kill_backend(self): self._current_toplevel_command = None self._current_command = None self._postponed_commands = [] if self._proxy: self._proxy.kill_current_process() self._proxy = None def get_interpreter_command(self): return self._proxy.get_interpreter_command() def get_backend_description(self): return self._proxy.get_description() def _check_alloc_console(self): if (sys.executable.endswith("thonny.exe") or sys.executable.endswith("pythonw.exe")): import ctypes kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) exe = (sys.executable .replace("thonny.exe", "python.exe") .replace("pythonw.exe", "python.exe")) cmd = [exe, "-c", "print('Hi!'); input()"] child = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) child.stdout.readline() result = kernel32.AttachConsole(child.pid) if not result: err = ctypes.get_last_error() logging.info("Could not allocate console. 
Error code: " +str(err)) child.stdin.write(b"\n") child.stdin.flush() def supported_features(self): if self._proxy is None: return [] else: return self._proxy.supported_features() def get_frontend_python(self): return sys.executable.replace("thonny.exe", "pythonw.exe") def using_venv(self): return isinstance(self._proxy, CPythonProxy) and self._proxy.in_venv class BackendProxy: def __init__(self, configuration_option): @classmethod def get_configuration_options(cls): raise NotImplementedError() def get_description(self): raise NotImplementedError() def send_command(self, cmd): raise NotImplementedError() def allowed_states_for_inline_commands(self): return ["waiting_toplevel_command"] def send_program_input(self, data): raise NotImplementedError() def fetch_next_message(self): raise NotImplementedError() def get_sys_path(self): return [] def interrupt(self): self.kill_current_process()
MIT License
rundherum/pymia
pymia/filtering/registration.py
PlotOnResolutionChangeCallback.registration_started
python
def registration_started(self): self.resolution = 0
Callback for the StartEvent.
https://github.com/rundherum/pymia/blob/446192d56dfeee2293027b9b041a1e2140e38ae7/pymia/filtering/registration.py#L300-L302
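A hedged usage sketch showing how the callback is wired into a registration run; the image variables are placeholders for SimpleITK images loaded elsewhere.

# fixed_image and moving_image are assumed to be SimpleITK images.
callback = PlotOnResolutionChangeCallback(plot_dir='/tmp/registration_plots')
params = MultiModalRegistrationParams(fixed_image=fixed_image, callbacks=[callback])

registration = MultiModalRegistration()              # default rigid registration
registered_image = registration.execute(moving_image, params)
# registration_started() resets callback.resolution to 0 when the StartEvent fires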
import abc import enum import os import typing import SimpleITK as sitk import pymia.filtering.filter as pymia_fltr class RegistrationType(enum.Enum): AFFINE = 1 SIMILARITY = 2 RIGID = 3 BSPLINE = 4 class RegistrationCallback(abc.ABC): def __init__(self) -> None: self.registration_method = None self.fixed_image = None self.moving_image = None self.transform = None def set_params(self, registration_method: sitk.ImageRegistrationMethod, fixed_image: sitk.Image, moving_image: sitk.Image, transform: sitk.Transform): self.registration_method = registration_method self.fixed_image = fixed_image self.moving_image = moving_image self.transform = transform self.registration_method.AddCommand(sitk.sitkStartEvent, self.registration_started) self.registration_method.AddCommand(sitk.sitkEndEvent, self.registration_ended) self.registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, self.registration_resolution_changed) self.registration_method.AddCommand(sitk.sitkIterationEvent, self.registration_iteration_ended) def registration_ended(self): pass def registration_started(self): pass def registration_resolution_changed(self): pass def registration_iteration_ended(self): pass class MultiModalRegistrationParams(pymia_fltr.FilterParams): def __init__(self, fixed_image: sitk.Image, fixed_image_mask: sitk.Image = None, callbacks: typing.List[RegistrationCallback] = None): self.fixed_image = fixed_image self.fixed_image_mask = fixed_image_mask self.callbacks = callbacks class MultiModalRegistration(pymia_fltr.Filter): def __init__(self, registration_type: RegistrationType = RegistrationType.RIGID, number_of_histogram_bins: int = 200, learning_rate: float = 1.0, step_size: float = 0.001, number_of_iterations: int = 200, relaxation_factor: float = 0.5, shrink_factors: typing.List[int] = (2, 1, 1), smoothing_sigmas: typing.List[float] = (2, 1, 0), sampling_percentage: float = 0.2, sampling_seed: int = sitk.sitkWallClock, resampling_interpolator=sitk.sitkBSpline): super().__init__() if len(shrink_factors) != len(smoothing_sigmas): raise ValueError("shrink_factors and smoothing_sigmas need to be same length") self.registration_type = registration_type self.number_of_histogram_bins = number_of_histogram_bins self.learning_rate = learning_rate self.step_size = step_size self.number_of_iterations = number_of_iterations self.relaxation_factor = relaxation_factor self.shrink_factors = shrink_factors self.smoothing_sigmas = smoothing_sigmas self.sampling_percentage = sampling_percentage self.sampling_seed = sampling_seed self.resampling_interpolator = resampling_interpolator registration = sitk.ImageRegistrationMethod() registration.SetMetricAsMattesMutualInformation(self.number_of_histogram_bins) registration.SetMetricSamplingStrategy(registration.RANDOM) registration.SetMetricSamplingPercentage(self.sampling_percentage, self.sampling_seed) registration.SetMetricUseFixedImageGradientFilter(False) registration.SetMetricUseMovingImageGradientFilter(False) registration.SetInterpolator(sitk.sitkLinear) if self.registration_type == RegistrationType.BSPLINE: registration.SetOptimizerAsLBFGSB() else: registration.SetOptimizerAsRegularStepGradientDescent(learningRate=self.learning_rate, minStep=self.step_size, numberOfIterations=self.number_of_iterations, relaxationFactor=self.relaxation_factor, gradientMagnitudeTolerance=1e-4, estimateLearningRate=registration.EachIteration, maximumStepSizeInPhysicalUnits=0.0) registration.SetOptimizerScalesFromPhysicalShift() 
registration.SetShrinkFactorsPerLevel(self.shrink_factors) registration.SetSmoothingSigmasPerLevel(self.smoothing_sigmas) registration.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn() self.registration = registration self.transform = None def execute(self, image: sitk.Image, params: MultiModalRegistrationParams = None) -> sitk.Image: if params is None: raise ValueError("params is not defined") dimension = image.GetDimension() if dimension not in (2, 3): raise ValueError('Image dimension {} is not among the accepted (2, 3)'.format(dimension)) if self.registration_type == RegistrationType.BSPLINE: transform_domain_mesh_size = [10] * image.GetDimension() initial_transform = sitk.BSplineTransformInitializer(params.fixed_image, transform_domain_mesh_size) else: if self.registration_type == RegistrationType.RIGID: transform_type = sitk.VersorRigid3DTransform() if dimension == 3 else sitk.Euler2DTransform() elif self.registration_type == RegistrationType.AFFINE: transform_type = sitk.AffineTransform(dimension) elif self.registration_type == RegistrationType.SIMILARITY: transform_type = sitk.Similarity3DTransform() if dimension == 3 else sitk.Similarity2DTransform() else: raise ValueError('not supported registration_type') initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(params.fixed_image, image.GetPixelIDValue()), image, transform_type, sitk.CenteredTransformInitializerFilter.GEOMETRY) self.registration.SetInitialTransform(initial_transform, inPlace=True) if params.fixed_image_mask: self.registration.SetMetricFixedMask(params.fixed_image_mask) if params.callbacks is not None: for callback in params.callbacks: callback.set_params(self.registration, params.fixed_image, image, initial_transform) self.transform = self.registration.Execute(sitk.Cast(params.fixed_image, sitk.sitkFloat32), sitk.Cast(image, sitk.sitkFloat32)) if self.verbose: print('MultiModalRegistration:\n Final metric value: {0}'.format(self.registration.GetMetricValue())) print(' Optimizer\'s stopping condition, {0}'.format( self.registration.GetOptimizerStopConditionDescription())) elif self.number_of_iterations == self.registration.GetOptimizerIteration(): print('MultiModalRegistration: Optimizer terminated at number of iterations and did not converge!') return sitk.Resample(image, params.fixed_image, self.transform, self.resampling_interpolator, 0.0, image.GetPixelIDValue()) def __str__(self): return 'MultiModalRegistration:\n' ' registration_type: {self.registration_type}\n' ' number_of_histogram_bins: {self.number_of_histogram_bins}\n' ' learning_rate: {self.learning_rate}\n' ' step_size: {self.step_size}\n' ' number_of_iterations: {self.number_of_iterations}\n' ' relaxation_factor: {self.relaxation_factor}\n' ' shrink_factors: {self.shrink_factors}\n' ' smoothing_sigmas: {self.smoothing_sigmas}\n' ' sampling_percentage: {self.sampling_percentage}\n' ' resampling_interpolator: {self.resampling_interpolator}\n' .format(self=self) class PlotOnResolutionChangeCallback(RegistrationCallback): def __init__(self, plot_dir: str, file_name_prefix: str = '') -> None: super().__init__() self.plot_dir = plot_dir self.file_name_prefix = file_name_prefix self.resolution = 0 def registration_ended(self): self._write_image('end')
Apache License 2.0
roseou/flasky
venv/lib/python2.7/site-packages/flask_login.py
confirm_login
python
def confirm_login():
    session['_fresh'] = True
    session['_id'] = _create_identifier()
    user_login_confirmed.send(current_app._get_current_object())
This sets the current session as fresh. Sessions become stale when they are reloaded from a cookie.
https://github.com/roseou/flasky/blob/0ebf366ddfe9604acfba99756f69a6d63063b3f9/venv/lib/python2.7/site-packages/flask_login.py#L749-L756
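A hedged sketch of a typical re-authentication view that calls confirm_login() after the user re-enters their password; an existing Flask app instance named `app` is assumed, and the route, endpoint, and form handling are made up for illustration.

from flask import redirect, url_for
from flask_login import confirm_login, login_required

@app.route('/reauthenticate', methods=['POST'])
@login_required
def reauthenticate():
    # ... verify the submitted password here ...
    confirm_login()                    # marks the session fresh again
    return redirect(url_for('index'))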
__version_info__ = ('0', '3', '1') __version__ = '.'.join(__version_info__) __author__ = 'Matthew Frazier' __maintainer__ = 'Max Countryman' __license__ = 'MIT/X11' __copyright__ = '(c) 2011 by Matthew Frazier' __all__ = ['LoginManager'] from flask import (_request_ctx_stack, abort, current_app, flash, redirect, request, session, url_for, has_request_context) from flask.signals import Namespace from werkzeug.local import LocalProxy from werkzeug.security import safe_str_cmp from werkzeug.urls import url_decode, url_encode from datetime import datetime, timedelta from functools import wraps from hashlib import sha512 import hmac import warnings import sys if sys.version < '3': from urlparse import urlparse, urlunparse else: from urllib.parse import urlparse, urlunparse unicode = str _signals = Namespace() current_user = LocalProxy(lambda: _get_user()) COOKIE_NAME = 'remember_token' COOKIE_DURATION = timedelta(days=365) COOKIE_SECURE = None COOKIE_HTTPONLY = False LOGIN_MESSAGE = u'Please log in to access this page.' LOGIN_MESSAGE_CATEGORY = 'message' REFRESH_MESSAGE = u'Please reauthenticate to access this page.' REFRESH_MESSAGE_CATEGORY = 'message' ID_ATTRIBUTE = 'get_id' AUTH_HEADER_NAME = 'Authorization' SESSION_KEYS = {'user_id', '_id', '_fresh'} class LoginManager(object): def __init__(self, app=None, add_context_processor=True): self.anonymous_user = AnonymousUserMixin self.login_view = None self.blueprint_login_views = {} self.login_message = LOGIN_MESSAGE self.login_message_category = LOGIN_MESSAGE_CATEGORY self.refresh_view = None self.needs_refresh_message = REFRESH_MESSAGE self.needs_refresh_message_category = REFRESH_MESSAGE_CATEGORY self.session_protection = 'basic' self.localize_callback = None self.token_callback = None self.user_callback = None self.unauthorized_callback = None self.needs_refresh_callback = None self.id_attribute = ID_ATTRIBUTE self.header_callback = None self.request_callback = None if app is not None: self.init_app(app, add_context_processor) def setup_app(self, app, add_context_processor=True): warnings.warn('Warning setup_app is deprecated. 
Please use init_app.', DeprecationWarning) self.init_app(app, add_context_processor) def init_app(self, app, add_context_processor=True): app.login_manager = self app.after_request(self._update_remember_cookie) self._login_disabled = app.config.get('LOGIN_DISABLED', False) if add_context_processor: app.context_processor(_user_context_processor) def unauthorized(self): user_unauthorized.send(current_app._get_current_object()) if self.unauthorized_callback: return self.unauthorized_callback() if request.blueprint in self.blueprint_login_views: login_view = self.blueprint_login_views[request.blueprint] else: login_view = self.login_view if not login_view: abort(401) if self.login_message: if self.localize_callback is not None: flash(self.localize_callback(self.login_message), category=self.login_message_category) else: flash(self.login_message, category=self.login_message_category) return redirect(login_url(login_view, request.url)) def user_loader(self, callback): self.user_callback = callback return callback def header_loader(self, callback): self.header_callback = callback return callback def request_loader(self, callback): self.request_callback = callback return callback def token_loader(self, callback): self.token_callback = callback return callback def unauthorized_handler(self, callback): self.unauthorized_callback = callback return callback def needs_refresh_handler(self, callback): self.needs_refresh_callback = callback return callback def needs_refresh(self): user_needs_refresh.send(current_app._get_current_object()) if self.needs_refresh_callback: return self.needs_refresh_callback() if not self.refresh_view: abort(401) if self.localize_callback is not None: flash(self.localize_callback(self.needs_refresh_message), category=self.needs_refresh_message_category) else: flash(self.needs_refresh_message, category=self.needs_refresh_message_category) return redirect(login_url(self.refresh_view, request.url)) def reload_user(self, user=None): ctx = _request_ctx_stack.top if user is None: user_id = session.get('user_id') if user_id is None: ctx.user = self.anonymous_user() else: if self.user_callback is None: raise Exception( "No user_loader has been installed for this " "LoginManager. 
Add one with the " "'LoginManager.user_loader' decorator.") user = self.user_callback(user_id) if user is None: ctx.user = self.anonymous_user() else: ctx.user = user else: ctx.user = user def _load_user(self): user_accessed.send(current_app._get_current_object()) config = current_app.config if config.get('SESSION_PROTECTION', self.session_protection): deleted = self._session_protection() if deleted: return self.reload_user() is_missing_user_id = 'user_id' not in session if is_missing_user_id: cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME) header_name = config.get('AUTH_HEADER_NAME', AUTH_HEADER_NAME) has_cookie = (cookie_name in request.cookies and session.get('remember') != 'clear') if has_cookie: return self._load_from_cookie(request.cookies[cookie_name]) elif self.request_callback: return self._load_from_request(request) elif header_name in request.headers: return self._load_from_header(request.headers[header_name]) return self.reload_user() def _session_protection(self): sess = session._get_current_object() ident = _create_identifier() app = current_app._get_current_object() mode = app.config.get('SESSION_PROTECTION', self.session_protection) if sess and ident != sess.get('_id', None): if mode == 'basic' or sess.permanent: sess['_fresh'] = False session_protected.send(app) return False elif mode == 'strong': for k in SESSION_KEYS: sess.pop(k, None) sess['remember'] = 'clear' session_protected.send(app) return True return False def _load_from_cookie(self, cookie): if self.token_callback: user = self.token_callback(cookie) if user is not None: session['user_id'] = getattr(user, self.id_attribute)() session['_fresh'] = False _request_ctx_stack.top.user = user else: self.reload_user() else: user_id = decode_cookie(cookie) if user_id is not None: session['user_id'] = user_id session['_fresh'] = False self.reload_user() if _request_ctx_stack.top.user is not None: app = current_app._get_current_object() user_loaded_from_cookie.send(app, user=_get_user()) def _load_from_header(self, header): user = None if self.header_callback: user = self.header_callback(header) if user is not None: self.reload_user(user=user) app = current_app._get_current_object() user_loaded_from_header.send(app, user=_get_user()) else: self.reload_user() def _load_from_request(self, request): user = None if self.request_callback: user = self.request_callback(request) if user is not None: self.reload_user(user=user) app = current_app._get_current_object() user_loaded_from_request.send(app, user=_get_user()) else: self.reload_user() def _update_remember_cookie(self, response): if 'remember' in session: operation = session.pop('remember', None) if operation == 'set' and 'user_id' in session: self._set_cookie(response) elif operation == 'clear': self._clear_cookie(response) return response def _set_cookie(self, response): config = current_app.config cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME) duration = config.get('REMEMBER_COOKIE_DURATION', COOKIE_DURATION) domain = config.get('REMEMBER_COOKIE_DOMAIN') path = config.get('REMEMBER_COOKIE_PATH', '/') secure = config.get('REMEMBER_COOKIE_SECURE', COOKIE_SECURE) httponly = config.get('REMEMBER_COOKIE_HTTPONLY', COOKIE_HTTPONLY) if self.token_callback: data = current_user.get_auth_token() else: data = encode_cookie(unicode(session['user_id'])) expires = datetime.utcnow() + duration response.set_cookie(cookie_name, value=data, expires=expires, domain=domain, path=path, secure=secure, httponly=httponly) def _clear_cookie(self, response): config = 
current_app.config cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME) domain = config.get('REMEMBER_COOKIE_DOMAIN') path = config.get('REMEMBER_COOKIE_PATH', '/') response.delete_cookie(cookie_name, domain=domain, path=path) class UserMixin(object): @property def is_active(self): return True @property def is_authenticated(self): return True @property def is_anonymous(self): return False def get_id(self): try: return unicode(self.id) except AttributeError: raise NotImplementedError('No `id` attribute - override `get_id`') def __eq__(self, other): if isinstance(other, UserMixin): return self.get_id() == other.get_id() return NotImplemented def __ne__(self, other): equal = self.__eq__(other) if equal is NotImplemented: return NotImplemented return not equal if sys.version_info[0] != 2: __hash__ = object.__hash__ class AnonymousUserMixin(object): @property def is_authenticated(self): return False @property def is_active(self): return False @property def is_anonymous(self): return True def get_id(self): return def encode_cookie(payload): return u'{0}|{1}'.format(payload, _cookie_digest(payload)) def decode_cookie(cookie): try: payload, digest = cookie.rsplit(u'|', 1) if hasattr(digest, 'decode'): digest = digest.decode('ascii') except ValueError: return if safe_str_cmp(_cookie_digest(payload), digest): return payload def make_next_param(login_url, current_url): l = urlparse(login_url) c = urlparse(current_url) if (not l.scheme or l.scheme == c.scheme) and (not l.netloc or l.netloc == c.netloc): return urlunparse(('', '', c.path, c.params, c.query, '')) return current_url def login_url(login_view, next_url=None, next_field='next'): if login_view.startswith(('https://', 'http://', '/')): base = login_view else: base = url_for(login_view) if next_url is None: return base parts = list(urlparse(base)) md = url_decode(parts[4]) md[next_field] = make_next_param(base, next_url) parts[4] = url_encode(md, sort=True) return urlunparse(parts) def make_secure_token(*args, **options): key = options.get('key') key = _secret_key(key) l = [s if isinstance(s, bytes) else s.encode('utf-8') for s in args] payload = b'\0'.join(l) token_value = hmac.new(key, payload, sha512).hexdigest() if hasattr(token_value, 'decode'): token_value = token_value.decode('utf-8') return token_value def login_fresh(): return session.get('_fresh', False) def login_user(user, remember=False, force=False, fresh=True): if not force and not user.is_active: return False user_id = getattr(user, current_app.login_manager.id_attribute)() session['user_id'] = user_id session['_fresh'] = fresh session['_id'] = _create_identifier() if remember: session['remember'] = 'set' _request_ctx_stack.top.user = user user_logged_in.send(current_app._get_current_object(), user=_get_user()) return True def logout_user(): user = _get_user() if 'user_id' in session: session.pop('user_id') if '_fresh' in session: session.pop('_fresh') cookie_name = current_app.config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME) if cookie_name in request.cookies: session['remember'] = 'clear' user_logged_out.send(current_app._get_current_object(), user=user) current_app.login_manager.reload_user() return True
MIT License
joeyhendricks/quickpotato
QuickPotato/database/operations.py
ContextManager.execute_query
python
def execute_query(connection, query): return connection.execute(query)
:param connection: An open SQLAlchemy connection.
:param query: The query to execute on that connection.
:return: The result returned by the executed query.
https://github.com/joeyhendricks/quickpotato/blob/5e33e64d77997b00a43f5573353138436b1f1a34/QuickPotato/database/operations.py#L56-L63
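A hedged usage sketch built from the methods shown in the context below; the database name and SQL statement are placeholder values.

cm = ContextManager()
engine, connection = cm.spawn_connection("example_database")
result = cm.execute_query(connection, "SELECT 1")
cm.close_connection(engine, connection)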
from QuickPotato.configuration.management import options from QuickPotato.database.schemas import RawStatisticsSchemas, UnitPerformanceTestResultSchemas from sqlalchemy import create_engine from sqlalchemy.exc import ProgrammingError from QuickPotato.utilities.exceptions import DatabaseConnectionCannotBeSpawned, DatabaseSchemaCannotBeSpawned from sqlalchemy_utils import database_exists, create_database, drop_database import tempfile class ContextManager(RawStatisticsSchemas, UnitPerformanceTestResultSchemas): URL = options.connection_url def __init__(self): RawStatisticsSchemas.__init__(self) UnitPerformanceTestResultSchemas.__init__(self) def spawn_engine(self, database_name): try: url = self._validate_connection_url(database_name=database_name) engine = create_engine(url, echo=options.enable_database_echo) return engine except Exception: raise DatabaseConnectionCannotBeSpawned() def spawn_connection(self, database_name): try: engine = self.spawn_engine(database_name) return engine, engine.connect() except Exception: raise DatabaseConnectionCannotBeSpawned() @staticmethod def close_connection(engine, connection): connection.close() engine.dispose() return True @staticmethod
MIT License
baderlab/saber
saber/utils/data_utils.py
get_data_partitions
python
def get_data_partitions(training_data, train_valid_indices):
    partitioned_data = []

    for i, _ in enumerate(train_valid_indices):
        partitioned_data.append([])
        for j, _ in enumerate(train_valid_indices[i]):
            train_indices, valid_indices = train_valid_indices[i][j]

            x_word_train, x_char_train = training_data[i]['x_train']
            y_train = training_data[i]['y_train']

            x_word_train, x_word_valid = x_word_train[train_indices], x_word_train[valid_indices]
            x_char_train, x_char_valid = x_char_train[train_indices], x_char_train[valid_indices]
            y_train, y_valid = y_train[train_indices], y_train[valid_indices]

            partitioned_data[i].append({'x_train': [x_word_train, x_char_train],
                                        'x_valid': [x_word_valid, x_char_valid],
                                        'y_train': y_train,
                                        'y_valid': y_valid,
                                        'x_test': training_data[i]['x_test'],
                                        'y_test': training_data[i]['y_test'],})

    return partitioned_data
Get train and valid partitions for all k-folds for all datasets.

For all Dataset objects in `datasets`, gets the train and valid partitions for the current
k-fold (`fold`) using the indices given at `train_valid_indices`. Returns a list of lists of
four-tuples: (x_train, x_valid, y_train, y_valid), where index i, j contains the data for the
ith dataset and jth k-fold.

Args:
    datasets (list): A list of Dataset objects.
    train_valid_indices (list): A list of list of two-tuples, where train_valid_indices[i][j]
        is a tuple containing the train and valid indices (in that order) for the ith dataset
        and jth fold.
    fold (int): The current fold in k-fold cross-validation.

Returns:
    A list of lists, `partitioned_data`, where `partitioned_data[i][j]` contains the data for
    the ith dataset and jth fold.
https://github.com/baderlab/saber/blob/876be6bfdb1bc5b18cbcfa848c94b0d20c940f02/saber/utils/data_utils.py#L210-L252
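A hedged, toy-sized sketch of how the partitions line up for a single dataset and two folds; the arrays and the hand-built fold indices are made-up values (real inputs come from Dataset objects and get_train_valid_indices).

import numpy as np

x_word = np.arange(12).reshape(6, 2)   # toy word-index sequences
x_char = np.arange(12).reshape(6, 2)   # toy char-index sequences
y = np.arange(6)

training_data = [{'x_train': [x_word, x_char], 'y_train': y,
                  'x_test': None, 'y_test': None}]
# one dataset, two folds: train_valid_indices[i][j] == (train_indices, valid_indices)
train_valid_indices = [[(np.array([0, 1, 2]), np.array([3, 4, 5])),
                        (np.array([3, 4, 5]), np.array([0, 1, 2]))]]

partitioned = get_data_partitions(training_data, train_valid_indices)
# partitioned[0][1]['x_valid'] -> word and char inputs of fold 1 for dataset 0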
import glob import logging import os from itertools import chain from sklearn.model_selection import KFold, train_test_split from .. import constants from ..preprocessor import Preprocessor LOGGER = logging.getLogger(__name__) def get_filepaths(filepath): partition_filepaths = {} train_partition = glob.glob(os.path.join(filepath, constants.TRAIN_FILE)) valid_partition = glob.glob(os.path.join(filepath, constants.VALID_FILE)) test_partition = glob.glob(os.path.join(filepath, constants.TEST_FILE)) if not train_partition: err_msg = "Must supply at least one file, train.* at {}".format(filepath) LOGGER.error('ValueError %s', err_msg) raise ValueError(err_msg) partition_filepaths['train'] = train_partition[0] partition_filepaths['valid'] = valid_partition[0] if valid_partition else None partition_filepaths['test'] = test_partition[0] if test_partition else None return partition_filepaths def load_single_dataset(config): from ..dataset import Dataset dataset = Dataset(directory=config.dataset_folder[0], replace_rare_tokens=config.replace_rare_tokens) dataset.load() return [dataset] def load_compound_dataset(config): from ..dataset import Dataset compound_dataset = [] for dir_ in config.dataset_folder: dataset = Dataset(directory=dir_, replace_rare_tokens=config.replace_rare_tokens) dataset.load() compound_dataset.append(dataset) combined_types = {'word': [dataset.type_to_idx['word'] for dataset in compound_dataset], 'char': [dataset.type_to_idx['char'] for dataset in compound_dataset]} combined_types['word'] = list(set(chain.from_iterable(combined_types['word']))) combined_types['char'] = list(set(chain.from_iterable(combined_types['char']))) type_to_idx = { 'word': Preprocessor.type_to_idx(combined_types['word'], constants.INITIAL_MAPPING['word']), 'char': Preprocessor.type_to_idx(combined_types['char'], constants.INITIAL_MAPPING['word']), } for dataset in compound_dataset: word_types, char_types = list(dataset.type_to_idx['word']), list(dataset.type_to_idx['char']) dataset.type_to_idx['word'] = Preprocessor.type_to_idx(word_types, type_to_idx['word']) dataset.type_to_idx['char'] = Preprocessor.type_to_idx(char_types, type_to_idx['char']) dataset.get_idx_seq() return compound_dataset def setup_dataset_for_transfer(dataset, type_to_idx): dataset.type_to_idx['word'] = type_to_idx['word'] dataset.type_to_idx['char'] = type_to_idx['char'] dataset.get_idx_seq() def collect_valid_data(training_data, test_size=0.10): if any(['x_train' not in data or 'y_train' not in data for data in training_data]): err_msg = "Argument `training_data` must contain the keys 'x_train' and 'y_train'" LOGGER.error("ValueError: %s", err_msg) raise ValueError(err_msg) for i, data in enumerate(training_data): x_train_word, x_valid_word, x_train_char, x_valid_char, y_train, y_valid = train_test_split(data['x_train'][0], data['x_train'][1], data['y_train'], test_size=test_size, random_state=42, shuffle=False) training_data[i] = {'x_train': [x_train_word, x_train_char], 'x_valid': [x_valid_word, x_valid_char], 'y_train': y_train, 'y_valid': y_valid, 'x_test': data['x_test'], 'y_test': data['y_test'], } return training_data def get_train_valid_indices(training_data, k_folds): train_valid_indices = [] kf = KFold(n_splits=k_folds, random_state=42) for i, _ in enumerate(training_data): X, _ = training_data[i]['x_train'] train_valid_indices.append([(ti, vi) for ti, vi in kf.split(X)]) return train_valid_indices
MIT License
icb-dcm/pypesto
pypesto/objective/pre_post_process.py
FixedParametersProcessor.reduce
python
def reduce(self, x: np.ndarray) -> np.ndarray:
    x = super().reduce(x)
    if x.size:
        return x[self.x_free_indices]
    else:
        return x
Reduce the full (simulation) parameter vector to the subset of free optimization parameters.
https://github.com/icb-dcm/pypesto/blob/fb2be819b41411dc1686d8429ce5efa6c535b70b/pypesto/objective/pre_post_process.py#L117-L126
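A hedged worked example with three parameters, of which the middle one is fixed; the numeric values are made up.

import numpy as np

proc = FixedParametersProcessor(dim_full=3,
                                x_free_indices=[0, 2],
                                x_fixed_indices=[1],
                                x_fixed_vals=[0.5])

x_full = proc.preprocess(np.array([1.0, 2.0]))   # embed -> array([1. , 0.5, 2. ])
x_free = proc.reduce(x_full)                     # subset back -> array([1., 2.])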
import numpy as np from typing import Dict, Sequence from .constants import GRAD, HESS, RES, SRES class PrePostProcessor: def __init__(self): pass def preprocess( self, x: np.ndarray ) -> np.ndarray: return x def postprocess( self, result: Dict ) -> Dict: result = PrePostProcessor.as_ndarrays(result) return result def reduce( self, x: np.ndarray ) -> np.ndarray: return x @staticmethod def as_ndarrays( result: Dict ) -> Dict: keys = [GRAD, HESS, RES, SRES] for key in keys: if key in result: value = result[key] if value is not None: result[key] = np.array(value) return result class FixedParametersProcessor(PrePostProcessor): def __init__(self, dim_full: int, x_free_indices: Sequence[int], x_fixed_indices: Sequence[int], x_fixed_vals: Sequence[float]): super().__init__() self.dim_full: int = dim_full self.x_free_indices: np.ndarray = np.array(x_free_indices, dtype=int) self.x_fixed_indices: np.ndarray = np.array(x_fixed_indices, dtype=int) self.x_fixed_vals: np.ndarray = np.array(x_fixed_vals, dtype=float) def preprocess(self, x: np.ndarray) -> np.ndarray: x = super().preprocess(x) x_full = np.zeros(self.dim_full) x_full[self.x_free_indices] = x x_full[self.x_fixed_indices] = self.x_fixed_vals return x_full
BSD 3-Clause New or Revised License
noxrepo/pox
pox/lib/packet/icmpv6.py
icmp_base._init_
python
def _init_ (self): pass
Called during initialization. Override me. In most other hierarchies that follow a similar pattern, this method would be named "_init", but that name is already used in the packet_base hierarchy.
https://github.com/noxrepo/pox/blob/5f82461e01f8822bd7336603b361bff4ffbd2380/pox/lib/packet/icmpv6.py#L423-L433
import struct import random from .packet_utils import * from .packet_base import packet_base from pox.lib.addresses import IPAddr6,EthAddr from pox.lib.util import hexdump, init_helper TYPE_DEST_UNREACH = 1 TYPE_PACKET_TOO_BIG = 2 TYPE_TIME_EXCEED = 3 TYPE_PARAM_PROB = 4 TYPE_ECHO_REQUEST = 128 TYPE_ECHO_REPLY = 129 TYPE_MC_LISTENER_QUERY = 130 TYPE_MC_LISTENER_REPORT = 131 TYPE_MC_LISTENER_DONE = 132 TYPE_ROUTER_SOLICITATION = 133 TYPE_ROUTER_ADVERTISEMENT = 134 TYPE_NEIGHBOR_SOLICITATION = 135 TYPE_NEIGHBOR_ADVERTISEMENT = 136 TYPE_REDIRECT = 137 TYPE_ROUTER_RENUMBER = 138 TYPE_MC_LISTENER_REPORT_V2 = 143 TYPE_MRD_ADVERTISEMENT = 151 TYPE_MRD_SOLICITATION = 152 TYPE_MRD_TERMINATION = 153 CODE_UNREACH_NO_ROUTE = 0 CODE_UNREACH_ADMIN_PROHIBIT = 1 CODE_UNREACH_BEYOND_SRC_SCOPE = 2 CODE_UNREACH_ADDR_UNREACHABLE = 3 CODE_UNREACH_PORT_UNREACHABLE = 4 CODE_UNREACH_SRC_POLICY_FAIL = 5 CODE_UNREACH_DST_ROUTE_REJECT = 6 CODE_UNREACH_SRC_ROUTE_ERROR = 7 CODE_TIME_HOP_EXCEEDED = 0 CODE_TIME_FRAG_TIME_EXCEEDED = 1 CODE_PARAM_BAD_HEADER = 0 CODE_PARAM_BAD_NEXT_HEADER = 1 CODE_PARAM_BAD_OPTION = 2 _type_to_name = { 1 : "TYPE_DEST_UNREACH", 2 : "TYPE_PACKET_TOO_BIG", 3 : "TYPE_TIME_EXCEED", 4 : "TYPE_PARAM_PROB", 128 : "TYPE_ECHO_REQUEST", 129 : "TYPE_ECHO_REPLY", 130 : "TYPE_MC_LISTENER_QUERY", 131 : "TYPE_MC_LISTENER_REPORT", 132 : "TYPE_MC_LISTENER_DONE", 133 : "TYPE_ROUTER_SOLICITATION", 134 : "TYPE_ROUTER_ADVERTISEMENT", 135 : "TYPE_NEIGHBOR_SOLICITATION", 136 : "TYPE_NEIGHBOR_ADVERTISEMENT", 137 : "TYPE_REDIRECT", 138 : "TYPE_ROUTER_RENUMBER", 143 : "TYPE_MC_LISTENER_REPORT_V2", 151 : "TYPE_MRD_ADVERTISEMENT", 152 : "TYPE_MRD_SOLICITATION", 153 : "TYPE_MRD_TERMINATION", } _nd_options = {} def nd_option_def (cls): _nd_options[cls.TYPE] = cls return cls def _parse_ndp_options (raw, prev, offset = 0, buf_len = None): _offset = offset if buf_len is None: buf_len = len(raw) remaining = buf_len - offset r = [] while offset < buf_len - 2: if (buf_len - offset) % 8 != 0: raise RuntimeError("Bad option data length") offset,o = NDOptionBase.unpack_new(raw, offset, buf_len, prev=prev) r.append(o) return offset,r class NDOptionBase (packet_base): def __init__ (self, *args, **kw): self.prev = kw.pop('prev', None) self._init(*args, **kw) init_helper(self, kw) def __repr__ (self): s = type(self).__name__ if s.startswith("NDOption"): s = s[8:] elif s.startswith("NDOpt"): s = s[5:] ss = self._fields() if ss: s += ' ' s += " ".join(["%s:%s" % (k,v) for k,v in ss.items()]) return "[" + s + "]" @property def type (self): return self.prev.type @property def code (self): return self.prev.code def _fields (self): return None def _init (self, *args, **kw): pass def __len__ (self): assert self.LENGTH is not None return self.LENGTH @staticmethod def unpack_new (raw, offset = 0, buf_len = None, prev = None): if buf_len is None: buf_len = len(raw) if buf_len < 2: raise TruncatedException() t,l = struct.unpack_from("BB", raw, offset) if l == 0: raise RuntimeError("Zero-length NDP option") offset += 2 length_bytes = l * 8 - 2 if (buf_len - offset) < length_bytes: raise TruncatedException() c = _nd_options.get(t) if c is None: c = NDOptionGeneric if c.LENGTH is not None and c.LENGTH != length_bytes: raise RuntimeError("Bad length for NDP option") new_off,o = c._unpack_new(raw, offset, t, length_bytes, prev=prev) assert new_off == offset+length_bytes return new_off,o def pack (self): d = self._pack_body() while (len(d)+2) % 8: d += "\x00" return struct.pack("BB", self.TYPE, (len(d)+2)//8) + d @classmethod def _unpack_new (cls, 
raw, offset, t, length, prev): raise RuntimeError("Not implemented") def _pack_body (self): raise RuntimeError("Not implemented") class NDOptionGeneric (NDOptionBase): LENGTH = None TYPE = None def __repr__ (self): return "<NDP Option Type %s>" % (self.TYPE,) def _init (self, *args, **kw): self.raw = b'' def __len__ (self): return len(self.raw) def _pack_body (self): return self.raw @classmethod def _unpack_new (cls, raw, offset, t, length, prev): o = cls() o._init() o.TYPE = t o.prev = prev o.raw = raw[offset:offset+length] return offset+length,o class NDOptLinkLayerAddress (NDOptionBase): LENGTH = 6 def _init (self, *args, **kw): a = kw.pop('address',None) if a is None: self.address = None else: self.address = EthAddr(a) def _fields (self): return {'addr':self.address} @classmethod def _unpack_new (cls, raw, offset, t, length, prev): return offset+length,cls(address = EthAddr(raw[offset:offset+length]), prev=prev) def _pack_body (self): return self.address.raw @nd_option_def class NDOptSourceLinkLayerAddress (NDOptLinkLayerAddress): TYPE = 1 @nd_option_def class NDOptTargetLinkLayerAddress (NDOptLinkLayerAddress): TYPE = 2 @nd_option_def class NDOptPrefixInformation (NDOptionBase): LENGTH = 1 + 1 + 4 + 4 + 4 + 4 * 4 TYPE = 3 ON_LINK_FLAG = 0x80 AUTONOMOUS_FLAG = 0x40 def _init (self, *args, **kw): self.prefix_length = 0 self.on_link = False self.is_autonomous = False self.valid_lifetime = 0 self.preferred_lifetime = 0 self.prefix = IPAddr6.UNDEFINED def _fields (self): r = {} if self.on_link: r['on_link'] = True if self.is_autonomous: r['autonomous'] = True r['valid'] = self.valid_lifetime r['preferred'] = self.preferred_lifetime r['prefix'] = "%s/%s" % (self.prefix, self.prefix_length) return r @classmethod def _unpack_new (cls, raw, offset, t, length, prev): o = cls() o.prefix_length,flags,o.valid_lifetime,o.preferred_lifetime = struct.unpack_from('!BBII', raw, offset) offset += 1 + 1 + 4 + 4 offset += 4 o.prefix = IPAddr6(raw=raw[offset:offset+16]) offset += 16 o.on_link = (flags & cls.ON_LINK_FLAG) != 0 o.is_autonomous = (flags & cls.AUTONOMOUS_FLAG) != 0 o.prev = prev return offset,o @property def flags (self): f = 0 if self.on_link: f |= self.ON_LINK_FLAG if self.is_autonomous: f |= self.AUTONOMOUS_FLAG return f def pack (self): s = struct.pack("!BBII", self.prefix_length, self.flags, self.valid_lifetime,self.preferred_lifetime) s += '\x00' * 4 s += self.prefix.raw return s @nd_option_def class NDOptMTU (NDOptionBase): LENGTH = 6 TYPE = 5 def _init (self, *args, **kw): self.mtu = 0 def _fields (self): return {'mtu':self.mtu} @classmethod def _unpack_new (cls, raw, offset, t, length, prev): o = cls() o.prev = prev _,o.mtu = struct.unpack_from('!HI', raw, offset) offset += 2 + 4 return offset,o def pack (self): return struct.pack("!HI", 0, self.mtu) class icmp_base (packet_base): def __str__ (self): s = "[ICMPv6/" + self.__class__.__name__ ss = self._fields() if ss: s += ' ' s += " ".join(["%s:%s" % (k,v) for k,v in ss.items()]) return s + "]" def _fields (self): return {}
Apache License 2.0
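The docstring above describes an override-me hook; the NDOptionBase class in the same context uses the analogous _init hook. A hedged sketch of that pattern with a hypothetical subclass (MyOption, its TYPE value, and its value field are all made up, and NDOptionBase is assumed to be in scope from the module above):

class MyOption(NDOptionBase):
    LENGTH = 4
    TYPE = 200  # hypothetical, unassigned NDP option type

    def _init(self, *args, **kw):
        # NDOptionBase.__init__ calls this hook before init_helper() runs,
        # so subclass-specific defaults are set up here.
        self.value = kw.pop('value', 0)

opt = MyOption(value=7)
print(opt)  # [MyOption], via NDOptionBase.__repr__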
mcouthon/r2k
r2k/cli/feed/feed_show.py
feed_show
python
def feed_show() -> None: if config.feeds: feeds = yaml.safe_dump(config.feeds) logger.info("Here are the existing feeds:") logger.secho(feeds, fg="white", bold=False) else: logger.info("There are no feeds available.\nAdd more by running `r2k feeds import/add`")
List all existing RSS feeds.
https://github.com/mcouthon/r2k/blob/10db38d965c5883d4bf129584880a80a020e9509/r2k/cli/feed/feed_show.py#L10-L17
import click import yaml from r2k.cli import cli_utils, logger from r2k.config import config @click.command("show") @cli_utils.config_path_option()
MIT License
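feed_show() above simply YAML-dumps config.feeds; a minimal sketch of that dump on a made-up feed dictionary (the feed name and URL are invented examples):

import yaml

feeds = {"Hacker News": {"url": "https://news.ycombinator.com/rss"}}
print(yaml.safe_dump(feeds))
# Hacker News:
#   url: https://news.ycombinator.com/rss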
facebookresearch/pycls
pycls/models/resnet.py
ResNet.complexity
python
def complexity(cx): if "cifar" in cfg.TRAIN.DATASET: d = int((cfg.MODEL.DEPTH - 2) / 6) cx = ResStemCifar.complexity(cx, 3, 16) cx = ResStage.complexity(cx, 16, 16, stride=1, d=d) cx = ResStage.complexity(cx, 16, 32, stride=2, d=d) cx = ResStage.complexity(cx, 32, 64, stride=2, d=d) cx = ResHead.complexity(cx, 64, cfg.MODEL.NUM_CLASSES) else: g, gw = cfg.RESNET.NUM_GROUPS, cfg.RESNET.WIDTH_PER_GROUP (d1, d2, d3, d4) = _IN_STAGE_DS[cfg.MODEL.DEPTH] w_b = gw * g cx = ResStemIN.complexity(cx, 3, 64) cx = ResStage.complexity(cx, 64, 256, 1, d=d1, w_b=w_b, groups=g) cx = ResStage.complexity(cx, 256, 512, 2, d=d2, w_b=w_b * 2, groups=g) cx = ResStage.complexity(cx, 512, 1024, 2, d=d3, w_b=w_b * 4, groups=g) cx = ResStage.complexity(cx, 1024, 2048, 2, d=d4, w_b=w_b * 8, groups=g) cx = ResHead.complexity(cx, 2048, cfg.MODEL.NUM_CLASSES) return cx
Computes model complexity. If you alter the model, make sure to update.
https://github.com/facebookresearch/pycls/blob/8c79a8e2adfffa7cae3a88aace28ef45e52aa7e5/pycls/models/resnet.py#L268-L287
from pycls.core.config import cfg from pycls.models.blocks import ( activation, conv2d, conv2d_cx, gap2d, gap2d_cx, init_weights, linear, linear_cx, norm2d, norm2d_cx, pool2d, pool2d_cx, ) from torch.nn import Module _IN_STAGE_DS = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3), 152: (3, 8, 36, 3)} def get_trans_fun(name): trans_funs = { "basic_transform": BasicTransform, "bottleneck_transform": BottleneckTransform, } err_str = "Transformation function '{}' not supported" assert name in trans_funs.keys(), err_str.format(name) return trans_funs[name] class ResHead(Module): def __init__(self, w_in, num_classes): super(ResHead, self).__init__() self.avg_pool = gap2d(w_in) self.fc = linear(w_in, num_classes, bias=True) def forward(self, x): x = self.avg_pool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x @staticmethod def complexity(cx, w_in, num_classes): cx = gap2d_cx(cx, w_in) cx = linear_cx(cx, w_in, num_classes, bias=True) return cx class BasicTransform(Module): def __init__(self, w_in, w_out, stride, w_b=None, groups=1): err_str = "Basic transform does not support w_b and groups options" assert w_b is None and groups == 1, err_str super(BasicTransform, self).__init__() self.a = conv2d(w_in, w_out, 3, stride=stride) self.a_bn = norm2d(w_out) self.a_af = activation() self.b = conv2d(w_out, w_out, 3) self.b_bn = norm2d(w_out) self.b_bn.final_bn = True def forward(self, x): for layer in self.children(): x = layer(x) return x @staticmethod def complexity(cx, w_in, w_out, stride, w_b=None, groups=1): err_str = "Basic transform does not support w_b and groups options" assert w_b is None and groups == 1, err_str cx = conv2d_cx(cx, w_in, w_out, 3, stride=stride) cx = norm2d_cx(cx, w_out) cx = conv2d_cx(cx, w_out, w_out, 3) cx = norm2d_cx(cx, w_out) return cx class BottleneckTransform(Module): def __init__(self, w_in, w_out, stride, w_b, groups): super(BottleneckTransform, self).__init__() (s1, s3) = (stride, 1) if cfg.RESNET.STRIDE_1X1 else (1, stride) self.a = conv2d(w_in, w_b, 1, stride=s1) self.a_bn = norm2d(w_b) self.a_af = activation() self.b = conv2d(w_b, w_b, 3, stride=s3, groups=groups) self.b_bn = norm2d(w_b) self.b_af = activation() self.c = conv2d(w_b, w_out, 1) self.c_bn = norm2d(w_out) self.c_bn.final_bn = True def forward(self, x): for layer in self.children(): x = layer(x) return x @staticmethod def complexity(cx, w_in, w_out, stride, w_b, groups): (s1, s3) = (stride, 1) if cfg.RESNET.STRIDE_1X1 else (1, stride) cx = conv2d_cx(cx, w_in, w_b, 1, stride=s1) cx = norm2d_cx(cx, w_b) cx = conv2d_cx(cx, w_b, w_b, 3, stride=s3, groups=groups) cx = norm2d_cx(cx, w_b) cx = conv2d_cx(cx, w_b, w_out, 1) cx = norm2d_cx(cx, w_out) return cx class ResBlock(Module): def __init__(self, w_in, w_out, stride, trans_fun, w_b=None, groups=1): super(ResBlock, self).__init__() self.proj, self.bn = None, None if (w_in != w_out) or (stride != 1): self.proj = conv2d(w_in, w_out, 1, stride=stride) self.bn = norm2d(w_out) self.f = trans_fun(w_in, w_out, stride, w_b, groups) self.af = activation() def forward(self, x): x_p = self.bn(self.proj(x)) if self.proj else x return self.af(x_p + self.f(x)) @staticmethod def complexity(cx, w_in, w_out, stride, trans_fun, w_b, groups): if (w_in != w_out) or (stride != 1): h, w = cx["h"], cx["w"] cx = conv2d_cx(cx, w_in, w_out, 1, stride=stride) cx = norm2d_cx(cx, w_out) cx["h"], cx["w"] = h, w cx = trans_fun.complexity(cx, w_in, w_out, stride, w_b, groups) return cx class ResStage(Module): def __init__(self, w_in, w_out, stride, d, w_b=None, groups=1): super(ResStage, 
self).__init__() for i in range(d): b_stride = stride if i == 0 else 1 b_w_in = w_in if i == 0 else w_out trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN) res_block = ResBlock(b_w_in, w_out, b_stride, trans_fun, w_b, groups) self.add_module("b{}".format(i + 1), res_block) def forward(self, x): for block in self.children(): x = block(x) return x @staticmethod def complexity(cx, w_in, w_out, stride, d, w_b=None, groups=1): for i in range(d): b_stride = stride if i == 0 else 1 b_w_in = w_in if i == 0 else w_out trans_f = get_trans_fun(cfg.RESNET.TRANS_FUN) cx = ResBlock.complexity(cx, b_w_in, w_out, b_stride, trans_f, w_b, groups) return cx class ResStemCifar(Module): def __init__(self, w_in, w_out): super(ResStemCifar, self).__init__() self.conv = conv2d(w_in, w_out, 3) self.bn = norm2d(w_out) self.af = activation() def forward(self, x): for layer in self.children(): x = layer(x) return x @staticmethod def complexity(cx, w_in, w_out): cx = conv2d_cx(cx, w_in, w_out, 3) cx = norm2d_cx(cx, w_out) return cx class ResStemIN(Module): def __init__(self, w_in, w_out): super(ResStemIN, self).__init__() self.conv = conv2d(w_in, w_out, 7, stride=2) self.bn = norm2d(w_out) self.af = activation() self.pool = pool2d(w_out, 3, stride=2) def forward(self, x): for layer in self.children(): x = layer(x) return x @staticmethod def complexity(cx, w_in, w_out): cx = conv2d_cx(cx, w_in, w_out, 7, stride=2) cx = norm2d_cx(cx, w_out) cx = pool2d_cx(cx, w_out, 3, stride=2) return cx class ResNet(Module): def __init__(self): datasets = ["cifar10", "imagenet"] err_str = "Dataset {} is not supported" assert cfg.TRAIN.DATASET in datasets, err_str.format(cfg.TRAIN.DATASET) assert cfg.TEST.DATASET in datasets, err_str.format(cfg.TEST.DATASET) super(ResNet, self).__init__() if "cifar" in cfg.TRAIN.DATASET: self._construct_cifar() else: self._construct_imagenet() self.apply(init_weights) def _construct_cifar(self): err_str = "Model depth should be of the format 6n + 2 for cifar" assert (cfg.MODEL.DEPTH - 2) % 6 == 0, err_str d = int((cfg.MODEL.DEPTH - 2) / 6) self.stem = ResStemCifar(3, 16) self.s1 = ResStage(16, 16, stride=1, d=d) self.s2 = ResStage(16, 32, stride=2, d=d) self.s3 = ResStage(32, 64, stride=2, d=d) self.head = ResHead(64, cfg.MODEL.NUM_CLASSES) def _construct_imagenet(self): g, gw = cfg.RESNET.NUM_GROUPS, cfg.RESNET.WIDTH_PER_GROUP (d1, d2, d3, d4) = _IN_STAGE_DS[cfg.MODEL.DEPTH] w_b = gw * g self.stem = ResStemIN(3, 64) self.s1 = ResStage(64, 256, stride=1, d=d1, w_b=w_b, groups=g) self.s2 = ResStage(256, 512, stride=2, d=d2, w_b=w_b * 2, groups=g) self.s3 = ResStage(512, 1024, stride=2, d=d3, w_b=w_b * 4, groups=g) self.s4 = ResStage(1024, 2048, stride=2, d=d4, w_b=w_b * 8, groups=g) self.head = ResHead(2048, cfg.MODEL.NUM_CLASSES) def forward(self, x): for module in self.children(): x = module(x) return x @staticmethod
MIT License
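A hedged sketch of driving the static complexity() hook above. The "flops", "params", and "acts" keys of the cx dictionary are an assumption based on the *_cx helpers imported in the context ("h" and "w" are confirmed by ResBlock.complexity), and a valid model config (e.g. MODEL.DEPTH) must already have been loaded through pycls' own config machinery before this runs:

from pycls.core.config import cfg
from pycls.models.resnet import ResNet

# Seed the bookkeeping dict with the input resolution and zeroed counters.
cx = {"h": cfg.TRAIN.IM_SIZE, "w": cfg.TRAIN.IM_SIZE,
      "flops": 0, "params": 0, "acts": 0}
cx = ResNet.complexity(cx)
print(cx["flops"], cx["params"])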
sylvainde/didyoumean-python
didyoumean/didyoumean_sugg_tests.py
PythonEnvRange.contains_current_env
python
def contains_current_env(self): return version_in_range(self.version_range) and interpreter_in(self.interpreters)
Check if current environment is in PythonEnvRange object.
https://github.com/sylvainde/didyoumean-python/blob/cd34238829349b652a3668413c8b8f2524fab0d9/didyoumean/didyoumean_sugg_tests.py#L285-L288
from didyoumean_internal import get_suggestions_for_exception, quote, STAND_MODULES, AVOID_REC_MSG, APPLY_REMOVED_MSG, BUFFER_REMOVED_MSG, CMP_REMOVED_MSG, CMP_ARG_REMOVED_MSG, EXC_ATTR_REMOVED_MSG, LONG_REMOVED_MSG, MEMVIEW_ADDED_MSG, RELOAD_REMOVED_MSG, STDERR_REMOVED_MSG, BREAKPOINT_ADDED_MSG, NO_KEYWORD_ARG_MSG, COMMA_INSTEAD_OF_PERIOD_MSG import didyoumean_common_tests as common import didyoumean_re as re import warnings import sys import math import os import tempfile from shutil import rmtree unittest_module = common.unittest_module exc_history = None this_is_a_global_list = [] initial_recursion_limit = sys.getrecursionlimit() def indent_code(string, tab="\t"): return ''.join(tab + l for l in string.splitlines(True)) def func_gen(name='some_func', param='', body='pass', args=None): function = "def {0}({1}):\n{2}\n".format(name, param, indent_code(body)) if args is None: return function else: call = "{0}({1})\n".format(name, args) return function + call def meth_gen(class_name='MyClass', name='some_method', param='', body='pass', args=None): indent_body = indent_code(body, tab="\t\t") method = "def {0}({1}):\n{2}\n".format(name, param, indent_code(body)) class_def = "class {0}():\n{1}\n".format(class_name, indent_code(method)) if args is None: return class_def else: call = "{0}().{1}({2})\n".format(class_name, name, args) return class_def + call def my_generator(): for i in range(5): yield i def endlessly_recursive_func(n): return endlessly_recursive_func(n-1) class FoobarClass(): def __init__(self): self.babar = 2 @classmethod def this_is_cls_mthd(cls): return 5 def nameerror_self(self): return babar def nameerror_self2(self): return this_is_cls_mthd @classmethod def nameerror_cls(cls): return this_is_cls_mthd def some_method(self): pass def some_method2(self, x): pass def _some_semi_private_method(self): pass def __some_private_method(self): pass def some_method_missing_self_arg(): pass def some_method_missing_self_arg2(x): pass @classmethod def some_cls_method_missing_cls(): pass @classmethod def some_cls_method_missing_cls2(x): pass class CustomClass(): pass class IndexClass(): def __index__(self): return 2 class CallClass(): def __call__(self): return 0 class GetItemClass(): def __getitem__(self, key): return 0 class DelItemClass(): def __delitem__(self, key): pass class SetItemClass(): def __setitem__(self, key, val): pass class LenClass(): def __len__(self): return 0 FIRST_VERSION = (0, 0) LAST_VERSION = (10, 0) ALL_VERSIONS = (FIRST_VERSION, LAST_VERSION) INTERPRETERS = ['cpython', 'pypy'] def from_version(version): return (version, LAST_VERSION) def up_to_version(version): return (FIRST_VERSION, version) def before_and_after(version): return up_to_version(version), from_version(version) def before_mid_and_after(vers1, vers2): assert vers1 < vers2 return up_to_version(vers1), (vers1, vers2), from_version(vers2) def ranges_between(*versions): first = up_to_version(versions[0]) mid = list(zip(versions, versions[1:])) last = from_version(versions[-1]) return [first] + mid + [last] def version_in_range(version_range): beg, end = version_range return beg <= sys.version_info < end def interpreter_in(interpreters): is_pypy = hasattr(sys, "pypy_translation_info") interpreter = 'pypy' if is_pypy else 'cpython' return interpreter in interpreters def format_str(template, *args): return [template.format(arg) for arg in args] class PythonEnvRange(object): def __init__(self, version_range=None, interpreters=None): self.interpreters = listify(interpreters, INTERPRETERS, str) 
self.version_range = ALL_VERSIONS if version_range is None else version_range
MIT License
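A hedged sketch of the module-level helpers visible in the context above (from_version, version_in_range, interpreter_in); the version tuple is an arbitrary example and the helpers are assumed to be in scope from didyoumean_sugg_tests:

rng = from_version((3, 0))                  # ((3, 0), LAST_VERSION)
print(version_in_range(rng))                # True on any Python 3 interpreter
print(interpreter_in(['cpython', 'pypy']))  # True whether on CPython or PyPy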
xrplf/xrpl-py
xrpl/core/addresscodec/codec.py
encode_seed
python
def encode_seed(entropy: bytes, encoding_type: CryptoAlgorithm) -> str: if len(entropy) != SEED_LENGTH: raise XRPLAddressCodecException(f"Entropy must have length {SEED_LENGTH}") if encoding_type not in CryptoAlgorithm: raise XRPLAddressCodecException( f"Encoding type must be one of {CryptoAlgorithm}" ) prefix = _ALGORITHM_TO_PREFIX_MAP[encoding_type] return _encode(entropy, prefix, SEED_LENGTH)
Returns an encoded seed. Args: entropy: Entropy bytes of SEED_LENGTH. encoding_type: Either ED25519 or SECP256K1. Returns: An encoded seed. Raises: XRPLAddressCodecException: If entropy is not of length SEED_LENGTH or the encoding type is not one of CryptoAlgorithm.
https://github.com/xrplf/xrpl-py/blob/3635339bfb579353e56f126bbcf303d931b26d65/xrpl/core/addresscodec/codec.py#L65-L88
from typing import Dict, List, Tuple import base58 from typing_extensions import Final from xrpl.constants import CryptoAlgorithm from xrpl.core.addresscodec.exceptions import XRPLAddressCodecException from xrpl.core.addresscodec.utils import XRPL_ALPHABET _CLASSIC_ADDRESS_PREFIX: Final[List[int]] = [0x0] _ACCOUNT_PUBLIC_KEY_PREFIX: Final[List[int]] = [0x23] _FAMILY_SEED_PREFIX: Final[List[int]] = [0x21] _NODE_PUBLIC_KEY_PREFIX: Final[List[int]] = [0x1C] _ED25519_SEED_PREFIX: Final[List[int]] = [0x01, 0xE1, 0x4B] SEED_LENGTH: Final[int] = 16 _CLASSIC_ADDRESS_LENGTH: Final[int] = 20 _NODE_PUBLIC_KEY_LENGTH: Final[int] = 33 _ACCOUNT_PUBLIC_KEY_LENGTH: Final[int] = 33 _ALGORITHM_TO_PREFIX_MAP: Final[Dict[CryptoAlgorithm, List[int]]] = { CryptoAlgorithm.ED25519: _ED25519_SEED_PREFIX, CryptoAlgorithm.SECP256K1: _FAMILY_SEED_PREFIX, } def _encode(bytestring: bytes, prefix: List[int], expected_length: int) -> str: if expected_length and len(bytestring) != expected_length: error_message = """unexpected_payload_length: len(bytestring) does not match expected_length. Ensure that the bytes are a bytestring.""" raise XRPLAddressCodecException(error_message) encoded_prefix = bytes(prefix) payload = encoded_prefix + bytestring return base58.b58encode_check(payload, alphabet=XRPL_ALPHABET).decode("utf-8") def _decode(b58_string: str, prefix: bytes) -> bytes: prefix_length = len(prefix) decoded = base58.b58decode_check(b58_string, alphabet=XRPL_ALPHABET) if decoded[:prefix_length] != prefix: raise XRPLAddressCodecException("Provided prefix is incorrect") return decoded[prefix_length:]
ISC License
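A hedged sketch of encode_seed() above; the entropy bytes are an arbitrary example and the import path mirrors the module path listed in the record:

from xrpl.constants import CryptoAlgorithm
from xrpl.core.addresscodec.codec import encode_seed

entropy = bytes(range(16))   # exactly SEED_LENGTH (16) bytes
seed = encode_seed(entropy, CryptoAlgorithm.ED25519)
print(seed)                  # base58 seed string; ED25519 seeds start with "sEd"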
stanford-iprl-lab/torchfilter
torchfilter/data/_single_step_dataset.py
SingleStepDataset.__getitem__
python
def __getitem__( self, index: int ) -> Tuple[ types.StatesNumpy, types.StatesNumpy, types.ObservationsNumpy, types.ControlsNumpy, ]: return self.samples[index]
Get a single-step prediction sample from our dataset. Args: index (int): Subsequence number in our dataset. Returns: tuple: `(previous_state, state, observation, control)` tuple that contains data for a single subsequence. Each tuple member should be either a numpy array or dict of numpy arrays with shape `(subsequence_length, ...)`.
https://github.com/stanford-iprl-lab/torchfilter/blob/da0250baf2197f59b6e67f37cafdd63015380cbb/torchfilter/data/_single_step_dataset.py#L44-L61
from typing import List, Tuple import fannypack as fp from torch.utils.data import Dataset from .. import types class SingleStepDataset(Dataset): def __init__(self, trajectories: List[types.TrajectoryNumpy]): self.samples: List[ Tuple[ types.StatesNumpy, types.StatesNumpy, types.ObservationsNumpy, types.ControlsNumpy, ] ] = [] for traj in trajectories: T = len(traj.states) for t in range(T - 1): self.samples.append( ( traj.states[t], traj.states[t + 1], fp.utils.SliceWrapper(traj.observations)[t + 1], fp.utils.SliceWrapper(traj.controls)[t + 1], ) )
MIT License
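A hedged sketch of building the dataset above from one toy trajectory. It assumes types.TrajectoryNumpy accepts (states, observations, controls) keyword arguments, as the attribute accesses in __init__ suggest, and that SingleStepDataset is re-exported from torchfilter.data; the array shapes are made up:

import numpy as np
from torchfilter import types
from torchfilter.data import SingleStepDataset

T, state_dim, obs_dim, ctrl_dim = 5, 3, 2, 1
traj = types.TrajectoryNumpy(
    states=np.zeros((T, state_dim)),
    observations=np.zeros((T, obs_dim)),
    controls=np.zeros((T, ctrl_dim)),
)

dataset = SingleStepDataset(trajectories=[traj])
prev_state, state, observation, control = dataset[0]
print(prev_state.shape, observation.shape)  # (3,) (2,)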
stackstorm/st2contrib
packs/duo/actions/auth_ping.py
AuthPingAction.run
python
def run(self): try: data = self.duo_auth.ping() except RuntimeError, e: self.send_user_error("Ping failed! %s" % e) raise ValueError("Ping failed! %s" % e) else: return data
Ping the Duo Platform. Returns: A dict with info returned by Duo. Raises: RuntimeError: On ping failure.
https://github.com/stackstorm/st2contrib/blob/095b021a31ba134728deb7c707240196d016e729/packs/duo/actions/auth_ping.py#L22-L38
from lib.actions import AuthBaseAction class AuthPingAction(AuthBaseAction):
Apache License 2.0
wintoncode/winton-kafka-streams
winton_kafka_streams/processor/_context.py
Context.send
python
def send(self, topic, key, obj): print(f"Send {obj} to {topic}") pass
Send the key/value pair to a Kafka topic
https://github.com/wintoncode/winton-kafka-streams/blob/5867a1c42fc80bba07173fd1d004b2849b429fdf/winton_kafka_streams/processor/_context.py#L37-L43
import functools import logging from typing import Any, Callable from winton_kafka_streams.state.key_value_state_store import KeyValueStateStore from ..errors.kafka_streams_error import KafkaStreamsError log = logging.getLogger(__name__) def _raise_if_null_record(fn: Callable[..., Any]) -> Callable[..., Any]: @functools.wraps(fn) def _inner(*args, **kwargs): if args[0].current_record is None: raise KafkaStreamsError(f"Record cannot be unset when retrieving {fn.__name__}") return fn(*args, **kwargs) return _inner class Context: def __init__(self, _state_record_collector, _state_stores): self.current_node = None self.current_record = None self.state_record_collector = _state_record_collector self._state_stores = _state_stores
Apache License 2.0
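A hedged sketch of the send() stub above; the collector and stores passed to Context are placeholders and the topic, key, and value are arbitrary:

from winton_kafka_streams.processor._context import Context

ctx = Context(None, {})
ctx.send("output-topic", b"key", b"value")  # prints: Send b'value' to output-topic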
graphsense/graphsense-rest
openapi_server/models/stats_version.py
StatsVersion.timestamp
python
def timestamp(self): return self._timestamp
Gets the timestamp of this StatsVersion. :return: The timestamp of this StatsVersion. :rtype: str
https://github.com/graphsense/graphsense-rest/blob/2e4a9c20835e54d971e3fc3aae5780bc87d48647/openapi_server/models/stats_version.py#L115-L122
from __future__ import absolute_import from datetime import date, datetime from typing import List, Dict from openapi_server.models.base_model_ import Model from openapi_server import util class StatsVersion(Model): def __init__(self, nr=None, hash=None, timestamp=None, file=None): self.openapi_types = { 'nr': str, 'hash': str, 'timestamp': str, 'file': str } self.attribute_map = { 'nr': 'nr', 'hash': 'hash', 'timestamp': 'timestamp', 'file': 'file' } self._nr = nr self._hash = hash self._timestamp = timestamp self._file = file @classmethod def from_dict(cls, dikt) -> 'StatsVersion': return util.deserialize_model(dikt, cls) def to_dict(self, prefix=""): return { 'nr': self._nr, 'hash': self._hash, 'timestamp': self._timestamp, 'file': self._file } @property def nr(self): return self._nr @nr.setter def nr(self, nr): self._nr = nr @property def hash(self): return self._hash @hash.setter def hash(self, hash): self._hash = hash @property
MIT License
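A hedged sketch of the timestamp property above; the field values are invented examples and the import path mirrors the module path listed in the record:

from openapi_server.models.stats_version import StatsVersion

sv = StatsVersion(nr="1", hash="abc123", timestamp="2021-01-01", file="stats.json")
print(sv.timestamp)  # 2021-01-01
print(sv.to_dict())  # plain dict with the same four fields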
lumaku/ctc-segmentation
ctc_segmentation/ctc_segmentation.py
prepare_token_list
python
def prepare_token_list(config, text): ground_truth = [-1] utt_begin_indices = [] for utt in text: if not ground_truth[-1] == config.blank: ground_truth += [config.blank] utt_begin_indices.append(len(ground_truth) - 1) ground_truth += utt.tolist() if not ground_truth[-1] == config.blank: ground_truth += [config.blank] logging.debug(f"ground_truth: {ground_truth}") utt_begin_indices.append(len(ground_truth) - 1) ground_truth_mat = np.array(ground_truth, dtype=np.int64).reshape(-1, 1) return ground_truth_mat, utt_begin_indices
Prepare the given token list for CTC segmentation. This function expects the text input in the form of a list of numpy arrays: [np.array([2, 5]), np.array([7, 9])] :param config: an instance of CtcSegmentationParameters :param text: list of numpy arrays with tokens :return: label matrix, character index matrix
https://github.com/lumaku/ctc-segmentation/blob/6f1e89c518c9ecd5b178b22c046aac5eaec43976/ctc_segmentation/ctc_segmentation.py#L328-L357
import logging import numpy as np try: from .ctc_segmentation_dyn import cython_fill_table except ImportError: import pyximport pyximport.install(setup_args={"include_dirs": np.get_include()}) from .ctc_segmentation_dyn import cython_fill_table class CtcSegmentationParameters: max_prob = -10000000000.0 skip_prob = -10000000000.0 min_window_size = 8000 max_window_size = 100000 index_duration = 0.025 score_min_mean_over_L = 30 space = "·" blank = 0 replace_spaces_with_blanks = False blank_transition_cost_zero = False preamble_transition_cost_zero = True backtrack_from_max_t = False self_transition = "ε" start_of_ground_truth = "#" excluded_characters = ".,»«•❍·" tokenized_meta_symbol = "▁" char_list = None subsampling_factor = None frame_duration_ms = None @property def index_duration_in_seconds(self): if self.subsampling_factor and self.frame_duration_ms: t = self.frame_duration_ms * self.subsampling_factor / 1000 else: t = self.index_duration return t @property def flags(self): flags = int(self.blank_transition_cost_zero) flags += 2 * int(self.preamble_transition_cost_zero) return flags def update_excluded_characters(self): self.excluded_characters = "".join( [ char for char in self.excluded_characters if True not in [char == j for j in self.char_list] ] ) logging.debug(f"Excluded characters: {self.excluded_characters}") def __init__(self, **kwargs): self.set(**kwargs) def set(self, **kwargs): for key in kwargs: if ( not key.startswith("_") and hasattr(self, key) and kwargs[key] is not None ): setattr(self, key, kwargs[key]) def __repr__(self): output = "CtcSegmentationParameters( " for attribute in self.__dict__.keys(): value = self.__dict__[attribute] output += f"{attribute}={value}, " output += ")" return output def ctc_segmentation(config, lpz, ground_truth): blank = config.blank offset = 0 audio_duration = lpz.shape[0] * config.index_duration_in_seconds logging.info( f"CTC segmentation of {len(ground_truth)} chars " f"to {audio_duration:.2f}s audio " f"({lpz.shape[0]} indices)." ) if len(ground_truth) > lpz.shape[0] and config.skip_prob <= config.max_prob: raise AssertionError("Audio is shorter than text!") window_size = config.min_window_size while True: table = np.zeros( [min(window_size, lpz.shape[0]), len(ground_truth)], dtype=np.float32 ) table.fill(config.max_prob) offsets = np.zeros([len(ground_truth)], dtype=np.int64) t, c = cython_fill_table( table, lpz.astype(np.float32), np.array(ground_truth, dtype=np.int64), np.array(offsets, dtype=np.int64), config.blank, config.flags, ) if config.backtrack_from_max_t: t = table.shape[0] - 1 logging.debug( f"Max. 
joint probability to align text to audio: " f"{table[:, c].max()} at time index {t}" ) timings = np.zeros([len(ground_truth)]) char_probs = np.zeros([lpz.shape[0]]) state_list = [""] * lpz.shape[0] try: while t != 0 or c != 0: min_s = None min_switch_prob_delta = np.inf max_lpz_prob = config.max_prob for s in range(ground_truth.shape[1]): if ground_truth[c, s] != -1: offset = offsets[c] - (offsets[c - 1 - s] if c - s > 0 else 0) switch_prob = ( lpz[t + offsets[c], ground_truth[c, s]] if c > 0 else config.max_prob ) est_switch_prob = table[t, c] - table[t - 1 + offset, c - 1 - s] if abs(switch_prob - est_switch_prob) < min_switch_prob_delta: min_switch_prob_delta = abs(switch_prob - est_switch_prob) min_s = s max_lpz_prob = max(max_lpz_prob, switch_prob) stay_prob = ( max(lpz[t + offsets[c], blank], max_lpz_prob) if t > 0 else config.max_prob ) est_stay_prob = table[t, c] - table[t - 1, c] if abs(stay_prob - est_stay_prob) > min_switch_prob_delta: if c > 0: for s in range(0, min_s + 1): timings[c - s] = ( offsets[c] + t ) * config.index_duration_in_seconds char_probs[offsets[c] + t] = max_lpz_prob char_index = ground_truth[c, min_s] state_list[offsets[c] + t] = config.char_list[char_index] c -= 1 + min_s t -= 1 - offset else: char_probs[offsets[c] + t] = stay_prob state_list[offsets[c] + t] = config.self_transition t -= 1 except IndexError: logging.warning( "IndexError: Backtracking was not successful, " "the window size might be too small." ) window_size *= 2 if window_size < config.max_window_size: logging.warning("Increasing the window size to: " + str(window_size)) continue else: logging.error("Maximum window size reached.") logging.error("Check data and character list!") raise break return timings, char_probs, state_list def prepare_text(config, text, char_list=None): if type(config.blank) == str: config.blank = 0 if char_list is not None: config.char_list = char_list blank = config.char_list[config.blank] ground_truth = config.start_of_ground_truth utt_begin_indices = [] for utt in text: if not ground_truth.endswith(config.space): ground_truth += config.space utt_begin_indices.append(len(ground_truth) - 1) for char in utt: if char.isspace() and config.replace_spaces_with_blanks: if not ground_truth.endswith(config.space): ground_truth += config.space elif char in config.char_list and char not in config.excluded_characters: ground_truth += char if not ground_truth.endswith(config.space): ground_truth += config.space logging.debug(f"ground_truth: {ground_truth}") utt_begin_indices.append(len(ground_truth) - 1) max_char_len = max([len(c) for c in config.char_list]) ground_truth_mat = np.ones([len(ground_truth), max_char_len], np.int64) * -1 for i in range(len(ground_truth)): for s in range(max_char_len): if i - s < 0: continue span = ground_truth[i - s : i + 1] span = span.replace(config.space, blank) if span in config.char_list: char_index = config.char_list.index(span) ground_truth_mat[i, s] = char_index return ground_truth_mat, utt_begin_indices def prepare_tokenized_text(config, text): ground_truth = [config.start_of_ground_truth] utt_begin_indices = [] for utt in text: if not ground_truth[-1] == config.space: ground_truth += [config.space] utt_begin_indices.append(len(ground_truth) - 1) for token in utt.split(): if token in config.char_list: if config.replace_spaces_with_blanks and not token.beginswith( config.tokenized_meta_symbol ): ground_truth += [config.space] ground_truth += [token] if not ground_truth[-1] == config.space: ground_truth += [config.space] 
logging.debug(f"ground_truth: {ground_truth}") utt_begin_indices.append(len(ground_truth) - 1) max_char_len = 1 ground_truth_mat = np.ones([len(ground_truth), max_char_len], np.int64) * -1 for i in range(1, len(ground_truth)): if ground_truth[i] == config.space: ground_truth_mat[i, 0] = config.blank else: char_index = config.char_list.index(ground_truth[i]) ground_truth_mat[i, 0] = char_index return ground_truth_mat, utt_begin_indices
Apache License 2.0
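A hedged sketch of prepare_token_list() above, following the docstring's own token-list example; it assumes the ctc_segmentation package (with its compiled Cython extension) is installed and re-exports these names:

import numpy as np
from ctc_segmentation import CtcSegmentationParameters, prepare_token_list

config = CtcSegmentationParameters()        # blank index defaults to 0
text = [np.array([2, 5]), np.array([7, 9])]
ground_truth_mat, utt_begin_indices = prepare_token_list(config, text)

print(ground_truth_mat.ravel().tolist())    # [-1, 0, 2, 5, 0, 7, 9, 0]
print(utt_begin_indices)                    # [1, 4, 7]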
imjoy-team/imjoy-engine
imjoy/minio.py
kwarg_to_flag
python
def kwarg_to_flag(**kwargs): _args = [] for _key, _value in kwargs.items(): key = "--" + _key.replace("_", "-") if _value in (True, False): _args.append(key) else: _args.append(f"{key} {_value}") return " ".join(_args)
Convert keyword arguments into command-line flags.
https://github.com/imjoy-team/imjoy-engine/blob/e529f02ff5ccfb39385a192ef62ee32e1d2bccf6/imjoy/minio.py#L52-L61
import json import logging import os import re import stat import subprocess import sys import tempfile import urllib.request logging.basicConfig(stream=sys.stdout) logger = logging.getLogger("minio") logger.setLevel(logging.INFO) MATH_PATTERN = re.compile("{(.+?)}") EXECUTABLE_PATH = "bin" def setup_minio_executables(): os.makedirs(EXECUTABLE_PATH, exist_ok=True) assert ( sys.platform == "linux" ), "Manual setup required to, please download minio and minio client \ from https://min.io/ and place them under ./bin" mc_path = EXECUTABLE_PATH + "/mc" minio_path = EXECUTABLE_PATH + "/minio" if not os.path.exists(minio_path): print("Minio server executable not found, downloading... ") urllib.request.urlretrieve( "https://dl.min.io/server/minio/release/linux-amd64/minio", minio_path ) if not os.path.exists(mc_path): print("Minio client executable not found, downloading... ") urllib.request.urlretrieve( "https://dl.min.io/client/mc/release/linux-amd64/mc", mc_path ) stat_result = os.stat(minio_path) if not bool(stat_result.st_mode & stat.S_IEXEC): os.chmod(minio_path, stat_result.st_mode | stat.S_IEXEC) stat_result = os.stat(mc_path) if not bool(stat_result.st_mode & stat.S_IEXEC): os.chmod(mc_path, stat_result.st_mode | stat.S_IEXEC) print("MinIO executables are ready.")
MIT License
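A hedged sketch of kwarg_to_flag() above; the flag names and values are arbitrary examples, not actual MinIO options vetted here:

from imjoy.minio import kwarg_to_flag

flags = kwarg_to_flag(address="0.0.0.0:9000", console_address=":9001", quiet=True)
print(flags)  # --address 0.0.0.0:9000 --console-address :9001 --quiet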
jcyk/amr-gs
stog/data/iterators/multiprocess_iterator.py
_create_tensor_dicts
python
def _create_tensor_dicts(input_queue: Queue, output_queue: Queue, iterator: DataIterator, shuffle: bool, index: int) -> None: def instances() -> Iterator[Instance]: instance = input_queue.get() while instance is not None: yield instance instance = input_queue.get() for tensor_dict in iterator(instances(), num_epochs=1, shuffle=shuffle): output_queue.put(tensor_dict) output_queue.put(index)
Pulls at most ``max_instances_in_memory`` from the input_queue, groups them into batches of size ``batch_size``, converts them to ``TensorDict`` s, and puts them on the ``output_queue``.
https://github.com/jcyk/amr-gs/blob/5666215b04151cadf121917826376acc16cb8b30/stog/data/iterators/multiprocess_iterator.py#L15-L34
from typing import Iterable, Iterator, List, Optional import logging from torch.multiprocessing import Manager, Process, Queue, get_logger from stog.utils.checks import ConfigurationError from stog.data.instance import Instance from stog.data.iterators.data_iterator import DataIterator, TensorDict from stog.data.dataset import Batch from stog.data.vocabulary import Vocabulary logger = get_logger() logger.setLevel(logging.INFO)
MIT License
tensorflow/tensor2tensor
tensor2tensor/layers/common_video.py
VideoWriter.write_multi
python
def write_multi(self, frames, encoded_frames=None): if encoded_frames is None: encoded_frames = iter(lambda: None, 1) for (frame, encoded_frame) in zip(frames, encoded_frames): self.write(frame, encoded_frame)
Writes multiple video frames.
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/layers/common_video.py#L666-L672
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensor2tensor.layers import common_layers from tensor2tensor.utils import contrib import tensorflow.compat.v1 as tf from tensorflow.python.ops import summary_op_util try: from tensorflow.python.distribute import summary_op_util as distribute_summary_op_util except ImportError: distribute_summary_op_util = summary_op_util tfl = common_layers.layers() def swap_time_and_batch_axes(inputs): transposed_axes = tf.concat([[1, 0], tf.range(2, tf.rank(inputs))], axis=0) return tf.transpose(inputs, transposed_axes) def encode_to_shape(inputs, shape, scope): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): w, h = shape[1], shape[2] x = inputs x = tfl.flatten(x) x = tfl.dense(x, w * h, activation=None, name="enc_dense") x = tf.reshape(x, (-1, w, h, 1)) return x def decode_to_shape(inputs, shape, scope): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): x = inputs x = tfl.flatten(x) x = tfl.dense(x, shape[2], activation=None, name="dec_dense") x = tf.expand_dims(x, axis=1) return x def basic_lstm(inputs, state, num_units, name=None): input_shape = common_layers.shape_list(inputs) cell = tf.nn.rnn_cell.BasicLSTMCell( num_units, name=name, reuse=tf.AUTO_REUSE) if state is None: state = cell.zero_state(input_shape[0], tf.float32) outputs, new_state = cell(inputs, state) return outputs, new_state def lstm_cell(inputs, state, num_units, use_peepholes=False, cell_clip=0.0, initializer=None, num_proj=None, num_unit_shards=None, num_proj_shards=None, reuse=None, name=None): input_shape = common_layers.shape_list(inputs) cell = tf.nn.rnn_cell.LSTMCell(num_units, use_peepholes=use_peepholes, cell_clip=cell_clip, initializer=initializer, num_proj=num_proj, num_unit_shards=num_unit_shards, num_proj_shards=num_proj_shards, reuse=reuse, name=name, state_is_tuple=False) if state is None: state = cell.zero_state(input_shape[0], tf.float32) outputs, new_state = cell(inputs, state) return outputs, new_state def conv_lstm_2d(inputs, state, output_channels, kernel_size=5, name=None, spatial_dims=None): input_shape = common_layers.shape_list(inputs) batch_size, input_channels = input_shape[0], input_shape[-1] if spatial_dims is None: input_shape = input_shape[1:] else: input_shape = spatial_dims + [input_channels] cell = contrib.rnn().ConvLSTMCell( 2, input_shape, output_channels, [kernel_size, kernel_size], name=name) if state is None: state = cell.zero_state(batch_size, tf.float32) outputs, new_state = cell(inputs, state) return outputs, new_state def scheduled_sample_count(ground_truth_x, generated_x, batch_size, scheduled_sample_var): num_ground_truth = scheduled_sample_var idx = tf.random_shuffle(tf.range(batch_size)) ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth)) generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size)) ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx) generated_examps = tf.gather(generated_x, generated_idx) output = tf.dynamic_stitch([ground_truth_idx, generated_idx], [ground_truth_examps, generated_examps]) if isinstance(batch_size, int): output.set_shape([batch_size] + common_layers.shape_list(output)[1:]) return output def inject_additional_input(layer, inputs, name, mode="concat"): layer_shape = common_layers.shape_list(layer) input_shape = common_layers.shape_list(inputs) zeros_mask = tf.zeros(layer_shape, dtype=tf.float32) if mode == "concat": emb = encode_to_shape(inputs, layer_shape, name) layer = 
tf.concat(values=[layer, emb], axis=-1) elif mode == "multiplicative": filters = layer_shape[-1] input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]]) input_mask = tf.layers.dense(input_reshaped, filters, name=name) input_broad = input_mask + zeros_mask layer *= input_broad elif mode == "multi_additive": filters = layer_shape[-1] input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]]) input_mul = tf.layers.dense(input_reshaped, filters, name=name + "_mul") layer *= tf.nn.sigmoid(input_mul) input_add = tf.layers.dense(input_reshaped, filters, name=name + "_add") layer += input_add else: raise ValueError("Unknown injection mode: %s" % mode) return layer def scheduled_sample_prob(ground_truth_x, generated_x, batch_size, scheduled_sample_var): probability_threshold = scheduled_sample_var probability_of_generated = tf.random_uniform([batch_size]) return tf.where(probability_of_generated > probability_threshold, generated_x, ground_truth_x) def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift): prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]]) image_height = int(prev_image.get_shape()[1]) image_width = int(prev_image.get_shape()[2]) inputs = [] for xkern in range(dna_kernel_size): for ykern in range(dna_kernel_size): inputs.append( tf.expand_dims( tf.slice(prev_image_pad, [0, xkern, ykern, 0], [-1, image_height, image_width, -1]), [3])) inputs = tf.concat(axis=3, values=inputs) kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift kernel = tf.expand_dims( kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4]) return tf.reduce_sum(kernel * inputs, [3], keep_dims=False) def cdna_transformation(prev_image, cdna_input, num_masks, color_channels, dna_kernel_size, relu_shift): batch_size = tf.shape(cdna_input)[0] height = int(prev_image.get_shape()[1]) width = int(prev_image.get_shape()[2]) cdna_kerns = tfl.dense( cdna_input, dna_kernel_size * dna_kernel_size * num_masks, name="cdna_params", activation=None) cdna_kerns = tf.reshape( cdna_kerns, [batch_size, dna_kernel_size, dna_kernel_size, 1, num_masks]) cdna_kerns = (tf.nn.relu(cdna_kerns - relu_shift) + relu_shift) norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True) cdna_kerns /= norm_factor cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3]) cdna_kerns = tf.reshape( cdna_kerns, [dna_kernel_size, dna_kernel_size, batch_size, num_masks]) prev_image = tf.transpose(prev_image, [3, 1, 2, 0]) transformed = tf.nn.depthwise_conv2d( prev_image, cdna_kerns, [1, 1, 1, 1], "SAME") transformed = tf.reshape( transformed, [color_channels, height, width, batch_size, num_masks]) transformed = tf.transpose(transformed, [3, 1, 2, 0, 4]) transformed = tf.unstack(transformed, axis=-1) return transformed def vgg_layer(inputs, nout, kernel_size=3, activation=tf.nn.leaky_relu, padding="SAME", is_training=True, has_batchnorm=False, scope=None): with tf.variable_scope(scope): net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding, activation=None, name="conv") if has_batchnorm: net = tfl.batch_normalization(net, training=is_training, name="bn") net = activation(net) return net def tile_and_concat(image, latent, concat_latent=True): if not concat_latent: return image image_shape = common_layers.shape_list(image) latent_shape = common_layers.shape_list(latent) height, width = image_shape[1], image_shape[2] latent_dims = latent_shape[1] height_multiples = height // latent_dims pad = height - (height_multiples * latent_dims) latent = tf.reshape(latent, (-1, latent_dims, 1, 1)) 
latent = tf.tile(latent, (1, height_multiples, width, 1)) latent = tf.pad(latent, [[0, 0], [pad // 2, pad // 2], [0, 0], [0, 0]]) return tf.concat([image, latent], axis=-1) def _encode_gif(images, fps): writer = WholeVideoWriter(fps) writer.write_multi(images) return writer.finish() def ffmpeg_works(): images = np.zeros((2, 32, 32, 3), dtype=np.uint8) try: _encode_gif(images, 2) return True except (IOError, OSError): return False def py_gif_summary(tag, images, max_outputs, fps, return_summary_value=False): images = np.asarray(images) if images.dtype != np.uint8: raise ValueError("Tensor must have dtype uint8 for gif summary.") if images.ndim != 5: raise ValueError("Tensor must be 5-D for gif summary.") batch_size, _, height, width, channels = images.shape if channels not in (1, 3): raise ValueError("Tensors must have 1 or 3 channels for gif summary.") summ = tf.Summary() all_summ_values = [] num_outputs = min(batch_size, max_outputs) for i in range(num_outputs): image_summ = tf.Summary.Image() image_summ.height = height image_summ.width = width image_summ.colorspace = channels try: image_summ.encoded_image_string = _encode_gif(images[i], fps) except (IOError, OSError) as e: tf.logging.warning( "Unable to encode images to a gif string because either ffmpeg is " "not installed or ffmpeg returned an error: %s. Falling back to an " "image summary of the first frame in the sequence.", e) try: from PIL import Image import io with io.BytesIO() as output: Image.fromarray(images[i][0]).save(output, "PNG") image_summ.encoded_image_string = output.getvalue() except ImportError as e: tf.logging.warning( "Gif summaries requires ffmpeg or PIL to be installed: %s", e) image_summ.encoded_image_string = "" if num_outputs == 1: summ_tag = "{}/gif".format(tag) else: summ_tag = "{}/gif/{}".format(tag, i) curr_summ_value = tf.Summary.Value(tag=summ_tag, image=image_summ) all_summ_values.append(curr_summ_value) summ.value.add(tag=summ_tag, image=image_summ) summ_str = summ.SerializeToString() if return_summary_value: return all_summ_values, summ_str return summ_str def gif_summary(name, tensor, max_outputs=3, fps=10, collections=None, family=None): tensor = tf.convert_to_tensor(tensor) if len(tensor.get_shape()) != 5: raise ValueError("Assuming videos given as tensors in the format " "[batch, time, height, width, channels] but got one " "of shape: %s" % str(tensor.get_shape())) tensor = tf.cast(tensor, tf.uint8) if distribute_summary_op_util.skip_summary(): return tf.constant("") with summary_op_util.summary_scope( name, family, values=[tensor]) as (tag, scope): val = tf.py_func( py_gif_summary, [tag, tensor, max_outputs, fps], tf.string, stateful=False, name=scope) summary_op_util.collect(val, collections, [tf.GraphKeys.SUMMARIES]) return val def tinyify(array, tiny_mode, small_mode): if tiny_mode: return [1 for _ in array] if small_mode: return [max(x // 4, 1) for x in array] return array def get_gaussian_tensor(mean, log_var): z = tf.random_normal(tf.shape(mean), 0, 1, dtype=tf.float32) z = mean + tf.exp(log_var / 2.0) * z return z def conv_latent_tower(images, time_axis, latent_channels=1, min_logvar=-5, is_training=False, random_latent=False, tiny_mode=False, small_mode=False): conv_size = tinyify([32, 64, 64], tiny_mode, small_mode) with tf.variable_scope("latent", reuse=tf.AUTO_REUSE): images = tf.to_float(images) images = tf.unstack(images, axis=time_axis) images = tf.concat(images, axis=3) x = images x = common_layers.make_even_size(x) x = tfl.conv2d(x, conv_size[0], [3, 3], strides=(2, 2), 
padding="SAME", activation=tf.nn.relu, name="latent_conv1") x = contrib.layers().layer_norm(x) if not small_mode: x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2), padding="SAME", activation=tf.nn.relu, name="latent_conv2") x = contrib.layers().layer_norm(x) x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(1, 1), padding="SAME", activation=tf.nn.relu, name="latent_conv3") x = contrib.layers().layer_norm(x) nc = latent_channels mean = tfl.conv2d(x, nc, [3, 3], strides=(2, 2), padding="SAME", activation=None, name="latent_mean") logv = tfl.conv2d(x, nc, [3, 3], strides=(2, 2), padding="SAME", activation=tf.nn.relu, name="latent_std") logvar = logv + min_logvar if not is_training: return tf.zeros_like(mean), tf.zeros_like(logvar) ret_mean, ret_logvar = tf.cond( random_latent, lambda: (tf.zeros_like(mean), tf.zeros_like(logvar)), lambda: (mean, logvar)) return ret_mean, ret_logvar def beta_schedule(schedule, global_step, final_beta, decay_start, decay_end): if decay_start > decay_end: raise ValueError("decay_end is smaller than decay_end.") if schedule == "constant": decayed_value = 0.0 elif schedule == "linear": decayed_value = tf.train.polynomial_decay( learning_rate=final_beta, global_step=global_step - decay_start, decay_steps=decay_end - decay_start, end_learning_rate=0.0) elif schedule == "noisy_linear_cosine_decay": decayed_value = tf.train.noisy_linear_cosine_decay( learning_rate=final_beta, global_step=global_step - decay_start, decay_steps=decay_end - decay_start) else: raise ValueError("Unknown beta schedule.") increased_value = final_beta - decayed_value increased_value = tf.maximum(0.0, increased_value) beta = tf.case( pred_fn_pairs={ tf.less(global_step, decay_start): lambda: 0.0, tf.greater(global_step, decay_end): lambda: final_beta}, default=lambda: increased_value) return beta def extract_random_video_patch(videos, num_frames=-1): if num_frames == -1: return videos batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos) if num_total_frames < num_frames: raise ValueError("Expected num_frames <= %d, got %d" % (num_total_frames, num_frames)) frame_start = tf.random_uniform( shape=(batch_size,), minval=0, maxval=num_total_frames - num_frames + 1, dtype=tf.int32) range_inds = tf.expand_dims(tf.range(num_frames), axis=0) frame_inds = range_inds + tf.expand_dims(frame_start, axis=1) frame_inds = tf.reshape(frame_inds, [-1]) batch_inds = tf.expand_dims(tf.range(batch_size), axis=1) batch_inds = tf.tile(batch_inds, [1, num_frames]) batch_inds = tf.reshape(batch_inds, [-1]) gather_inds = tf.stack((batch_inds, frame_inds), axis=1) video_patches = tf.gather_nd(videos, gather_inds) return tf.reshape(video_patches, (batch_size, num_frames, h, w, c)) class VideoWriter(object): def write(self, frame, encoded_frame=None): raise NotImplementedError
Apache License 2.0
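A hedged sketch of the write_multi() loop above, using a hypothetical ListWriter subclass that only collects frames in memory; VideoWriter is assumed importable from tensor2tensor.layers.common_video per the record's path:

import numpy as np
from tensor2tensor.layers.common_video import VideoWriter

class ListWriter(VideoWriter):
    def __init__(self):
        self.frames = []

    def write(self, frame, encoded_frame=None):
        # write_multi() pairs every frame with None when no encodings are given.
        self.frames.append((frame, encoded_frame))

writer = ListWriter()
writer.write_multi(np.zeros((3, 32, 32, 3), dtype=np.uint8))
print(len(writer.frames), writer.frames[0][1] is None)  # 3 True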
markvdw/gpflow-inter-domain
GPflow/transforms.py
Log1pe.backward
python
def backward(self, y): ys = np.maximum(y - self._lower, np.finfo(np_float_type).eps) return ys + np.log(-np.expm1(-ys))
Inverse of the softplus transform: .. math:: x = \log( \exp(y) - 1) The bound for the input y is [self._lower, inf[; self._lower is subtracted prior to any calculations. The implementation avoids overflow explicitly by applying the log-sum-exp trick: .. math:: \log( \exp(y) - \exp(0)) &= ys + \log( \exp(y - ys) - \exp(-ys)) \\ &= ys + \log( 1 - \exp(-ys)) with ys = \max(0, y) As y cannot be negative, ys could be replaced with y itself. However, in case :math:`y=0` this results in np.log(0). Hence the zero is replaced by a machine epsilon: .. math:: ys = \max( \epsilon, y)
https://github.com/markvdw/gpflow-inter-domain/blob/0cf621e1896a3e1996f863b586c6cd2f795dd9f0/GPflow/transforms.py#L157-L184
from __future__ import absolute_import import numpy as np import tensorflow as tf from . import tf_wraps as tfw from ._settings import settings float_type = settings.dtypes.float_type np_float_type = np.float32 if float_type is tf.float32 else np.float64 class Transform(object): def forward(self, x): raise NotImplementedError def backward(self, y): raise NotImplementedError def tf_forward(self, x): raise NotImplementedError def tf_log_jacobian(self, x): raise NotImplementedError def free_state_size(self, variable_shape): return np.prod(variable_shape) def __str__(self): raise NotImplementedError def __getstate__(self): return self.__dict__.copy() def __setstate__(self, d): self.__dict__ = d class Identity(Transform): def tf_forward(self, x): return tf.identity(x) def forward(self, x): return x def backward(self, y): return y def tf_log_jacobian(self, x): return tf.zeros((1,), float_type) def __str__(self): return '(none)' class Exp(Transform): def __init__(self, lower=1e-6): self._lower = lower def tf_forward(self, x): return tf.exp(x) + self._lower def forward(self, x): return np.exp(x) + self._lower def backward(self, y): return np.log(y - self._lower) def tf_log_jacobian(self, x): return tf.reduce_sum(x) def __str__(self): return '+ve' class Log1pe(Transform): def __init__(self, lower=1e-6): self._lower = lower def forward(self, x): return np.logaddexp(0, x) + self._lower def tf_forward(self, x): return tf.nn.softplus(x) + self._lower def tf_log_jacobian(self, x): return -tf.reduce_sum(tf.log(1. + tf.exp(-x)))
Apache License 2.0
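A hedged sketch of the Log1pe forward/backward round trip; the input values are arbitrary, and Log1pe is assumed to be in scope from the GPflow.transforms module above (importing it pulls in TensorFlow via the package settings):

import numpy as np

t = Log1pe(lower=1e-6)
x = np.array([-3.0, 0.0, 2.0])
y = t.forward(x)                      # softplus(x) + lower
print(np.allclose(t.backward(y), x))  # True up to floating point error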
ploxiln/fab-classic
fabric/operations.py
run
python
def run(command, shell=True, pty=True, combine_stderr=None, quiet=False, warn_only=False, stdin=None, stdout=None, stderr=None, timeout=None, shell_escape=None, capture_buffer_size=None): return _run_command( command, shell, pty, combine_stderr, quiet=quiet, warn_only=warn_only, stdin=stdin, stdout=stdout, stderr=stderr, timeout=timeout, shell_escape=shell_escape, capture_buffer_size=capture_buffer_size, )
Run a shell command on a remote host. If ``shell`` is True (the default), `run` will execute the given command string via a shell interpreter, the value of which may be controlled by setting ``env.shell`` (defaulting to something similar to ``/bin/bash -l -c "<command>"``.) Any double-quote (``"``) or dollar-sign (``$``) characters in ``command`` will be automatically escaped when ``shell`` is True (unless disabled by setting ``shell_escape=False``). When ``shell=False``, no shell wrapping or escaping will occur. (It's possible to specify ``shell=False, shell_escape=True`` if desired, which will still trigger escaping of dollar signs, etc but will not wrap with a shell program invocation). `run` will return the result of the remote program's stdout as a single (likely multiline) string. This string will exhibit ``failed`` and ``succeeded`` boolean attributes specifying whether the command failed or succeeded, and will also include the return code as the ``return_code`` attribute. Furthermore, it includes a copy of the requested & actual command strings executed, as ``.command`` and ``.real_command``, respectively. To lessen memory use when running extremely verbose programs (and, naturally, when having access to their full output afterwards is not necessary!) you may limit how much of the program's stdout/err is stored by setting ``capture_buffer_size`` to an integer value. .. warning:: Do not set ``capture_buffer_size`` to any value smaller than the length of ``env.sudo_prompt`` or you will likely break the functionality of `sudo`! Ditto any user prompts stored in ``env.prompts``. .. note:: This value is used for each buffer independently, so e.g. ``1024`` may result in storing a total of ``2048`` bytes if there's data in both streams.) Any text entered in your local terminal will be forwarded to the remote program as it runs, thus allowing you to interact with password or other prompts naturally. For more on how this works, see :doc:`/usage/interactivity`. You may pass ``pty=False`` to forego creation of a pseudo-terminal on the remote end in case the presence of one causes problems for the command in question. However, this will force Fabric itself to echo any and all input you type while the command is running, including sensitive passwords. (With ``pty=True``, the remote pseudo-terminal will echo for you, and will intelligently handle password-style prompts.) See :ref:`pseudottys` for details. Similarly, if you need to programmatically examine the stderr stream of the remote program (exhibited as the ``stderr`` attribute on this function's return value), you may set ``combine_stderr=False``. Doing so has a high chance of causing garbled output to appear on your terminal (though the resulting strings returned by `~fabric.operations.run` will be properly separated). For more info, please read :ref:`combine_streams`. To ignore non-zero return codes, specify ``warn_only=True``. To both ignore non-zero return codes *and* force a command to run silently, specify ``quiet=True``. To override which local streams are used to display remote stdout and/or stderr, specify ``stdout`` or ``stderr``. (By default, the regular ``sys.stdout`` and ``sys.stderr`` Python stream objects are used.) For example, ``run("command", stderr=sys.stdout)`` would print the remote standard error to the local standard out, while preserving it as its own distinct attribute on the return value (as per above.) Alternately, you could even provide your own stream objects or loggers, e.g. 
``myout = StringIO(); run("command", stdout=myout)``. If you want an exception raised when the remote program takes too long to run, specify ``timeout=N`` where ``N`` is an integer number of seconds, after which to time out. This will cause ``run`` to raise a `~fabric.exceptions.CommandTimeout` exception. If you want to disable Fabric's automatic attempts at escaping quotes, dollar signs etc., specify ``shell_escape=False``. Examples:: run("ls /var/www/") run("ls /home/myuser", shell=False) output = run('ls /var/www/site1') run("take_a_long_time", timeout=5) .. versionadded:: 1.5 The ``quiet``, ``warn_only``, ``stdout`` and ``stderr`` kwargs. .. versionadded:: 1.5 The return value attributes ``.command`` and ``.real_command``. .. versionadded:: 1.6 The ``timeout`` argument. .. versionadded:: 1.7 The ``shell_escape`` argument. .. versionadded:: 1.11 The ``capture_buffer_size`` argument. .. versionadded:: 1.17 The ``stdin`` argument.
https://github.com/ploxiln/fab-classic/blob/6adf6765e87f694a70f32fedc197a754f7bf94f3/fabric/operations.py#L960-L1072
import errno import os import os.path import posixpath import re import six import subprocess import sys import time from glob import glob from collections import deque from contextlib import closing, contextmanager from fabric.context_managers import (settings, char_buffered, hide, quiet as quiet_manager, warn_only as warn_only_manager) from fabric.io import output_loop, input_loop from fabric.network import needs_host, ssh, ssh_config from fabric.sftp import SFTP from fabric.state import env, connections, output, win32, default_channel from fabric.thread_handling import ThreadHandler from fabric.utils import ( abort, error, handle_prompt_abort, indent, _pty_size, warn, apply_lcwd, ) def _shell_escape(string): for char in ('"', '$', '`'): string = string.replace(char, r'\%s' % char) return string class _stdoutString(str): @property def stdout(self): return str(self) class _stdoutBytes(bytes): @property def stdout(self): return bytes(self) class _AttributeList(list): pass def require(*keys, **kwargs): missing_keys = [ k for k in keys if k not in env or (isinstance(env[k], (dict, list, tuple, set)) and not env[k]) ] if not missing_keys: return if len(missing_keys) > 1: variable = "variables were" used = "These variables are" else: variable = "variable was" used = "This variable is" if 'command' in env: prefix = "The command '%s' failed because the " % env.command else: prefix = "The " msg = "%sfollowing required environment %s not defined:\n%s" % ( prefix, variable, indent(missing_keys) ) if 'used_for' in kwargs: msg += "\n\n%s used for %s" % (used, kwargs['used_for']) if 'provided_by' in kwargs: funcs = kwargs['provided_by'] if not hasattr(funcs, '__iter__'): funcs = [funcs] if len(funcs) > 1: command = "one of the following commands" else: command = "the following command" provided_by = [getattr(obj, '__name__', str(obj)) for obj in funcs] msg += "\n\nTry running %s prior to this one, to fix the problem:\n%s" % (command, indent(provided_by)) abort(msg) def prompt(text, key=None, default='', validate=None): handle_prompt_abort("a user-specified prompt() call") if key: previous_value = env.get(key) default_str = "" if default != '': default_str = " [%s] " % str(default).strip() else: default_str = " " prompt_str = text.strip() + default_str value = None while value is None: if six.PY3 is True: value = input(prompt_str) or default else: value = raw_input(prompt_str) or default if validate: if callable(validate): try: value = validate(value) except Exception as e: value = None print("Validation failed for the following reason:") print(indent(e.message) + "\n") else: if not validate.startswith('^'): validate = r'^' + validate if not validate.endswith('$'): validate += r'$' result = re.findall(validate, value) if not result: print("Regular expression validation failed: '%s' does not match '%s'\n" % (value, validate)) value = None if key: env[key] = value if key and previous_value is not None and previous_value != value: warn("overwrote previous env variable '%s'; used to be '%s', is now '%s'." 
% ( key, previous_value, value )) return value @needs_host def put(local_path=None, remote_path=None, use_sudo=False, mirror_local_mode=False, mode=None, use_glob=True, temp_dir=None): local_path = local_path or os.getcwd() local_is_path = not (hasattr(local_path, 'read') and callable(local_path.read)) ftp = SFTP(env.host_string) with closing(ftp) as ftp: home = ftp.normalize('.') remote_path = remote_path or home if remote_path.startswith('~'): remote_path = remote_path.replace('~', home, 1) if temp_dir is None: temp_dir = home if not os.path.isabs(remote_path): if env.get('cwd'): remote_path = env.cwd.rstrip('/') + '/' + remote_path else: remote_path = posixpath.join(home, remote_path) if local_is_path: local_path = os.path.expanduser(local_path) local_path = apply_lcwd(local_path, env) if use_glob: names = glob(local_path) else: if os.path.exists(local_path): names = [local_path] else: names = [] else: names = [local_path] if local_is_path and not names: err = "'%s' is not a valid local path or glob." % local_path raise ValueError(err) if ftp.exists(remote_path): if local_is_path and len(names) != 1 and not ftp.isdir(remote_path): raise ValueError("'%s' is not a directory" % remote_path) remote_paths = [] failed_local_paths = [] for lpath in names: try: if local_is_path and os.path.isdir(lpath): p = ftp.put_dir(lpath, remote_path, use_sudo, mirror_local_mode, mode, temp_dir) remote_paths.extend(p) else: p = ftp.put(lpath, remote_path, use_sudo, mirror_local_mode, mode, local_is_path, temp_dir) remote_paths.append(p) except Exception as e: msg = "put() encountered an exception while uploading '%s'" failure = lpath if local_is_path else "<StringIO>" failed_local_paths.append(failure) error(message=msg % lpath, exception=e) ret = _AttributeList(remote_paths) ret.failed = failed_local_paths ret.succeeded = not ret.failed return ret @needs_host def get(remote_path, local_path=None, use_sudo=False, temp_dir=None): local_path = local_path or "%(host)s/%(path)s" local_is_path = not (hasattr(local_path, 'write') and callable(local_path.write)) if local_is_path: local_path = apply_lcwd(local_path, env) ftp = SFTP(env.host_string) with closing(ftp) as ftp: home = ftp.normalize('.') if remote_path.startswith('~'): remote_path = remote_path.replace('~', home, 1) if local_is_path: local_path = os.path.expanduser(local_path) if temp_dir is None: temp_dir = home if not os.path.isabs(remote_path): if env.get('cwd'): remote_path_escaped = env.cwd.rstrip('/') remote_path_escaped = remote_path_escaped.replace('\\ ', ' ') remote_path = remote_path_escaped + '/' + remote_path else: remote_path = posixpath.join(home, remote_path) local_files = [] failed_remote_files = [] try: if '*' in remote_path or '?' in remote_path: names = ftp.glob(remote_path) if not names: raise IOError(errno.ENOENT, "No such file") else: names = [remote_path] if not local_is_path: if len(names) > 1 or ftp.isdir(names[0]): error("[%s] %s is a glob or directory, but local_path is a file object!" 
% (env.host_string, remote_path)) for remote_path in names: if ftp.isdir(remote_path): result = ftp.get_dir(remote_path, local_path, use_sudo, temp_dir) local_files.extend(result) else: result = ftp.get(remote_path, local_path, use_sudo, local_is_path, os.path.basename(remote_path), temp_dir) if local_is_path: local_files.append(result) except Exception as e: failed_remote_files.append(remote_path) msg = "get() encountered an exception while downloading '%s'" error(message=msg % remote_path, exception=e) ret = _AttributeList(local_files if local_is_path else []) ret.failed = failed_remote_files ret.succeeded = not ret.failed return ret def _sudo_prefix_argument(argument, value): if value is None: return "" if str(value).isdigit(): value = "#%s" % value return ' %s "%s"' % (argument, value) def _sudo_prefix(user, group=None): prefix = env.sudo_prefix % env if user is not None or group is not None: return "%s%s%s " % (prefix, _sudo_prefix_argument('-u', user), _sudo_prefix_argument('-g', group)) return prefix def _shell_wrap(command, shell_escape, shell=True, sudo_prefix=None): if shell and not env.use_shell: shell = False if sudo_prefix is None: sudo_prefix = "" else: sudo_prefix += " " if shell: shell = env.shell + " " if shell_escape: command = _shell_escape(command) command = '"%s"' % command else: shell = "" return sudo_prefix + shell + command def _prefix_commands(command, which): prefixes = list(env.command_prefixes) cwd = env.cwd if which == 'remote' else env.lcwd redirect = " >/dev/null" if not win32 else '' if cwd: prefixes.insert(0, 'cd %s%s' % (cwd, redirect)) glue = " && " prefix = (glue.join(prefixes) + glue) if prefixes else "" return prefix + command def _prefix_env_vars(command, local=False): env_vars = {} path = env.path if path: if env.path_behavior == 'append': path = '$PATH:\"%s\"' % path elif env.path_behavior == 'prepend': path = '\"%s\":$PATH' % path elif env.path_behavior == 'replace': path = '\"%s\"' % path env_vars['PATH'] = path env_vars.update(env.shell_env) if env_vars: set_cmd, exp_cmd = '', '' if win32 and local: set_cmd = 'SET ' else: exp_cmd = 'export ' exports = ' '.join( '%s%s="%s"' % (set_cmd, k, v if k == 'PATH' else _shell_escape(v)) for k, v in six.iteritems(env_vars) ) shell_env_str = '%s%s && ' % (exp_cmd, exports) else: shell_env_str = '' return shell_env_str + command def _execute(channel, command, pty=True, combine_stderr=None, invoke_shell=False, stdin=None, stdout=None, stderr=None, timeout=None, capture_buffer_size=None): stdin = stdin or sys.stdin stdout = stdout or sys.stdout stderr = stderr or sys.stderr timeout = env.command_timeout if (timeout is None) else timeout using_pty = invoke_shell or (pty and env.always_use_pty and (stdin is sys.stdin)) remote_interrupt = env.remote_interrupt if remote_interrupt is None: remote_interrupt = invoke_shell if remote_interrupt and not using_pty: remote_interrupt = False with char_buffered(stdin): if combine_stderr is None: combine_stderr = env.combine_stderr channel.set_combine_stderr(combine_stderr) if using_pty: rows, cols = _pty_size() channel.get_pty(width=cols, height=rows) config_agent = ssh_config().get('forwardagent', 'no').lower() == 'yes' forward = None if env.forward_agent or config_agent: forward = ssh.agent.AgentRequestHandler(channel) if invoke_shell: channel.invoke_shell() if command: channel.sendall(command + "\n") else: channel.exec_command(command=command) stdout_buf = deque(maxlen=capture_buffer_size) stderr_buf = deque(maxlen=capture_buffer_size) if invoke_shell: stdout_buf = 
stderr_buf = None workers = ( ThreadHandler('out', output_loop, channel, "recv", capture=stdout_buf, stream=stdout, timeout=timeout), ThreadHandler('err', output_loop, channel, "recv_stderr", capture=stderr_buf, stream=stderr, timeout=timeout), ThreadHandler('in', input_loop, channel, stdin, using_pty) ) while True: if channel.exit_status_ready(): break else: for worker in workers: worker.raise_if_needed() try: time.sleep(ssh.io_sleep) except KeyboardInterrupt: if not remote_interrupt: raise channel.send('\x03') status = channel.recv_exit_status() for worker in workers: worker.thread.join() worker.raise_if_needed() channel.close() if forward is not None: forward.close() if not invoke_shell: stdout_buf = ''.join(stdout_buf).strip() stderr_buf = ''.join(stderr_buf).strip() if output.running and (output.stdout and stdout_buf and not stdout_buf.endswith("\n")) or (output.stderr and stderr_buf and not stderr_buf.endswith("\n")): print("") return stdout_buf, stderr_buf, status @needs_host def open_shell(command=None): _execute(channel=default_channel(), command=command, pty=True, combine_stderr=True, invoke_shell=True) @contextmanager def _noop(): yield def _run_command(command, shell=True, pty=True, combine_stderr=True, sudo=False, user=None, quiet=False, warn_only=False, stdin=None, stdout=None, stderr=None, group=None, timeout=None, shell_escape=None, capture_buffer_size=None): manager = _noop if warn_only: manager = warn_only_manager if quiet: manager = quiet_manager with manager(): given_command = command if shell_escape is None: shell_escape = env.get('shell_escape', True) wrapped_command = _shell_wrap( _prefix_env_vars(_prefix_commands(command, 'remote')), shell_escape, shell, _sudo_prefix(user, group) if sudo else None ) which = 'sudo' if sudo else 'run' if output.debug: print("[%s] %s: %s" % (env.host_string, which, wrapped_command)) elif output.running: print("[%s] %s: %s" % (env.host_string, which, given_command)) result_stdout, result_stderr, status = _execute( channel=default_channel(), command=wrapped_command, pty=pty, combine_stderr=combine_stderr, invoke_shell=False, stdin=stdin, stdout=stdout, stderr=stderr, timeout=timeout, capture_buffer_size=capture_buffer_size) out = _stdoutString(result_stdout) err = result_stderr out.failed = False out.command = given_command out.real_command = wrapped_command if status not in env.ok_ret_codes: out.failed = True msg = "%s() received nonzero return code %s while executing" % ( which, status ) if env.warn_only: msg += " '%s'!" % given_command else: msg += "!\n\nRequested: %s\nExecuted: %s" % ( given_command, wrapped_command ) error(message=msg, stdout=out, stderr=err) out.return_code = status out.succeeded = not out.failed out.stderr = err return out @needs_host
BSD 2-Clause Simplified License
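A minimal usage sketch assembled from the docstring above; the host string and command are placeholders, and a reachable SSH host is needed for it to actually execute.

from io import StringIO

from fabric.operations import run
from fabric.state import env

env.host_string = "deploy@example.com"   # placeholder host, for illustration only

# Capture remote stdout into a local buffer while still getting the result object.
myout = StringIO()
result = run("uname -a", stdout=myout, warn_only=True)

if result.failed:
    print("command failed with return code", result.return_code)
else:
    print("ran:", result.real_command)
    print("captured:", myout.getvalue())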
dedsecinside/awesome-scripts
APIs/Telegram API/telethon/extensions/binaryreader.py
BinaryReader.__init__
python
def __init__(self, data):
    self.stream = BytesIO(data)
    self._last = None
Initialize the reader with an in-memory stream over the given byte data. Args: data: the raw bytes to read from.
https://github.com/dedsecinside/awesome-scripts/blob/856835e5ff5f8a6af2d74bb25800c620feb712e3/APIs/Telegram API/telethon/extensions/binaryreader.py#L23-L32
import os import time from datetime import datetime, timezone, timedelta from io import BytesIO from struct import unpack from ..errors import TypeNotFoundError from ..tl.alltlobjects import tlobjects from ..tl.core import core_objects _EPOCH_NAIVE = datetime(*time.gmtime(0)[:6]) _EPOCH = _EPOCH_NAIVE.replace(tzinfo=timezone.utc) class BinaryReader:
MIT License
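The constructor only wraps the bytes in a BytesIO stream; a stdlib-only sketch of the read pattern it enables is below (no BinaryReader methods beyond __init__ appear in this record, so none are assumed).

from io import BytesIO
from struct import unpack

data = (1).to_bytes(4, "little")           # illustrative 4-byte payload
stream = BytesIO(data)                     # the same stream type __init__ wraps
(value,) = unpack("<i", stream.read(4))    # read one little-endian int32
print(value)                               # -> 1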
googleads/google-ads-python
examples/planning/forecast_reach.py
_forecast_suggested_mix
python
def _forecast_suggested_mix( client, customer_id, location_id, currency_code, budget ): preferences = client.get_type("Preferences") preferences.has_guaranteed_price = True preferences.starts_with_sound = True preferences.is_skippable = False preferences.top_content_only = True preferences.ad_length = ( client.enums.ReachPlanAdLengthEnum.FIFTEEN_OR_TWENTY_SECONDS ) reach_plan_service = client.get_service("ReachPlanService") request = client.get_type("GenerateProductMixIdeasRequest") request.customer_id = customer_id request.plannable_location_id = location_id request.preferences = preferences request.currency_code = currency_code request.budget_micros = int(budget * ONE_MILLION) mix_response = reach_plan_service.generate_product_mix_ideas( request=request ) product_mix = [] for product in mix_response.product_allocation: planned_product = client.get_type("PlannedProduct") planned_product.plannable_product_code = product.plannable_product_code planned_product.budget_micros = product.budget_micros product_mix.append(planned_product) _request_reach_curve( client, customer_id, product_mix, location_id, currency_code )
Pulls a forecast for a suggested product mix based on a set of preferences. Args: client: an initialized GoogleAdsClient instance. customer_id: The customer ID for the reach forecast. location_id: The location ID to plan for. currency_code: Three-character ISO 4217 currency code. budget: Budget to allocate to the plan.
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/examples/planning/forecast_reach.py#L214-L257
import argparse import math import sys from google.ads.googleads.client import GoogleAdsClient from google.ads.googleads.errors import GoogleAdsException ONE_MILLION = 1.0e6 def main(client, customer_id): location_id = "2840" currency_code = "USD" budget = 500000 _show_plannable_locations(client) _show_plannable_products(client, location_id) _forecast_manual_mix( client, customer_id, location_id, currency_code, budget ) _forecast_suggested_mix( client, customer_id, location_id, currency_code, budget ) def _show_plannable_locations(client): reach_plan_service = client.get_service("ReachPlanService") response = reach_plan_service.list_plannable_locations() print("Plannable Locations") print("Name,\tId,\tParentCountryId") for location in response.plannable_locations: print( f"'{location.name}',\t{location.id},\t{location.parent_country_id}" ) def _show_plannable_products(client, location_id): reach_plan_service = client.get_service("ReachPlanService") response = reach_plan_service.list_plannable_products( plannable_location_id=location_id ) print(f"Plannable Products for Location ID {location_id}") for product_metadata in response.product_metadata: print( f"{product_metadata.plannable_product_code} : " f"{product_metadata.plannable_product_name}" ) print("Age Ranges:") for age_range in product_metadata.plannable_targeting.age_ranges: print(f"\t- {age_range.name}") print("Genders:") for gender in product_metadata.plannable_targeting.genders: print(f"\t- {gender.type_.name}") print("Devices:") for device in product_metadata.plannable_targeting.devices: print(f"\t- {device.type_.name}") def _request_reach_curve( client, customer_id, product_mix, location_id, currency_code ): request = client.get_type("GenerateReachForecastRequest") request.customer_id = customer_id request.campaign_duration.duration_in_days = 28 request.currency_code = currency_code request.cookie_frequency_cap = 0 request.min_effective_frequency = 1 request.planned_products = product_mix request.targeting.plannable_location_id = location_id request.targeting.age_range = ( client.enums.ReachPlanAgeRangeEnum.AGE_RANGE_18_65_UP ) for gender_type in [ client.enums.GenderTypeEnum.FEMALE, client.enums.GenderTypeEnum.MALE, ]: gender = client.get_type("GenderInfo") gender.type_ = gender_type request.targeting.genders.append(gender) for device_type in [ client.enums.DeviceEnum.DESKTOP, client.enums.DeviceEnum.MOBILE, client.enums.DeviceEnum.TABLET, ]: device = client.get_type("DeviceInfo") device.type_ = device_type request.targeting.devices.append(device) reach_plan_service = client.get_service("ReachPlanService") response = reach_plan_service.generate_reach_forecast(request=request) print( "Currency, Cost, On-Target Reach, On-Target Imprs, Total Reach," " Total Imprs, Products" ) for point in response.reach_curve.reach_forecasts: product_splits = [] for p in point.planned_product_reach_forecasts: product_splits.append( {p.plannable_product_code: p.cost_micros / ONE_MILLION} ) print( [ currency_code, point.cost_micros / ONE_MILLION, point.forecast.on_target_reach, point.forecast.on_target_impressions, point.forecast.total_reach, point.forecast.total_impressions, product_splits, ] ) def _forecast_manual_mix( client, customer_id, location_id, currency_code, budget ): product_mix = [] trueview_allocation = 0.15 bumper_allocation = 1 - trueview_allocation product_splits = [ ("TRUEVIEW_IN_STREAM", trueview_allocation), ("BUMPER", bumper_allocation), ] for product, split in product_splits: planned_product = client.get_type("PlannedProduct") 
planned_product.plannable_product_code = product planned_product.budget_micros = math.trunc(budget * ONE_MILLION * split) product_mix.append(planned_product) _request_reach_curve( client, customer_id, product_mix, location_id, currency_code )
Apache License 2.0
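A hedged sketch of driving the example's main() function from the surrounding context; the credentials file path and customer ID are placeholders, and load_from_storage is the standard client loader used across google-ads-python examples.

from google.ads.googleads.client import GoogleAdsClient

# Placeholder values; a real run needs a google-ads.yaml configuration file
# and a valid customer ID (digits only, no dashes).
googleads_client = GoogleAdsClient.load_from_storage("google-ads.yaml")
main(googleads_client, customer_id="1234567890")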
splunk/addonfactory-ucc-generator
splunk_add_on_ucc_framework/__init__.py
install_libs
python
def install_libs(path, ucc_lib_target): def _install_libs(requirements, ucc_target, installer="python3"): if not os.path.exists(requirements): logger.warning(f"Unable to find requirements file. {requirements}") else: if not os.path.exists(ucc_target): os.makedirs(ucc_target) install_cmd = ( installer + ' -m pip install -r "' + requirements + '" --no-compile --prefer-binary --ignore-installed --use-deprecated=legacy-resolver --target "' + ucc_target + '"' ) os.system(installer + " -m pip install pip --upgrade") os.system(install_cmd) logger.info(f" Checking for requirements in {path}") if os.path.exists(os.path.join(path, "lib", "requirements.txt")): logger.info(" Uses common requirements") _install_libs( requirements=os.path.join(path, "lib", "requirements.txt"), ucc_target=ucc_lib_target, ) elif os.path.exists( os.path.join(os.path.abspath(os.path.join(path, os.pardir)), "requirements.txt") ): logger.info(" Uses common requirements") _install_libs( requirements=os.path.join( os.path.abspath(os.path.join(path, os.pardir)), "requirements.txt" ), ucc_target=ucc_lib_target, ) else: logger.info(" Not using common requirements") noshipdirs = ["setuptools", "bin", "pip", "distribute", "wheel"] p = Path(ucc_lib_target) for nsd in noshipdirs: try: for o in p.glob(nsd + "*"): if o.is_dir(): logging.info(f" removing directory {o} from output must not ship") shutil.rmtree(o) except FileNotFoundError: pass NO_USER_EXEC = ~stat.S_IEXEC NO_GROUP_EXEC = ~stat.S_IXGRP NO_OTHER_EXEC = ~stat.S_IXOTH NO_EXEC = NO_USER_EXEC & NO_GROUP_EXEC & NO_OTHER_EXEC for o in p.rglob("*"): if not o.is_dir() and os.access(o, os.X_OK): logging.info(f" fixing {o} execute bit") current_permissions = stat.S_IMODE(os.lstat(o).st_mode) os.chmod(o, current_permissions & NO_EXEC)
Install third-party libraries into the add-on. Args: path (str): Path of the add-on directory. ucc_lib_target (str): Target path to install the libraries into.
https://github.com/splunk/addonfactory-ucc-generator/blob/e6f30a47cf4bea6bcc1b5669ec30784ff75b7d24/splunk_add_on_ucc_framework/__init__.py#L300-L377
__version__ = "5.9.0" import argparse import configparser import json import logging import os import shutil import stat import sys from pathlib import Path from defusedxml import ElementTree as defused_et from dunamai import Style, Version from jinja2 import Environment, FileSystemLoader from .app_conf import AppConf from .app_manifest import ( APP_MANIFEST_FILE_NAME, APP_MANIFEST_WEBSITE, AppManifest, AppManifestFormatException, ) from .global_config_validator import ( GlobalConfigValidator, GlobalConfigValidatorException, ) from .meta_conf import MetaConf from .start_alert_build import alert_build from .uccrestbuilder import build from .uccrestbuilder.global_config import ( GlobalConfigBuilderSchema, GlobalConfigPostProcessor, ) sourcedir = os.path.dirname(os.path.realpath(__file__)) j2_env = Environment(loader=FileSystemLoader(os.path.join(sourcedir, "templates"))) logger = logging.getLogger("UCC") logger.setLevel(logging.INFO) formatter = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s: %(message)s") shandler = logging.StreamHandler() shandler.setLevel(logging.INFO) shandler.setFormatter(formatter) logger.addHandler(shandler) PARENT_DIR = ".." def get_os_path(path): if "\\\\" in path: path = path.replace("\\\\", os.sep) else: path = path.replace("\\", os.sep) path = path.replace("/", os.sep) return path.strip(os.sep) def recursive_overwrite(src, dest, ignore_list=None): if os.path.isdir(src): if not os.path.isdir(dest): os.makedirs(dest) files = os.listdir(src) for f in files: if not ignore_list or not os.path.join(dest, f) in ignore_list: recursive_overwrite( os.path.join(src, f), os.path.join(dest, f), ignore_list ) else: logger.info(f"Excluding : {os.path.join(dest, f)}") else: if os.path.exists(dest): os.remove(dest) shutil.copy(src, dest) def clean_before_build(outputdir): logger.info("Cleaning out directory " + outputdir) shutil.rmtree(os.path.join(outputdir), ignore_errors=True) os.makedirs(os.path.join(outputdir)) logger.info("Cleaned out directory " + outputdir) def version_tuple(version_str): filled = [] for point in version_str.split("."): filled.append(point.zfill(8)) return tuple(filled) def _handle_biased_terms(conf_entities: dict) -> dict: for entity in conf_entities: entity_option = entity.get("options") if entity_option and "whiteList" in entity_option: entity_option["allowList"] = entity_option.get("whiteList") del entity_option["whiteList"] if entity_option and "blackList" in entity_option: entity_option["denyList"] = entity_option.get("blackList") del entity_option["blackList"] return conf_entities def handle_biased_terms_update(schema_content: dict) -> dict: pages = schema_content.get("pages", {}) ta_tabs = pages.get("configuration", {}).get("tabs", {}) for tab in ta_tabs: conf_entities = tab.get("entity") tab["entity"] = _handle_biased_terms(conf_entities) if "inputs" in pages: services = pages.get("inputs", {}).get("services", {}) for service in services: conf_entities = service.get("entity") service["entity"] = _handle_biased_terms(conf_entities) schema_content["meta"]["schemaVersion"] = "0.0.1" return schema_content def handle_dropping_api_version_update(schema_content: dict) -> dict: if schema_content["meta"].get("apiVersion"): del schema_content["meta"]["apiVersion"] schema_content["meta"]["schemaVersion"] = "0.0.3" return schema_content def handle_update(config_path): with open(config_path) as config_file: schema_content = json.load(config_file) version = schema_content.get("meta").get("schemaVersion", "0.0.0") if version_tuple(version) < 
version_tuple("0.0.1"): schema_content = handle_biased_terms_update(schema_content) with open(config_path, "w") as config_file: json.dump(schema_content, config_file, ensure_ascii=False, indent=4) if version_tuple(version) < version_tuple("0.0.2"): ta_tabs = schema_content.get("pages").get("configuration", {}).get("tabs", {}) for tab in ta_tabs: if tab["name"] == "account": conf_entities = tab.get("entity") oauth_state_enabled_entity = {} for entity in conf_entities: if entity.get("field") == "oauth_state_enabled": logger.warning( "oauth_state_enabled field is no longer a separate " "entity since UCC version 5.0.0. It is now an " "option in the oauth field. Please update the " "globalconfig.json file accordingly." ) oauth_state_enabled_entity = entity if entity.get("field") == "oauth" and not entity.get( "options", {} ).get("oauth_state_enabled"): entity["options"]["oauth_state_enabled"] = False if oauth_state_enabled_entity: conf_entities.remove(oauth_state_enabled_entity) tab_options = tab.get("options", {}) if tab_options.get("onChange"): logger.error( "The onChange option is no longer supported since UCC " "version 5.0.0. You can use custom hooks to implement " "these actions." ) del tab_options["onChange"] if tab_options.get("onLoad"): logger.error( "The onLoad option is no longer supported since UCC " "version 5.0.0. You can use custom hooks to implement " "these actions." ) del tab_options["onLoad"] is_inputs = "inputs" in schema_content.get("pages") if is_inputs: services = schema_content.get("pages").get("inputs", {}).get("services", {}) for service in services: service_options = service.get("options", {}) if service_options.get("onChange"): logger.error( "The onChange option is no longer supported since UCC " "version 5.0.0. You can use custom hooks to implement " "these actions." ) del service_options["onChange"] if service_options.get("onLoad"): logger.error( "The onLoad option is no longer supported since UCC " "version 5.0.0. You can use custom hooks to implement " "these actions." ) del service_options["onLoad"] schema_content["meta"]["schemaVersion"] = "0.0.2" with open(config_path, "w") as config_file: json.dump(schema_content, config_file, ensure_ascii=False, indent=4) if version_tuple(version) < version_tuple("0.0.3"): schema_content = handle_dropping_api_version_update(schema_content) with open(config_path, "w") as config_file: json.dump(schema_content, config_file, ensure_ascii=False, indent=4) return schema_content def replace_token(ta_name, outputdir): logger.info("Replace tokens in views") views = ["inputs.xml", "configuration.xml", "redirect.xml"] for view in views: template_dir = os.path.join( outputdir, ta_name, "default", "data", "ui", "views" ) with open(os.path.join(template_dir, view)) as f: s = f.read() with open(os.path.join(template_dir, view), "w") as f: s = s.replace("${package.name}", ta_name) if view == "redirect.xml": s = s.replace("${ta.name}", ta_name.lower()) f.write(s)
Apache License 2.0
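A minimal invocation sketch; both paths are hypothetical and would normally come from the add-on build pipeline.

# Hypothetical paths for illustration; install_libs itself looks for
# lib/requirements.txt (or a requirements.txt one level up) under the given path.
addon_path = "output/Splunk_TA_example/package"
lib_target = "output/Splunk_TA_example/lib"
install_libs(addon_path, ucc_lib_target=lib_target)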
decentralized-identity/keripy
src/keri/vdr/eventing.py
Tever.vcState
python
def vcState(self, vcpre):
    vci = nsKey([self.prefixer.qb64, vcpre])
    cnt = self.reger.cntTels(vci)
    if cnt == 1:
        return VcStates.issued
    elif cnt == 2:
        return VcStates.revoked
    return None
Calculate the state (issued/revoked) of a VC from the database. Returns None if the VC was never issued from this registry. Parameters: vcpre: the VC identifier
https://github.com/decentralized-identity/keripy/blob/7bdaf57972d44c2435726bd2300b01a1a90a83b2/src/keri/vdr/eventing.py#L777-L792
import json from collections import deque, namedtuple from orderedset import OrderedSet as oset from ..core.coring import (MtrDex, Serder, Serials, Versify, Prefixer, Ilks, Seqner, Verfer) from ..core.eventing import SealEvent, ample, TraitDex, verifySigs, validateSN from ..db import basing from ..db.dbing import dgKey, snKey from ..help import helping from ..kering import (MissingWitnessSignatureError, Version, MissingAnchorError, ValidationError, OutOfOrderError, LikelyDuplicitousError) from ..vdr.viring import Registry, nsKey from .. import help logger = help.ogler.getLogger() VCP_LABELS = ["v", "i", "s", "t", "bt", "b", "c"] VRT_LABELS = ["v", "i", "s", "t", "p", "bt", "b", "ba", "br"] ISS_LABELS = ["v", "i", "s", "t", "ri", "dt"] BIS_LABELS = ["v", "i", "s", "t", "ra", "dt"] REV_LABELS = ["v", "i", "s", "t", "p", "dt"] BRV_LABELS = ["v", "i", "s", "t", "ra", "p", "dt"] VcState = namedtuple("VcState", 'issued revoked') VcStates = VcState(issued='issued', revoked="revoked") def incept( pre, toad=None, baks=None, cnfg=None, version=Version, kind=Serials.json, code=None, ): vs = Versify(version=version, kind=kind, size=0) isn = 0 ilk = Ilks.vcp cnfg = cnfg if cnfg is not None else [] baks = baks if baks is not None else [] if TraitDex.NoBackers in cnfg and len(baks) > 0: raise ValueError("{} backers specified for NB vcp, 0 allowed".format(len(baks))) if len(oset(baks)) != len(baks): raise ValueError("Invalid baks = {}, has duplicates.".format(baks)) if isinstance(toad, str): toad = "{:x}".format(toad) elif toad is None: if not baks: toad = 0 else: toad = ample(len(baks)) if baks: if toad < 1 or toad > len(baks): raise ValueError("Invalid toad = {} for baks = {}".format(toad, baks)) else: if toad != 0: raise ValueError("Invalid toad = {} for baks = {}".format(toad, baks)) ked = dict(v=vs, i="", ii=pre, s="{:x}".format(isn), t=ilk, c=cnfg, bt="{:x}".format(toad), b=baks ) prefixer = Prefixer(ked=ked, code=code, allows=[MtrDex.Blake3_256]) ked["i"] = prefixer.qb64 return Serder(ked=ked) def rotate( regk, dig, sn=1, toad=None, baks=None, cuts=None, adds=None, version=Version, kind=Serials.json, ): if sn < 1: raise ValueError("Invalid sn = {} for vrt.".format(sn)) vs = Versify(version=version, kind=kind, size=0) ilk = Ilks.vrt baks = baks if baks is not None else [] bakset = oset(baks) if len(bakset) != len(baks): raise ValueError("Invalid baks = {}, has duplicates.".format(baks)) cuts = cuts if cuts is not None else [] cutset = oset(cuts) if len(cutset) != len(cuts): raise ValueError("Invalid cuts = {}, has duplicates.".format(cuts)) if (bakset & cutset) != cutset: raise ValueError("Invalid cuts = {}, not all members in baks.".format(cuts)) adds = adds if adds is not None else [] addset = oset(adds) if len(addset) != len(adds): raise ValueError("Invalid adds = {}, has duplicates.".format(adds)) if cutset & addset: raise ValueError("Intersecting cuts = {} and adds = {}.".format(cuts, adds)) if bakset & addset: raise ValueError("Intersecting baks = {} and adds = {}.".format(baks, adds)) newbakset = (bakset - cutset) | addset if len(newbakset) != (len(baks) - len(cuts) + len(adds)): raise ValueError("Invalid member combination among baks = {}, cuts ={}, " "and adds = {}.".format(baks, cuts, adds)) if isinstance(toad, str): toad = "{:x}".format(toad) elif toad is None: if not newbakset: toad = 0 else: toad = ample(len(newbakset)) if newbakset: if toad < 1 or toad > len(newbakset): raise ValueError("Invalid toad = {} for resultant wits = {}" "".format(toad, list(newbakset))) else: if toad != 0: 
raise ValueError("Invalid toad = {} for resultant wits = {}" "".format(toad, list(newbakset))) ked = dict(v=vs, i=regk, p=dig, s="{:x}".format(sn), t=ilk, bt="{:x}".format(toad), br=cuts, ba=adds, ) return Serder(ked=ked) def issue( vcdig, regk, version=Version, kind=Serials.json, ): vs = Versify(version=version, kind=kind, size=0) ked = dict(v=vs, i=vcdig, s="{:x}".format(0), t=Ilks.iss, ri=regk, dt=helping.nowIso8601() ) return Serder(ked=ked) def revoke( vcdig, regk, dig, version=Version, kind=Serials.json, ): vs = Versify(version=version, kind=kind, size=0) isn = 1 ilk = Ilks.rev ked = dict(v=vs, i=vcdig, s="{:x}".format(isn), t=ilk, ri=regk, p=dig, dt=helping.nowIso8601() ) return Serder(ked=ked) def backerIssue( vcdig, regk, regsn, regd, version=Version, kind=Serials.json, ): vs = Versify(version=version, kind=kind, size=0) isn = 0 ilk = Ilks.bis seal = SealEvent(regk, regsn, regd) ked = dict(v=vs, i=vcdig, ii=regk, s="{:x}".format(isn), t=ilk, ra=seal._asdict(), dt=helping.nowIso8601(), ) return Serder(ked=ked) def backerRevoke( vcdig, regk, regsn, regd, dig, version=Version, kind=Serials.json, ): vs = Versify(version=version, kind=kind, size=0) isn = 1 ilk = Ilks.brv seal = SealEvent(regk, regsn, regd) ked = dict(v=vs, i=vcdig, s="{:x}".format(isn), t=ilk, p=dig, ra=seal._asdict(), dt=helping.nowIso8601(), ) return Serder(ked=ked) def query(regk, vcid, res, dt=None, dta=None, dtb=None, version=Version, kind=Serials.json): vs = Versify(version=version, kind=kind, size=0) ilk = Ilks.req qry = dict( i=vcid, ri=regk ) if dt is not None: qry["dt"] = dt if dta is not None: qry["dta"] = dt if dtb is not None: qry["dtb"] = dt ked = dict(v=vs, t=ilk, r=res, q=qry ) return Serder(ked=ked) class Tever: NoBackers = False def __init__(self, serder, seqner=None, diger=None, bigers=None, db=None, reger=None, noBackers=None, regk=None, local=False): self.reger = reger if reger is not None else Registry() self.db = db if db is not None else basing.Baser() self.version = serder.version self.regk = regk self.local = True if local else False ilk = serder.ked["t"] if ilk not in [Ilks.vcp]: raise ValidationError("Expected ilk {} got {} for evt: {}".format(Ilks.vcp, ilk, serder)) self.ilk = ilk labels = VCP_LABELS for k in labels: if k not in serder.ked: raise ValidationError("Missing element = {} from {} event for " "evt = {}.".format(k, ilk, serder.ked)) self.incept(serder=serder) self.config(serder=serder, noBackers=noBackers) bigers = self.valAnchorBigs(serder=serder, seqner=seqner, diger=diger, bigers=bigers, toad=self.toad, baks=self.baks) self.logEvent(pre=self.prefixer.qb64b, sn=0, serder=serder, seqner=seqner, diger=diger, bigers=bigers, baks=self.baks) def incept(self, serder): ked = serder.ked self.pre = ked["ii"] self.prefixer = Prefixer(qb64=serder.pre) if not self.prefixer.verify(ked=ked, prefixed=True): raise ValidationError("Invalid prefix = {} for registry inception evt = {}." .format(self.prefixer.qb64, ked)) sn = ked["s"] self.sn = validateSN(sn, inceptive=True) self.cuts = [] self.adds = [] baks = ked["b"] if len(oset(baks)) != len(baks): raise ValidationError("Invalid baks = {}, has duplicates for evt = {}." "".format(baks, ked)) self.baks = baks toad = int(ked["bt"], 16) if baks: if toad < 1 or toad > len(baks): raise ValidationError("Invalid toad = {} for baks = {} for evt = {}." "".format(toad, baks, ked)) else: if toad != 0: raise ValidationError("Invalid toad = {} for baks = {} for evt = {}." 
"".format(toad, baks, ked)) self.toad = toad self.serder = serder def config(self, serder, noBackers=None): self.noBackers = (True if (noBackers if noBackers is not None else self.NoBackers) else False) cnfg = serder.ked["c"] if TraitDex.NoBackers in cnfg: self.noBackers = True def update(self, serder, seqner=None, diger=None, bigers=None): ked = serder.ked ilk = ked["t"] sn = ked["s"] icp = ilk in (Ilks.iss, Ilks.bis) sn = validateSN(sn, inceptive=icp) if ilk in (Ilks.vrt,): if self.noBackers is True: raise ValidationError("invalid rotation evt {} against backerless registry {}". format(ked, self.regk)) toad, baks, cuts, adds = self.rotate(serder, sn=sn) bigers = self.valAnchorBigs(serder=serder, seqner=seqner, diger=diger, bigers=bigers, toad=toad, baks=baks) self.sn = sn self.serder = serder self.ilk = ilk self.toad = toad self.baks = baks self.cuts = cuts self.adds = adds self.logEvent(pre=self.prefixer.qb64b, sn=sn, serder=serder, seqner=seqner, diger=diger, bigers=bigers, baks=self.baks) return elif ilk in (Ilks.iss, Ilks.bis): self.issue(serder, seqner=seqner, diger=diger, sn=sn, bigers=bigers) elif ilk in (Ilks.rev, Ilks.brv): self.revoke(serder, seqner=seqner, diger=diger, sn=sn, bigers=bigers) else: raise ValidationError("Unsupported ilk = {} for evt = {}.".format(ilk, ked)) def rotate(self, serder, sn): ked = serder.ked pre = ked["i"] dig = ked["p"] if serder.pre != self.prefixer.qb64: raise ValidationError("Mismatch event aid prefix = {} expecting" " = {} for evt = {}.".format(ked["i"], self.prefixer.qb64, ked)) if not sn == (self.sn + 1): raise ValidationError("Invalid sn = {} expecting = {} for evt " "= {}.".format(sn, self.sn + 1, ked)) if not self.serder.compare(dig=dig): raise ValidationError("Mismatch event dig = {} with state dig" " = {} for evt = {}.".format(ked["p"], self.serder.diger.qb64, ked)) witset = oset(self.baks) cuts = ked["br"] cutset = oset(cuts) if len(cutset) != len(cuts): raise ValidationError("Invalid cuts = {}, has duplicates for evt = " "{}.".format(cuts, ked)) if (witset & cutset) != cutset: raise ValidationError("Invalid cuts = {}, not all members in baks" " for evt = {}.".format(cuts, ked)) adds = ked["ba"] addset = oset(adds) if len(addset) != len(adds): raise ValidationError("Invalid adds = {}, has duplicates for evt = " "{}.".format(adds, ked)) if cutset & addset: raise ValidationError("Intersecting cuts = {} and adds = {} for " "evt = {}.".format(cuts, adds, ked)) if witset & addset: raise ValidationError("Intersecting baks = {} and adds = {} for " "evt = {}.".format(self.baks, adds, ked)) baks = list((witset - cutset) | addset) if len(baks) != (len(self.baks) - len(cuts) + len(adds)): raise ValidationError("Invalid member combination among baks = {}, cuts ={}, " "and adds = {} for evt = {}.".format(self.baks, cuts, adds, ked)) toad = int(ked["bt"], 16) if baks: if toad < 1 or toad > len(baks): raise ValidationError("Invalid toad = {} for baks = {} for evt " "= {}.".format(toad, baks, ked)) else: if toad != 0: raise ValidationError("Invalid toad = {} for baks = {} for evt " "= {}.".format(toad, baks, ked)) return toad, baks, cuts, adds def issue(self, serder, seqner, diger, sn, bigers=None): ked = serder.ked vcpre = ked["i"] ilk = ked["t"] vci = nsKey([self.prefixer.qb64, vcpre]) labels = ISS_LABELS if ilk == Ilks.iss else BIS_LABELS for k in labels: if k not in ked: raise ValidationError("Missing element = {} from {} event for " "evt = {}.".format(k, ilk, ked)) if ilk == Ilks.iss: if self.noBackers is False: raise ValidationError("invalid 
simple issue evt {} against backer based registry {}". format(ked, self.regk)) regi = ked["ri"] if regi != self.prefixer.qb64: raise ValidationError("Mismatch event regi prefix = {} expecting" " = {} for evt = {}.".format(regi, self.prefixer.qb64, ked)) if not self.verifyAnchor(serder=serder, seqner=seqner, diger=diger): self.escrowALEvent(serder=serder) raise MissingAnchorError("Failure verify event = {} " "".format(serder.ked, )) self.logEvent(pre=vci, sn=sn, serder=serder, seqner=seqner, diger=diger) elif ilk == Ilks.bis: if self.noBackers is True: raise ValidationError("invalid backer issue evt {} against backerless registry {}". format(ked, self.regk)) rtoad, baks = self.getBackerState(ked) bigers = self.valAnchorBigs(serder=serder, seqner=seqner, diger=diger, bigers=bigers, toad=rtoad, baks=baks) self.logEvent(pre=vci, sn=sn, serder=serder, seqner=seqner, diger=diger, bigers=bigers) else: raise ValidationError("Unsupported ilk = {} for evt = {}.".format(ilk, ked)) def revoke(self, serder, seqner, diger, sn, bigers=None): ked = serder.ked vcpre = ked["i"] ilk = ked["t"] labels = REV_LABELS if ilk == Ilks.rev else BRV_LABELS for k in labels: if k not in ked: raise ValidationError("Missing element = {} from {} event for " "evt = {}.".format(k, ilk, ked)) vci = nsKey([self.prefixer.qb64, vcpre]) dig = self.reger.getTel(snKey(pre=vci, sn=sn - 1)) ievt = self.reger.getTvt(dgKey(pre=vci, dig=dig)) if ievt is None: raise ValidationError("revoke without issue... probably have to escrow") ievt = bytes(ievt) iserder = Serder(raw=ievt) if not iserder.compare(dig=ked["p"]): raise ValidationError("Mismatch event dig = {} with state dig" " = {} for evt = {}.".format(ked["p"], self.serder.diger.qb64, ked)) if ilk is Ilks.rev: if self.noBackers is False: raise ValidationError("invalid simple issue evt {} against backer based registry {}". format(ked, self.regk)) if not self.verifyAnchor(serder=serder, seqner=seqner, diger=diger): self.escrowALEvent(serder=serder) raise MissingAnchorError("Failure verify event = {} " "".format(serder.ked)) self.logEvent(pre=vci, sn=sn, serder=serder, seqner=seqner, diger=diger) elif ilk is Ilks.brv: if self.noBackers is True: raise ValidationError("invalid backer issue evt {} against backerless registry {}". format(ked, self.regk)) rtoad, baks = self.getBackerState(ked) bigers = self.valAnchorBigs(serder=serder, seqner=seqner, diger=diger, bigers=bigers, toad=rtoad, baks=baks) self.logEvent(pre=vci, sn=sn, serder=serder, seqner=seqner, diger=diger, bigers=bigers) else: raise ValidationError("Unsupported ilk = {} for evt = {}.".format(ilk, ked))
Apache License 2.0
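A small sketch of checking a credential's state with the method above; the Tever instance and the VC identifier are assumed to exist already.

# `tever` is an already-constructed Tever and `vc_said` a VC identifier;
# both are assumed for this sketch.
state = tever.vcState(vc_said)
if state is None:
    print("VC was never issued from this registry")
elif state == VcStates.issued:
    print("VC is currently issued")
elif state == VcStates.revoked:
    print("VC has been revoked")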
bioconda/bioconda-utils
bioconda_utils/githandler.py
BiocondaRepoMixin.get_unblacklisted
python
def get_unblacklisted(self, ref=None, other=None):
    merge_base = self.get_merge_base(ref, other)
    orig_blacklist = self.get_blacklisted(merge_base)
    cur_blacklist = self.get_blacklisted(ref)
    return orig_blacklist.difference(cur_blacklist)
Get recipes unblacklisted by a merge of **ref** into **other** Args: ref: Branch or commit or reference, defaults to current branch other: Same as **ref**, defaults to ``origin/master`` Returns: `set` of unblacklisted recipes (full path to repo root)
https://github.com/bioconda/bioconda-utils/blob/df49b2169672255d5937b181cb86fbe08f7ebaaa/bioconda_utils/githandler.py#L450-L463
import asyncio import atexit import logging import os import re import tempfile import subprocess from typing import List, Union import git import yaml from . import utils logger = logging.getLogger(__name__) def install_gpg_key(key) -> str: proc = subprocess.run(['gpg', '--import'], input=key, stderr=subprocess.PIPE, encoding='ascii') for line in proc.stderr.splitlines(): match = re.match(r'gpg: key ([\dA-F]{8,16}): ' r'(secret key imported|already in secret keyring)', line) if match: keyid = match.group(1) break else: if r'\n' in key: return install_gpg_key(key.replace(r'\n', '\n')) raise ValueError(f"Unable to import GPG key: {proc.stderr}") return keyid class GitHandlerFailure(Exception): class GitHandlerBase(): def __init__(self, repo: git.Repo, dry_run: bool, home='bioconda/bioconda-recipes', fork=None, allow_dirty=False) -> None: self.repo: git.Repo = repo if not allow_dirty and self.repo.is_dirty(): raise RuntimeError("Repository is in dirty state. Bailing out") self.dry_run = dry_run self.home_remote = self.get_remote(home) if fork is not None: self.fork_remote = self.get_remote(fork) else: self.fork_remote = self.home_remote self.lock_working_dir = asyncio.Semaphore(1) self._sign: Union[bool, str] = False self.actor: git.Actor = None def close(self): self.repo.close() def __str__(self): def get_name(remote): url = next(remote.urls) return url[url.rfind('/', 0, url.rfind('/'))+1:] name = get_name(self.home_remote) if self.fork_remote != self.home_remote: name = f"{name} <- {get_name(self.fork_remote)}" return f"{self.__class__.__name__}({name})" def enable_signing(self, key: Union[bool, str] = True) -> None: self._sign = key def get_remote(self, desc: str): if desc in [r.name for r in self.repo.remotes]: return self.repo.remotes[desc] with self.repo.config_reader() as reader: for section in reader.sections(): if section.startswith("url "): new = section.lstrip("url ").strip('"') try: old = reader.get(section, 'insteadOf') desc = desc.replace(old, new) except KeyError: pass remotes = [r for r in self.repo.remotes if any(filter(lambda x: desc in x, r.urls))] if not remotes: raise KeyError(f"No remote matching '{desc}' found") if len(remotes) > 1: logger.warning("Multiple remotes found. 
Using first") return remotes[0] async def branch_is_current(self, branch, path: str, master="master") -> bool: proc = await asyncio.create_subprocess_exec( 'git', 'log', '-1', '--oneline', '--decorate', f'{master}...{branch.name}', '--', path, stdout=asyncio.subprocess.PIPE) stdout, _ = await proc.communicate() return branch.name in stdout.decode('ascii') def delete_local_branch(self, branch) -> None: git.Reference.delete(self.repo, branch) def delete_remote_branch(self, branch_name: str) -> None: if not self.dry_run: logger.info("Deleting branch %s", branch_name) self.fork_remote.push(":" + branch_name) else: logger.info("Would delete branch %s", branch_name) def get_local_branch(self, branch_name: str): if branch_name in self.repo.branches: return self.repo.branches[branch_name] try: return self.repo.commit(branch_name) except git.BadName: pass return None @staticmethod def is_sha(ref: str) -> bool: if len(ref) == 40: try: int(ref, 16) return True except ValueError: pass return False def get_remote_branch(self, branch_name: str, try_fetch=False): if branch_name in self.fork_remote.refs: return self.fork_remote.refs[branch_name] if not self.is_sha(branch_name): return None depths = (0, 50, 200) if try_fetch else (None,) for depth in depths: logger.info("Trying depth %s", depth) try: if depth: self.fork_remote.fetch(depth=depth) remote_refs = self.fork_remote.fetch(branch_name, depth=depth) else: remote_refs = self.fork_remote.fetch(branch_name) break except git.GitCommandError: pass else: logger.info("Failed to fetch %s", branch_name) return None for remote_ref in remote_refs: if remote_ref.remote_ref_path == branch_name: return remote_ref.ref def get_latest_master(self): return self.home_remote.fetch('master')[0].commit def read_from_branch(self, branch, file_name: str) -> str: abs_file_name = os.path.abspath(file_name) abs_repo_root = os.path.abspath(self.repo.working_dir) if not abs_file_name.startswith(abs_repo_root): raise RuntimeError( f"File {abs_file_name} not inside {abs_repo_root}" ) rel_file_name = abs_file_name[len(abs_repo_root):].lstrip("/") commit = getattr(branch, 'commit', branch) blob = commit.tree / rel_file_name if blob: return blob.data_stream.read().decode("utf-8") logger.error("File %s not found on branch %s commit %s", rel_file_name, branch, commit) return None def create_local_branch(self, branch_name: str, remote_branch: str = None): if remote_branch is None: remote_branch = self.get_remote_branch(branch_name, try_fetch=False) else: remote_branch = self.get_remote_branch(remote_branch, try_fetch=False) if remote_branch is None: return None self.repo.create_head(branch_name, remote_branch) return self.get_local_branch(branch_name) def get_merge_base(self, ref=None, other=None, try_fetch=False): if not ref: ref = self.repo.active_branch.commit if not other: other = self.home_remote.refs.master depths = (0, 50, 200) if try_fetch else (0,) for depth in depths: if depth: self.fork_remote.fetch(ref, depth=depth) self.home_remote.fetch('master', depth=depth) merge_bases = self.repo.merge_base(other, ref) if merge_bases: break logger.debug("No merge base found for %s and master at depth %i", ref, depth) else: logger.error("No merge base found for %s and master", ref) return None if len(merge_bases) > 1: logger.error("Multiple merge bases found for %s and master: %s", ref, merge_bases) return merge_bases[0] def list_changed_files(self, ref=None, other=None): if not ref: ref = self.repo.active_branch.commit merge_base = self.get_merge_base(ref, other) for diffobj in 
merge_base.diff(ref): if not diffobj.deleted_file: yield diffobj.b_path def list_modified_files(self): seen = set() for diffobj in self.repo.index.diff(None): for fname in (diffobj.a_path, diffobj.b_path): if fname not in seen: seen.add(fname) yield fname def prepare_branch(self, branch_name: str) -> None: if branch_name not in self.repo.heads: logger.info("Creating new branch %s", branch_name) from_commit = self.get_latest_master() self.repo.create_head(branch_name, from_commit) logger.info("Checking out branch %s", branch_name) branch = self.repo.heads[branch_name] branch.checkout() def commit_and_push_changes(self, files: List[str], branch_name: str, msg: str, sign=False) -> bool: if branch_name is None: branch_name = self.repo.active_branch.name if not files: files = list(self.list_modified_files()) self.repo.index.add(files) if not self.repo.index.diff("HEAD"): return False if self._sign and not sign: sign = self._sign if sign: args = [ '-S' + sign if isinstance(sign, str) else '-S', '-m', msg, ] if self.actor: args += ['--author', f'{self.actor.name} <{self.actor.email}>'] self.repo.index.write() self.repo.git.commit(*args) else: if self.actor: self.repo.index.commit(msg, author=self.actor) else: self.repo.index.commit(msg) if not self.dry_run: logger.info("Pushing branch %s", branch_name) try: res = self.fork_remote.push(branch_name) failed = res[0].flags & ~(git.PushInfo.FAST_FORWARD | git.PushInfo.NEW_HEAD) text = res[0].summary except git.GitCommandError as exc: failed = True text = str(exc) if failed: logger.error("Failed to push branch %s: %s", branch_name, text) raise GitHandlerFailure(text) else: logger.info("Would push branch %s", branch_name) return True def set_user(self, user: str, email: str = None) -> None: self.actor = git.Actor(user, email) class BiocondaRepoMixin(GitHandlerBase): recipes_folder = "recipes" config_file = "config.yml" def get_changed_recipes(self, ref=None, other=None, files=None): if files is None: files = ['meta.yaml', 'build.sh'] changed = set() for path in self.list_changed_files(ref, other): if not path.startswith(self.recipes_folder): continue for fname in files: if os.path.basename(path) == fname: changed.add(os.path.dirname(path)) return list(changed) def get_blacklisted(self, ref=None): if ref is None: branch = self.repo.active_branch elif isinstance(ref, str): branch = self.get_local_branch(ref) else: branch = ref config_data = self.read_from_branch(branch, self.config_file) config = yaml.safe_load(config_data) blacklists = config['blacklists'] blacklisted = set() for blacklist in blacklists: blacklist_data = self.read_from_branch(branch, blacklist) for line in blacklist_data.splitlines(): if line.startswith("#") or not line.strip(): continue recipe_folder, _, _ = line.partition(" #") blacklisted.add(recipe_folder.strip()) return blacklisted
MIT License
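A usage sketch under the assumption that some concrete handler class mixes BiocondaRepoMixin into GitHandlerBase (no such class is shown in this record); the branch name is a placeholder.

# `repo_handler` is assumed to be an instance of a class combining
# GitHandlerBase with BiocondaRepoMixin.
newly_unblacklisted = repo_handler.get_unblacklisted("bump-recipe-branch",
                                                     other="origin/master")
for recipe_dir in sorted(newly_unblacklisted):
    print("unblacklisted:", recipe_dir)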
ebranca/owasp-pysec
demo/import/paypal/exceptions.py
PayPalError._get_message
python
def _get_message(self): return self._message
Return the message stored on the error.
https://github.com/ebranca/owasp-pysec/blob/163e10a146db04f40648979e8d7c0c10e7737781/demo/import/paypal/exceptions.py#L22-L26
class PayPalError(Exception): def __init__(self, message, error_code=None): self.message = message self.error_code = error_code def __str__(self): if self.error_code: return "%s (Error Code: %s)" % (repr(self.message), self.error_code) else: return repr(self.message)
Apache License 2.0
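A short sketch exercising the exception class shown above; the message and error code are illustrative values only.

try:
    raise PayPalError("Invalid API credentials", error_code=10002)
except PayPalError as err:
    print(err)          # "'Invalid API credentials' (Error Code: 10002)"
    print(err.message)  # the raw message set in __init__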
ictu/quality-time
components/collector/src/source_collectors/gitlab/merge_requests.py
GitLabMergeRequests._get_merge_request_response
python
async def _get_merge_request_response(
    self, client: GraphQLClient, approved_field: str, cursor: str = ""
) -> tuple[aiohttp.ClientResponse, bool, str]:
    pagination = f'(after: "{cursor}")' if cursor else ""
    merge_request_query = MERGE_REQUEST_QUERY.format(pagination=pagination, approved=approved_field)
    response = await client.execute(merge_request_query, variables=dict(projectId=self._parameter("project")))
    json = await response.json()
    page_info = json["data"]["project"]["mergeRequests"]["pageInfo"]
    return response, page_info["hasNextPage"], page_info.get("endCursor", "")
Return the merge request response, whether there are more pages, and a cursor to the next page, if any.
https://github.com/ictu/quality-time/blob/4bd5df14f584dcc174276da0d2ddb6fcfaf1d427/components/collector/src/source_collectors/gitlab/merge_requests.py#L94-L103
from typing import cast import aiohttp from aiogqlc import GraphQLClient from collector_utilities.functions import match_string_or_regular_expression from collector_utilities.type import URL, Value from model import Entities, Entity, SourceResponses from .base import GitLabBase MERGE_REQUEST_FIELDS_QUERY = """ { __type(name: "MergeRequest") { fields { name } } } """ MERGE_REQUEST_QUERY = """ query MRs($projectId: ID!) {{ project(fullPath: $projectId) {{ mergeRequests{pagination} {{ count pageInfo {{ endCursor hasNextPage }} nodes {{ id state title targetBranch webUrl upvotes downvotes createdAt updatedAt mergedAt {approved} }} }} }} }} """ class GitLabMergeRequests(GitLabBase): APPROVED_FIELD = "approved" async def _landing_url(self, responses: SourceResponses) -> URL: return URL(f"{str(await super()._landing_url(responses))}/{self._parameter('project')}/-/merge_requests") async def _get_source_responses(self, *urls: URL, **kwargs) -> SourceResponses: api_url = await self._api_url() timeout = aiohttp.ClientTimeout(total=120) async with aiohttp.ClientSession(raise_for_status=True, timeout=timeout, headers=self._headers()) as session: client = GraphQLClient(f"{api_url}/api/graphql", session=session) approved_field = await self._approved_field(client) responses, has_next_page, cursor = SourceResponses(), True, "" while has_next_page: response, has_next_page, cursor = await self._get_merge_request_response(client, approved_field, cursor) responses.append(response) return responses @classmethod async def _approved_field(cls, client: GraphQLClient) -> str: response = await client.execute(MERGE_REQUEST_FIELDS_QUERY) json = await response.json() fields = [field["name"] for field in json["data"]["__type"]["fields"]] return cls.APPROVED_FIELD if cls.APPROVED_FIELD in fields else ""
Apache License 2.0
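The collector walks GraphQL pages with an endCursor/hasNextPage loop, as in _get_source_responses above; a self-contained sketch of that loop shape, with a stand-in fetch_page coroutine instead of the real GraphQL call, looks like this.

import asyncio

# Stand-in for one GraphQL round-trip (the collector's _get_merge_request_response);
# it serves two canned pages so the loop shape is runnable on its own.
async def fetch_page(cursor=""):
    pages = {
        "": ({"nodes": ["MR-1", "MR-2"]}, True, "cursor-1"),
        "cursor-1": ({"nodes": ["MR-3"]}, False, ""),
    }
    return pages[cursor]

async def fetch_all_pages():
    responses, has_next_page, cursor = [], True, ""
    while has_next_page:
        page, has_next_page, cursor = await fetch_page(cursor)
        responses.append(page)
    return responses

print(asyncio.run(fetch_all_pages()))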
lscsoft/bilby
bilby/gw/conversion.py
convert_to_lal_binary_black_hole_parameters
python
def convert_to_lal_binary_black_hole_parameters(parameters):
    converted_parameters = parameters.copy()
    original_keys = list(converted_parameters.keys())
    if 'redshift' in converted_parameters.keys():
        converted_parameters['luminosity_distance'] = redshift_to_luminosity_distance(parameters['redshift'])
    elif 'comoving_distance' in converted_parameters.keys():
        converted_parameters['luminosity_distance'] = comoving_distance_to_luminosity_distance(
            parameters['comoving_distance'])
    for key in original_keys:
        if key[-7:] == '_source':
            if 'redshift' not in converted_parameters.keys():
                converted_parameters['redshift'] = luminosity_distance_to_redshift(
                    parameters['luminosity_distance'])
            converted_parameters[key[:-7]] = converted_parameters[key] * (
                1 + converted_parameters['redshift'])
    if 'chirp_mass' in converted_parameters.keys():
        if "mass_1" in converted_parameters.keys():
            converted_parameters["mass_ratio"] = chirp_mass_and_primary_mass_to_mass_ratio(
                converted_parameters["chirp_mass"], converted_parameters["mass_1"])
        if 'total_mass' in converted_parameters.keys():
            converted_parameters['symmetric_mass_ratio'] = chirp_mass_and_total_mass_to_symmetric_mass_ratio(
                converted_parameters['chirp_mass'], converted_parameters['total_mass'])
        if 'symmetric_mass_ratio' in converted_parameters.keys():
            converted_parameters['mass_ratio'] = symmetric_mass_ratio_to_mass_ratio(
                converted_parameters['symmetric_mass_ratio'])
        if 'total_mass' not in converted_parameters.keys():
            converted_parameters['total_mass'] = chirp_mass_and_mass_ratio_to_total_mass(
                converted_parameters['chirp_mass'], converted_parameters['mass_ratio'])
        converted_parameters['mass_1'], converted_parameters['mass_2'] = total_mass_and_mass_ratio_to_component_masses(
            converted_parameters['mass_ratio'], converted_parameters['total_mass'])
    elif 'total_mass' in converted_parameters.keys():
        if 'symmetric_mass_ratio' in converted_parameters.keys():
            converted_parameters['mass_ratio'] = symmetric_mass_ratio_to_mass_ratio(
                converted_parameters['symmetric_mass_ratio'])
        if 'mass_ratio' in converted_parameters.keys():
            converted_parameters['mass_1'], converted_parameters['mass_2'] = total_mass_and_mass_ratio_to_component_masses(
                converted_parameters['mass_ratio'], converted_parameters['total_mass'])
        elif 'mass_1' in converted_parameters.keys():
            converted_parameters['mass_2'] = converted_parameters['total_mass'] - converted_parameters['mass_1']
        elif 'mass_2' in converted_parameters.keys():
            converted_parameters['mass_1'] = converted_parameters['total_mass'] - converted_parameters['mass_2']
    elif 'symmetric_mass_ratio' in converted_parameters.keys():
        converted_parameters['mass_ratio'] = symmetric_mass_ratio_to_mass_ratio(
            converted_parameters['symmetric_mass_ratio'])
        if 'mass_1' in converted_parameters.keys():
            converted_parameters['mass_2'] = converted_parameters['mass_1'] * converted_parameters['mass_ratio']
        elif 'mass_2' in converted_parameters.keys():
            converted_parameters['mass_1'] = converted_parameters['mass_2'] / converted_parameters['mass_ratio']
    elif 'mass_ratio' in converted_parameters.keys():
        if 'mass_1' in converted_parameters.keys():
            converted_parameters['mass_2'] = converted_parameters['mass_1'] * converted_parameters['mass_ratio']
        if 'mass_2' in converted_parameters.keys():
            converted_parameters['mass_1'] = converted_parameters['mass_2'] / converted_parameters['mass_ratio']

    for idx in ['1', '2']:
        key = 'chi_{}'.format(idx)
        if key in original_keys:
            if "chi_{}_in_plane".format(idx) in original_keys:
                converted_parameters["a_{}".format(idx)] = (
                    converted_parameters[f"chi_{idx}"] ** 2
                    + converted_parameters[f"chi_{idx}_in_plane"] ** 2
                ) ** 0.5
                converted_parameters[f"cos_tilt_{idx}"] = (
                    converted_parameters[f"chi_{idx}"]
                    / converted_parameters[f"a_{idx}"]
                )
            elif "a_{}".format(idx) not in original_keys:
                converted_parameters['a_{}'.format(idx)] = abs(
                    converted_parameters[key])
                converted_parameters['cos_tilt_{}'.format(idx)] = np.sign(converted_parameters[key])
                converted_parameters['phi_jl'] = 0.0
                converted_parameters['phi_12'] = 0.0
            else:
                converted_parameters[f"cos_tilt_{idx}"] = (
                    converted_parameters[key]
                    / converted_parameters[f"a_{idx}"]
                )

    for angle in ['tilt_1', 'tilt_2', 'theta_jn']:
        cos_angle = str('cos_' + angle)
        if cos_angle in converted_parameters.keys():
            with np.errstate(invalid="ignore"):
                converted_parameters[angle] = np.arccos(converted_parameters[cos_angle])

    if "delta_phase" in original_keys:
        with np.errstate(invalid="ignore"):
            converted_parameters["phase"] = np.mod(
                converted_parameters["delta_phase"]
                - np.sign(np.cos(converted_parameters["theta_jn"]))
                * converted_parameters["psi"],
                2 * np.pi
            )

    added_keys = [key for key in converted_parameters.keys()
                  if key not in original_keys]

    return converted_parameters, added_keys
Convert parameters we have into parameters we need.

This is defined by the parameters of bilby.source.lal_binary_black_hole()

Mass: mass_1, mass_2
Spin: a_1, a_2, tilt_1, tilt_2, phi_12, phi_jl
Extrinsic: luminosity_distance, theta_jn, phase, ra, dec, geocent_time, psi

This involves popping a lot of things from parameters.
The keys in added_keys should be popped after evaluating the waveform.

Parameters
==========
parameters: dict
    dictionary of parameter values to convert into the required parameters

Returns
=======
converted_parameters: dict
    dict of the required parameters
added_keys: list
    keys which are added to parameters during function call
https://github.com/lscsoft/bilby/blob/b1e02f1dfae03d4939cae9c95eff300c22919689/bilby/gw/conversion.py#L121-L273
import sys import multiprocessing import numpy as np from pandas import DataFrame from ..core.likelihood import MarginalizedLikelihoodReconstructionError from ..core.utils import logger, solar_mass from ..core.prior import DeltaFunction from .utils import lalsim_SimInspiralTransformPrecessingNewInitialConditions from .eos.eos import SpectralDecompositionEOS, EOSFamily, IntegrateTOV from .cosmology import get_cosmology def redshift_to_luminosity_distance(redshift, cosmology=None): cosmology = get_cosmology(cosmology) return cosmology.luminosity_distance(redshift).value def redshift_to_comoving_distance(redshift, cosmology=None): cosmology = get_cosmology(cosmology) return cosmology.comoving_distance(redshift).value @np.vectorize def luminosity_distance_to_redshift(distance, cosmology=None): from astropy import units from astropy.cosmology import z_at_value cosmology = get_cosmology(cosmology) return z_at_value(cosmology.luminosity_distance, distance * units.Mpc) @np.vectorize def comoving_distance_to_redshift(distance, cosmology=None): from astropy import units from astropy.cosmology import z_at_value cosmology = get_cosmology(cosmology) return z_at_value(cosmology.comoving_distance, distance * units.Mpc) def comoving_distance_to_luminosity_distance(distance, cosmology=None): cosmology = get_cosmology(cosmology) redshift = comoving_distance_to_redshift(distance, cosmology) return redshift_to_luminosity_distance(redshift, cosmology) def luminosity_distance_to_comoving_distance(distance, cosmology=None): cosmology = get_cosmology(cosmology) redshift = luminosity_distance_to_redshift(distance, cosmology) return redshift_to_comoving_distance(redshift, cosmology) def bilby_to_lalsimulation_spins( theta_jn, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2, mass_1, mass_2, reference_frequency, phase): if (a_1 == 0 or tilt_1 in [0, np.pi]) and (a_2 == 0 or tilt_2 in [0, np.pi]): spin_1x = 0 spin_1y = 0 spin_1z = a_1 * np.cos(tilt_1) spin_2x = 0 spin_2y = 0 spin_2z = a_2 * np.cos(tilt_2) iota = theta_jn else: iota, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z = transform_precessing_spins( theta_jn, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2, mass_1, mass_2, reference_frequency, phase) return iota, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z @np.vectorize def transform_precessing_spins(theta_jn, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2, mass_1, mass_2, reference_frequency, phase): iota, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z = ( lalsim_SimInspiralTransformPrecessingNewInitialConditions( theta_jn, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2, mass_1, mass_2, reference_frequency, phase)) return iota, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z
MIT License
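A minimal usage sketch for `convert_to_lal_binary_black_hole_parameters` above, assuming `bilby` is installed and the function is imported from `bilby.gw.conversion` (the module shown in this record's path); all parameter values below are made-up illustrations, not physical results.

```python
from bilby.gw.conversion import convert_to_lal_binary_black_hole_parameters

# Start from chirp mass and mass ratio; the converter derives mass_1 and mass_2.
parameters = {
    'chirp_mass': 30.0,            # solar masses (illustrative)
    'mass_ratio': 0.8,
    'luminosity_distance': 400.0,  # Mpc (illustrative)
    'a_1': 0.3, 'a_2': 0.1,
    'tilt_1': 0.5, 'tilt_2': 1.0,
    'phi_12': 1.7, 'phi_jl': 0.3,
    'theta_jn': 0.4, 'phase': 1.3, 'psi': 2.659,
    'ra': 1.375, 'dec': -1.2108, 'geocent_time': 1126259642.413,
}

converted, added_keys = convert_to_lal_binary_black_hole_parameters(parameters)
print(converted['mass_1'], converted['mass_2'])  # component masses filled in by the converter
print(added_keys)  # keys to pop again after the waveform has been evaluated
```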
lorraine2/implicit-hyper-opt
train_augment_net_graph.py
init_ax
python
def init_ax(fontsize=24, nrows=1, ncols=1):
    font = {'family': 'Times New Roman'}
    mpl.rc('font', **font)
    mpl.rcParams['legend.fontsize'] = fontsize
    mpl.rcParams['axes.labelsize'] = fontsize
    mpl.rcParams['xtick.labelsize'] = fontsize
    mpl.rcParams['ytick.labelsize'] = fontsize
    mpl.rcParams['axes.grid'] = False
    fig = plt.figure(figsize=(6.4 / np.sqrt(nrows), 4.8 * nrows / np.sqrt(nrows)))
    axs = [fig.add_subplot(nrows, ncols, i + 1) for i in range(nrows * ncols)]
    for ax in axs:
        ax.tick_params(axis='x', which='both', bottom=False, top=False)
        ax.tick_params(axis='y', which='both', left=False, right=False)
        ax.grid(False)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
    return fig, axs
:param fontsize: base font size applied to the legend, axis labels, and tick labels
:param nrows: number of subplot rows
:param ncols: number of subplot columns
:return: the matplotlib figure and the list of created axes
https://github.com/lorraine2/implicit-hyper-opt/blob/7e8ccadd2a1b66ff4814301b3cf091c448214182/train_augment_net_graph.py#L110-L132
from tqdm import tqdm import copy import torch import csv import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl def save_learned(datas, is_mnist, batch_size, name, path='images'): saturation_multiple = 1 if not is_mnist: saturation_multiple = 1 datas = torch.sigmoid(datas.detach() * saturation_multiple).cpu().numpy() col_size = 10 row_size = batch_size // col_size if batch_size % row_size != 0: row_size += 1 fig = plt.figure(figsize=(col_size, row_size)) for i, data in enumerate(datas): ax = plt.subplot(row_size, col_size, i + 1) if is_mnist: plt.imshow(data[0], cmap='gray', interpolation='gaussian') else: plt.imshow(np.transpose(data, (1, 2, 0)), interpolation='gaussian') plt.xticks([]) plt.yticks([]) ax.set_aspect('auto') plt.subplots_adjust(wspace=0.05 * col_size / row_size, hspace=0.05) plt.draw() fig.savefig(f'{path}/{name}.pdf') plt.close(fig) def save_images(images, labels, augment_net, args): is_mnist = False if args.dataset == 'mnist': is_mnist = True num_save = 10 save_learned(images[:num_save], is_mnist, num_save, 'image_Original', path=args.save_loc) num_sample = 10 augs = torch.zeros(num_sample, num_save, images.shape[1], images.shape[2], images.shape[3]) for i in range(num_sample): augs[i] = augment_net(images[:num_save], class_label=labels[:num_save]) aug_1 = augment_net(images[:num_save], class_label=labels[:num_save]) save_learned(aug_1, is_mnist, num_save, 'image_Augment', path=args.save_loc) aug_2 = augment_net(images[:num_save], use_zero_noise=True, class_label=labels[:num_save]) save_learned(aug_2, is_mnist, num_save, 'image_Augment2', path=args.save_loc) std_augs = torch.std(augs, dim=0).cuda() std_augs = torch.log(std_augs) save_learned(std_augs, is_mnist, num_save, 'image_AugmentDiff', path=args.save_loc) mean_augs = torch.mean(augs, dim=0).cuda() save_learned(mean_augs - images[:num_save], is_mnist, num_save, 'image_OriginalDiff', path=args.save_loc) def graph_single_args(args, save_loc=None): from train_augment_net_multiple import get_id if save_loc is None: args.save_loc = finetune_location + get_id(args) else: args.save_loc = save_loc args.load_baseline_checkpoint = None args.load_finetune_checkpoint = args.save_loc + '/checkpoint.pt' args.data_augmentation = False from train_augment_net2 import get_models model, train_loader, val_loader, test_loader, augment_net, reweighting_net, checkpoint = get_models(args) progress_bar = tqdm(train_loader) for i, (images, labels) in enumerate(progress_bar): images, labels = images.cuda(), labels.cuda() save_images(images, labels, augment_net, args)
MIT License
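A short usage sketch for `init_ax` above, assuming matplotlib and numpy are imported as in the record's context (`plt`, `np`); the plotted data and output file name are arbitrary.

```python
import numpy as np

# Two stacked panels sharing the Times New Roman styling set up by init_ax.
fig, axs = init_ax(fontsize=18, nrows=2, ncols=1)
x = np.linspace(0, 1, 100)
axs[0].plot(x, x ** 2)
axs[1].plot(x, np.sqrt(x))
fig.savefig('example_panels.pdf')  # hypothetical output file name
```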
openvax/varcode
varcode/effects/effect_collection.py
EffectCollection.top_priority_effect_per_transcript_id
python
def top_priority_effect_per_transcript_id(self):
    return OrderedDict(
        (transcript_id, top_priority_effect(variant_effects))
        for (transcript_id, variant_effects)
        in self.groupby_transcript_id().items())
Highest priority effect for each unique transcript ID
https://github.com/openvax/varcode/blob/a51a7dd0868ef05aee4962e66a9226ab70935a3d/varcode/effects/effect_collection.py#L218-L223
from __future__ import print_function, division, absolute_import from collections import OrderedDict import pandas as pd from sercol import Collection from .effect_ordering import ( effect_priority, multi_gene_effect_sort_key, top_priority_effect, transcript_effect_priority_dict ) class EffectCollection(Collection): def __init__(self, effects, distinct=False, sort_key=None, sources=set([])): self.effects = effects Collection.__init__( self, elements=effects, distinct=distinct, sort_key=sort_key, sources=sources) def to_dict(self): return dict( effects=self.effects, sort_key=self.sort_key, distinct=self.distinct, sources=self.sources) def clone_with_new_elements(self, new_elements): return Collection.clone_with_new_elements( self, new_elements, rename_dict={"elements": "effects"}) def groupby_variant(self): return self.groupby(key_fn=lambda effect: effect.variant) def groupby_transcript(self): return self.groupby(key_fn=lambda effect: effect.transcript) def groupby_transcript_name(self): return self.groupby(key_fn=lambda effect: effect.transcript_name) def groupby_transcript_id(self): return self.groupby(key_fn=lambda effect: effect.transcript_id) def groupby_gene(self): return self.groupby(key_fn=lambda effect: effect.gene) def groupby_gene_name(self): return self.groupby(key_fn=lambda effect: effect.gene_name) def groupby_gene_id(self): return self.groupby(key_fn=lambda effect: effect.gene_id) def gene_counts(self): return { gene_name: len(group) for (gene_name, group) in self.groupby_gene_name().items() } def filter_by_transcript_expression( self, transcript_expression_dict, min_expression_value=0.0): return self.filter_above_threshold( key_fn=lambda effect: effect.transcript_id, value_dict=transcript_expression_dict, threshold=min_expression_value) def filter_by_gene_expression( self, gene_expression_dict, min_expression_value=0.0): return self.filter_above_threshold( key_fn=lambda effect: effect.gene_id, value_dict=gene_expression_dict, threshold=min_expression_value) def filter_by_effect_priority(self, min_priority_class): min_priority = transcript_effect_priority_dict[min_priority_class] return self.filter( lambda effect: effect_priority(effect) >= min_priority) def drop_silent_and_noncoding(self): return self.filter(lambda effect: effect.modifies_protein_sequence) def detailed_string(self): lines = [] for variant, variant_effects in self.groupby_variant().items(): lines.append("\n%s" % variant) gene_effects_groups = variant_effects.groupby_gene_id() for (gene_id, gene_effects) in gene_effects_groups.items(): if gene_id: gene_name = variant.ensembl.gene_name_of_gene_id(gene_id) lines.append(" Gene: %s (%s)" % (gene_name, gene_id)) for effect in sorted( gene_effects, key=effect_priority, reverse=True): lines.append(" -- %s" % effect) if len(variant_effects) > 1: best = variant_effects.top_priority_effect() lines.append(" Highest Priority Effect: %s" % best) return "\n".join(lines) def top_priority_effect(self): return top_priority_effect(self.elements) def top_priority_effect_per_variant(self): return OrderedDict( (variant, top_priority_effect(variant_effects)) for (variant, variant_effects) in self.groupby_variant().items())
Apache License 2.0
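A minimal sketch of calling `top_priority_effect_per_transcript_id` above, assuming an `EffectCollection` named `effects` has already been obtained (for example from a varcode `Variant`'s `effects()` method).

```python
# effects: an EffectCollection, e.g. effects = some_variant.effects()
per_transcript = effects.top_priority_effect_per_transcript_id()

for transcript_id, effect in per_transcript.items():
    # Each value is the single highest-priority effect predicted for that transcript.
    print(transcript_id, effect)
```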
openstack/pbr
pbr/git.py
_iter_changelog
python
def _iter_changelog(changelog):
    first_line = True
    current_release = None
    yield current_release, "CHANGES\n=======\n\n"
    for hash, tags, msg in changelog:
        if tags:
            current_release = _get_highest_tag(tags)
            underline = len(current_release) * '-'
            if not first_line:
                yield current_release, '\n'
            yield current_release, (
                "%(tag)s\n%(underline)s\n\n" %
                dict(tag=current_release, underline=underline))

        if not msg.startswith("Merge "):
            if msg.endswith("."):
                msg = msg[:-1]
            msg = _clean_changelog_message(msg)
            yield current_release, "* %(msg)s\n" % dict(msg=msg)
        first_line = False
Convert a one-line log iterator to formatted strings.

:param changelog: An iterator of one-line log entries like that given
    by _iter_log_oneline.
:return: An iterator over (release, formatted changelog) tuples.
https://github.com/openstack/pbr/blob/4849d82da7e2f689206f980a6e609bc474fc6b48/pbr/git.py#L166-L191
from __future__ import unicode_literals import distutils.errors from distutils import log import errno import io import os import re import subprocess import time import pkg_resources from pbr import options from pbr import version def _run_shell_command(cmd, throw_on_error=False, buffer=True, env=None): if buffer: out_location = subprocess.PIPE err_location = subprocess.PIPE else: out_location = None err_location = None newenv = os.environ.copy() if env: newenv.update(env) output = subprocess.Popen(cmd, stdout=out_location, stderr=err_location, env=newenv) out = output.communicate() if output.returncode and throw_on_error: raise distutils.errors.DistutilsError( "%s returned %d" % (cmd, output.returncode)) if len(out) == 0 or not out[0] or not out[0].strip(): return '' return out[0].strip().decode('utf-8', 'replace') def _run_git_command(cmd, git_dir, **kwargs): if not isinstance(cmd, (list, tuple)): cmd = [cmd] return _run_shell_command( ['git', '--git-dir=%s' % git_dir] + cmd, **kwargs) def _get_git_directory(): try: return _run_shell_command(['git', 'rev-parse', '--git-dir']) except OSError as e: if e.errno == errno.ENOENT: return '' raise def _git_is_installed(): try: _run_shell_command(['git', '--version']) except OSError: return False return True def _get_highest_tag(tags): return max(tags, key=pkg_resources.parse_version) def _find_git_files(dirname='', git_dir=None): file_list = [] if git_dir is None: git_dir = _run_git_functions() if git_dir: log.info("[pbr] In git context, generating filelist from git") file_list = _run_git_command(['ls-files', '-z'], git_dir) file_list = file_list.split(b'\x00'.decode('utf-8')) return [f for f in file_list if f] def _get_raw_tag_info(git_dir): describe = _run_git_command(['describe', '--always'], git_dir) if "-" in describe: return describe.rsplit("-", 2)[-2] if "." in describe: return 0 return None def get_is_release(git_dir): return _get_raw_tag_info(git_dir) == 0 def _run_git_functions(): git_dir = None if _git_is_installed(): git_dir = _get_git_directory() return git_dir or None def get_git_short_sha(git_dir=None): if not git_dir: git_dir = _run_git_functions() if git_dir: return _run_git_command( ['log', '-n1', '--pretty=format:%h'], git_dir) return None def _clean_changelog_message(msg): msg = msg.replace('*', r'\*') msg = msg.replace('_', r'\_') msg = msg.replace('`', r'\`') return msg
Apache License 2.0
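To make the expected input concrete, a small sketch of driving `_iter_changelog` above with a hand-built iterable of `(hash, tags, message)` tuples in the shape `_iter_log_oneline` would produce; the hashes, tags, and messages are invented.

```python
changelog = [
    ("a1b2c3d", {"2.0.0"}, "Add support for feature_x."),
    ("d4e5f6a", set(), "Fix crash when *args is empty."),
    ("b7c8d9e", {"1.9.0"}, "Merge branch 'stable'"),
]

for release, chunk in _iter_changelog(changelog):
    # `release` is the tag the chunk belongs to; `chunk` is a piece of the
    # generated reStructuredText (headers, underlines, bullet entries).
    print(repr(release), repr(chunk))
```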
scottfreellc/alphapy
alphapy/transforms.py
dminus
python
def dminus(f):
    c1 = 'downmove'
    f[c1] = -net(f, 'low')
    c2 = 'upmove'
    f[c2] = net(f, 'high')
    new_column = f.apply(gtval0, axis=1, args=[c1, c2])
    return new_column
Calculate the Minus Directional Movement (-DM).

Parameters
----------
f : pandas.DataFrame
    Dataframe with columns ``high`` and ``low``.

Returns
-------
new_column : pandas.Series (float)
    The array containing the new feature.

References
----------
*Directional movement is negative (minus) when the prior low minus
the current low is greater than the current high minus the prior high.
This so-called Minus Directional Movement (-DM) equals the prior low
minus the current low, provided it is positive. A negative value
would simply be entered as zero* [SC_ADX]_.
https://github.com/scottfreellc/alphapy/blob/fb0a52a822f3c5c58a9adf4c038204dea7d78f29/alphapy/transforms.py#L304-L331
from alphapy.calendrical import biz_day_month from alphapy.calendrical import biz_day_week from alphapy.globals import NULLTEXT from alphapy.globals import BSEP, PSEP, USEP from alphapy.variables import vexec import itertools import logging import math import numpy as np import pandas as pd logger = logging.getLogger(__name__) def abovema(f, c, p = 50): new_column = f[c] > ma(f, c, p) return new_column def adx(f, p = 14): c1 = 'diplus' vexec(f, c1) c2 = 'diminus' vexec(f, c2) dip = f[c1] dim = f[c2] didiff = abs(dip - dim) disum = dip + dim new_column = 100 * didiff.ewm(span=p).mean() / disum return new_column def belowma(f, c, p = 50): new_column = f[c] < ma(f, c, p) return new_column def c2max(f, c1, c2): max_val = max(f[c1], f[c2]) return max_val def c2min(f, c1, c2): min_val = min(f[c1], f[c2]) return min_val def diff(f, c, n = 1): new_column = np.diff(f[c], n) return new_column def diminus(f, p = 14): tr = 'truerange' vexec(f, tr) atr = USEP.join(['atr', str(p)]) vexec(f, atr) dmm = 'dmminus' f[dmm] = dminus(f) new_column = 100 * dminus(f).ewm(span=p).mean() / f[atr] return new_column def diplus(f, p = 14): tr = 'truerange' vexec(f, tr) atr = USEP.join(['atr', str(p)]) vexec(f, atr) dmp = 'dmplus' vexec(f, dmp) new_column = 100 * f[dmp].ewm(span=p).mean() / f[atr] return new_column
Apache License 2.0
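A toy worked sketch of the -DM definition quoted in the docstring above, assuming `dminus` (and the `net`/`gtval0` helpers it relies on) is importable from `alphapy.transforms`; the OHLC values are invented.

```python
import pandas as pd
from alphapy.transforms import dminus

frame = pd.DataFrame({
    'high': [10.0, 10.2, 10.1],
    'low':  [ 9.5,  9.1,  9.3],
})

frame['dmminus'] = dminus(frame)
print(frame)
# Per the quoted definition, the second bar should yield -DM = 9.5 - 9.1 = 0.4,
# since that exceeds the up-move 10.2 - 10.0 = 0.2; the third bar should be 0.
```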
swissdatasciencecenter/renku-python
renku/core/management/migrations/m_0004__submodules.py
_fetch_file_metadata
python
def _fetch_file_metadata(client, path):
    paths = glob.glob(f"{client.path}/.renku/datasets/*/*.yml" "")
    for dataset in _fetch_datasets(client, client.repo.head.commit, paths, [])[0]:
        for file in dataset.files:
            if file.entity.path == path:
                return file
Return metadata for a single file.
https://github.com/swissdatasciencecenter/renku-python/blob/5e43e2eff67cdf20fc2805799fe2822e23bc503d/renku/core/management/migrations/m_0004__submodules.py#L159-L165
import glob import os import shutil from pathlib import Path from git import GitError, Repo from renku.core import errors from renku.core.management.client import LocalClient from renku.core.management.command_builder.command import inject from renku.core.management.interface.client_dispatcher import IClientDispatcher from renku.core.management.interface.database_dispatcher import IDatabaseDispatcher from renku.core.management.migrations.m_0009__new_metadata_storage import _fetch_datasets from renku.core.management.migrations.models.v3 import DatasetFileSchemaV3, get_client_datasets from renku.core.management.migrations.models.v9 import ( DatasetFile, OldDatasetFileSchema, generate_file_id, generate_label, ) from renku.core.utils.urls import remove_credentials def migrate(migration_context): _migrate_submodule_based_datasets(migration_context.client) @inject.autoparams() def _migrate_submodule_based_datasets( client, client_dispatcher: IClientDispatcher, database_dispatcher: IDatabaseDispatcher ): from renku.core.management.migrate import is_project_unsupported, migrate submodules = client.repo.submodules if not submodules: return repo_paths = [] symlinks = [] for dataset in get_client_datasets(client): for file_ in dataset.files: path = client.path / file_.path if not path.is_symlink(): continue target = path.resolve() if "/.renku/vendors/" not in str(target): continue repo = Repo(target.parent, search_parent_directories=True) repo_path = repo.working_dir if repo_path not in repo_paths: repo_paths.append(repo_path) symlinks.append((file_.path, target, repo_path)) if not symlinks: return for s in submodules: try: s.update() except GitError: pass submodules_urls = {s.path: s.url for s in submodules} remote_clients = {p: LocalClient(p) for p in repo_paths} for remote_client in remote_clients.values(): client_dispatcher.push_created_client_to_stack(remote_client) database_dispatcher.push_database_to_stack(remote_client.database_path, commit=True) try: if not is_project_unsupported(): migrate(skip_template_update=True, skip_docker_update=True) finally: database_dispatcher.pop_database() client_dispatcher.pop_client() metadata = {} for path, target, repo_path in symlinks: remote_client = remote_clients[repo_path] path_within_repo = target.relative_to(repo_path) repo_is_remote = ".renku/vendors/local" not in repo_path based_on = None submodule_path = Path(repo_path).relative_to(client.path) url = submodules_urls.get(str(submodule_path), "") if repo_is_remote: based_on = _fetch_file_metadata(remote_client, path_within_repo) if based_on: based_on.url = url based_on.based_on = None else: based_on = DatasetFile.from_revision(remote_client, path=path_within_repo, url=url) data = OldDatasetFileSchema(client=remote_client).dump(based_on) based_on = DatasetFileSchemaV3(client=remote_client).load(data) else: if url: full_path = Path(url) / path_within_repo rel_path = os.path.relpath(full_path, client.path) url = f"file://{rel_path}" metadata[path] = (based_on, url) path = client.path / path path.unlink() try: shutil.move(target, path) except FileNotFoundError: raise errors.InvalidFileOperation(f"File was not found: {target}") for s in submodules: if s.path.startswith(".renku/vendors/"): try: s.remove(force=True) except ValueError: pass for dataset in get_client_datasets(client): for file_ in dataset.files: if file_.path in metadata: based_on, url = metadata[file_.path] file_.based_on = based_on file_.url = remove_credentials(url) file_.commit = client.find_previous_commit(file_.path) file_._id = 
generate_file_id(client, hexsha=file_.commit.hexsha, path=file_.path) file_._label = generate_label(file_.path, file_.commit.hexsha) dataset.to_yaml()
Apache License 2.0
gabstopper/smc-python
smc/policy/interface.py
InterfaceRule.layer2_ipv6_access_rules
python
def layer2_ipv6_access_rules(self):
    pass
Layer 2 IPv6 access rule
https://github.com/gabstopper/smc-python/blob/54386c8a710727cc1acf69334a57b155d2f5408c/smc/policy/interface.py#L34-L41
from smc.base.collection import rule_collection
from smc.policy.policy import Policy
from smc.policy.rule import IPv4Layer2Rule, EthernetRule
from smc.api.exceptions import ElementNotFound, LoadPolicyFailed, CreateElementFailed, CreatePolicyFailed
from smc.base.model import ElementCreator


class InterfaceRule(object):

    @property
    def layer2_ipv4_access_rules(self):
        return rule_collection(
            self.get_relation('l2_interface_ipv4_access_rules'),
            IPv4Layer2Rule)

    @property
Apache License 2.0
vovanbo/aiohttp_json_api
aiohttp_json_api/context.py
JSONAPIContext.get_filter
python
def get_filter(self, field: str, name: str, default: Any = None) -> Any:
    return self.filters.get((field, name), default)
Get filter from request context by name and field.

If the filter *name* has been applied on the *field*, the *filter*
is returned and *default* otherwise.

:arg str field: Name of field
:arg str name: Name of filter
:arg Any default: A fallback rule value for the filter.
https://github.com/vovanbo/aiohttp_json_api/blob/1d4864a0f73e4df33278e16d499642a60fa89aaa/aiohttp_json_api/context.py#L324-L338
import json import re from collections import OrderedDict from typing import Any, Optional, Tuple, Union import inflection from aiohttp import web from multidict import MultiDict from .common import Event, FilterRule, JSONAPI, logger, SortDirection from .errors import HTTPBadRequest, HTTPNotFound from .abc.schema import SchemaABC from .typings import ( RequestFields, RequestFilters, RequestIncludes, RequestSorting ) class JSONAPIContext: FILTER_KEY = re.compile(r"filter\[(?P<field>\w[-\w_]*)\]") FILTER_VALUE = re.compile(r"(?P<name>[a-z]+):(?P<value>.*)") FIELDS_RE = re.compile(r"fields\[(?P<name>\w[-\w_]*)\]") inflect = inflection.underscore def __init__(self, request: web.Request, resource_type: str = None) -> None: self.__request = request self.__resource_type = resource_type if self.__resource_type is None: self.__resource_type = self.__request.match_info.get('type', None) if (self.__resource_type is None or self.__resource_type not in self.registry): raise HTTPNotFound() self.__pagination = None self.__filters = self.parse_request_filters(request) self.__fields = self.parse_request_fields(request) self.__include = self.parse_request_includes(request) self.__sorting = self.parse_request_sorting(request) self.__event = None if self.__request.method in Event.__members__: self.__event = Event[self.__request.method] schema_cls, controller_cls = self.registry.get(self.resource_type) self.__controller = controller_cls(self) self.__schema = schema_cls(self) logger.debug('Request context info:\n' 'Filters: %s\n' 'Fields: %s\n' 'Includes: %s\n' 'Sorting: %s\n' 'Event: %s\n' 'Schema: %s\n' 'Controller: %s\n', self.filters, self.fields, self.include, self.sorting, self.event, schema_cls.__name__, controller_cls.__name__) @property def request(self): return self.__request @property def app(self): return self.__request.app @property def resource_type(self): return self.__resource_type @property def registry(self): return self.app[JSONAPI]['registry'] @property def schema(self) -> Optional[SchemaABC]: return self.__schema @property def controller(self): return self.__controller @property def filters(self): return self.__filters @property def fields(self): return self.__fields @property def include(self): return self.__include @property def sorting(self): return self.__sorting @property def event(self): return self.__event @property def pagination(self): if self.__pagination is not None: return self.__pagination if self.schema is not None: pagination_type = self.schema.opts.pagination if pagination_type: self.__pagination = pagination_type(self.__request) return self.__pagination return None @classmethod def convert_field_name(cls, field_name): return cls.inflect(field_name) if cls.inflect is not None else field_name @classmethod def parse_request_filters(cls, request: web.Request) -> RequestFilters: filters = MultiDict() for key, value in request.query.items(): key_match = re.fullmatch(cls.FILTER_KEY, key) value_match = re.fullmatch(cls.FILTER_VALUE, value) if key_match and not value_match: field = key_match.group('field') raise HTTPBadRequest( detail=f"The filter '{field}' is not correct applied.", source_parameter=key ) elif key_match and value_match: field = key_match.group('field') name = value_match.group('name') value = value_match.group('value') try: value = json.loads(value) except Exception as err: logger.debug(str(err), exc_info=False) raise HTTPBadRequest( detail=f"The value '{value}' is not JSON serializable", source_parameter=key ) filters.add(cls.convert_field_name(field), 
FilterRule(name=name, value=value)) return filters @classmethod def parse_request_fields(cls, request: web.Request) -> RequestFields: fields = OrderedDict() for key, value in request.query.items(): match = re.fullmatch(cls.FIELDS_RE, key) if match: typename = match.group('name') fields[typename] = tuple( cls.convert_field_name(item.strip()) for item in value.split(',') if item.strip() ) return fields @classmethod def parse_request_includes(cls, request: web.Request) -> RequestIncludes: return tuple( tuple(cls.convert_field_name(p) for p in path.split('.')) for path in request.query.get('include', '').split(',') if path ) @classmethod def parse_request_sorting(cls, request: web.Request) -> RequestSorting: sort = OrderedDict() if 'sort' not in request.query: return sort direction = SortDirection.ASC for field in request.query.get('sort').split(','): if field.startswith(('+', '-')): direction = SortDirection(field[0]) field = field[1:] field = tuple(cls.convert_field_name(e.strip()) for e in field.split('.')) sort[field] = direction return sort def has_filter(self, field: str, name: str) -> bool: return (field, name) in self.filters
MIT License
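A brief sketch of the lookup described above, assuming a `JSONAPIContext` instance named `context` has been built from an incoming request; the field and filter names below are hypothetical.

```python
from aiohttp_json_api.common import FilterRule

# The 'eq' filter applied to the 'name' field, or None if it was not supplied.
name_filter = context.get_filter('name', 'eq')

# With an explicit fallback rule for a filter that may be absent.
age_filter = context.get_filter('age', 'gt', default=FilterRule(name='gt', value=0))
```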
lorien/user_agent
user_agent/base.py
generate_navigator
python
def generate_navigator(os=None, navigator=None, platform=None, device_type=None):
    if platform is not None:
        os = platform
        warn('The `platform` option is deprecated.'
             ' Use `os` option instead.', stacklevel=3)
    device_type, os_id, navigator_id = (
        pick_config_ids(device_type, os, navigator)
    )
    system = build_system_components(
        device_type, os_id, navigator_id)
    app = build_app_components(os_id, navigator_id)
    ua_template = choose_ua_template(
        device_type, navigator_id, app)
    user_agent = ua_template.format(system=system, app=app)
    app_version = build_navigator_app_version(
        os_id, navigator_id, system['platform_version'], user_agent)
    return {
        'os_id': os_id,
        'navigator_id': navigator_id,
        'platform': system['platform'],
        'oscpu': system['oscpu'],
        'build_version': app['build_version'],
        'build_id': app['build_id'],
        'app_version': app_version,
        'app_name': app['name'],
        'app_code_name': 'Mozilla',
        'product': 'Gecko',
        'product_sub': app['product_sub'],
        'vendor': app['vendor'],
        'vendor_sub': '',
        'user_agent': user_agent,
    }
Generates web navigator's config.

:param os: limit list of oses for generation
:type os: string or list/tuple or None
:param navigator: limit list of browser engines for generation
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
    "desktop", "smartphone", "tablet", "all"
:return: User-Agent config
:rtype: dict with keys (os, name, platform, oscpu, build_version,
    build_id, app_version, app_name, app_code_name, product,
    product_sub, vendor, vendor_sub, user_agent)
:raises InvalidOption: if a user agent could not be generated for any
    combination of allowed platforms and navigators
:raises InvalidOption: if any of the passed options is invalid
https://github.com/lorien/user_agent/blob/6d697db1782320f49bf8f4482182dcdbf588daf9/user_agent/base.py#L509-L566
from random import SystemRandom from datetime import datetime, timedelta from itertools import product import six from .warning import warn from .device import SMARTPHONE_DEV_IDS, TABLET_DEV_IDS from .error import InvalidOption __all__ = ['generate_user_agent', 'generate_navigator', 'generate_navigator_js'] randomizer = SystemRandom() DEVICE_TYPE_OS = { 'desktop': ('win', 'mac', 'linux'), 'smartphone': ('android',), 'tablet': ('android',), } OS_DEVICE_TYPE = { 'win': ('desktop',), 'linux': ('desktop',), 'mac': ('desktop',), 'android': ('smartphone', 'tablet'), } DEVICE_TYPE_NAVIGATOR = { 'desktop': ('chrome', 'firefox', 'ie'), 'smartphone': ('firefox', 'chrome'), 'tablet': ('firefox', 'chrome'), } NAVIGATOR_DEVICE_TYPE = { 'ie': ('desktop',), 'chrome': ('desktop', 'smartphone', 'tablet'), 'firefox': ('desktop', 'smartphone', 'tablet'), } OS_PLATFORM = { 'win': ( 'Windows NT 5.1', 'Windows NT 6.1', 'Windows NT 6.2', 'Windows NT 6.3', 'Windows NT 10.0', ), 'mac': ( 'Macintosh; Intel Mac OS X 10.8', 'Macintosh; Intel Mac OS X 10.9', 'Macintosh; Intel Mac OS X 10.10', 'Macintosh; Intel Mac OS X 10.11', 'Macintosh; Intel Mac OS X 10.12', ), 'linux': ( 'X11; Linux', 'X11; Ubuntu; Linux', ), 'android': ( 'Android 4.4', 'Android 4.4.1', 'Android 4.4.2', 'Android 4.4.3', 'Android 4.4.4', 'Android 5.0', 'Android 5.0.1', 'Android 5.0.2', 'Android 5.1', 'Android 5.1.1', 'Android 6.0', 'Android 6.0.1', ), } OS_CPU = { 'win': ( '', 'Win64; x64', 'WOW64', ), 'linux': ( 'i686', 'x86_64', 'i686 on x86_64', ), 'mac': ( '', ), 'android': ( 'armv7l', 'armv8l', ), } OS_NAVIGATOR = { 'win': ('chrome', 'firefox', 'ie'), 'mac': ('firefox', 'chrome'), 'linux': ('chrome', 'firefox'), 'android': ('firefox', 'chrome'), } NAVIGATOR_OS = { 'chrome': ('win', 'linux', 'mac', 'android'), 'firefox': ('win', 'linux', 'mac', 'android'), 'ie': ('win',), } FIREFOX_VERSION = ( ('45.0', datetime(2016, 3, 8)), ('46.0', datetime(2016, 4, 26)), ('47.0', datetime(2016, 6, 7)), ('48.0', datetime(2016, 8, 2)), ('49.0', datetime(2016, 9, 20)), ('50.0', datetime(2016, 11, 15)), ('51.0', datetime(2017, 1, 24)), ) CHROME_BUILD = ''' 80.0.3987.132 80.0.3987.149 80.0.3987.99 81.0.4044.117 81.0.4044.138 83.0.4103.101 83.0.4103.106 83.0.4103.96 84.0.4147.105 84.0.4147.111 84.0.4147.125 84.0.4147.135 84.0.4147.89 85.0.4183.101 85.0.4183.102 85.0.4183.120 85.0.4183.121 85.0.4183.127 85.0.4183.81 85.0.4183.83 86.0.4240.110 86.0.4240.111 86.0.4240.114 86.0.4240.183 86.0.4240.185 86.0.4240.75 86.0.4240.78 86.0.4240.80 86.0.4240.96 86.0.4240.99 '''.strip().splitlines() IE_VERSION = ( (8, 'MSIE 8.0', '4.0'), (9, 'MSIE 9.0', '5.0'), (10, 'MSIE 10.0', '6.0'), (11, 'MSIE 11.0', '7.0'), ) USER_AGENT_TEMPLATE = { 'firefox': ( 'Mozilla/5.0' ' ({system[ua_platform]}; rv:{app[build_version]})' ' Gecko/{app[geckotrail]}' ' Firefox/{app[build_version]}' ), 'chrome': ( 'Mozilla/5.0' ' ({system[ua_platform]}) AppleWebKit/537.36' ' (KHTML, like Gecko)' ' Chrome/{app[build_version]} Safari/537.36' ), 'chrome_smartphone': ( 'Mozilla/5.0' ' ({system[ua_platform]}) AppleWebKit/537.36' ' (KHTML, like Gecko)' ' Chrome/{app[build_version]} Mobile Safari/537.36' ), 'chrome_tablet': ( 'Mozilla/5.0' ' ({system[ua_platform]}) AppleWebKit/537.36' ' (KHTML, like Gecko)' ' Chrome/{app[build_version]} Safari/537.36' ), 'ie_less_11': ( 'Mozilla/5.0' ' (compatible; {app[build_version]}; {system[ua_platform]};' ' Trident/{app[trident_version]})' ), 'ie_11': ( 'Mozilla/5.0' ' ({system[ua_platform]}; Trident/{app[trident_version]};' ' rv:11.0) like Gecko' ), } def 
get_firefox_build(): build_ver, date_from = randomizer.choice(FIREFOX_VERSION) try: idx = FIREFOX_VERSION.index((build_ver, date_from)) _, date_to = FIREFOX_VERSION[idx + 1] except IndexError: date_to = date_from + timedelta(days=1) sec_range = (date_to - date_from).total_seconds() - 1 build_rnd_time = ( date_from + timedelta(seconds=randomizer.randint(0, sec_range)) ) return build_ver, build_rnd_time.strftime('%Y%m%d%H%M%S') def get_chrome_build(): return randomizer.choice(CHROME_BUILD) def get_ie_build(): return randomizer.choice(IE_VERSION) MACOSX_CHROME_BUILD_RANGE = { '10.8': (0, 8), '10.9': (0, 5), '10.10': (0, 5), '10.11': (0, 6), '10.12': (0, 2) } def fix_chrome_mac_platform(platform): ver = platform.split('OS X ')[1] build_range = range(*MACOSX_CHROME_BUILD_RANGE[ver]) build = randomizer.choice(build_range) mac_ver = ver.replace('.', '_') + '_' + str(build) return 'Macintosh; Intel Mac OS X %s' % mac_ver def build_system_components(device_type, os_id, navigator_id): if os_id == 'win': platform_version = randomizer.choice(OS_PLATFORM['win']) cpu = randomizer.choice(OS_CPU['win']) if cpu: platform = '%s; %s' % (platform_version, cpu) else: platform = platform_version res = { 'platform_version': platform_version, 'platform': platform, 'ua_platform': platform, 'oscpu': platform, } elif os_id == 'linux': cpu = randomizer.choice(OS_CPU['linux']) platform_version = randomizer.choice(OS_PLATFORM['linux']) platform = '%s %s' % (platform_version, cpu) res = { 'platform_version': platform_version, 'platform': platform, 'ua_platform': platform, 'oscpu': 'Linux %s' % cpu, } elif os_id == 'mac': cpu = randomizer.choice(OS_CPU['mac']) platform_version = randomizer.choice(OS_PLATFORM['mac']) platform = platform_version if navigator_id == 'chrome': platform = fix_chrome_mac_platform(platform) res = { 'platform_version': platform_version, 'platform': 'MacIntel', 'ua_platform': platform, 'oscpu': 'Intel Mac OS X %s' % platform.split(' ')[-1], } elif os_id == 'android': assert navigator_id in ('firefox', 'chrome') assert device_type in ('smartphone', 'tablet') platform_version = randomizer.choice(OS_PLATFORM['android']) if navigator_id == 'firefox': if device_type == 'smartphone': ua_platform = '%s; Mobile' % platform_version elif device_type == 'tablet': ua_platform = '%s; Tablet' % platform_version elif navigator_id == 'chrome': device_id = randomizer.choice(SMARTPHONE_DEV_IDS) ua_platform = 'Linux; %s; %s' % (platform_version, device_id) oscpu = 'Linux %s' % randomizer.choice(OS_CPU['android']) res = { 'platform_version': platform_version, 'ua_platform': ua_platform, 'platform': oscpu, 'oscpu': oscpu, } return res def build_app_components(os_id, navigator_id): if navigator_id == 'firefox': build_version, build_id = get_firefox_build() if os_id in ('win', 'linux', 'mac'): geckotrail = '20100101' else: geckotrail = build_version res = { 'name': 'Netscape', 'product_sub': '20100101', 'vendor': '', 'build_version': build_version, 'build_id': build_id, 'geckotrail': geckotrail, } elif navigator_id == 'chrome': res = { 'name': 'Netscape', 'product_sub': '20030107', 'vendor': 'Google Inc.', 'build_version': get_chrome_build(), 'build_id': None, } elif navigator_id == 'ie': num_ver, build_version, trident_version = get_ie_build() if num_ver >= 11: app_name = 'Netscape' else: app_name = 'Microsoft Internet Explorer' res = { 'name': app_name, 'product_sub': None, 'vendor': '', 'build_version': build_version, 'build_id': None, 'trident_version': trident_version, } return res def get_option_choices(opt_name, 
opt_value, default_value, all_choices): choices = [] if isinstance(opt_value, six.string_types): choices = [opt_value] elif isinstance(opt_value, (list, tuple)): choices = list(opt_value) elif opt_value is None: choices = default_value else: raise InvalidOption('Option %s has invalid' ' value: %s' % (opt_name, opt_value)) if 'all' in choices: choices = all_choices for item in choices: if item not in all_choices: raise InvalidOption('Choices of option %s contains invalid' ' item: %s' % (opt_name, item)) return choices def pick_config_ids(device_type, os, navigator): if os is None: default_dev_types = ['desktop'] else: default_dev_types = list(DEVICE_TYPE_OS.keys()) dev_type_choices = get_option_choices( 'device_type', device_type, default_dev_types, list(DEVICE_TYPE_OS.keys()) ) os_choices = get_option_choices('os', os, list(OS_NAVIGATOR.keys()), list(OS_NAVIGATOR.keys())) nav_choices = get_option_choices('navigator', navigator, list(NAVIGATOR_OS.keys()), list(NAVIGATOR_OS.keys())) variants = [] for dev, os, nav in product(dev_type_choices, os_choices, nav_choices): if (os in DEVICE_TYPE_OS[dev] and nav in DEVICE_TYPE_NAVIGATOR[dev] and nav in OS_NAVIGATOR[os]): variants.append((dev, os, nav)) if not variants: raise InvalidOption('Options device_type, os and navigator' ' conflicts with each other') device_type, os_id, navigator_id = randomizer.choice(variants) assert os_id in OS_PLATFORM assert navigator_id in NAVIGATOR_OS assert device_type in DEVICE_TYPE_OS return device_type, os_id, navigator_id def choose_ua_template(device_type, navigator_id, app): tpl_name = navigator_id if navigator_id == 'ie': tpl_name = ('ie_11' if app['build_version'] == 'MSIE 11.0' else 'ie_less_11') if navigator_id == 'chrome': if device_type == 'smartphone': tpl_name = 'chrome_smartphone' if device_type == 'tablet': tpl_name = 'chrome_tablet' return USER_AGENT_TEMPLATE[tpl_name] def build_navigator_app_version(os_id, navigator_id, platform_version, user_agent): if navigator_id in ('chrome', 'ie'): assert user_agent.startswith('Mozilla/') app_version = user_agent.split('Mozilla/', 1)[1] elif navigator_id == 'firefox': if os_id == 'android': app_version = '5.0 (%s)' % platform_version else: os_token = { 'win': 'Windows', 'mac': 'Macintosh', 'linux': 'X11', }[os_id] app_version = '5.0 (%s)' % os_token return app_version
MIT License
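A usage sketch for `generate_navigator` above, importing it from `user_agent.base` (the module shown in this record's path); restricting the option lists is optional and the chosen restrictions here are arbitrary.

```python
from user_agent.base import generate_navigator

# Limit generation to desktop Chrome/Firefox on Windows or Linux.
config = generate_navigator(os=('win', 'linux'), navigator=['chrome', 'firefox'])

print(config['user_agent'])                       # full User-Agent string
print(config['platform'], config['app_version'])  # other navigator fields from the dict
```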
centerforopenscience/osf.io
addons/base/models.py
BaseCitationsNodeSettings.complete
python
def complete(self):
    return bool(self.has_auth and self.user_settings.verify_oauth_access(
        node=self.owner,
        external_account=self.external_account,
        metadata={'folder': self.list_id},
    ))
Boolean indication of addon completeness
https://github.com/centerforopenscience/osf.io/blob/6552a01fe250997cd3eb67cf72fc7157d9bc5af6/addons/base/models.py#L881-L887
import abc import os import time import markupsafe import requests from django.db import models from django.utils import timezone from framework.auth import Auth from framework.auth.decorators import must_be_logged_in from framework.exceptions import HTTPError, PermissionsError from mako.lookup import TemplateLookup from osf.models.base import BaseModel, ObjectIDMixin from osf.models.external import ExternalAccount from osf.models.node import AbstractNode from osf.models.user import OSFUser from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField from osf.utils.fields import NonNaiveDateTimeField from website import settings from addons.base import logger, serializer from website.oauth.signals import oauth_complete lookup = TemplateLookup( directories=[ settings.TEMPLATES_PATH ], default_filters=[ 'unicode', 'temp_ampersand_fixer', 'h', ], imports=[ 'from website.util.sanitize import temp_ampersand_fixer', ] ) class BaseAddonSettings(ObjectIDMixin, BaseModel): is_deleted = models.BooleanField(default=False) deleted = NonNaiveDateTimeField(null=True, blank=True) class Meta: abstract = True @property def config(self): return self._meta.app_config @property def short_name(self): return self.config.short_name def delete(self, save=True): self.is_deleted = True self.deleted = timezone.now() self.on_delete() if save: self.save() def undelete(self, save=True): self.is_deleted = False self.deleted = None self.on_add() if save: self.save() def to_json(self, user): return { 'addon_short_name': self.config.short_name, 'addon_full_name': self.config.full_name, } def on_add(self): pass def on_delete(self): pass class BaseUserSettings(BaseAddonSettings): owner = models.OneToOneField(OSFUser, related_name='%(app_label)s_user_settings', blank=True, null=True, on_delete=models.CASCADE) class Meta: abstract = True @property def public_id(self): return None @property def has_auth(self): return False @property def nodes_authorized(self): model = self.config.node_settings if not model: return [] return [obj.owner for obj in model.objects.filter(user_settings=self, owner__is_deleted=False).select_related('owner')] @property def can_be_merged(self): return hasattr(self, 'merge') def to_json(self, user): ret = super(BaseUserSettings, self).to_json(user) ret['has_auth'] = self.has_auth ret.update({ 'nodes': [ { '_id': node._id, 'url': node.url, 'title': node.title, 'registered': node.is_registration, 'api_url': node.api_url } for node in self.nodes_authorized ] }) return ret def __repr__(self): if self.owner: return '<{cls} owned by user {uid}>'.format(cls=self.__class__.__name__, uid=self.owner._id) return '<{cls} with no owner>'.format(cls=self.__class__.__name__) @oauth_complete.connect def oauth_complete(provider, account, user): if not user or not account: return user.add_addon(account.provider) user.save() class BaseOAuthUserSettings(BaseUserSettings): oauth_grants = DateTimeAwareJSONField(default=dict, blank=True) oauth_provider = None serializer = serializer.OAuthAddonSerializer class Meta: abstract = True @property def has_auth(self): return self.external_accounts.exists() @property def external_accounts(self): return self.owner.external_accounts.filter(provider=self.oauth_provider.short_name) def delete(self, save=True): for account in self.external_accounts.filter(provider=self.config.short_name): self.revoke_oauth_access(account, save=False) super(BaseOAuthUserSettings, self).delete(save=save) def grant_oauth_access(self, node, external_account, metadata=None): if not 
self.owner.external_accounts.filter(id=external_account.id).exists(): raise PermissionsError() metadata = metadata or {} if node._id not in self.oauth_grants: self.oauth_grants[node._id] = {} if external_account._id not in self.oauth_grants[node._id]: self.oauth_grants[node._id][external_account._id] = {} for key, value in metadata.items(): self.oauth_grants[node._id][external_account._id][key] = value self.save() @must_be_logged_in def revoke_oauth_access(self, external_account, auth, save=True): for node in self.get_nodes_with_oauth_grants(external_account): try: node.get_addon(external_account.provider, is_deleted=True).deauthorize(auth=auth) except AttributeError: pass if external_account.osfuser_set.count() == 1 and external_account.osfuser_set.filter(id=auth.user.id).exists(): self.revoke_remote_oauth_access(external_account) for key in self.oauth_grants: self.oauth_grants[key].pop(external_account._id, None) if save: self.save() def revoke_remote_oauth_access(self, external_account): pass def verify_oauth_access(self, node, external_account, metadata=None): metadata = metadata or {} try: grants = self.oauth_grants[node._id][external_account._id] except KeyError: return False for key, value in metadata.items(): if key not in grants or grants[key] != value: return False return True def get_nodes_with_oauth_grants(self, external_account): for node_id, grants in self.oauth_grants.items(): node = AbstractNode.load(node_id) if external_account._id in grants.keys() and not node.is_deleted: yield node def get_attached_nodes(self, external_account): for node in self.get_nodes_with_oauth_grants(external_account): if node is None: continue node_settings = node.get_addon(self.oauth_provider.short_name) if node_settings is None: continue if node_settings.external_account == external_account: yield node def merge(self, user_settings): if user_settings.__class__ is not self.__class__: raise TypeError('Cannot merge different addons') for node_id, data in user_settings.oauth_grants.items(): if node_id not in self.oauth_grants: self.oauth_grants[node_id] = data else: node_grants = user_settings.oauth_grants[node_id].items() for ext_acct, meta in node_grants: if ext_acct not in self.oauth_grants[node_id]: self.oauth_grants[node_id][ext_acct] = meta else: for k, v in meta: if k not in self.oauth_grants[node_id][ext_acct]: self.oauth_grants[node_id][ext_acct][k] = v user_settings.oauth_grants = {} user_settings.save() try: config = settings.ADDONS_AVAILABLE_DICT[ self.oauth_provider.short_name ] Model = config.models['nodesettings'] except KeyError: pass else: Model.objects.filter(user_settings=user_settings).update(user_settings=self) self.save() def to_json(self, user): ret = super(BaseOAuthUserSettings, self).to_json(user) ret['accounts'] = self.serializer( user_settings=self ).serialized_accounts return ret def on_delete(self): super(BaseOAuthUserSettings, self).on_delete() nodes = [AbstractNode.load(node_id) for node_id in self.oauth_grants.keys()] for node in nodes: node_addon = node.get_addon(self.oauth_provider.short_name) if node_addon and node_addon.user_settings == self: node_addon.clear_auth() class BaseNodeSettings(BaseAddonSettings): owner = models.OneToOneField(AbstractNode, related_name='%(app_label)s_node_settings', null=True, blank=True, on_delete=models.CASCADE) class Meta: abstract = True @property def complete(self): raise NotImplementedError() @property def configured(self): return self.complete @property def has_auth(self): return False def to_json(self, user): ret = 
super(BaseNodeSettings, self).to_json(user) ret.update({ 'user': { 'permissions': self.owner.get_permissions(user) }, 'node': { 'id': self.owner._id, 'api_url': self.owner.api_url, 'url': self.owner.url, 'is_registration': self.owner.is_registration, }, 'node_settings_template': os.path.basename(self.config.node_settings_template), }) return ret def before_page_load(self, node, user): pass def before_remove_contributor(self, node, removed): pass def after_remove_contributor(self, node, removed, auth=None): pass def before_make_public(self, node): pass def before_make_private(self, node): pass def after_set_privacy(self, node, permissions): pass def before_fork(self, node, user): if hasattr(self, 'user_settings'): if self.user_settings is None: return ( u'Because you have not configured the {addon} add-on, your authentication will not be ' u'transferred to the forked {category}. You may authorize and configure the {addon} add-on ' u'in the new fork on the settings page.' ).format( addon=self.config.full_name, category=node.project_or_component, ) elif self.user_settings and self.user_settings.owner == user: return ( u'Because you have authorized the {addon} add-on for this ' u'{category}, forking it will also transfer your authentication to ' u'the forked {category}.' ).format( addon=self.config.full_name, category=node.project_or_component, ) else: return ( u'Because the {addon} add-on has been authorized by a different ' u'user, forking it will not transfer authentication to the forked ' u'{category}. You may authorize and configure the {addon} add-on ' u'in the new fork on the settings page.' ).format( addon=self.config.full_name, category=node.project_or_component, ) def after_fork(self, node, fork, user, save=True): clone = self.clone() clone.user_settings = None clone.owner = fork if save: clone.save() return clone def before_register(self, node, user): pass def after_register(self, node, registration, user, save=True): return None, None def after_delete(self, user): pass class GenericRootNode(object): path = '/' name = '' class BaseStorageAddon(object): root_node = GenericRootNode() class Meta: abstract = True @property def archive_folder_name(self): name = 'Archive of {addon}'.format(addon=self.config.full_name) folder_name = getattr(self, 'folder_name', '').lstrip('/').strip() if folder_name: name = name + ': {folder}'.format(folder=folder_name) return name def _get_fileobj_child_metadata(self, filenode, user, cookie=None, version=None): from api.base.utils import waterbutler_api_url_for kwargs = {} if version: kwargs['version'] = version if cookie: kwargs['cookie'] = cookie elif user: kwargs['cookie'] = user.get_or_create_cookie().decode() metadata_url = waterbutler_api_url_for( self.owner._id, self.config.short_name, path=filenode.get('path', '/'), user=user, view_only=True, _internal=True, base_url=self.owner.osfstorage_region.waterbutler_url, **kwargs ) res = requests.get(metadata_url) if res.status_code != 200: raise HTTPError(res.status_code, data={'error': res.json()}) time.sleep(1.0 / 5.0) data = res.json().get('data', None) if data: return [child['attributes'] for child in data] return [] def _get_file_tree(self, filenode=None, user=None, cookie=None, version=None): filenode = filenode or { 'path': '/', 'kind': 'folder', 'name': self.root_node.name, } if filenode.get('kind') == 'file': return filenode kwargs = { 'version': version, 'cookie': cookie, } filenode['children'] = [ self._get_file_tree(child, user, cookie=cookie) for child in 
self._get_fileobj_child_metadata(filenode, user, **kwargs) ] return filenode class BaseOAuthNodeSettings(BaseNodeSettings): external_account = models.ForeignKey(ExternalAccount, null=True, blank=True, related_name='%(app_label)s_node_settings', on_delete=models.CASCADE) oauth_provider = None class Meta: abstract = True @abc.abstractproperty def folder_id(self): raise NotImplementedError( "BaseOAuthNodeSettings subclasses must expose a 'folder_id' property." ) @abc.abstractproperty def folder_name(self): raise NotImplementedError( "BaseOAuthNodeSettings subclasses must expose a 'folder_name' property." ) @abc.abstractproperty def folder_path(self): raise NotImplementedError( "BaseOAuthNodeSettings subclasses must expose a 'folder_path' property." ) def fetch_folder_name(self): return self.folder_name @property def nodelogger(self): auth = None if self.user_settings: auth = Auth(self.user_settings.owner) self._logger_class = getattr( self, '_logger_class', type( '{0}NodeLogger'.format(self.config.short_name.capitalize()), (logger.AddonNodeLogger,), {'addon_short_name': self.config.short_name} ) ) return self._logger_class( node=self.owner, auth=auth ) @property def complete(self): return bool( self.has_auth and self.external_account and self.user_settings.verify_oauth_access( node=self.owner, external_account=self.external_account, ) ) @property def configured(self): return bool( self.complete and (self.folder_id or self.folder_name or self.folder_path) ) @property def has_auth(self): return bool( self.user_settings and self.user_settings.has_auth ) and bool( self.external_account and self.user_settings.verify_oauth_access( node=self.owner, external_account=self.external_account ) ) def clear_settings(self): raise NotImplementedError( "BaseOAuthNodeSettings subclasses must expose a 'clear_settings' method." ) def set_auth(self, external_account, user, metadata=None, log=True): user_settings = user.get_or_add_addon(self.oauth_provider.short_name) user_settings.grant_oauth_access( node=self.owner, external_account=external_account, metadata=metadata ) user_settings.save() self.user_settings = user_settings self.external_account = external_account if log: self.nodelogger.log(action='node_authorized', save=True) self.save() def deauthorize(self, auth=None, add_log=False): self.clear_auth() def clear_auth(self): self.external_account = None self.user_settings = None self.save() def before_remove_contributor_message(self, node, removed): if self.has_auth and self.user_settings.owner == removed: return ( u'The {addon} add-on for this {category} is authenticated by {name}. ' u'Removing this user will also remove write access to {addon} ' u'unless another contributor re-authenticates the add-on.' ).format( addon=self.config.full_name, category=node.project_or_component, name=removed.fullname, ) before_remove_contributor = before_remove_contributor_message def after_remove_contributor(self, node, removed, auth=None): if self.user_settings and self.user_settings.owner == removed: self.user_settings.oauth_grants[self.owner._id].pop(self.external_account._id) self.user_settings.save() self.clear_auth() message = ( u'Because the {addon} add-on for {category} "{title}" was authenticated ' u'by {user}, authentication information has been deleted.' 
).format( addon=self.config.full_name, category=markupsafe.escape(node.category_display), title=markupsafe.escape(node.title), user=markupsafe.escape(removed.fullname) ) if not auth or auth.user != removed: url = node.web_url_for('node_addons') message += ( u' You can re-authenticate on the <u><a href="{url}">add-ons</a></u> page.' ).format(url=url) return message def after_fork(self, node, fork, user, save=True): clone = super(BaseOAuthNodeSettings, self).after_fork( node=node, fork=fork, user=user, save=False, ) if self.has_auth and self.user_settings.owner == user: metadata = None if self.complete: try: metadata = self.user_settings.oauth_grants[node._id][self.external_account._id] except (KeyError, AttributeError): pass clone.set_auth(self.external_account, user, metadata=metadata, log=False) else: clone.clear_settings() if save: clone.save() return clone def before_register_message(self, node, user): if self.has_auth: return ( u'The contents of {addon} add-ons cannot be registered at this time; ' u'the {addon} add-on linked to this {category} will not be included ' u'as part of this registration.' ).format( addon=self.config.full_name, category=node.project_or_component, ) before_register = before_register_message def serialize_waterbutler_credentials(self): raise NotImplementedError("BaseOAuthNodeSettings subclasses must implement a \ 'serialize_waterbutler_credentials' method.") def serialize_waterbutler_settings(self): raise NotImplementedError("BaseOAuthNodeSettings subclasses must implement a \ 'serialize_waterbutler_settings' method.") class BaseCitationsNodeSettings(BaseOAuthNodeSettings): class Meta: abstract = True def serialize_waterbutler_settings(self, *args, **kwargs): pass def serialize_waterbutler_credentials(self, *args, **kwargs): pass def create_waterbutler_log(self, *args, **kwargs): pass @property def api(self): if self._api is None: self._api = self.oauth_provider(account=self.external_account) return self._api @property
Apache License 2.0
ucfopen/canvasapi
canvasapi/blueprint.py
BlueprintTemplate.get_unsynced_changes
python
def get_unsynced_changes(self, **kwargs):
    return PaginatedList(
        ChangeRecord,
        self._requester,
        "GET",
        "courses/{}/blueprint_templates/{}/unsynced_changes".format(
            self.course_id, self.id
        ),
        kwargs=combine_kwargs(**kwargs),
    )
Return changes made to associated courses of a blueprint course.

:calls: `GET /api/v1/courses/:course_id/blueprint_templates/:template_id/unsynced_changes \
    <https://canvas.instructure.com/doc/api/blueprint_courses.html#method.master_courses\
    /master_templates.unsynced_changes>`_

:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
    :class:`canvasapi.blueprint.ChangeRecord`
https://github.com/ucfopen/canvasapi/blob/2ac9979d17979932a3f43eb8737b7648566c1c68/canvasapi/blueprint.py#L87-L107
from canvasapi.canvas_object import CanvasObject from canvasapi.paginated_list import PaginatedList from canvasapi.util import combine_kwargs, obj_or_id class BlueprintTemplate(CanvasObject): def __str__(self): return "{}".format(self.id) def associated_course_migration(self, **kwargs): response = self._requester.request( "POST", "courses/{}/blueprint_templates/{}/migrations".format( self.course_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) response_json = response.json() response_json.update({"course_id": self.course_id}) return BlueprintMigration(self._requester, response_json) def change_blueprint_restrictions( self, content_type, content_id, restricted, **kwargs ): kwargs["content_type"] = content_type kwargs["content_id"] = content_id kwargs["restricted"] = restricted response = self._requester.request( "PUT", "courses/{}/blueprint_templates/{}/restrict_item".format( self.course_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) return response.json().get("success", False) def get_associated_courses(self, **kwargs): from canvasapi.course import Course return PaginatedList( Course, self._requester, "GET", "courses/{}/blueprint_templates/{}/associated_courses".format( self.course_id, self.id ), _kwargs=combine_kwargs(**kwargs), )
MIT License
microsoft/seismic-deeplearning
interpretation/deepseismic_interpretation/segyconverter/convert_segy.py
filter_data
python
def filter_data(output_dir, stddev_file, k, min_range, max_range, clip, normalize): txt_file = os.path.join(output_dir, stddev_file) if not os.path.isfile(txt_file): raise Exception("Std Deviation file could not be found") with open(os.path.join(txt_file), "r") as f: metadatastr = f.read() try: metadata = json.loads(metadatastr) stddev = float(metadata["stddev"]) mean = float(metadata["mean"]) except ValueError: raise Exception("stddev value not valid: {}".format(metadatastr)) npy_files = list(f for f in os.listdir(output_dir) if f.endswith(".npy")) for local_filename in npy_files: cube = np.load(os.path.join(output_dir, local_filename)) if normalize or clip: cube = dataprep.apply(cube, stddev, mean, k, min_range, max_range, clip=clip, normalize=normalize) np.save(os.path.join(output_dir, local_filename), cube)
Normalization step on all files in output_dir. This function overwrites the existing data files. :param str output_dir: Directory path of all npy files to normalize :param str stddev_file: txt file containing standard deviation result :param int k: number of standard deviations to be used in normalization :param float min_range: minimum range value :param float max_range: maximum range value :param clip: flag to turn on/off clip :param normalize: flag to turn on/off normalization.
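A minimal call sketch: the output directory is assumed to already contain the .npy cubes plus a JSON metadata file with "stddev" and "mean" keys (as produced by the earlier conversion step); the paths are placeholders, and K, MIN_VAL, MAX_VAL are the module-level defaults visible in the imports.

from deepseismic_interpretation.segyconverter.convert_segy import filter_data, K, MIN_VAL, MAX_VAL

# normalize and clip every .npy file in ./npy_out in place, using ./npy_out/stddev.txt for the statistics
filter_data("./npy_out", "stddev.txt", K, MIN_VAL, MAX_VAL, clip=True, normalize=True)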
https://github.com/microsoft/seismic-deeplearning/blob/3f74face5d087a3947419a698a6181733d8be8fd/interpretation/deepseismic_interpretation/segyconverter/convert_segy.py#L19-L49
import os import timeit import argparse import numpy as np from deepseismic_interpretation.segyconverter.utils import segyextract, dataprep import json K = 12 MIN_VAL = 0 MAX_VAL = 1
MIT License
quantmind/pulsar
pulsar/apps/wsgi/route.py
Route.url
python
def url(self, **urlargs): if self.defaults: d = self.defaults.copy() d.update(urlargs) urlargs = d url = '/'.join(self._url_generator(urlargs)) if not url: return '/' else: url = '/' + url return url if self.is_leaf else url + '/'
Build a ``url`` from ``urlargs`` key-value parameters
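A small sketch of how defaults and urlargs combine; the rule below is illustrative and relies on the default converter for the dynamic <name> bit, so the expected outputs in the comments are hedged.

from pulsar.apps.wsgi.route import Route

route = Route('projects/<name>/', defaults={'name': 'demo'})
print(route.url())             # expected '/projects/demo/' (defaults fill the missing variable)
print(route.url(name='docs'))  # expected '/projects/docs/' (explicit urlargs override defaults)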
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/route.py#L308-L320
import re from collections import namedtuple from pulsar.api import Http404 from pulsar.utils.httpurl import (iri_to_uri, remove_double_slash, ENCODE_URL_METHODS, ENCODE_BODY_METHODS) from pulsar.utils.string import to_string from pulsar.utils.slugify import slugify class rule_info(namedtuple('rinfo', 'rule method parameters position order')): def override(self, parent): if self.position is None: return rule_info(self.rule, self.method, self.parameters, parent.position, parent.order) else: return self _rule_re = re.compile(r''' (?: (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name (?:\((?P<args>.*?)\))? # converter parameters \: # variable delimiter )? (?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name ''', re.VERBOSE) _converter_args_re = re.compile(r''' ((?P<name>\w+)\s*=\s*)? (?P<value> True|False| \d+.\d+| \d+.| \d+| \w+| [urUR]?(?P<stringval>"[^"]*?"|'[^']*') )\s*, ''', re.VERBOSE | re.UNICODE) _PYTHON_CONSTANTS = { 'None': None, 'True': True, 'False': False } def _pythonize(value): if value in _PYTHON_CONSTANTS: return _PYTHON_CONSTANTS[value] for convert in int, float: try: return convert(value) except ValueError: pass if value[:1] == value[-1:] and value[0] in '"\'': value = value[1:-1] return str(value) def parse_rule(rule): m = _rule_re.match(rule) if m is None or m.end() < len(rule): raise ValueError('Error while parsing rule {0}'.format(rule)) data = m.groupdict() converter = data['converter'] or 'default' return converter, data['args'] or None, data['variable'] class route: creation_count = 0 def __init__(self, rule=None, method=None, defaults=None, position=None, re=False, **parameters): self.__class__.creation_count += 1 self.position = position self.creation_count = self.__class__.creation_count self.rule = rule self.re = re self.defaults = defaults self.method = method self.parameters = parameters @property def order(self): return self.creation_count if self.position is None else self.position def __call__(self, callable): bits = callable.__name__.split('_') method = None if len(bits) > 1: m = bits[0].upper() if m in ENCODE_URL_METHODS or m in ENCODE_BODY_METHODS: method = m bits = bits[1:] name = self.parameters.get('name', '_'.join(bits)) self.parameters['name'] = name method = self.method or method or 'get' if isinstance(method, (list, tuple)): method = tuple((m.lower() for m in method)) else: method = method.lower() rule = Route(self.rule or name, defaults=self.defaults, is_re=self.re) callable.rule_method = rule_info(rule, method, self.parameters, self.position, self.order) return callable class Route: def __init__(self, rule, defaults=None, is_re=False): rule = remove_double_slash('/%s' % rule) self.defaults = defaults if defaults is not None else {} self.is_leaf = not rule.endswith('/') self.rule = rule[1:] self.variables = set(map(str, self.defaults)) breadcrumbs = [] self._converters = {} regex_parts = [] if self.rule: for bit in self.rule.split('/'): if not bit: continue s = bit[0] e = bit[-1] if s == '<' or e == '>': if s + e != '<>': raise ValueError( 'malformed rule {0}'.format(self.rule)) converter, parameters, variable = parse_rule(bit[1:-1]) if variable in self._converters: raise ValueError('variable name {0} used twice in ' 'rule {1}.'.format(variable, self.rule)) convobj = get_converter(converter, parameters) regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex)) breadcrumbs.append((True, variable)) self._converters[variable] = convobj self.variables.add(str(variable)) else: variable = bit if is_re else re.escape(bit) 
regex_parts.append(variable) breadcrumbs.append((False, bit)) self.breadcrumbs = tuple(breadcrumbs) self._regex_string = '/'.join(regex_parts) if self._regex_string and not self.is_leaf: self._regex_string += '/' self._regex = re.compile(self.regex, re.UNICODE) @property def level(self): return len(self.breadcrumbs) @property def path(self): return '/' + self.rule @property def name(self): return slugify(self.rule, separator='_') @property def regex(self): if self.is_leaf: return '^' + self._regex_string + '$' else: return '^' + self._regex_string @property def bits(self): return tuple((b[1] for b in self.breadcrumbs)) @property def ordered_variables(self): return tuple((b for dyn, b in self.breadcrumbs if dyn)) def __hash__(self): return hash(self.rule) def __repr__(self): return self.path def __eq__(self, other): if isinstance(other, self.__class__): return str(self) == str(other) else: return False def __lt__(self, other): if isinstance(other, self.__class__): return to_string(self) < to_string(other) else: raise TypeError('Cannot compare {0} with {1}'.format(self, other)) def _url_generator(self, values): for is_dynamic, val in self.breadcrumbs: if is_dynamic: val = self._converters[val].to_url(values[val]) yield val
BSD 3-Clause New or Revised License
openstack/rally-openstack
rally_openstack/common/services/storage/block.py
BlockStorage.delete_metadata
python
def delete_metadata(self, volume, keys, deletes=10, delete_size=3): self._impl.delete_metadata(volume, keys, deletes=deletes, delete_size=delete_size)
Delete volume metadata keys. Note that ``len(keys)`` must be greater than or equal to ``deletes * delete_size``. :param volume: The volume to delete metadata from :param deletes: how many operations to perform :param delete_size: number of metadata keys to delete in each operation :param keys: a list of keys to choose deletion candidates from
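An illustrative call that satisfies the stated constraint len(keys) >= deletes * delete_size; block_storage is assumed to be an already-initialised BlockStorage service and volume an existing Volume object, both created elsewhere.

# hypothetical, pre-existing objects: block_storage (BlockStorage service) and volume (Volume)
keys = ["k1", "k2", "k3", "k4", "k5", "k6"]                             # 6 candidate keys
block_storage.delete_metadata(volume, keys, deletes=2, delete_size=3)   # 2 operations x 3 keys each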
https://github.com/openstack/rally-openstack/blob/d52e165320d87860930d6fbcca105e19bec0d879/rally_openstack/common/services/storage/block.py#L156-L168
from rally.common import cfg from rally.common import logging from rally.task import service CONF = cfg.CONF LOG = logging.getLogger(__name__) Volume = service.make_resource_cls( "Volume", properties=["id", "name", "size", "status"]) VolumeSnapshot = service.make_resource_cls( "VolumeSnapshot", properties=["id", "name", "volume_id", "status"]) VolumeBackup = service.make_resource_cls( "VolumeBackup", properties=["id", "name", "volume_id", "status"]) VolumeTransfer = service.make_resource_cls( "VolumeTransfer", properties=["id", "name", "volume_id", "auth_key"]) VolumeEncryptionType = service.make_resource_cls( "VolumeEncryptionType", properties=["id", "volume_type_id"]) QoSSpecs = service.make_resource_cls( "QoSSpecs", properties=["id", "name", "specs"]) class BlockStorage(service.UnifiedService): @service.should_be_overridden def create_volume(self, size, consistencygroup_id=None, group_id=None, snapshot_id=None, source_volid=None, name=None, description=None, volume_type=None, user_id=None, project_id=None, availability_zone=None, metadata=None, imageRef=None, scheduler_hints=None, source_replica=None, backup_id=None): if source_replica: LOG.warning("The argument `source_replica` would be ignored" " because it was removed from cinder api.") return self._impl.create_volume( size, consistencygroup_id=consistencygroup_id, group_id=group_id, snapshot_id=snapshot_id, source_volid=source_volid, name=name, description=description, volume_type=volume_type, user_id=user_id, project_id=project_id, availability_zone=availability_zone, metadata=metadata, imageRef=imageRef, scheduler_hints=scheduler_hints, backup_id=backup_id) @service.should_be_overridden def list_volumes(self, detailed=True, search_opts=None, marker=None, limit=None, sort=None): return self._impl.list_volumes( detailed=detailed, search_opts=search_opts, marker=marker, limit=limit, sort=sort) @service.should_be_overridden def get_volume(self, volume_id): return self._impl.get_volume(volume_id) @service.should_be_overridden def update_volume(self, volume_id, name=None, description=None): return self._impl.update_volume( volume_id, name=name, description=description) @service.should_be_overridden def delete_volume(self, volume): self._impl.delete_volume(volume) @service.should_be_overridden def extend_volume(self, volume, new_size): return self._impl.extend_volume(volume, new_size=new_size) @service.should_be_overridden def list_snapshots(self, detailed=True): return self._impl.list_snapshots(detailed=detailed) @service.should_be_overridden def list_types(self, search_opts=None, is_public=None): return self._impl.list_types(search_opts=search_opts, is_public=is_public) @service.should_be_overridden def set_metadata(self, volume, sets=10, set_size=3): return self._impl.set_metadata(volume, sets=sets, set_size=set_size) @service.should_be_overridden
Apache License 2.0
netflix/dispatch
src/dispatch/incident_role/service.py
get_all
python
def get_all(*, db_session): return db_session.query(IncidentRole)
Gets all incident roles.
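A minimal usage sketch; db_session is assumed to be a configured SQLAlchemy session bound to the Dispatch database.

from dispatch.incident_role.service import get_all

# hypothetical: db_session is an existing SQLAlchemy session
roles = get_all(db_session=db_session).all()   # get_all returns a Query, so .all() materialises it
print(len(roles))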
https://github.com/netflix/dispatch/blob/e30705938e970d8ef0dfdd04246a3f3004a6a44f/src/dispatch/incident_role/service.py#L32-L34
import logging from typing import List, Optional from operator import attrgetter from pydantic.error_wrappers import ErrorWrapper, ValidationError from dispatch.incident.models import Incident, ProjectRead from dispatch.exceptions import NotFoundError from dispatch.participant_role.models import ParticipantRoleType from dispatch.tag import service as tag_service from dispatch.incident_type import service as incident_type_service from dispatch.incident_priority import service as incident_priority_service from dispatch.individual import service as individual_contact_service from dispatch.project import service as project_service from dispatch.service import service as service_service from .models import ( IncidentRole, IncidentRoleCreateUpdate, ) log = logging.getLogger(__name__) def get(*, db_session, incident_role_id: int) -> Optional[IncidentRole]: return db_session.query(IncidentRole).filter(IncidentRole.id == incident_role_id).one_or_none()
Apache License 2.0
bloomreach/briefly
src/briefly/process.py
LocalProcess.execute_error
python
def execute_error(self, exp): try: os.remove(self.output) except: pass
Remove partial output when an error occurs.
https://github.com/bloomreach/briefly/blob/78e9b6682ce936b77e4ff3fef0344beabe4b582a/src/briefly/process.py#L105-L110
import os import sys import hashlib import fs from node import * PROCESS_DEFS = [] class Process(Node): def __init__(self, *args, **kargs): super(Process, self).__init__() self.args = args self.kargs = kargs self.hashcode = None def build_dep(self): super(Process, self).build_dep() args = [] kargs = {} for arg in self.args: if isinstance(arg, Node): arg.check_configure() args.append(self.add_dep(arg)) for k, v in self.kargs.iteritems(): if isinstance(v, Node): v.check_configure() kargs[k] = self.add_dep(v) self.args = args self.kargs = kargs def hash(self): if self.hashcode: return self.hashcode op = type(self).__name__ elems = [] for v in self.deps + list(self.args) + self.kargs.values(): if isinstance(v, Process): elems.append(v.hash()) else: elems.append(str(v)) m = hashlib.md5() m.update('%s(%s)' % (op, ','.join(elems))) self.hashcode = '%s-%s' % (op, m.hexdigest()[:16]) return self.hashcode def process_args(self, *args): results = [] for arg in args: results.append(self.prop.sub(arg, input=self.main_src.output, output=self.output)) return results def execute(self): raise NotImplementedError() class LocalProcess(Process): def add_dep(self, dep): if isinstance(dep, Node) and (not dep.output is None) and fs.is_s3url(dep.output): s3p = dep | S3Process() dep = s3p.check_configure() return super(LocalProcess, self).add_dep(dep) def execute(self): raise NotImplementedError()
Apache License 2.0
scikit-hep/cabinetry
src/cabinetry/histo.py
Histogram.save
python
def save(self, histo_path: pathlib.Path) -> None: log.debug(f"saving histogram to {histo_path.with_suffix('.npz')}") if not os.path.exists(histo_path.parent): os.mkdir(histo_path.parent) np.savez( histo_path.with_suffix(".npz"), yields=self.yields, stdev=self.stdev, bins=self.bins, )
Saves a histogram to disk. Args: histo_path (pathlib.Path): where to save the histogram
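A short, self-contained sketch that builds a histogram from arrays and writes it out; the target folder name is arbitrary and the .npz suffix is appended by save itself.

import pathlib
from cabinetry.histo import Histogram

h = Histogram.from_arrays(bins=[0.0, 1.0, 2.0], yields=[5.0, 3.0], stdev=[1.0, 0.5])
h.save(pathlib.Path("histograms/signal_region_sample"))   # creates histograms/ if needed, writes the .npz file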
https://github.com/scikit-hep/cabinetry/blob/b89bb09b97608e984864cde6d45f9b8901612a06/src/cabinetry/histo.py#L176-L192
import logging import os import pathlib from typing import Any, Dict, List, Optional, Type, TypeVar, Union import boost_histogram as bh import numpy as np import cabinetry from cabinetry._typing import Literal log = logging.getLogger(__name__) H = TypeVar("H", bound="Histogram") class Histogram(bh.Histogram, family=cabinetry): @classmethod def from_arrays( cls: Type[H], bins: Union[List[float], np.ndarray], yields: Union[List[float], np.ndarray], stdev: Union[List[float], np.ndarray], ) -> H: if len(bins) != len(yields) + 1: raise ValueError("bin edges need one more entry than yields") if len(yields) != len(stdev): raise ValueError("yields and stdev need to have the same shape") out = cls( bh.axis.Variable(bins, underflow=False, overflow=False), storage=bh.storage.Weight(), ) yields = np.asarray(yields) stdev = np.asarray(stdev) out[...] = np.stack([yields, stdev ** 2], axis=-1) return out @classmethod def from_path( cls: Type[H], histo_path: pathlib.Path, *, modified: bool = True ) -> H: if modified: histo_path_modified = histo_path.parent / (histo_path.name + "_modified") if not histo_path_modified.with_suffix(".npz").exists(): log.warning( f"the modified histogram {histo_path_modified.with_suffix('.npz')} " "does not exist" ) log.warning("loading the un-modified histogram instead!") else: histo_path = histo_path_modified histogram_npz = np.load(histo_path.with_suffix(".npz")) bins = histogram_npz["bins"] yields = histogram_npz["yields"] stdev = histogram_npz["stdev"] return cls.from_arrays(bins, yields, stdev) @classmethod def from_config( cls: Type[H], histo_folder: Union[str, pathlib.Path], region: Dict[str, Any], sample: Dict[str, Any], systematic: Dict[str, Any], *, template: Optional[Literal["Up", "Down"]] = None, modified: bool = True, ) -> H: histo_name = name(region, sample, systematic, template=template) histo_path = pathlib.Path(histo_folder) / histo_name return cls.from_path(histo_path, modified=modified) @property def yields(self) -> np.ndarray: return self.values() @yields.setter def yields(self, value: np.ndarray) -> None: self.view().value = value @property def stdev(self) -> np.ndarray: return np.sqrt(self.variances()) @stdev.setter def stdev(self, value: np.ndarray) -> None: self.view().variance = value ** 2 @property def bins(self) -> np.ndarray: return self.axes[0].edges
BSD 3-Clause New or Revised License
climdyn/qgs
qgs/inner_products/symbolic.py
AtmosphericSymbolicInnerProducts.s
python
def s(self, i, j): if self.connected_to_ocean: extra_subs = self.oceanic_basis.substitutions elif self.connected_to_ground: extra_subs = self.ground_basis.substitutions else: extra_subs = None if self.connected_to_ocean or self.connected_to_ground: if self.stored and self._s is not None: return self._s[i, j] else: res = self.iip.symbolic_inner_product(self._F(i), self._phi(j)) return float(res.subs(self.subs).subs(self.atmospheric_basis.substitutions).subs(extra_subs)) else: return 0
Function to compute the forcing (thermal) of the ocean on the atmosphere: :math:`s_{i,j} = (F_i, \phi_j)`.
https://github.com/climdyn/qgs/blob/33d79b1fa360de22b7ae595c142dbe9b6a8fb53a/qgs/inner_products/symbolic.py#L416-L432
import sparse as sp from pebble import ProcessPool as Pool from concurrent.futures import TimeoutError from multiprocessing import cpu_count from qgs.params.params import QgParams from qgs.inner_products.base import AtmosphericInnerProducts, OceanicInnerProducts, GroundInnerProducts from qgs.inner_products.definition import StandardSymbolicInnerProductDefinition from sympy import lambdify from scipy.integrate import dblquad class AtmosphericSymbolicInnerProducts(AtmosphericInnerProducts): def __init__(self, params=None, stored=True, inner_product_definition=None, interaction_inner_product_definition=None, num_threads=None, quadrature=True, timeout=None): AtmosphericInnerProducts.__init__(self) if quadrature: timeout = True if params is not None: if isinstance(params, QgParams): self.n = params.scale_params.n self.atmospheric_basis = params.atmospheric_basis if params.oceanic_basis is not None: goc_basis = params.oceanic_basis oog = "ocean" elif params.ground_basis is not None: goc_basis = params.ground_basis oog = "ground" oro_basis = params.ground_params.orographic_basis else: goc_basis = None oog = "" oro_basis = params.ground_params.orographic_basis else: self.n = params[0] self.atmospheric_basis = params[1] goc_basis = params[2] oog = params[3] oro_basis = params[4] self._gh = None else: self.n = None self.atmospheric_basis = None goc_basis = None oog = "" self._gh = None stored = False oro_basis = "" self.oceanic_basis = None self.connected_to_ocean = False self.ground_basis = None self.connected_to_ground = False self.subs = [('n', self.n)] if inner_product_definition is None: self.ip = StandardSymbolicInnerProductDefinition() else: self.ip = inner_product_definition if interaction_inner_product_definition is None: self.iip = self.ip else: self.iip = interaction_inner_product_definition self.stored = stored if stored: self.compute_inner_products(num_threads, timeout) if goc_basis is not None: if oog == 'ocean': self.connect_to_ocean(goc_basis, num_threads, timeout) else: self.connect_to_ground(goc_basis, oro_basis, num_threads, timeout) def _F(self, i): if self.atmospheric_basis is not None: return self.atmospheric_basis.functions[i] def _phi(self, i): if self.oceanic_basis is not None: return self.oceanic_basis.functions[i] def connect_to_ocean(self, ocean_basis, num_threads=None, timeout=None): if isinstance(ocean_basis, OceanicSymbolicInnerProducts): ocean_basis = ocean_basis.oceanic_basis self.ground_basis = None self.connected_to_ground = False self.oceanic_basis = ocean_basis self.connected_to_ocean = True if self.stored: if num_threads is None: num_threads = cpu_count() with Pool(max_workers=num_threads) as pool: subs = self.subs + self.atmospheric_basis.substitutions + self.oceanic_basis.substitutions noc = len(ocean_basis) self._gh = None self._d = sp.zeros((self.natm, noc), dtype=float, format='dok') self._s = sp.zeros((self.natm, noc), dtype=float, format='dok') args_list = [[(i, j), self.iip.ip_lap, (self._F(i), self._phi(j))] for i in range(self.natm) for j in range(noc)] _parallel_compute(pool, args_list, subs, self._d, timeout) args_list = [[(i, j), self.iip.symbolic_inner_product, (self._F(i), self._phi(j))] for i in range(self.natm) for j in range(noc)] _parallel_compute(pool, args_list, subs, self._s, timeout) self._s = self._s.to_coo() self._d = self._d.to_coo() def connect_to_ground(self, ground_basis, orographic_basis, num_threads=None, timeout=None): if isinstance(ground_basis, GroundSymbolicInnerProducts): ground_basis = ground_basis.ground_basis 
self.oceanic_basis = None self.connected_to_ocean = False self.ground_basis = ground_basis self.connected_to_ground = True if self.stored: if num_threads is None: num_threads = cpu_count() with Pool(max_workers=num_threads) as pool: subs = self.subs + self.atmospheric_basis.substitutions + self.ground_basis.substitutions ngr = len(ground_basis) if orographic_basis == "atmospheric": self._gh = None else: self._gh = sp.zeros((self.natm, self.natm, ngr), dtype=float, format='dok') self._d = None self._s = sp.zeros((self.natm, ngr), dtype=float, format='dok') args_list = [[(i, j), self.iip.symbolic_inner_product, (self._F(i), self._phi(j))] for i in range(self.natm) for j in range(ngr)] _parallel_compute(pool, args_list, subs, self._s, timeout) args_list = [[(i, j, k), self.iip.ip_jac, (self._F(i), self._F(j), self._phi(k))] for i in range(self.natm) for j in range(self.natm) for k in range(ngr)] _parallel_compute(pool, args_list, subs, self._gh, timeout) self._s = self._s.to_coo() if self._gh is not None: self._gh = self._gh.to_coo() def compute_inner_products(self, num_threads=None, timeout=None): self._a = sp.zeros((self.natm, self.natm), dtype=float, format='dok') self._u = sp.zeros((self.natm, self.natm), dtype=float, format='dok') self._c = sp.zeros((self.natm, self.natm), dtype=float, format='dok') self._b = sp.zeros((self.natm, self.natm, self.natm), dtype=float, format='dok') self._g = sp.zeros((self.natm, self.natm, self.natm), dtype=float, format='dok') if self.stored: if num_threads is None: num_threads = cpu_count() with Pool(max_workers=num_threads) as pool: subs = self.subs + self.atmospheric_basis.substitutions args_list = [[(i, j), self.ip.ip_lap, (self._F(i), self._F(j))] for i in range(self.natm) for j in range(self.natm)] _parallel_compute(pool, args_list, subs, self._a, timeout) args_list = [[(i, j), self.ip.symbolic_inner_product, (self._F(i), self._F(j))] for i in range(self.natm) for j in range(self.natm)] _parallel_compute(pool, args_list, subs, self._u, timeout) args_list = [[(i, j), self.ip.ip_diff_x, (self._F(i), self._F(j))] for i in range(self.natm) for j in range(self.natm)] _parallel_compute(pool, args_list, subs, self._c, timeout) args_list = [[(i, j, k), self.ip.ip_jac_lap, (self._F(i), self._F(j), self._F(k))] for i in range(self.natm) for j in range(self.natm) for k in range(self.natm)] _parallel_compute(pool, args_list, subs, self._b, timeout) args_list = [[(i, j, k), self.ip.ip_jac, (self._F(i), self._F(j), self._F(k))] for i in range(self.natm) for j in range(self.natm) for k in range(self.natm)] _parallel_compute(pool, args_list, subs, self._g, timeout) self._a = self._a.to_coo() self._u = self._u.to_coo() self._c = self._c.to_coo() self._g = self._g.to_coo() self._b = self._b.to_coo() @property def natm(self): return len(self.atmospheric_basis.functions) def a(self, i, j): if not self.stored: res = self.ip.ip_lap(self._F(i), self._F(j)) return float(res.subs(self.subs).subs(self.atmospheric_basis.substitutions)) else: return self._a[i, j] def u(self, i, j): if not self.stored: res = self.ip.symbolic_inner_product(self._F(i), self._F(j)) return float(res.subs(self.subs).subs(self.atmospheric_basis.substitutions)) else: return self._u[i, j] def b(self, i, j, k): if not self.stored: res = self.ip.ip_jac_lap(self._F(i), self._F(j), self._F(k)) return float(res.subs(self.subs).subs(self.atmospheric_basis.substitutions)) else: return self._b[i, j, k] def c(self, i, j): if not self.stored: res = self.ip.ip_diff_x(self._F(i), self._F(j)) return 
float(res.subs(self.subs).subs(self.atmospheric_basis.substitutions)) else: return self._c[i, j] def g(self, i, j, k): if not self.stored: res = self.ip.ip_jac(self._F(i), self._F(j), self._F(k)) return float(res.subs(self.subs).subs(self.atmospheric_basis.substitutions)) else: return self._g[i, j, k] def gh(self, i, j, k): if self.connected_to_ocean: extra_subs = self.oceanic_basis.substitutions elif self.connected_to_ground: extra_subs = self.ground_basis.substitutions else: extra_subs = None if self.oceanic_basis or self.connected_to_ground: if self.stored and self._gh is not None: return self._gh[i, j, k] else: res = self.iip.ip_jac(self._F(i), self._F(j), self._phi(k)) return float(res.subs(self.subs).subs(self.atmospheric_basis.substitutions).subs(extra_subs)) else: return 0
MIT License
algorand/py-algorand-sdk
algosdk/v2client/models/account.py
Account.apps_total_schema
python
def apps_total_schema(self, apps_total_schema): self._apps_total_schema = apps_total_schema
Sets the apps_total_schema of this Account. :param apps_total_schema: The apps_total_schema of this Account. # noqa: E501 :type apps_total_schema: ApplicationStateSchema
https://github.com/algorand/py-algorand-sdk/blob/e444328616d99c88b830366dedcbf75e9795dcb3/algosdk/v2client/models/account.py#L221-L229
import pprint class Account(object): openapi_types = { "address": "str", "amount": "int", "amount_without_pending_rewards": "int", "apps_local_state": "list[ApplicationLocalState]", "apps_total_schema": "ApplicationStateSchema", "assets": "list[AssetHolding]", "created_apps": "list[Application]", "created_assets": "list[Asset]", "participation": "AccountParticipation", "pending_rewards": "int", "reward_base": "int", "rewards": "int", "round": "int", "status": "str", "sig_type": "str", "auth_addr": "str", } attribute_map = { "address": "address", "amount": "amount", "amount_without_pending_rewards": "amount-without-pending-rewards", "apps_local_state": "apps-local-state", "apps_total_schema": "apps-total-schema", "assets": "assets", "created_apps": "created-apps", "created_assets": "created-assets", "participation": "participation", "pending_rewards": "pending-rewards", "reward_base": "reward-base", "rewards": "rewards", "round": "round", "status": "status", "sig_type": "sig-type", "auth_addr": "auth-addr", } def __init__( self, address=None, amount=None, amount_without_pending_rewards=None, apps_local_state=None, apps_total_schema=None, assets=None, created_apps=None, created_assets=None, participation=None, pending_rewards=None, reward_base=None, rewards=None, round=None, status=None, sig_type=None, auth_addr=None, ): self._address = None self._amount = None self._amount_without_pending_rewards = None self._apps_local_state = None self._apps_total_schema = None self._assets = None self._created_apps = None self._created_assets = None self._participation = None self._pending_rewards = None self._reward_base = None self._rewards = None self._round = None self._status = None self._sig_type = None self._auth_addr = None self.address = address self.amount = amount self.amount_without_pending_rewards = amount_without_pending_rewards if apps_local_state is not None: self.apps_local_state = apps_local_state if apps_total_schema is not None: self.apps_total_schema = apps_total_schema if assets is not None: self.assets = assets if created_apps is not None: self.created_apps = created_apps if created_assets is not None: self.created_assets = created_assets if participation is not None: self.participation = participation self.pending_rewards = pending_rewards if reward_base is not None: self.reward_base = reward_base self.rewards = rewards self.round = round self.status = status if sig_type is not None: self.sig_type = sig_type if auth_addr is not None: self.auth_addr = auth_addr @property def address(self): return self._address @address.setter def address(self, address): self._address = address @property def amount(self): return self._amount @amount.setter def amount(self, amount): self._amount = amount @property def amount_without_pending_rewards(self): return self._amount_without_pending_rewards @amount_without_pending_rewards.setter def amount_without_pending_rewards(self, amount_without_pending_rewards): self._amount_without_pending_rewards = amount_without_pending_rewards @property def apps_local_state(self): return self._apps_local_state @apps_local_state.setter def apps_local_state(self, apps_local_state): self._apps_local_state = apps_local_state @property def apps_total_schema(self): return self._apps_total_schema @apps_total_schema.setter
MIT License
nonegg/aredis
aredis/commands/server.py
ServerCommandMixin.slowlog_len
python
async def slowlog_len(self): return await self.execute_command('SLOWLOG LEN')
Gets the number of items in the slowlog
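A minimal async sketch, assuming a Redis server reachable on localhost; the host and port are placeholders.

import asyncio
import aredis

async def main():
    client = aredis.StrictRedis(host="127.0.0.1", port=6379)   # placeholder connection details
    print(await client.slowlog_len())                          # number of entries in the slowlog

asyncio.run(main())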
https://github.com/nonegg/aredis/blob/b46e67163692cd0796763e5c9e17394821d9280c/aredis/commands/server.py#L292-L294
import datetime from aredis.exceptions import RedisError from aredis.utils import (b, bool_ok, nativestr, dict_merge, string_keys_to_dict, list_keys_to_dict, pairs_to_dict, NodeFlag) def parse_slowlog_get(response, **options): return [{ 'id': item[0], 'start_time': int(item[1]), 'duration': int(item[2]), 'command': b(' ').join(item[3]) } for item in response] def parse_client_list(response, **options): clients = [] for c in nativestr(response).splitlines(): clients.append(dict([pair.split('=', 1) for pair in c.split(' ')])) return clients def parse_config_get(response, **options): response = [nativestr(i) if i is not None else None for i in response] return response and pairs_to_dict(response) or {} def timestamp_to_datetime(response): if not response: return None try: response = int(response) except ValueError: return None return datetime.datetime.fromtimestamp(response) def parse_debug_object(response): response = nativestr(response) response = 'type:' + response response = dict([kv.split(':') for kv in response.split()]) int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle') for field in int_fields: if field in response: response[field] = int(response[field]) return response def parse_info(response): info = {} response = nativestr(response) def get_value(value): if ',' not in value or '=' not in value: try: if '.' in value: return float(value) else: return int(value) except ValueError: return value else: sub_dict = {} for item in value.split(','): k, v = item.rsplit('=', 1) sub_dict[k] = get_value(v) return sub_dict for line in response.splitlines(): if line and not line.startswith('#'): if line.find(':') != -1: key, value = line.split(':', 1) info[key] = get_value(value) else: info.setdefault('__raw__', []).append(line) return info def parse_role(response): role = nativestr(response[0]) def _parse_master(response): offset, slaves = response[1:] res = { 'role': role, 'offset': offset, 'slaves': [] } for slave in slaves: host, port, offset = slave res['slaves'].append({ 'host': host, 'port': int(port), 'offset': int(offset) }) return res def _parse_slave(response): host, port, status, offset = response[1:] return { 'role': role, 'host': host, 'port': port, 'status': status, 'offset': offset } def _parse_sentinel(response): return { 'role': role, 'masters': response[1:] } parser = { 'master': _parse_master, 'slave': _parse_slave, 'sentinel': _parse_sentinel }[role] return parser(response) class ServerCommandMixin: RESPONSE_CALLBACKS = dict_merge( string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True), string_keys_to_dict( 'FLUSHALL FLUSHDB SAVE ' 'SHUTDOWN SLAVEOF', bool_ok ), { 'ROLE': parse_role, 'SLOWLOG GET': parse_slowlog_get, 'SLOWLOG LEN': int, 'SLOWLOG RESET': bool_ok, 'CLIENT GETNAME': lambda r: r and nativestr(r), 'CLIENT KILL': bool_ok, 'CLIENT LIST': parse_client_list, 'CLIENT SETNAME': bool_ok, 'CLIENT PAUSE': bool_ok, 'CONFIG GET': parse_config_get, 'CONFIG RESETSTAT': bool_ok, 'CONFIG SET': bool_ok, 'DEBUG OBJECT': parse_debug_object, 'INFO': parse_info, 'LASTSAVE': timestamp_to_datetime, 'TIME': lambda x: (int(x[0]), int(x[1])), } ) async def bgrewriteaof(self): return await self.execute_command('BGREWRITEAOF') async def bgsave(self): return await self.execute_command('BGSAVE') async def client_kill(self, address): return await self.execute_command('CLIENT KILL', address) async def client_list(self): return await self.execute_command('CLIENT LIST') async def client_getname(self): return await self.execute_command('CLIENT GETNAME') async def 
client_setname(self, name): return await self.execute_command('CLIENT SETNAME', name) async def client_pause(self, timeout=0): return await self.execute_command('CLIENT PAUSE', timeout) async def config_get(self, pattern="*"): return await self.execute_command('CONFIG GET', pattern) async def config_set(self, name, value): return await self.execute_command('CONFIG SET', name, value) async def config_resetstat(self): return await self.execute_command('CONFIG RESETSTAT') async def config_rewrite(self): return await self.execute_command('CONFIG REWRITE') async def dbsize(self): return await self.execute_command('DBSIZE') async def debug_object(self, key): return await self.execute_command('DEBUG OBJECT', key) async def flushall(self): return await self.execute_command('FLUSHALL') async def flushdb(self): return await self.execute_command('FLUSHDB') async def info(self, section=None): if section is None: return await self.execute_command('INFO') else: return await self.execute_command('INFO', section) async def lastsave(self): return await self.execute_command('LASTSAVE') async def save(self): return await self.execute_command('SAVE') async def shutdown(self): try: await self.execute_command('SHUTDOWN') except ConnectionError: return raise RedisError("SHUTDOWN seems to have failed.") async def slaveof(self, host=None, port=None): if host is None and port is None: return await self.execute_command('SLAVEOF', b('NO'), b('ONE')) return await self.execute_command('SLAVEOF', host, port) async def slowlog_get(self, num=None): args = ['SLOWLOG GET'] if num is not None: args.append(num) return await self.execute_command(*args)
MIT License
onecommons/unfurl
unfurl/runtime.py
NodeInstance.get_self_and_descendents
python
def get_self_and_descendents(self): yield self for r in self.instances: for descendent in r.get_self_and_descendents(): yield descendent
Recursively yields self and all of its descendants.
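An illustrative traversal; root is assumed to be the root NodeInstance of an instantiated topology built elsewhere.

# hypothetical: `root` is an existing NodeInstance at the top of the instance tree
for instance in root.get_self_and_descendents():
    print(instance.name, instance.status)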
https://github.com/onecommons/unfurl/blob/68aa0a6e08e154de2159653bba017ff15b52d2a2/unfurl/runtime.py#L746-L751
import six from collections.abc import Mapping from ansible.parsing.dataloader import DataLoader from .util import UnfurlError, load_class, to_enum, make_temp_dir, ChainMap from .result import ResourceRef, ChangeAware from .support import AttributeManager, Defaults, Status, Priority, NodeState, Templar from .tosca import ( CapabilitySpec, RelationshipSpec, NodeSpec, TopologySpec, ArtifactSpec, ) import logging logger = logging.getLogger("unfurl") class Operational(ChangeAware): @property def priority(self): return Defaults.shouldRun @property def local_status(self): return Status.unknown @property def state(self): return NodeState.initial def get_operational_dependencies(self): return () def get_operational_dependents(self): return () @property def manual_overide_status(self): return None @property def operational(self): return self.status == Status.ok or self.status == Status.degraded @property def active(self): return self.status <= Status.error @property def present(self): return self.operational or self.status == Status.error @property def missing(self): return self.status == Status.pending or self.status == Status.absent @property def status(self): if self.manual_overide_status is not None: status = self.manual_overide_status if status >= Status.error: return status else: status = self.local_status if not status: return Status.unknown if status >= Status.error: return status dependentStatus = self.aggregate_status(self.get_operational_dependencies()) if dependentStatus is None: return status else: return max(status, dependentStatus) @property def required(self): return self.priority == Priority.required @property def last_state_change(self): return None @property def last_config_change(self): return None @property def last_change(self): if not self.last_state_change: return self.last_config_change elif not self.last_config_change: return self.last_state_change else: return max(self.last_state_change, self.last_config_change) def has_changed(self, changeset): if not self.last_change: return False if not changeset: return True return self.last_change > changeset.changeId @staticmethod def aggregate_status(statuses): aggregate = None for status in statuses: assert isinstance(status, Operational), status if aggregate is None: aggregate = Status.ok if status.priority == Priority.ignore: continue elif status.required and not status.operational: if status.status == Status.pending: aggregate = Status.pending else: aggregate = Status.error break else: if aggregate <= Status.degraded: if not status.operational or status.status == Status.degraded: aggregate = Status.degraded return aggregate class OperationalInstance(Operational): def __init__( self, status=None, priority=None, manualOveride=None, lastStateChange=None, lastConfigChange=None, state=None, ): if isinstance(status, OperationalInstance): self._localStatus = status._localStatus self._manualOverideStatus = status._manualOverideStatus self._priority = status._priority self._lastStateChange = status._lastStateChange self._lastConfigChange = status._lastConfigChange self._state = status._state self.dependencies = status.dependencies else: self._localStatus = to_enum(Status, status) self._manualOverideStatus = to_enum(Status, manualOveride) self._priority = to_enum(Priority, priority) self._lastStateChange = lastStateChange self._lastConfigChange = lastConfigChange self._state = state self.dependencies = [] def get_operational_dependencies(self): return self.dependencies def local_status(): doc = "The local_status property." 
def fget(self): return self._localStatus def fset(self, value): self._localStatus = value def fdel(self): del self._localStatus return locals() local_status = property(**local_status()) def manual_overide_status(): doc = "The manualOverideStatus property." def fget(self): return self._manualOverideStatus def fset(self, value): self._manualOverideStatus = value def fdel(self): del self._manualOverideStatus return locals() manual_overide_status = property(**manual_overide_status()) def priority(): doc = "The priority property." def fget(self): return Defaults.shouldRun if self._priority is None else self._priority def fset(self, value): self._priority = value def fdel(self): del self._priority return locals() priority = property(**priority()) @property def last_state_change(self): return self._lastStateChange @property def last_config_change(self): return self._lastConfigChange def state(): doc = "The state property." def fget(self): return self._state def fset(self, value): self._state = to_enum(NodeState, value) return locals() state = property(**state()) class _ChildResources(Mapping): def __init__(self, resource): self.resource = resource def __getitem__(self, key): return self.resource.find_resource(key) def __iter__(self): return iter(r.name for r in self.resource.get_self_and_descendents()) def __len__(self): return len(tuple(self.resource.get_self_and_descendents())) class EntityInstance(OperationalInstance, ResourceRef): attributeManager = None created = None shadow = None imports = None envRules = None _baseDir = "" def __init__( self, name="", attributes=None, parent=None, template=None, status=Status.ok ): OperationalInstance.__init__(self, status) self.name = name self._attributes = attributes or {} self.parent = parent if parent: p = getattr(parent, self.parentRelation) p.append(self) self.template = template or self.templateType() def _resolve(self, key): self.attributes[key] return self.attributes._attributes[key] def query(self, expr, vars=None, wantList=False): from .eval import Ref, RefContext return Ref(expr).resolve(RefContext(self, vars=vars), wantList) def local_status(): doc = "The working_dir property." 
def fget(self): return self._localStatus def fset(self, value): if self.root.attributeManager: self.root.attributeManager.set_status(self, value) self._localStatus = value def fdel(self): del self._localStatus return locals() local_status = property(**local_status()) def get_operational_dependencies(self): if self.parent and self.parent is not self.root: yield self.parent for d in self.dependencies: yield d @property def key(self): return f"{self.parent.key}::.{self.parentRelation[1:]}::{self.name}" def as_ref(self, options=None): return {"ref": self.key} @property def tosca_id(self): return self.key @property def tosca_name(self): return self.template.name @property def type(self): return self.template.type @property def base_dir(self): if self.shadow: return self.shadow.base_dir else: return self.root._baseDir @property def artifacts(self): return {} @property def attributes(self): if not self.root.attributeManager: if not self.attributeManager: self.attributeManager = AttributeManager() return self.attributeManager.get_attributes(self) return self.root.attributeManager.get_attributes(self) @property def names(self): return self.attributes def get_default_relationships(self, relation=None): return self.root.get_default_relationships(relation) def __eq__(self, other): if self is other: return True if self.__class__ != other.__class__: return False if self.root is not other.root: if self.shadow and self.shadow == other: return True if other.shadow and other.shadow == self: return True else: return False if not self.last_change: return False return self.last_change == other.last_change and self.key == other.key def __getstate__(self): state = self.__dict__.copy() if state.get("_templar"): del state["_templar"] if state.get("_interfaces"): state["_interfaces"] = {} if "attributeManager" in state: del state["attributeManager"] return state def __repr__(self): return f"{self.__class__}('{self.name}')" class CapabilityInstance(EntityInstance): parentRelation = "_capabilities" templateType = CapabilitySpec _relationships = None @property def relationships(self): if self._relationships is None: self._relationships = [] if len(self._relationships) != len(self.template.relationships): instantiated = {id(c.template) for c in self._relationships} for template in self.template.relationships: if id(template) not in instantiated: rel = RelationshipInstance( template.name, parent=self, template=template ) assert rel in self._relationships sourceNode = self.root.find_resource(template.source.name) if sourceNode: rel.source = sourceNode return self._relationships @property def key(self): return f"{self.parent.key}::.{'capabilities'}::[.name={self.name}]" class RelationshipInstance(EntityInstance): parentRelation = "_relationships" templateType = RelationshipSpec source = None @property def target(self): return self.parent.parent if self.parent else None @property def key(self): if self.source: return f"{self.source.key}::.requirements::[.name={self.name}]" elif self.parent is self.root: return f"::.requirements::[.name={self.name}]" else: return f"{self.parent.key}::.relationships::[.name={self.name}]" def merge_props(self, matchfn): env = {} capability = self.parent for name, val in capability.template.find_props(capability.attributes, matchfn): if val is not None: env[name] = val for name, val in self.template.find_props(self.attributes, matchfn): if val is not None: env[name] = val return env class ArtifactInstance(EntityInstance): parentRelation = "_artifacts" templateType = ArtifactSpec def __init__( 
self, name="", attributes=None, parent=None, template=None, status=None ): EntityInstance.__init__(self, name, attributes, parent, template, status) @property def base_dir(self): return self.template.base_dir @property def file(self): return self.template.file @property def repository(self): return self.template.repository def get_path(self, resolver=None): return self.template.get_path_and_fragment(resolver) def get_path_and_fragment(self, resolver=None, tpl=None): return self.template.get_path_and_fragment(resolver, tpl) def as_import_spec(self): return self.template.as_import_spec() def get_operational_dependencies(self): for d in self.dependencies: yield d class NodeInstance(EntityInstance): templateType = NodeSpec parentRelation = "instances" def __init__( self, name="", attributes=None, parent=None, template=None, status=None ): if parent: if parent.root.find_resource(name): raise UnfurlError( f'can not create node instance "{name}", its name is already in use' ) self._capabilities = [] self._requirements = [] self._artifacts = [] self._named_artifacts = None self.instances = [] EntityInstance.__init__(self, name, attributes, parent, template, status) if self.root is self: self._all = _ChildResources(self) self._templar = Templar(DataLoader()) self._interfaces = {} self.get_interface("inherit") self.get_interface("default") def _find_relationship(self, relationship): assert relationship and relationship.capability and relationship.target targetNodeInstance = self.root.find_resource(relationship.target.name) assert ( targetNodeInstance ), f"target instance {relationship.target.name} should have been already created" for cap in targetNodeInstance.capabilities: if cap.template is relationship.capability: for relInstance in cap.relationships: if relInstance.template is relationship: return relInstance return None @property def requirements(self): if len(self._requirements) != len(self.template.requirements): instantiated = {id(r.template) for r in self._requirements} for name, template in self.template.requirements.items(): assert template.relationship if id(template.relationship) not in instantiated: relInstance = self._find_relationship(template.relationship) if not relInstance: raise UnfurlError( f'can not find relation instance for requirement "{name}" on node "{self.name}"' ) assert template.relationship is relInstance.template assert self.template is relInstance.template.source self._requirements.append(relInstance) return self._requirements def get_requirements(self, match): if isinstance(match, six.string_types): return [r for r in self.requirements if r.template.name == match] elif isinstance(match, NodeInstance): return [r for r in self.requirements if r.target == match] elif isinstance(match, CapabilityInstance): return [r for r in self.requirements if r.parent == match] elif isinstance(match, ArtifactInstance): return [] else: raise UnfurlError(f'invalid match for get_requirements: "{match}"') @property def capabilities(self): if len(self._capabilities) != len(self.template.capabilities): instantiated = {id(c.template) for c in self._capabilities} for name, template in self.template.capabilities.items(): if id(template) not in instantiated: cap = CapabilityInstance( template.name, parent=self, template=template ) assert cap in self._capabilities return self._capabilities def get_capabilities(self, name): return [ capability for capability in self.capabilities if capability.template.name == name ] @property def artifacts(self) -> dict: if self._named_artifacts is None: 
self._named_artifacts = {} instantiated = {a.name: a for a in self._artifacts} for name, template in self.template.artifacts.items(): artifact = instantiated.get(name) if not artifact: artifact = ArtifactInstance( template.name, parent=self, template=template ) assert artifact in self._artifacts self._named_artifacts[template.name] = artifact return self._named_artifacts def _get_default_relationships(self, relation=None): if self.root is self: return for rel in self.root.get_default_relationships(relation): for capability in self.capabilities: if rel.template.matches_target(capability.template): yield rel def get_default_relationships(self, relation=None): return list(self._get_default_relationships(relation)) @property def names(self): return ChainMap( self.attributes, {r.name: r.parent for r in self.requirements}, {c.name: c for c in self.capabilities}, ) def _resolve(self, key): try: self.attributes[key] return self.attributes._attributes[key] except KeyError: try: inherit = self._interfaces.get("inherit") if inherit: return inherit(key) else: raise except KeyError: default = self._interfaces.get("default") if default: return default(key) else: raise @property def key(self): return f"::{self.name}" def get_operational_dependencies(self): for dep in super().get_operational_dependencies(): yield dep for instance in self.requirements: if instance is not self.parent: yield instance def get_operational_dependents(self): seen = set() for cap in self.capabilities: for rel in cap.relationships: dep = rel.source if dep and id(dep) not in seen: seen.add(id(dep)) yield dep for instance in self.instances: if id(instance) not in seen: seen.add(id(instance)) yield instance
MIT License
microsoft/sara
transform.py
find_component
python
def find_component(ui_xml, bounds, rid): print(rid, bounds) components = ui_xml.findAll(attrs={ 'resource-id': rid }) if len(components) == 0: components = ui_xml.findAll(attrs={ 'bounds': bounds }) def _sort(x): if 'bounds' not in x.attrs: return -1 return check_bounds_overlap(parse_bounds(x['bounds']), parse_bounds(bounds)) components.sort(key=_sort, reverse=True) return components
Find components with bounds and resource-id
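A hedged usage sketch; the XML dump path, bounds string, and resource-id below are placeholders for a real UI hierarchy dump read with the module's own read_ui_xml helper.

from transform import read_ui_xml, find_component

ui_xml = read_ui_xml("window_dump.xml")                                              # placeholder dump file
matches = find_component(ui_xml, "[0,210][1080,426]", "com.example.app:id/login")    # placeholder bounds and id
if matches:
    print(matches[0].get("bounds"))   # candidates are sorted so the best-overlapping one comes first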
https://github.com/microsoft/sara/blob/4d4636a93fb0356686ca143722ec29a87205cd97/transform.py#L244-L262
import re import os import bs4 import math import json import codecs import argparse from lxml import etree from pprint import pprint from bs4 import BeautifulSoup source_width, source_height, source_dpi = None, None, None target_width, target_height, target_dpi = None, None, None BOUND_PATTERN = re.compile(r'\[([-+]?\d+),([-+]?\d+)\]\[[-+]?(\d+),([-+]?\d+)\]') VIEW_ATTR_PATTERN = re.compile(r'^(?P<classname>.*)\{(?P<address>.*)\s(?P<visible>.)(?P<focusable>.)(?P<enabled>.)(?P<draw_mask>.)(?P<scroll_horiz>.)(?P<scroll_verti>.)(?P<clickable>.)(?P<long_clickable>.)((?P<context_clickable>.)\s|\s)(.+)\s(?P<left>-?\d+),(?P<top>-?\d+)\-(?P<right>-?\d+),(?P<bottom>-?\d+)((\s(?P<view_id>#[a-zA-Z0-9]+)\s(?P<resource_id>.+))|(\s*(.*)))\}') REPLAY_ACTION_PATTERN = re.compile(r'\[ReplayAction\]-(?P<action_count>\d+):\s+\[(?P<action>.*)\]\-(?P<info>.*)') REPLAY_VIEW_INSTRUMENTATION_PATTERN = re.compile(r'\[ReplayViewInstrumentation\]:\s+(.*)') REPLAY_TIME_INTERVAL_PATTERN = re.compile(r'\[ReplayTimeInterval\]-(\d+):\s+(.*)') def replace_package_name(rid, package_name): if rid is None: return '' idx = rid.find(':id/') if idx == -1: return rid prefix = rid[:idx] if prefix.lower() == 'app': return package_name + rid[idx:] return rid def parse_resolution(resolution): resolution = resolution.strip() splits = resolution.split(',') return int(splits[0]), int(splits[1]), int(splits[2]) def parse_bounds(bounds): match = BOUND_PATTERN.match(bounds) return int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4)) def get_bounds(left, top, right, bottom): return '[%d,%d][%d,%d]' % (left, top, right, bottom) def parse_view(line): view_str = line.strip() match = VIEW_ATTR_PATTERN.match(view_str) if match is None: return None view_attrs = dict() for attr in ['classname', 'address', 'visible', 'focusable', 'enabled', 'draw_mask', 'scroll_horiz', 'scroll_verti', 'clickable', 'long_clickable', 'context_clickable', 'left', 'top', 'right', 'bottom', 'view_id', 'resource_id']: if attr in ['visible', 'focusable', 'enabled', 'draw_mask', 'scroll_horiz', 'scroll_verti', 'clickable', 'long_clickable', 'context_clickable']: value = match.group(attr) if attr == 'visible': if value == 'V': view_attrs[attr] = 'true' else: view_attrs[attr] = 'false' else: if value == '.': view_attrs[attr] = 'false' else: view_attrs[attr] = 'true' else: view_attrs[attr] = match.group(attr) return view_attrs def read_ui_xml(xml_file): with codecs.open(xml_file, 'r', encoding='utf8') as f: return BeautifulSoup(f.read(), 'lxml') def lxml_read_ui_xml(xml_file): return etree.parse(xml_file) def px2dp(px, dpi): return (px * 160) / dpi def dp2px(dp, dpi): return dp * (dpi / 160) def transform_coordinate(source_x, source_y): source_x_dp, source_y_dp = px2dp(source_x, source_dpi), px2dp(source_y, source_dpi) target_x, target_y = dp2px(source_x_dp, target_dpi), dp2px(source_y_dp, target_dpi) return target_x, target_y def parse_action(log, match): action_count = int(match.group('action_count')) action = match.group('action') info = json.loads(match.group('info')) action_block = { 'action_count': action_count, 'action': action, 'info': info, 'plid': info['plid'], 'package': info['package'] } return action_block def parse_instrumentation(log, match): info = json.loads(match.group(1))['payload'] if 'action_count' not in info: return None return info def parse_time_interval(log, match): action_count = int(match.group(1)) info = json.loads(match.group(2)) info['action_count'] = action_count return info def parse_trace(trace_file): actions = 
list() instrumentations = list() time_intervals = list() with open(trace_file, 'r') as f: for log in f: log = log.strip() match = REPLAY_ACTION_PATTERN.match(log) if match: actions.append(parse_action(log, match)) continue match = REPLAY_VIEW_INSTRUMENTATION_PATTERN.match(log) if match: instrument = parse_instrumentation(log, match) if instrument: instrumentations.append(instrument) continue match = REPLAY_TIME_INTERVAL_PATTERN.match(log) if match: time_intervals.append(parse_time_interval(log, match)) continue for instrumentation in reversed(instrumentations): action_count = int(instrumentation['action_count']) for action in actions: if action['action_count'] == action_count: if 'instrumentation' not in action: action['instrumentation'] = list() action['instrumentation'].append(instrumentation) for ti in time_intervals: action_count = ti['action_count'] _candidate = None _candidate_action_count = -1 for action in actions: if action['action_count'] == action_count: action['time_interval'] = ti['interval'] _candidate = None break if action['action_count'] < action_count and action['action_count'] > _candidate_action_count: _candidate = action _candidate_action_count = action['action_count'] if _candidate is not None: _candidate['time_interval'] += ti['interval'] pprint(actions) return actions def get_view_hierarchy_component_bounds(component): width, height = int(component['right']) - int(component['left']), int(component['bottom']) - int(component['top']) elements = list(component.parents) elements.reverse() elements.append(component) left, top = 0, 0 for element in elements: if 'left' in element.attrs and len(element['left']) > 0: left += int(element['left']) top += int(element['top']) return get_bounds(left, top, left + width, top + height) def find_best_match_candidate(candidates, x, y): best_match_candidate = None best_match_x_dis, best_match_y_dis = math.inf, math.inf for cc in candidates: left, top, right, bottom = cc['abs_left'], cc['abs_top'], cc['abs_right'], cc['abs_bottom'] if left <= x <= right and top <= y <= bottom: x_dis = min(x - left, right - x) y_dis = min(y - top, bottom - y) if x_dis <= best_match_x_dis and y_dis <= best_match_y_dis: best_match_candidate = cc best_match_x_dis = x_dis best_match_y_dis = y_dis return best_match_candidate def check_bounds_overlap(bounds_1, bounds_2): left_1, top_1, right_1, bottom_1 = bounds_1[0], bounds_1[1], bounds_1[2], bounds_1[3] left_2, top_2, right_2, bottom_2 = bounds_2[0], bounds_2[1], bounds_2[2], bounds_2[3] width_1 = right_1 - left_1 height_1 = bottom_1 - top_1 width_2 = right_2 - left_2 height_2 = bottom_2 - top_2 overlap_width = width_1 + width_2 - (max(left_1 + width_1, left_2 + width_2) - min(left_1, left_2)) overlap_height = height_1 + height_2 - (max(top_1 + height_1, top_2 + height_2) - min(top_1, top_2)) if overlap_height <= 0 or overlap_width <= 0: return False overlap_area = overlap_height * overlap_width bounds_1_area = width_1 * height_1 bounds_2_area = width_2 * height_2 ratio = overlap_area / (bounds_1_area + bounds_2_area - overlap_area) return ratio
MIT License
kubernetes-client/python
kubernetes/client/models/v1_ingress_class_list.py
V1IngressClassList.metadata
python
def metadata(self, metadata): self._metadata = metadata
Sets the metadata of this V1IngressClassList.

        :param metadata: The metadata of this V1IngressClassList.  # noqa: E501
        :type: V1ListMeta
https://github.com/kubernetes-client/python/blob/96dade6021dc2e9ee1430172e1b65d9e9e232b10/kubernetes/client/models/v1_ingress_class_list.py#L151-L159
import pprint import re import six from kubernetes.client.configuration import Configuration class V1IngressClassList(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[V1IngressClass]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def items(self): return self._items @items.setter def items(self, items): if self.local_vars_configuration.client_side_validation and items is None: raise ValueError("Invalid value for `items`, must not be `None`") self._items = items @property def kind(self): return self._kind @kind.setter def kind(self, kind): self._kind = kind @property def metadata(self): return self._metadata @metadata.setter
Apache License 2.0
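A brief usage sketch for the metadata setter shown above, assuming a version of the kubernetes Python client that ships the networking/v1 IngressClass models; the field values are placeholders.

# Assumes `pip install kubernetes`; values are placeholders.
from kubernetes import client

ingress_classes = client.V1IngressClassList(items=[])                # `items` must not be None
ingress_classes.metadata = client.V1ListMeta(resource_version="1")   # goes through the setter above
print(ingress_classes.metadata.resource_version)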
fastnlp/fastnlp
fastNLP/models/bert.py
BertForMultipleChoice.predict
python
def predict(self, words):
        logits = self.forward(words)[Const.OUTPUT]
        return {Const.OUTPUT: torch.argmax(logits, dim=-1)}
:param torch.LongTensor words: [batch_size, num_choices, seq_len]
        :return: { :attr:`fastNLP.Const.OUTPUT` : logits}: torch.LongTensor [batch_size]
https://github.com/fastnlp/fastnlp/blob/3cb01d15d8bc7d10f292b12e8fa803087d37e887/fastNLP/models/bert.py#L185-L191
__all__ = [ "BertForSequenceClassification", "BertForSentenceMatching", "BertForMultipleChoice", "BertForTokenClassification", "BertForQuestionAnswering" ] import warnings import torch from torch import nn from .base_model import BaseModel from ..core._logger import logger from ..core.const import Const from ..embeddings.bert_embedding import BertEmbedding class BertForSequenceClassification(BaseModel): def __init__(self, embed: BertEmbedding, num_labels: int=2, dropout=0.1): super(BertForSequenceClassification, self).__init__() self.num_labels = num_labels self.bert = embed self.dropout = nn.Dropout(p=dropout) self.classifier = nn.Linear(self.bert.embedding_dim, num_labels) if not self.bert.model.include_cls_sep: self.bert.model.include_cls_sep = True warn_msg = "Bert for sequence classification excepts BertEmbedding `include_cls_sep` True, " "but got False. FastNLP has changed it to True." logger.warning(warn_msg) warnings.warn(warn_msg) def forward(self, words): hidden = self.dropout(self.bert(words)) cls_hidden = hidden[:, 0] logits = self.classifier(cls_hidden) if logits.size(-1) == 1: logits = logits.squeeze(-1) return {Const.OUTPUT: logits} def predict(self, words): logits = self.forward(words)[Const.OUTPUT] if self.num_labels > 1: return {Const.OUTPUT: torch.argmax(logits, dim=-1)} else: return {Const.OUTPUT: logits} class BertForSentenceMatching(BaseModel): def __init__(self, embed: BertEmbedding, num_labels: int=2, dropout=0.1): super(BertForSentenceMatching, self).__init__() self.num_labels = num_labels self.bert = embed self.dropout = nn.Dropout(p=dropout) self.classifier = nn.Linear(self.bert.embedding_dim, num_labels) if not self.bert.model.include_cls_sep: self.bert.model.include_cls_sep = True warn_msg = "Bert for sentence matching excepts BertEmbedding `include_cls_sep` True, " "but got False. FastNLP has changed it to True." logger.warning(warn_msg) warnings.warn(warn_msg) def forward(self, words): hidden = self.bert(words) cls_hidden = self.dropout(hidden[:, 0]) logits = self.classifier(cls_hidden) return {Const.OUTPUT: logits} def predict(self, words): logits = self.forward(words)[Const.OUTPUT] return {Const.OUTPUT: torch.argmax(logits, dim=-1)} class BertForMultipleChoice(BaseModel): def __init__(self, embed: BertEmbedding, num_choices=2, dropout=0.1): super(BertForMultipleChoice, self).__init__() self.num_choices = num_choices self.bert = embed self.dropout = nn.Dropout(p=dropout) self.classifier = nn.Linear(self.bert.embedding_dim, 1) if not self.bert.model.include_cls_sep: self.bert.model.include_cls_sep = True warn_msg = "Bert for multiple choice excepts BertEmbedding `include_cls_sep` True, " "but got False. FastNLP has changed it to True." logger.warning(warn_msg) warnings.warn(warn_msg) def forward(self, words): batch_size, num_choices, seq_len = words.size() input_ids = words.view(batch_size * num_choices, seq_len) hidden = self.bert(input_ids) pooled_output = self.dropout(hidden[:, 0]) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) return {Const.OUTPUT: reshaped_logits}
Apache License 2.0
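BertForMultipleChoice.predict reduces the [batch_size, num_choices] logits returned by forward to one chosen index per example. A minimal sketch of that reduction with plain PyTorch; the logits tensor is fabricated rather than produced by a real BertEmbedding.

# Assumes only that torch is installed; the logits are made up for illustration.
import torch

logits = torch.tensor([[0.1, 2.3, -0.4],    # example 1: choice 1 scores highest
                       [1.5, 0.2, 0.9]])    # example 2: choice 0 scores highest
predictions = torch.argmax(logits, dim=-1)  # the same reduction predict() applies
print(predictions)  # tensor([1, 0])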
seetaresearch/dragon
tensorrt/python/core/engine.py
check_input_validity
python
def check_input_validity(index, array, binding):
    binding._set_shape(array.shape)
    if array.dtype != binding.dtype:
        if array.dtype == numpy.int64 and binding.dtype == numpy.int32:
            casted_array = numpy.array(array, copy=True, dtype='int32')
            if numpy.equal(array, casted_array).all():
                array = casted_array
            else:
                raise TypeError(
                    'Wrong dtype for input %i.\n'
                    'Expected %s, got %s. Cannot safely cast.'
                    % (index, binding.dtype, array.dtype))
        else:
            raise TypeError(
                'Wrong dtype for input %i.\n'
                'Expected %s, got %s.'
                % (index, binding.dtype, array.dtype))
    return array
Check the input validity.
https://github.com/seetaresearch/dragon/blob/3dfb6ea55d90d2fb2da9b1b471f5e1e7d7667810/tensorrt/python/core/engine.py#L418-L437
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy try: from pycuda import driver from pycuda import gpuarray from pycuda import autoinit import tensorrt as trt TRT_LOGGER = trt.Logger(trt.Logger.WARNING) except ImportError: from dragon.core.util import deprecation driver = deprecation.NotInstalled('pycuda') gpuarray = deprecation.NotInstalled('pycuda') autoinit = deprecation.NotInstalled('pycuda') trt = deprecation.NotInstalled('tensorrt') TRT_LOGGER = deprecation.NotInstalled('tensorrt') from dragon.core.framework import device_spec from dragon.core.framework import workspace from dragon.core.framework.tensor import Tensor from dragon.core.util import logging from dragon.core.util import six class Binding(object): def __init__(self, cuda_engine, execution_context, idx_or_name, device_id): self._device_id = device_id self._context = execution_context if isinstance(idx_or_name, six.string_types): self._name = idx_or_name self._index = cuda_engine.get_binding_index(self._name) if self._index == -1: raise IndexError('Binding name not found: %s' % self._name) else: self._index = idx_or_name self._name = cuda_engine.get_binding_name(self._index) if self._name is None: raise IndexError('Binding index out of range: %i' % self._index) dtype_map = { trt.DataType.FLOAT: 'float32', trt.DataType.HALF: 'float16', trt.DataType.INT8: 'int8', } if hasattr(trt.DataType, 'INT32'): dtype_map[trt.DataType.INT32] = 'int32' self._is_input = cuda_engine.binding_is_input(self._index) self._dtype = dtype_map[cuda_engine.get_binding_dtype(self._index)] self._shape = tuple(cuda_engine.get_binding_shape(self.index)) self._has_dynamic_shape = -1 in self._shape self._host_buf, self._device_buf = None, None self._host_tensor, self._device_tensor = None, None self._host_opt, self._device_opt = None, None @property def device_buffer(self): if self._device_buf is None: self._device_buf = gpuarray.empty(self._shape, self._dtype) return self._device_buf @property def device_dlpack(self): if self._device_tensor is None: spec = device_spec.DeviceSpec('cuda', self.device_id) self._device_opt = spec.to_proto(serialized=True) default_ws = workspace.get_workspace() impl = default_ws.create_tensor(scope='DLPack') impl.FromPointer(self._shape, self._dtype, self._device_opt, self.device_buffer.ptr) self._device_tensor = Tensor(impl=impl, deleter=default_ws._handle_pool) return self._device_tensor._impl.ToDLPack(self._device_opt, True) @property def device_id(self): return self._device_id @property def dtype(self): return self._dtype @property def host_buffer(self): if self._host_buf is None: self._host_buf = driver.pagelocked_empty(self.shape, self.dtype) return self._host_buf @property def host_dlpack(self): if self._host_tensor is None: spec = device_spec.DeviceSpec('cpu') self._host_opt = spec.to_proto(serialized=True) default_ws = workspace.get_workspace() impl = default_ws.create_tensor(scope='DLPack') impl.FromPointer(self._shape, self._dtype, self._host_opt, self.host_buffer.ctypes.data) self._host_tensor = Tensor(impl=impl, deleter=default_ws._handle_pool) return self._host_tensor._impl.ToDLPack(self._host_opt, True) @property def index(self): return self._index @property def is_input(self): return self._is_input @property def name(self): return self._name @property def shape(self): return self._shape def get_async(self, stream): src = self.device_buffer dst = self.host_buffer src.get_async(stream, dst) return dst def _check_size(self, new_shape): if 
self._shape != new_shape: if not (self._shape == (1,) and new_shape == ()): return True return False def _reset_buffer(self): self._host_buf, self._device_buf = None, None self._host_tensor, self._device_tensor = None, None self._host_opt, self._device_opt = None, None def _set_shape(self, new_shape=None): if self._is_input: new_shape = tuple(new_shape) if self._check_size(new_shape): if self._has_dynamic_shape: self._shape = new_shape self._context.set_binding_shape(self._index, new_shape) self._reset_buffer() else: raise ValueError( 'Wrong shape for input "%s".\n' 'Expected %s, got %s.' % (self._name, self._shape, new_shape) ) else: new_shape = tuple(self._context.get_binding_shape(self._index)) if self._check_size(new_shape): self._shape = new_shape self._reset_buffer() class Engine(object): def __init__(self, cuda_engine, device_id=0): self._cuda_engine = cuda_engine self._device_id = device_id self._context = cuda_engine.create_execution_context() self._stream = driver.Stream(0) num_binding = self._cuda_engine.num_bindings self._bindings = [Binding(cuda_engine, self._context, i, device_id) for i in range(num_binding)] self._inputs = [b for b in self._bindings if b.is_input] self._outputs = [b for b in self._bindings if not b.is_input] logging.info('TensorRT engine built.') binding_info = 'InputInfo: {\n' for b in self._inputs: binding_info += ' * Binding("{}", shape={}, dtype={})\n' .format(b.name, b.shape, b.dtype) logging.info(binding_info + '}') binding_info = 'OutputInfo: {\n' for b in self._outputs: binding_info += ' * Binding("{}", shape={}, dtype={})\n' .format(b.name, b.shape, b.dtype) logging.info(binding_info + '}') @property def cuda_engine(self): return self._cuda_engine @property def inputs(self): return self._inputs @property def outputs(self): return self._outputs def get_results(self): return [output.get_async(self._stream) for output in self._outputs] def run(self, inputs, optimization_profile=None): if len(inputs) < len(self.inputs): raise ValueError( 'Not enough inputs. Expected %i, got %i.' % (len(self.inputs), len(inputs))) if isinstance(inputs, dict): inputs = [inputs[b.name] for b in self.inputs] for i, (array, binding) in enumerate(zip(inputs, self.inputs)): array = check_input_validity(i, array, binding) binding.device_buffer.set_async(array, self._stream) for binding in self.outputs: binding._set_shape() if optimization_profile is not None: self._context.active_optimization_profile = optimization_profile binding_pointers = [b.device_buffer.ptr for b in self._bindings] self._context.execute_async_v2(binding_pointers, self._stream.handle) results = self.get_results() self._stream.synchronize() return results def __del__(self): if self._cuda_engine is not None: del self._cuda_engine
BSD 2-Clause Simplified License
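check_input_validity only downcasts int64 inputs to int32 when the cast loses no information. A numpy-only sketch of that check, detached from the TensorRT Binding object:

import numpy

def safe_cast_int64_to_int32(array):
    # Mirrors the lossless-cast check above: cast, then verify element-wise equality.
    casted = numpy.array(array, copy=True, dtype='int32')
    if numpy.equal(array, casted).all():
        return casted
    raise TypeError('Cannot safely cast %s to int32.' % array.dtype)

print(safe_cast_int64_to_int32(numpy.array([1, 2, 3], dtype='int64')).dtype)  # int32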
dragonfly/dragonfly
examples/synthetic/multiobjective_park/multiobjective_park.py
park1_euc
python
def park1_euc(x):
    max_val = 25.5872304
    x1 = x[0]
    x2 = x[1]
    x3 = x[2]
    x4 = x[3]
    ret1 = (x1/2) * (np.sqrt(1 + (x2 + x3**2)*x4/(x1**2 + 0.00001)) - 1)
    ret2 = (x1 + 3*x4) * np.exp(1 + np.sin(x3))
    return min(ret1 + ret2, max_val)
Computes the park1 function.
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/examples/synthetic/multiobjective_park/multiobjective_park.py#L26-L35
import numpy as np


def _get_01_coords(x):
    x1 = x[0][0]
    x2 = x[0][1]
    x3 = float(x[1]) / (194.0 - 103.0)
    x4 = x[2] - 10.0
    return [x1, x2, x3, x4]
MIT License
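A quick sanity check of park1_euc on an arbitrary point; the function body is copied from above and the input values are made up.

import numpy as np

def park1_euc(x):
    # Copy of the objective above; output is capped at its known maximum.
    max_val = 25.5872304
    x1, x2, x3, x4 = x
    ret1 = (x1 / 2) * (np.sqrt(1 + (x2 + x3 ** 2) * x4 / (x1 ** 2 + 0.00001)) - 1)
    ret2 = (x1 + 3 * x4) * np.exp(1 + np.sin(x3))
    return min(ret1 + ret2, max_val)

print(park1_euc([0.5, 0.5, 0.5, 0.5]))  # roughly 8.9, well below max_val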
flatironinstitute/disbatch
disBatch.py
BatchContext.retireNode
python
def retireNode(self, node, ret):
        if ret:
            self.error = True
        if self.retireCmd:
            logger.info('Retiring node "%s" with command %s', node, str(self.retireCmd))
            env = self.retireEnv(node, ret)
            try:
                SUB.check_call(self.retireCmd, close_fds=True, shell=True, env=env)
            except Exception as e:
                logger.warn('Retirement planning needs improvement: %s', repr(e))
        else:
            logger.info('Retiring node "%s" (no-op)', node)
Called when a node has exited. May be overridden to release resources.
https://github.com/flatironinstitute/disbatch/blob/7997f3ddcd126df494670d1f905b4c8fcef1785b/disBatch.py#L165-L176
from __future__ import print_function import json, logging, os, random, re, signal, socket, subprocess as SUB, sys, time from multiprocessing import Process as mpProcess, Queue as mpQueue try: from queue import Queue, Empty except ImportError: from Queue import Queue, Empty from threading import BoundedSemaphore, Thread, Lock from ast import literal_eval DisBatchPath, ImportDir, PathsFixed = None, None, False if '__main__' == __name__ and sys.argv[1:] == ["--fix-paths"]: import tempfile DisBatchPath = os.path.realpath(__file__) if not os.path.exists(DisBatchPath): print('Unable to find myself; set DisBatchPath and ImportDir manually at the top of disBatch.py.', file=sys.stderr) sys.exit(1) DisBatchDir = os.path.dirname(DisBatchPath) with open(DisBatchPath, 'r') as fi: with tempfile.NamedTemporaryFile('w', prefix='disBatch.py.', dir=DisBatchDir, delete=False) as fo: found = False for l in fi: if l.startswith('DisBatchPath, ImportDir, PathsFixed ='): assert not found found = True l = 'DisBatchPath, ImportDir, PathsFixed = %r, %r, True\n'%(DisBatchPath, DisBatchDir) print("Changing path info to %r"%l, file=sys.stderr) fo.write(l) assert found os.fchmod(fo.fileno(), os.fstat(fi.fileno()).st_mode) os.rename(DisBatchPath, DisBatchPath+'.prev') os.rename(fo.name, DisBatchPath) sys.exit(0) if not PathsFixed: DisBatchPath = os.path.realpath(__file__) ImportDir = os.path.dirname(DisBatchPath) PythonPath = os.getenv('PYTHONPATH', '') if ImportDir: sys.path.append(ImportDir) PythonPath = PythonPath + ':' + ImportDir if PythonPath else ImportDir os.environ['PYTHONPATH'] = PythonPath try: import kvsstcp except ImportError: if PathsFixed: print('This script is looking in the wrong place for "kvssctp". Try running "%s --fix-paths" or editing it by hand.'%DisBatchPath, file=sys.stderr) else: print(''' Could not find kvsstcp. If there is a "kvsstcp" directory in "%s", try running "%s --fix-paths". Otherwise review the installation instructions. '''%(ImportDir, DisBatchPath), file=sys.stderr) sys.exit(1) myHostname = socket.gethostname() myPid = os.getpid() dbbarrier = re.compile('^#DISBATCH BARRIER(?: (.+)?)?$', re.I) dbcomment = re.compile('^\s*(#|$)') dbprefix = re.compile('^#DISBATCH PREFIX (.*)$', re.I) dbrepeat = re.compile('^#DISBATCH REPEAT\s+(?P<repeat>[0-9]+)(?:\s+start\s+(?P<start>[0-9]+))?(?:\s+step\s+(?P<step>[0-9]+))?(?: (?P<command>.+))?\s*$', re.I) dbsuffix = re.compile('^#DISBATCH SUFFIX (.*)$', re.I) dbpernode = re.compile('^#DISBATCH PERNODE (.*)$', re.I) TaskIdOOB = -1 CmdPoison = '!!Poison!!' 
def compHostnames(h0, h1): return h0.split('.', 1)[0] == h1.split('.', 1)[0] def logfile(context, suffix=''): f = context.name try: f += "_%s"%context.node except AttributeError: pass if suffix: f += "_%s"%suffix return f def waitTimeout(sub, timeout, interval=1): r = sub.poll() while r is None and timeout > 0: time.sleep(interval) timeout -= interval r = sub.poll() return r def killPatiently(sub, name, timeout=15): if not sub: return r = sub.poll() if r is None: logger.info('Waiting for %s to finish...', name) r = waitTimeout(sub, timeout) if r is None: logger.warn('Terminating %s...', name) try: sub.terminate() except OSError: pass r = waitTimeout(sub, timeout) if r is None: logger.warn('Killing %s.', name) try: sub.kill() except OSError: pass r = sub.wait() if r: logger.info("%s returned %d", name, r) return r class BatchContext(object): def __init__(self, sysid, jobid, nodes, cylinders): self.sysid, self.jobid, self.nodes, self.cylinders = sysid, jobid, nodes, cylinders self.wd = os.getcwd() self.error = False self.retireCmd = None def __str__(self): return 'Batch system: %s\nJobID: %s\nNodes: %r\nCylinders: %r\n'%(self.sysid, self.jobid, self.nodes, self.cylinders) def launch(self, kvs): kvs.put('.context', self) self.engines = dict() for n in self.nodes: self.engines[n] = self.launchNode(n) def poll(self): for n, e in list(self.engines.items()): r = e.poll() if r is not None: logger.info('Engine %s exited: %d', n, r) del self.engines[n] self.retireNode(n, r) def launchNode(self, node): raise NotImplementedError('%s.launchNode is not implemented' % type(self)) def retireEnv(self, node, ret): env = os.environ.copy() env['NODE'] = node env['RETCODE'] = str(ret) env['ACTIVE'] = ','.join(self.engines.keys()) env['RETIRED'] = ','.join(set(self.nodes).difference(self.engines)) return env
Apache License 2.0
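retireNode hands a node name and exit code to a user-supplied shell command through environment variables. A standalone sketch of that pattern using only the standard library; the echo command and the POSIX-style variable expansion are stand-ins for a real retirement script.

import os
import subprocess as SUB

def retire_node(node, ret, retire_cmd='echo retiring $NODE with code $RETCODE'):
    # Hypothetical stand-in for BatchContext.retireNode; the default command is illustrative
    # and relies on a POSIX shell for the $NODE/$RETCODE expansion.
    env = os.environ.copy()
    env['NODE'] = node
    env['RETCODE'] = str(ret)
    try:
        SUB.check_call(retire_cmd, close_fds=True, shell=True, env=env)
    except Exception as e:
        print('Retirement planning needs improvement: %r' % e)

retire_node('worker0042', 0)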
blockstream/satellite-api
server/order_helpers.py
compute_auth_token
python
def compute_auth_token(uuid):
    return hmac_sha256_digest(USER_AUTH_KEY, uuid)
Compute the authentication token for a given UUID
https://github.com/blockstream/satellite-api/blob/0fc668d490f558e02d821fe263722a9567857cb5/server/order_helpers.py#L19-L21
import datetime
import logging
import os

from flask import request
from sqlalchemy import and_, func

from bidding import calc_ota_msg_len, validate_bid
from constants import InvoiceStatus, OrderStatus
from database import db
from error import get_http_error_resp
from models import Order, RxConfirmation, TxConfirmation
import constants
from utils import hmac_sha256_digest

USER_AUTH_KEY = hmac_sha256_digest('user-token', constants.CHARGE_API_TOKEN)
MIT License
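compute_auth_token keys an HMAC-SHA256 digest with USER_AUTH_KEY, but the hmac_sha256_digest utility itself is not shown. One plausible hex-digest implementation, using only the standard library and placeholder secrets:

import hashlib
import hmac

def hmac_sha256_digest(key, msg):
    # Assumed implementation; the real utils.hmac_sha256_digest may differ.
    return hmac.new(key.encode(), msg.encode(), hashlib.sha256).hexdigest()

USER_AUTH_KEY = hmac_sha256_digest('user-token', 'charge-api-token-placeholder')
print(hmac_sha256_digest(USER_AUTH_KEY, 'example-order-uuid'))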
meltano/sdk
singer_sdk/streams/core.py
Stream.metadata
python
def metadata(self) -> MetadataMapping:
        if self._metadata is not None:
            return self._metadata

        if self._tap_input_catalog:
            catalog_entry = self._tap_input_catalog.get_stream(self.tap_stream_id)
            if catalog_entry:
                self._metadata = catalog_entry.metadata
                return self._metadata

        self._metadata = MetadataMapping.get_standard_metadata(
            schema=self.schema,
            replication_method=self.forced_replication_method,
            key_properties=self.primary_keys or [],
            valid_replication_keys=(
                [self.replication_key] if self.replication_key else None
            ),
            schema_name=None,
        )

        if self._tap_input_catalog is None:
            self._metadata.root.selected = True

        return self._metadata
Get stream metadata.

        Metadata attributes (`inclusion`, `selected`, etc.) are part of the Singer spec.

        Metadata from an input catalog will override standard metadata.

        Returns:
            A mapping from property breadcrumbs to metadata objects.
https://github.com/meltano/sdk/blob/5916ffee84eab15689558abe15757810dad5419e/singer_sdk/streams/core.py#L445-L478
import abc import copy import datetime import json import logging from os import PathLike from pathlib import Path from types import MappingProxyType from typing import ( Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Type, TypeVar, Union, cast, ) import pendulum import requests import singer from singer import RecordMessage, SchemaMessage, StateMessage from singer.schema import Schema from singer_sdk.exceptions import InvalidStreamSortException, MaxRecordsLimitException from singer_sdk.helpers._catalog import pop_deselected_record_properties from singer_sdk.helpers._compat import final from singer_sdk.helpers._singer import ( Catalog, CatalogEntry, MetadataMapping, SelectionMask, ) from singer_sdk.helpers._state import ( finalize_state_progress_markers, get_starting_replication_value, get_state_partitions_list, get_writeable_state_dict, increment_state, log_sort_error, reset_state_progress_markers, write_replication_key_signpost, write_starting_replication_value, ) from singer_sdk.helpers._typing import conform_record_data_types, is_datetime_type from singer_sdk.helpers._util import utc_now from singer_sdk.mapper import SameRecordTransform, StreamMap from singer_sdk.plugin_base import PluginBase as TapBaseClass REPLICATION_FULL_TABLE = "FULL_TABLE" REPLICATION_INCREMENTAL = "INCREMENTAL" REPLICATION_LOG_BASED = "LOG_BASED" FactoryType = TypeVar("FactoryType", bound="Stream") METRICS_LOG_LEVEL_SETTING = "metrics_log_level" class Stream(metaclass=abc.ABCMeta): STATE_MSG_FREQUENCY = 10000 _MAX_RECORDS_LIMIT: Optional[int] = None parent_stream_type: Optional[Type["Stream"]] = None ignore_parent_replication_key: bool = False def __init__( self, tap: TapBaseClass, schema: Optional[Union[str, PathLike, Dict[str, Any], Schema]] = None, name: Optional[str] = None, ) -> None: if name: self.name: str = name if not self.name: raise ValueError("Missing argument or class variable 'name'.") self.logger: logging.Logger = tap.logger self.tap_name: str = tap.name self._config: dict = dict(tap.config) self._tap = tap self._tap_state = tap.state self._tap_input_catalog: Optional[Catalog] = None self._stream_maps: Optional[List[StreamMap]] = None self.forced_replication_method: Optional[str] = None self._replication_key: Optional[str] = None self._primary_keys: Optional[List[str]] = None self._state_partitioning_keys: Optional[List[str]] = None self._schema_filepath: Optional[Path] = None self._metadata: Optional[MetadataMapping] = None self._mask: Optional[SelectionMask] = None self._schema: dict self.child_streams: List[Stream] = [] if schema: if isinstance(schema, (PathLike, str)): if not Path(schema).is_file(): raise FileNotFoundError( f"Could not find schema file '{self.schema_filepath}'." ) self._schema_filepath = Path(schema) elif isinstance(schema, dict): self._schema = schema elif isinstance(schema, Schema): self._schema = schema.to_dict() else: raise ValueError( f"Unexpected type {type(schema).__name__} for arg 'schema'." ) if self.schema_filepath: self._schema = json.loads(Path(self.schema_filepath).read_text()) if not self.schema: raise ValueError( f"Could not initialize schema for stream '{self.name}'. " "A valid schema object or filepath was not provided." ) @property def stream_maps(self) -> List[StreamMap]: if self._stream_maps: return self._stream_maps if self._tap.mapper: self._stream_maps = self._tap.mapper.stream_maps[self.name] self.logger.info( f"Tap has custom mapper. Using {len(self.stream_maps)} provided map(s)." 
) else: self.logger.info( f"No custom mapper provided for '{self.name}'. " "Using SameRecordTransform as default." ) self._stream_maps = [ SameRecordTransform( stream_alias=self.name, raw_schema=self.schema, key_properties=self.primary_keys, ) ] return self.stream_maps @property def is_timestamp_replication_key(self) -> bool: if not self.replication_key: return False type_dict = self.schema.get("properties", {}).get(self.replication_key) return is_datetime_type(type_dict) def get_starting_replication_key_value( self, context: Optional[dict] ) -> Optional[Any]: state = self.get_context_state(context) return get_starting_replication_value(state) def get_starting_timestamp( self, context: Optional[dict] ) -> Optional[datetime.datetime]: value = self.get_starting_replication_key_value(context) if value is None: return None if not self.is_timestamp_replication_key: raise ValueError( f"The replication key {self.replication_key} is not of timestamp type" ) return cast(datetime.datetime, pendulum.parse(value)) @final @property def selected(self) -> bool: return self.mask.get((), True) @final @property def has_selected_descendents(self) -> bool: for child in self.child_streams or []: if child.selected or child.has_selected_descendents: return True return False @final @property def descendent_streams(self) -> List["Stream"]: result: List[Stream] = list(self.child_streams) or [] for child in self.child_streams: result += child.descendent_streams or [] return result def _write_replication_key_signpost( self, context: Optional[dict], value: Union[datetime.datetime, str, int, float], ) -> None: if not value: return state = self.get_context_state(context) write_replication_key_signpost(state, value) def _write_starting_replication_value(self, context: Optional[dict]) -> None: value = None state = self.get_context_state(context) if self.replication_key: replication_key_value = state.get("replication_key_value") if replication_key_value and self.replication_key == state.get( "replication_key" ): value = replication_key_value elif "start_date" in self.config: value = self.config["start_date"] write_starting_replication_value(state, value) def get_replication_key_signpost( self, context: Optional[dict] ) -> Optional[Union[datetime.datetime, Any]]: if self.is_timestamp_replication_key: return utc_now() return None @property def schema_filepath(self) -> Optional[Path]: return self._schema_filepath @property def schema(self) -> dict: return self._schema @property def primary_keys(self) -> Optional[List[str]]: if not self._primary_keys: return [] return self._primary_keys @primary_keys.setter def primary_keys(self, new_value: List[str]) -> None: self._primary_keys = new_value @property def state_partitioning_keys(self) -> Optional[List[str]]: return self._state_partitioning_keys @state_partitioning_keys.setter def state_partitioning_keys(self, new_value: Optional[List[str]]) -> None: self._state_partitioning_keys = new_value @property def replication_key(self) -> Optional[str]: if not self._replication_key: return None return self._replication_key @replication_key.setter def replication_key(self, new_value: str) -> None: self._replication_key = new_value @property def is_sorted(self) -> bool: return False @property
Apache License 2.0
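The metadata property resolves in three steps: return the cached value, fall back to the tap's input catalog, then generate standard metadata. A plain-Python sketch of that precedence; the singer-sdk types are replaced with dictionaries.

class StreamSketch:
    # Illustrative stand-in for Stream; not the singer-sdk implementation.

    def __init__(self, input_catalog_metadata=None):
        self._metadata = None
        self._input_catalog_metadata = input_catalog_metadata

    @property
    def metadata(self):
        if self._metadata is not None:            # 1. cached value wins
            return self._metadata
        if self._input_catalog_metadata:          # 2. input catalog overrides defaults
            self._metadata = self._input_catalog_metadata
            return self._metadata
        self._metadata = {'selected': True}       # 3. fall back to generated defaults
        return self._metadata

print(StreamSketch().metadata)                     # {'selected': True}
print(StreamSketch({'selected': False}).metadata)  # taken from the "catalog"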
docusign/docusign-python-client
docusign_esign/models/phone_number.py
PhoneNumber.anchor_y_offset
python
def anchor_y_offset(self):
        return self._anchor_y_offset
Gets the anchor_y_offset of this PhoneNumber.  # noqa: E501

        Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString.  # noqa: E501

        :return: The anchor_y_offset of this PhoneNumber.  # noqa: E501
        :rtype: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/phone_number.py#L847-L855
import pprint import re import six from docusign_esign.client.configuration import Configuration class PhoneNumber(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'anchor_allow_white_space_in_characters': 'str', 'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata', 'anchor_case_sensitive': 'str', 'anchor_case_sensitive_metadata': 'PropertyMetadata', 'anchor_horizontal_alignment': 'str', 'anchor_horizontal_alignment_metadata': 'PropertyMetadata', 'anchor_ignore_if_not_present': 'str', 'anchor_ignore_if_not_present_metadata': 'PropertyMetadata', 'anchor_match_whole_word': 'str', 'anchor_match_whole_word_metadata': 'PropertyMetadata', 'anchor_string': 'str', 'anchor_string_metadata': 'PropertyMetadata', 'anchor_tab_processor_version': 'str', 'anchor_tab_processor_version_metadata': 'PropertyMetadata', 'anchor_units': 'str', 'anchor_units_metadata': 'PropertyMetadata', 'anchor_x_offset': 'str', 'anchor_x_offset_metadata': 'PropertyMetadata', 'anchor_y_offset': 'str', 'anchor_y_offset_metadata': 'PropertyMetadata', 'bold': 'str', 'bold_metadata': 'PropertyMetadata', 'conceal_value_on_document': 'str', 'conceal_value_on_document_metadata': 'PropertyMetadata', 'conditional_parent_label': 'str', 'conditional_parent_label_metadata': 'PropertyMetadata', 'conditional_parent_value': 'str', 'conditional_parent_value_metadata': 'PropertyMetadata', 'custom_tab_id': 'str', 'custom_tab_id_metadata': 'PropertyMetadata', 'disable_auto_size': 'str', 'disable_auto_size_metadata': 'PropertyMetadata', 'document_id': 'str', 'document_id_metadata': 'PropertyMetadata', 'error_details': 'ErrorDetails', 'font': 'str', 'font_color': 'str', 'font_color_metadata': 'PropertyMetadata', 'font_metadata': 'PropertyMetadata', 'font_size': 'str', 'font_size_metadata': 'PropertyMetadata', 'form_order': 'str', 'form_order_metadata': 'PropertyMetadata', 'form_page_label': 'str', 'form_page_label_metadata': 'PropertyMetadata', 'form_page_number': 'str', 'form_page_number_metadata': 'PropertyMetadata', 'height': 'str', 'height_metadata': 'PropertyMetadata', 'italic': 'str', 'italic_metadata': 'PropertyMetadata', 'locale_policy': 'LocalePolicyTab', 'locked': 'str', 'locked_metadata': 'PropertyMetadata', 'max_length': 'str', 'max_length_metadata': 'PropertyMetadata', 'merge_field': 'MergeField', 'merge_field_xml': 'str', 'name': 'str', 'name_metadata': 'PropertyMetadata', 'original_value': 'str', 'original_value_metadata': 'PropertyMetadata', 'page_number': 'str', 'page_number_metadata': 'PropertyMetadata', 'recipient_id': 'str', 'recipient_id_guid': 'str', 'recipient_id_guid_metadata': 'PropertyMetadata', 'recipient_id_metadata': 'PropertyMetadata', 'required': 'str', 'required_metadata': 'PropertyMetadata', 'smart_contract_information': 'SmartContractInformation', 'source': 'str', 'status': 'str', 'status_metadata': 'PropertyMetadata', 'tab_group_labels': 'list[str]', 'tab_group_labels_metadata': 'PropertyMetadata', 'tab_id': 'str', 'tab_id_metadata': 'PropertyMetadata', 'tab_label': 'str', 'tab_label_metadata': 'PropertyMetadata', 'tab_order': 'str', 'tab_order_metadata': 'PropertyMetadata', 'tab_type': 'str', 'tab_type_metadata': 'PropertyMetadata', 'template_locked': 'str', 'template_locked_metadata': 'PropertyMetadata', 'template_required': 'str', 'template_required_metadata': 'PropertyMetadata', 'tooltip': 'str', 'tool_tip_metadata': 
'PropertyMetadata', 'underline': 'str', 'underline_metadata': 'PropertyMetadata', 'value': 'str', 'value_metadata': 'PropertyMetadata', 'width': 'str', 'width_metadata': 'PropertyMetadata', 'x_position': 'str', 'x_position_metadata': 'PropertyMetadata', 'y_position': 'str', 'y_position_metadata': 'PropertyMetadata' } attribute_map = { 'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters', 'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata', 'anchor_case_sensitive': 'anchorCaseSensitive', 'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata', 'anchor_horizontal_alignment': 'anchorHorizontalAlignment', 'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata', 'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent', 'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata', 'anchor_match_whole_word': 'anchorMatchWholeWord', 'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata', 'anchor_string': 'anchorString', 'anchor_string_metadata': 'anchorStringMetadata', 'anchor_tab_processor_version': 'anchorTabProcessorVersion', 'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata', 'anchor_units': 'anchorUnits', 'anchor_units_metadata': 'anchorUnitsMetadata', 'anchor_x_offset': 'anchorXOffset', 'anchor_x_offset_metadata': 'anchorXOffsetMetadata', 'anchor_y_offset': 'anchorYOffset', 'anchor_y_offset_metadata': 'anchorYOffsetMetadata', 'bold': 'bold', 'bold_metadata': 'boldMetadata', 'conceal_value_on_document': 'concealValueOnDocument', 'conceal_value_on_document_metadata': 'concealValueOnDocumentMetadata', 'conditional_parent_label': 'conditionalParentLabel', 'conditional_parent_label_metadata': 'conditionalParentLabelMetadata', 'conditional_parent_value': 'conditionalParentValue', 'conditional_parent_value_metadata': 'conditionalParentValueMetadata', 'custom_tab_id': 'customTabId', 'custom_tab_id_metadata': 'customTabIdMetadata', 'disable_auto_size': 'disableAutoSize', 'disable_auto_size_metadata': 'disableAutoSizeMetadata', 'document_id': 'documentId', 'document_id_metadata': 'documentIdMetadata', 'error_details': 'errorDetails', 'font': 'font', 'font_color': 'fontColor', 'font_color_metadata': 'fontColorMetadata', 'font_metadata': 'fontMetadata', 'font_size': 'fontSize', 'font_size_metadata': 'fontSizeMetadata', 'form_order': 'formOrder', 'form_order_metadata': 'formOrderMetadata', 'form_page_label': 'formPageLabel', 'form_page_label_metadata': 'formPageLabelMetadata', 'form_page_number': 'formPageNumber', 'form_page_number_metadata': 'formPageNumberMetadata', 'height': 'height', 'height_metadata': 'heightMetadata', 'italic': 'italic', 'italic_metadata': 'italicMetadata', 'locale_policy': 'localePolicy', 'locked': 'locked', 'locked_metadata': 'lockedMetadata', 'max_length': 'maxLength', 'max_length_metadata': 'maxLengthMetadata', 'merge_field': 'mergeField', 'merge_field_xml': 'mergeFieldXml', 'name': 'name', 'name_metadata': 'nameMetadata', 'original_value': 'originalValue', 'original_value_metadata': 'originalValueMetadata', 'page_number': 'pageNumber', 'page_number_metadata': 'pageNumberMetadata', 'recipient_id': 'recipientId', 'recipient_id_guid': 'recipientIdGuid', 'recipient_id_guid_metadata': 'recipientIdGuidMetadata', 'recipient_id_metadata': 'recipientIdMetadata', 'required': 'required', 'required_metadata': 'requiredMetadata', 'smart_contract_information': 'smartContractInformation', 'source': 'source', 'status': 'status', 
'status_metadata': 'statusMetadata', 'tab_group_labels': 'tabGroupLabels', 'tab_group_labels_metadata': 'tabGroupLabelsMetadata', 'tab_id': 'tabId', 'tab_id_metadata': 'tabIdMetadata', 'tab_label': 'tabLabel', 'tab_label_metadata': 'tabLabelMetadata', 'tab_order': 'tabOrder', 'tab_order_metadata': 'tabOrderMetadata', 'tab_type': 'tabType', 'tab_type_metadata': 'tabTypeMetadata', 'template_locked': 'templateLocked', 'template_locked_metadata': 'templateLockedMetadata', 'template_required': 'templateRequired', 'template_required_metadata': 'templateRequiredMetadata', 'tooltip': 'tooltip', 'tool_tip_metadata': 'toolTipMetadata', 'underline': 'underline', 'underline_metadata': 'underlineMetadata', 'value': 'value', 'value_metadata': 'valueMetadata', 'width': 'width', 'width_metadata': 'widthMetadata', 'x_position': 'xPosition', 'x_position_metadata': 'xPositionMetadata', 'y_position': 'yPosition', 'y_position_metadata': 'yPositionMetadata' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._anchor_allow_white_space_in_characters = None self._anchor_allow_white_space_in_characters_metadata = None self._anchor_case_sensitive = None self._anchor_case_sensitive_metadata = None self._anchor_horizontal_alignment = None self._anchor_horizontal_alignment_metadata = None self._anchor_ignore_if_not_present = None self._anchor_ignore_if_not_present_metadata = None self._anchor_match_whole_word = None self._anchor_match_whole_word_metadata = None self._anchor_string = None self._anchor_string_metadata = None self._anchor_tab_processor_version = None self._anchor_tab_processor_version_metadata = None self._anchor_units = None self._anchor_units_metadata = None self._anchor_x_offset = None self._anchor_x_offset_metadata = None self._anchor_y_offset = None self._anchor_y_offset_metadata = None self._bold = None self._bold_metadata = None self._conceal_value_on_document = None self._conceal_value_on_document_metadata = None self._conditional_parent_label = None self._conditional_parent_label_metadata = None self._conditional_parent_value = None self._conditional_parent_value_metadata = None self._custom_tab_id = None self._custom_tab_id_metadata = None self._disable_auto_size = None self._disable_auto_size_metadata = None self._document_id = None self._document_id_metadata = None self._error_details = None self._font = None self._font_color = None self._font_color_metadata = None self._font_metadata = None self._font_size = None self._font_size_metadata = None self._form_order = None self._form_order_metadata = None self._form_page_label = None self._form_page_label_metadata = None self._form_page_number = None self._form_page_number_metadata = None self._height = None self._height_metadata = None self._italic = None self._italic_metadata = None self._locale_policy = None self._locked = None self._locked_metadata = None self._max_length = None self._max_length_metadata = None self._merge_field = None self._merge_field_xml = None self._name = None self._name_metadata = None self._original_value = None self._original_value_metadata = None self._page_number = None self._page_number_metadata = None self._recipient_id = None self._recipient_id_guid = None self._recipient_id_guid_metadata = None self._recipient_id_metadata = None self._required = None self._required_metadata = None self._smart_contract_information = None self._source = None self._status = None self._status_metadata = None self._tab_group_labels = 
None self._tab_group_labels_metadata = None self._tab_id = None self._tab_id_metadata = None self._tab_label = None self._tab_label_metadata = None self._tab_order = None self._tab_order_metadata = None self._tab_type = None self._tab_type_metadata = None self._template_locked = None self._template_locked_metadata = None self._template_required = None self._template_required_metadata = None self._tooltip = None self._tool_tip_metadata = None self._underline = None self._underline_metadata = None self._value = None self._value_metadata = None self._width = None self._width_metadata = None self._x_position = None self._x_position_metadata = None self._y_position = None self._y_position_metadata = None self.discriminator = None setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None)) setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None)) setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None)) setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None)) setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None)) setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None)) setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None)) setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None)) setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None)) setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None)) setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None)) setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None)) setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None)) setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None)) setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None)) setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None)) setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None)) setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None)) setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None)) setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None)) setattr(self, "_{}".format('bold'), kwargs.get('bold', None)) setattr(self, "_{}".format('bold_metadata'), kwargs.get('bold_metadata', None)) setattr(self, "_{}".format('conceal_value_on_document'), kwargs.get('conceal_value_on_document', None)) setattr(self, "_{}".format('conceal_value_on_document_metadata'), kwargs.get('conceal_value_on_document_metadata', None)) setattr(self, "_{}".format('conditional_parent_label'), kwargs.get('conditional_parent_label', None)) setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None)) setattr(self, "_{}".format('conditional_parent_value'), 
kwargs.get('conditional_parent_value', None)) setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None)) setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None)) setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None)) setattr(self, "_{}".format('disable_auto_size'), kwargs.get('disable_auto_size', None)) setattr(self, "_{}".format('disable_auto_size_metadata'), kwargs.get('disable_auto_size_metadata', None)) setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None)) setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None)) setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None)) setattr(self, "_{}".format('font'), kwargs.get('font', None)) setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None)) setattr(self, "_{}".format('font_color_metadata'), kwargs.get('font_color_metadata', None)) setattr(self, "_{}".format('font_metadata'), kwargs.get('font_metadata', None)) setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None)) setattr(self, "_{}".format('font_size_metadata'), kwargs.get('font_size_metadata', None)) setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None)) setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None)) setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None)) setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None)) setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None)) setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None)) setattr(self, "_{}".format('height'), kwargs.get('height', None)) setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None)) setattr(self, "_{}".format('italic'), kwargs.get('italic', None)) setattr(self, "_{}".format('italic_metadata'), kwargs.get('italic_metadata', None)) setattr(self, "_{}".format('locale_policy'), kwargs.get('locale_policy', None)) setattr(self, "_{}".format('locked'), kwargs.get('locked', None)) setattr(self, "_{}".format('locked_metadata'), kwargs.get('locked_metadata', None)) setattr(self, "_{}".format('max_length'), kwargs.get('max_length', None)) setattr(self, "_{}".format('max_length_metadata'), kwargs.get('max_length_metadata', None)) setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None)) setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None)) setattr(self, "_{}".format('original_value'), kwargs.get('original_value', None)) setattr(self, "_{}".format('original_value_metadata'), kwargs.get('original_value_metadata', None)) setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None)) setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None)) setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None)) setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None)) setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None)) setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None)) setattr(self, 
"_{}".format('required'), kwargs.get('required', None)) setattr(self, "_{}".format('required_metadata'), kwargs.get('required_metadata', None)) setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None)) setattr(self, "_{}".format('source'), kwargs.get('source', None)) setattr(self, "_{}".format('status'), kwargs.get('status', None)) setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None)) setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None)) setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None)) setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None)) setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None)) setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None)) setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None)) setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None)) setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None)) setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None)) setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None)) setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None)) setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None)) setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None)) setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None)) setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None)) setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None)) setattr(self, "_{}".format('underline'), kwargs.get('underline', None)) setattr(self, "_{}".format('underline_metadata'), kwargs.get('underline_metadata', None)) setattr(self, "_{}".format('value'), kwargs.get('value', None)) setattr(self, "_{}".format('value_metadata'), kwargs.get('value_metadata', None)) setattr(self, "_{}".format('width'), kwargs.get('width', None)) setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None)) setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None)) setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None)) setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None)) setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None)) @property def anchor_allow_white_space_in_characters(self): return self._anchor_allow_white_space_in_characters @anchor_allow_white_space_in_characters.setter def anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters): self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters @property def anchor_allow_white_space_in_characters_metadata(self): return self._anchor_allow_white_space_in_characters_metadata @anchor_allow_white_space_in_characters_metadata.setter def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata): self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata @property def anchor_case_sensitive(self): return self._anchor_case_sensitive @anchor_case_sensitive.setter def anchor_case_sensitive(self, anchor_case_sensitive): self._anchor_case_sensitive = 
anchor_case_sensitive @property def anchor_case_sensitive_metadata(self): return self._anchor_case_sensitive_metadata @anchor_case_sensitive_metadata.setter def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata): self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata @property def anchor_horizontal_alignment(self): return self._anchor_horizontal_alignment @anchor_horizontal_alignment.setter def anchor_horizontal_alignment(self, anchor_horizontal_alignment): self._anchor_horizontal_alignment = anchor_horizontal_alignment @property def anchor_horizontal_alignment_metadata(self): return self._anchor_horizontal_alignment_metadata @anchor_horizontal_alignment_metadata.setter def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata): self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata @property def anchor_ignore_if_not_present(self): return self._anchor_ignore_if_not_present @anchor_ignore_if_not_present.setter def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present): self._anchor_ignore_if_not_present = anchor_ignore_if_not_present @property def anchor_ignore_if_not_present_metadata(self): return self._anchor_ignore_if_not_present_metadata @anchor_ignore_if_not_present_metadata.setter def anchor_ignore_if_not_present_metadata(self, anchor_ignore_if_not_present_metadata): self._anchor_ignore_if_not_present_metadata = anchor_ignore_if_not_present_metadata @property def anchor_match_whole_word(self): return self._anchor_match_whole_word @anchor_match_whole_word.setter def anchor_match_whole_word(self, anchor_match_whole_word): self._anchor_match_whole_word = anchor_match_whole_word @property def anchor_match_whole_word_metadata(self): return self._anchor_match_whole_word_metadata @anchor_match_whole_word_metadata.setter def anchor_match_whole_word_metadata(self, anchor_match_whole_word_metadata): self._anchor_match_whole_word_metadata = anchor_match_whole_word_metadata @property def anchor_string(self): return self._anchor_string @anchor_string.setter def anchor_string(self, anchor_string): self._anchor_string = anchor_string @property def anchor_string_metadata(self): return self._anchor_string_metadata @anchor_string_metadata.setter def anchor_string_metadata(self, anchor_string_metadata): self._anchor_string_metadata = anchor_string_metadata @property def anchor_tab_processor_version(self): return self._anchor_tab_processor_version @anchor_tab_processor_version.setter def anchor_tab_processor_version(self, anchor_tab_processor_version): self._anchor_tab_processor_version = anchor_tab_processor_version @property def anchor_tab_processor_version_metadata(self): return self._anchor_tab_processor_version_metadata @anchor_tab_processor_version_metadata.setter def anchor_tab_processor_version_metadata(self, anchor_tab_processor_version_metadata): self._anchor_tab_processor_version_metadata = anchor_tab_processor_version_metadata @property def anchor_units(self): return self._anchor_units @anchor_units.setter def anchor_units(self, anchor_units): self._anchor_units = anchor_units @property def anchor_units_metadata(self): return self._anchor_units_metadata @anchor_units_metadata.setter def anchor_units_metadata(self, anchor_units_metadata): self._anchor_units_metadata = anchor_units_metadata @property def anchor_x_offset(self): return self._anchor_x_offset @anchor_x_offset.setter def anchor_x_offset(self, anchor_x_offset): self._anchor_x_offset = anchor_x_offset @property def 
anchor_x_offset_metadata(self): return self._anchor_x_offset_metadata @anchor_x_offset_metadata.setter def anchor_x_offset_metadata(self, anchor_x_offset_metadata): self._anchor_x_offset_metadata = anchor_x_offset_metadata @property
MIT License
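A short usage sketch for the getter above, assuming the docusign_esign package is installed; the anchor values are placeholders passed through the keyword-argument constructor shown in the context.

# Assumes the docusign_esign package is installed; values are placeholders.
from docusign_esign.models.phone_number import PhoneNumber

tab = PhoneNumber(anchor_string='/phone/', anchor_units='pixels',
                  anchor_x_offset='0', anchor_y_offset='10')
print(tab.anchor_y_offset)  # '10', returned by the getter shown above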
pwcazenave/pyfvcom
PyFVCOM/validation.py
TideDB.insert_tide_file
python
def insert_tide_file(self, file_list):
        for this_file in file_list:
            print('Inserting data from file: ' + this_file)
            this_file_obj = BODCAnnualTideFile(this_file)
            try:
                site_id = self.select_qry('sites', "site_tla == '" + this_file_obj.site_tla + "'", 'site_id')[0][0]
            except:
                try:
                    current_id_max = np.max(self.select_qry('sites', None, 'site_id'))
                    site_id = int(current_id_max + 1)
                except:
                    site_id = 1
                site_data = [(site_id, this_file_obj.site_tla, this_file_obj.site_name,
                              this_file_obj.lon, this_file_obj.lat, '')]
                self.debug_data = site_data
                self.insert_into_table('sites', site_data)
            site_id_list = [site_id] * len(this_file_obj.seconds_from_ref)
            table_data = list(zip(site_id_list, this_file_obj.seconds_from_ref,
                                  this_file_obj.elevation_data, this_file_obj.elevation_flag,
                                  this_file_obj.residual_data, this_file_obj.residual_flag))
            self.insert_into_table('gauge_obs', table_data)
Add data from a set of files to the database.

        Parameters
        ----------
        file_list : list
            List of file names.
https://github.com/pwcazenave/pyfvcom/blob/46b00c66c3802f27d73c2c3009291c75b449aad6/PyFVCOM/validation.py#L378-L407
import datetime as dt
import glob as gb
import os
import sqlite3 as sq
import subprocess as sp

import matplotlib.path as mplPath
import matplotlib.pyplot as plt
import numpy as np
from pandas import read_hdf

from PyFVCOM.grid import get_boundary_polygons, vincenty_distance
from PyFVCOM.plot import Time, Plotter
from PyFVCOM.read import FileReader
from PyFVCOM.stats import calculate_coefficient, rmse

SQL_UNIX_EPOCH = dt.datetime(1970, 1, 1, 0, 0, 0)


class ValidationDB(object):

    def __init__(self, db_name):
        if db_name[-3:] != '.db':
            db_name += '.db'
        self.conn = sq.connect(db_name)
        self.create_table_sql = {}
        self.retrieve_data_sql = {}
        self.c = self.conn.cursor()

    def execute_sql(self, sql_str):
        self.c.execute(sql_str)
        return self.c.fetchall()

    def create_table(self, table_name, col_list):
        create_str = 'CREATE TABLE IF NOT EXISTS {table} ({cols});'.format(table=table_name, cols=', '.join(col_list))
        self.execute_sql(create_str)

    def insert_into_table(self, table_name, data, column=None):
        data = np.asarray(data)
        if np.ndim(data) == 1:
            no_cols = len(data)
            no_rows = 1
            data = data[np.newaxis, :]
        else:
            no_rows, no_cols = data.shape
        qs_string = '({})'.format(','.join('?' * no_cols))
        if column is not None:
            column = '({})'.format(','.join(column))
        else:
            column = ''
        if no_rows > 1:
            self.c.executemany('insert or ignore into {tab} {col} values {val}'.format(tab=table_name, col=column, val=qs_string), data)
        elif no_rows == 1:
            self.c.execute('insert into {tab} {col} values {val}'.format(tab=table_name, col=column, val=qs_string), data[0])
        self.conn.commit()

    def select_qry(self, table_name, where_str, select_str='*', order_by_str=None, inner_join_str=None, group_by_str=None):
        qry_string = 'select {} from {}'.format(select_str, table_name)
        if inner_join_str:
            qry_string += ' inner join {}'.format(inner_join_str)
        if where_str:
            qry_string += ' where {}'.format(where_str)
        if order_by_str:
            qry_string += ' order by {}'.format(order_by_str)
        if group_by_str:
            qry_string += ' group by {}'.format(group_by_str)
        return self.execute_sql(qry_string)

    def table_exists(self, variable):
        pass

    def close_conn(self):
        self.conn.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.conn.close()


def dt_to_epochsec(time_to_convert):
    return (time_to_convert - SQL_UNIX_EPOCH).total_seconds()


def epochsec_to_dt(time_to_convert):
    return SQL_UNIX_EPOCH + dt.timedelta(seconds=time_to_convert)


def plot_map(fvcom, tide_db_path, threshold=np.inf, legend=False, **kwargs):
    tide_db = TideDB(tide_db_path)
    gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True)
    gauges_in_domain = []
    fvcom_nodes = []
    for gi, gauge in enumerate(gauge_locations):
        river_index = fvcom.closest_node(gauge, threshold=threshold)
        if river_index:
            gauge_id, gauge_dist = tide_db.get_nearest_gauge_id(*gauge)
            times, data = tide_db.get_tidal_series(gauge_id, np.min(fvcom.time.datetime), np.max(fvcom.time.datetime))
            if not np.any(data):
                continue
            gauges_in_domain.append(gi)
            fvcom_nodes.append(river_index)
    plot = Plotter(fvcom, **kwargs)
    fx, fy = plot.m(fvcom.grid.lon, fvcom.grid.lat)
    plot.plot_field(-fvcom.grid.h)
    plot.axes.plot(fx[fvcom_nodes], fy[fvcom_nodes], 'ro', markersize=3, zorder=202, label='Model')
    rx, ry = plot.m(gauge_locations[:, 0], gauge_locations[:, 1])
    plot.axes.plot(rx, ry, 'wo', label='Gauges')
    for xx, yy, name in zip(rx, ry, gauge_names[gauges_in_domain]):
        plot.axes.text(xx, yy, name, fontsize=10, rotation=45, rotation_mode='anchor', zorder=203)
    if legend:
        plot.axes.legend(numpoints=1, scatterpoints=1, ncol=2, loc='upper center', fontsize=10)
    return plot


def plot_tides(fvcom, db_name, threshold=500, figsize=(10, 10), **kwargs):
    tide_db = TideDB(db_name)
    gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True)
    gauge_obs = {}
    gauges_in_domain = []
    fvcom_nodes = []
    for gi, gauge in enumerate(gauge_locations):
        river_index = fvcom.closest_node(gauge, threshold=threshold)
        if river_index:
            current_gauge = {}
            current_gauge['gauge_id'], current_gauge['gauge_dist'] = tide_db.get_nearest_gauge_id(*gauge)
            current_gauge['times'], current_gauge['data'] = tide_db.get_tidal_series(current_gauge['gauge_id'], np.min(fvcom.time.datetime), np.max(fvcom.time.datetime))
            if not np.any(current_gauge['data']):
                continue
            current_gauge['lon'], current_gauge['lat'] = gauge_locations[gi, :]
            current_gauge['gauge_clean'] = current_gauge['data'][:, 1] == 0
            current_gauge['gauge_obs_clean'] = {'times': np.copy(current_gauge['times'])[current_gauge['gauge_clean']], 'data': np.copy(current_gauge['data'])[current_gauge['gauge_clean'], 0]}
            current_gauge['rescale_zeta'] = fvcom.data.zeta[:, river_index] - np.mean(fvcom.data.zeta[:, river_index])
            current_gauge['rescale_gauge_obs'] = current_gauge['gauge_obs_clean']['data'] - np.mean(current_gauge['gauge_obs_clean']['data'])
            current_gauge['dates_mod'] = np.isin(fvcom.time.datetime, current_gauge['gauge_obs_clean']['times'])
            current_gauge['dates_obs'] = np.isin(current_gauge['gauge_obs_clean']['times'], fvcom.time.datetime)
            if not np.any(current_gauge['dates_mod']) or not np.any(current_gauge['dates_obs']):
                continue
            current_gauge['r'], current_gauge['p'] = calculate_coefficient(current_gauge['rescale_zeta'][current_gauge['dates_mod']], current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
            current_gauge['rms'] = rmse(current_gauge['rescale_zeta'][current_gauge['dates_mod']], current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
            current_gauge['std'] = np.std(current_gauge['rescale_zeta'][current_gauge['dates_mod']] - current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
            gauges_in_domain.append(gi)
            fvcom_nodes.append(river_index)
            name = gauge_names[gi]
            gauge_obs[name] = current_gauge
            del current_gauge
    tide_db.close_conn()
    if len(gauge_obs) > 5:
        cols = np.ceil(len(gauge_obs) ** (1.0 / 3)).astype(int) + 1
    else:
        cols = 1
    rows = np.ceil(len(gauge_obs) / cols).astype(int)
    fig = plt.figure(figsize=figsize)
    for count, site in enumerate(sorted(gauge_obs)):
        ax = fig.add_subplot(rows, cols, count + 1)
        time = Time(fvcom, figure=fig, axes=ax, hold=True, **kwargs)
        time.plot_line(gauge_obs[site]['rescale_zeta'], label='Model', color='k')
        time.axes.plot(gauge_obs[site]['gauge_obs_clean']['times'], gauge_obs[site]['rescale_gauge_obs'], label='Gauge', color='m')
        time.axes.set_xlim(fvcom.time.datetime.min(), fvcom.time.datetime.max())
        time.axes.set_ylim(np.min((gauge_obs[site]['rescale_gauge_obs'].min(), gauge_obs[site]['rescale_zeta'].min())), np.max((gauge_obs[site]['rescale_gauge_obs'].max(), gauge_obs[site]['rescale_zeta'].max())))
        time.axes.set_title(site)
    return time, gauge_obs


def _make_normal_tide_series(h_series):
    height_series = h_series - np.mean(h_series)
    return height_series


class TideDB(ValidationDB):

    def make_bodc_tables(self):
        self._add_sql_strings()
        for this_key, this_val in self.bodc_tables.items():
            self.create_table(this_key, this_val)
        error_data = [(0, '', 'No error'), (1, 'M', 'Improbable value flagged by QC'), (2, 'N', 'Null Value'), (3, 'T', 'Value interpolated from adjacent values')]
        self.insert_into_table('error_flags', error_data)
MIT License
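The ValidationDB class in the context above is a thin wrapper over sqlite3, so a minimal usage sketch may help show how create_table, insert_into_table and select_qry fit together. The database name, table name and column definitions below are invented for illustration, and the import path assumes these names are exposed by PyFVCOM.validation as in the file shown.

import datetime as dt

from PyFVCOM.validation import ValidationDB, dt_to_epochsec

# Open (or create) a SQLite-backed validation database; '.db' is appended automatically.
with ValidationDB('example_validation') as vdb:
    # Hypothetical table: one row per observation (epoch seconds, height in metres, QC flag).
    vdb.create_table('gauge_obs', ['time_int INTEGER', 'height FLOAT', 'qc_flag INTEGER'])
    rows = [
        (dt_to_epochsec(dt.datetime(2020, 1, 1, 0, 0)), 1.23, 0),
        (dt_to_epochsec(dt.datetime(2020, 1, 1, 1, 0)), 1.45, 0),
    ]
    # Multiple rows go through executemany with 'insert or ignore'.
    vdb.insert_into_table('gauge_obs', rows)
    # Builds and runs: select height from gauge_obs where qc_flag = 0 order by time_int
    clean = vdb.select_qry('gauge_obs', 'qc_flag = 0', select_str='height', order_by_str='time_int')

Because __exit__ closes the connection, the context-manager form avoids having to call close_conn() explicitly.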
cleancut/green
green/process.py
poolRunner
python
def poolRunner(
    target, queue, coverage_number=None, omit_patterns=[], cov_config_file=True
):
    saved_tempdir = tempfile.tempdir
    tempfile.tempdir = tempfile.mkdtemp()

    def raise_internal_failure(msg):
        err = sys.exc_info()
        t = ProtoTest()
        t.module = "green.loader"
        t.class_name = "N/A"
        t.description = msg
        t.method_name = "poolRunner"
        result.startTest(t)
        result.addError(t, err)
        result.stopTest(t)
        queue.put(result)
        cleanup()

    def cleanup():
        tempfile.tempdir = saved_tempdir
        queue.put(None)
        if coverage_number:
            cov.stop()
            cov.save()

    if coverage_number:
        cov = coverage.coverage(
            data_file=".coverage.{}_{}".format(
                coverage_number, random.randint(0, 10000)
            ),
            omit=omit_patterns,
            config_file=cov_config_file,
        )
        cov._warn_no_data = False
        cov.start()

    already_sent = set()

    def start_callback(test):
        test = proto_test(test)
        if test not in already_sent:
            queue.put(test)
            already_sent.add(test)

    def finalize_callback(test_result):
        queue.put(test_result)

    result = ProtoTestResult(start_callback, finalize_callback)
    test = None
    try:
        loader = GreenTestLoader()
        test = loader.loadTargets(target)
    except:
        raise_internal_failure("Green encountered an error loading the unit test.")
        return

    if getattr(test, "run", False):
        try:
            test.run(result)
            if (
                result
                and (not result.finalize_callback_called)
                and getattr(result, "errors", False)
            ):
                queue.put(test)
                queue.put(result)
        except:
            if result.errors:
                queue.put(result)
            else:
                try:
                    err = sys.exc_info()
                    result.startTest(test)
                    result.addError(test, err)
                    result.stopTest(test)
                    queue.put(result)
                except:
                    raise_internal_failure(
                        "Green encountered an error when running the test."
                    )
                    return
    else:
        description = (
            'Test loader returned an un-runnable object. Is "{}" '
            "importable from your current location? Maybe you "
            "forgot an __init__.py in your directory? Unrunnable "
            "object looks like: {} of type {} with dir {}".format(
                target, str(test), type(test), dir(test)
            )
        )
        err = (TypeError, TypeError(description), None)
        t = ProtoTest()
        target_list = target.split(".")
        t.module = ".".join(target_list[:-2]) if len(target_list) > 1 else target
        t.class_name = target.split(".")[-2] if len(target_list) > 1 else "UnknownClass"
        t.description = description
        t.method_name = (
            target.split(".")[-1] if len(target_list) > 1 else "unknown_method"
        )
        result.startTest(t)
        result.addError(t, err)
        result.stopTest(t)
        queue.put(result)
    cleanup()
I am the function that pool worker processes run. I run one unit test. cov_config_file is a special option: it is either a string naming a custom coverage config file, or the default value True, which causes coverage to search for its standard config files.
https://github.com/cleancut/green/blob/55625649869d44f8c9577f5f10626b1cbdcc48ad/green/process.py#L345-L478
from __future__ import unicode_literals

import logging
import multiprocessing
from multiprocessing.pool import Pool, RUN, TERMINATE
import platform
import random
import shutil
import sys
import tempfile
import threading
import traceback

import coverage

from green.exceptions import InitializerOrFinalizerError
from green.loader import GreenTestLoader
from green.result import proto_test, ProtoTest, ProtoTestResult


def ddebug(msg, err=None):
    import os

    if err:
        err = "".join(traceback.format_exception(*err))
    else:
        err = ""
    sys.__stdout__.write("({}) {} {}".format(os.getpid(), msg, err) + "\n")
    sys.__stdout__.flush()


class ProcessLogger(object):
    def __init__(self, callable):
        self.__callable = callable

    def __call__(self, *args, **kwargs):
        try:
            result = self.__callable(*args, **kwargs)
        except Exception:
            logger = multiprocessing.get_logger()
            if not logger.handlers:
                logger.addHandler(logging.StreamHandler())
            logger.error(traceback.format_exc())
            logger.handlers[0].flush()
            raise
        return result


class LoggingDaemonlessPool37(Pool):
    @staticmethod
    def Process(*args, **kwargs):
        kwargs["daemon"] = False
        return multiprocessing.Process(*args, **kwargs)

    def apply_async(self, func, args=(), kwds={}, callback=None):
        return Pool.apply_async(self, ProcessLogger(func), args, kwds, callback)

    _wrap_exception = True

    def __init__(
        self,
        processes=None,
        initializer=None,
        initargs=(),
        maxtasksperchild=None,
        context=None,
        finalizer=None,
        finalargs=(),
    ):
        self._finalizer = finalizer
        self._finalargs = finalargs
        super(LoggingDaemonlessPool37, self).__init__(
            processes, initializer, initargs, maxtasksperchild
        )

    def _repopulate_pool(self):
        for i in range(self._processes - len(self._pool)):
            w = self.Process(
                target=worker,
                args=(
                    self._inqueue,
                    self._outqueue,
                    self._initializer,
                    self._initargs,
                    self._maxtasksperchild,
                    self._wrap_exception,
                    self._finalizer,
                    self._finalargs,
                ),
            )
            self._pool.append(w)
            w.name = w.name.replace("Process", "PoolWorker")
            w.start()
            util.debug("added worker")


class LoggingDaemonlessPool38(Pool):
    @staticmethod
    def Process(ctx, *args, **kwds):
        process = ctx.Process(daemon=False, *args, **kwds)
        return process

    def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None):
        return Pool.apply_async(
            self, ProcessLogger(func), args, kwds, callback, error_callback
        )

    _wrap_exception = True

    def __init__(
        self,
        processes=None,
        initializer=None,
        initargs=(),
        maxtasksperchild=None,
        context=None,
        finalizer=None,
        finalargs=(),
    ):
        self._finalizer = finalizer
        self._finalargs = finalargs
        super(LoggingDaemonlessPool38, self).__init__(
            processes, initializer, initargs, maxtasksperchild, context
        )

    def _repopulate_pool(self):
        return self._repopulate_pool_static(
            self._ctx,
            self.Process,
            self._processes,
            self._pool,
            self._inqueue,
            self._outqueue,
            self._initializer,
            self._initargs,
            self._maxtasksperchild,
            self._wrap_exception,
            self._finalizer,
            self._finalargs,
        )

    @staticmethod
    def _repopulate_pool_static(
        ctx,
        Process,
        processes,
        pool,
        inqueue,
        outqueue,
        initializer,
        initargs,
        maxtasksperchild,
        wrap_exception,
        finalizer,
        finalargs,
    ):
        for i in range(processes - len(pool)):
            w = Process(
                ctx,
                target=worker,
                args=(
                    inqueue,
                    outqueue,
                    initializer,
                    initargs,
                    maxtasksperchild,
                    wrap_exception,
                    finalizer,
                    finalargs,
                ),
            )
            w.name = w.name.replace("Process", "PoolWorker")
            w.start()
            pool.append(w)
            util.debug("added worker")


LoggingDaemonlessPool = LoggingDaemonlessPool38

if tuple(map(int, platform.python_version_tuple()[:2])) < (3, 8):
    LoggingDaemonlessPool = LoggingDaemonlessPool37

import platform
import multiprocessing.pool
from multiprocessing import util

try:
    from multiprocessing.pool import MaybeEncodingError
except:

    class MaybeEncodingError(Exception):
        def __init__(self, exc, value):
            self.exc = repr(exc)
            self.value = repr(value)
            super(MaybeEncodingError, self).__init__(self.exc, self.value)

        def __str__(self):
            return "Error sending result: '%s'. Reason: '%s'" % (self.value, self.exc)

        def __repr__(self):
            return "<MaybeEncodingError: %s>" % str(self)


if platform.python_version_tuple()[0] == "2":
    PortableOSError = IOError
else:
    PortableOSError = OSError


def worker(
    inqueue,
    outqueue,
    initializer=None,
    initargs=(),
    maxtasks=None,
    wrap_exception=False,
    finalizer=None,
    finalargs=(),
):
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, "_writer"):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        try:
            initializer(*initargs)
        except InitializerOrFinalizerError as e:
            print(str(e))

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, PortableOSError):
            util.debug("worker got EOFError or OSError -- exiting")
            break

        if task is None:
            util.debug("worker got sentinel -- exiting")
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            if wrap_exception:
                e = ExceptionWithTraceback(e, e.__traceback__)
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            wrapped = MaybeEncodingError(e, result[1])
            util.debug("Possible encoding error while sending result: %s" % (wrapped))
            put((job, i, (False, wrapped)))
        completed += 1

    if finalizer:
        try:
            finalizer(*finalargs)
        except InitializerOrFinalizerError as e:
            print(str(e))
    util.debug("worker exiting after %d tasks" % completed)


class RemoteTraceback(Exception):
    def __init__(self, tb):
        self.tb = tb

    def __str__(self):
        return self.tb


class ExceptionWithTraceback:
    def __init__(self, exc, tb):
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = "".join(tb)
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb

    def __reduce__(self):
        return rebuild_exc, (self.exc, self.tb)


def rebuild_exc(exc, tb):
    exc.__cause__ = RemoteTraceback(tb)
    return exc


multiprocessing.pool.worker = worker
MIT License
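In normal use, green's runner submits poolRunner to a LoggingDaemonlessPool and the queue comes from a multiprocessing manager. Purely as a hedged sketch of its protocol, poolRunner can also be driven in-process with a plain multiprocessing.Queue: it emits ProtoTest objects as tests start, then a ProtoTestResult, then the None sentinel that cleanup() puts last. The dotted target "myproj.test_example" below is hypothetical.

import multiprocessing

from green.process import poolRunner

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    # Run one test target synchronously; normally this call happens inside a pool worker.
    poolRunner("myproj.test_example", queue)

    # Drain everything poolRunner put on the queue until the None sentinel.
    while True:
        item = queue.get()
        if item is None:
            break
        print(type(item).__name__, item)

Even if the target cannot be loaded, the internal-failure path still puts a result followed by the sentinel, so the drain loop terminates.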
camelot-dev/camelot
camelot/plotting.py
PlotMethods.grid
python
def grid(self, table):
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect="equal")
    for row in table.cells:
        for cell in row:
            if cell.left:
                ax.plot([cell.lb[0], cell.lt[0]], [cell.lb[1], cell.lt[1]])
            if cell.right:
                ax.plot([cell.rb[0], cell.rt[0]], [cell.rb[1], cell.rt[1]])
            if cell.top:
                ax.plot([cell.lt[0], cell.rt[0]], [cell.lt[1], cell.rt[1]])
            if cell.bottom:
                ax.plot([cell.lb[0], cell.rb[0]], [cell.lb[1], cell.rb[1]])
    return fig
Generates a plot for the detected table grids on the PDF page.

Parameters
----------
table : camelot.core.Table

Returns
-------
fig : matplotlib.fig.Figure
https://github.com/camelot-dev/camelot/blob/644bbe7c6d57b95aefa2f049a9aacdbc061cc04f/camelot/plotting.py#L74-L99
try:
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
except ImportError:
    _HAS_MPL = False
else:
    _HAS_MPL = True


class PlotMethods(object):
    def __call__(self, table, kind="text", filename=None):
        if not _HAS_MPL:
            raise ImportError("matplotlib is required for plotting.")

        if table.flavor == "lattice" and kind in ["textedge"]:
            raise NotImplementedError(f"Lattice flavor does not support kind='{kind}'")
        elif table.flavor == "stream" and kind in ["joint", "line"]:
            raise NotImplementedError(f"Stream flavor does not support kind='{kind}'")

        plot_method = getattr(self, kind)
        fig = plot_method(table)
        if filename is not None:
            fig.savefig(filename)
            return None
        return fig

    def text(self, table):
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect="equal")
        xs, ys = [], []
        for t in table._text:
            xs.extend([t[0], t[2]])
            ys.extend([t[1], t[3]])
            ax.add_patch(patches.Rectangle((t[0], t[1]), t[2] - t[0], t[3] - t[1]))
        ax.set_xlim(min(xs) - 10, max(xs) + 10)
        ax.set_ylim(min(ys) - 10, max(ys) + 10)
        return fig
MIT License
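PlotMethods is exposed in camelot as the module-level camelot.plot callable, so the grid() method above is normally reached by passing kind="grid" to that wrapper. A minimal usage sketch, with "foo.pdf" and "grid.png" as placeholder file names:

import camelot

# camelot.plot is a PlotMethods instance; kind="grid" dispatches to grid() above.
tables = camelot.read_pdf("foo.pdf", flavor="lattice")
fig = camelot.plot(tables[0], kind="grid")
fig.savefig("grid.png")

Passing filename= to camelot.plot instead saves the figure directly and returns None, per the __call__ logic in the context above.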