Dataset columns (all string-valued):
repository_name: lengths 7 to 107
function_path: lengths 4 to 190
function_identifier: lengths 1 to 236
language: 1 class
function: lengths 9 to 647k
docstring: lengths 5 to 488k
function_url: lengths 71 to 285
context: lengths 0 to 2.51M
license: 5 classes
msu-coinlab/pymop
pymop/problem.py
Problem.pareto_set
python
def pareto_set(self, *args, **kwargs):
    if self._pareto_set is None:
        self._pareto_set = self._calc_pareto_set(*args, **kwargs)
    return self._pareto_set
Returns
-------
S : np.array
    The Pareto set for the problem: points in the X space known to be optimal.
https://github.com/msu-coinlab/pymop/blob/7b7e789e640126c6d254e86ede5d7f4baad7eaa5/pymop/problem.py#L109-L119
import warnings from abc import abstractmethod import autograd import autograd.numpy as anp import numpy as np from pymop.gradient import run_and_trace, calc_jacobian class Problem: def __init__(self, n_var=-1, n_obj=-1, n_constr=0, xl=None, xu=None, type_var=np.double, evaluation_of="auto"): self.n_var = n_var self.type_var = type_var self.n_obj = n_obj self.n_constr = n_constr if n_var > 0 and isinstance(xl, int) and isinstance(xu, int): self.xl = xl if type(xl) is np.ndarray else np.ones(n_var) * xl self.xu = xu if type(xu) is np.ndarray else np.ones(n_var) * xu else: self.xl = xl self.xu = xu self._pareto_front = None self._pareto_set = None if evaluation_of == "auto": self.evaluation_of = ["F"] if self.n_constr > 0: self.evaluation_of.append("G") else: self.evaluation_of = evaluation_of def nadir_point(self): return np.max(self.pareto_front(), axis=0) def ideal_point(self): return np.min(self.pareto_front(), axis=0) def pareto_front(self, *args, **kwargs): if self._pareto_front is None: self._pareto_front = self._calc_pareto_front(*args, **kwargs) return self._pareto_front
Apache License 2.0
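A minimal usage sketch of the lazy caching shown above (ToyProblem and its analytic Pareto set are hypothetical; assumes pymop is installed):

import numpy as np
from pymop.problem import Problem

class ToyProblem(Problem):
    def __init__(self):
        super().__init__(n_var=2, n_obj=2, n_constr=0, xl=0, xu=1)

    def _calc_pareto_set(self, *args, **kwargs):
        # Pretend the optimal decision vectors are known analytically.
        return np.zeros((10, 2))

problem = ToyProblem()
ps_first = problem.pareto_set()   # computed via _calc_pareto_set and cached
ps_again = problem.pareto_set()   # served from the cache, no recomputation
assert ps_first is ps_again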
spulec/moto
moto/batch/models.py
Job.__init__
python
def __init__(
    self,
    name,
    job_def,
    job_queue,
    log_backend,
    container_overrides,
    depends_on,
    all_jobs,
):
    threading.Thread.__init__(self)
    DockerModel.__init__(self)

    self.job_name = name
    self.job_id = str(uuid.uuid4())
    self.job_definition = job_def
    self.container_overrides = container_overrides or {}
    self.job_queue = job_queue
    self.job_state = "SUBMITTED"
    self.job_queue.jobs.append(self)
    self.job_started_at = datetime.datetime(1970, 1, 1)
    self.job_stopped_at = datetime.datetime(1970, 1, 1)
    self.job_stopped = False
    self.job_stopped_reason = None
    self.depends_on = depends_on
    self.all_jobs = all_jobs

    self.stop = False
    self.daemon = True
    self.name = "MOTO-BATCH-" + self.job_id

    self._log_backend = log_backend
    self.log_stream_name = None
Docker Job

:param name: Job Name
:param job_def: Job definition
:type job_def: JobDefinition
:param job_queue: Job Queue
:param log_backend: Log backend
:type log_backend: moto.logs.models.LogsBackend
https://github.com/spulec/moto/blob/7240e8f9657601e8e6e1b3e1f923dd28819cf7b9/moto/batch/models.py#L324-L367
import re from itertools import cycle import datetime import time import uuid import logging import docker import threading import dateutil.parser from boto3 import Session from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.iam import iam_backends from moto.ec2 import ec2_backends from moto.ecs import ecs_backends from moto.logs import logs_backends from .exceptions import InvalidParameterValueException, ClientException, ValidationError from .utils import ( make_arn_for_compute_env, make_arn_for_job_queue, make_arn_for_task_def, lowercase_first_key, ) from moto.ec2.exceptions import InvalidSubnetIdError from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES from moto.iam.exceptions import IAMNotFoundException from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID from moto.utilities.docker_utilities import DockerModel, parse_image_ref from ..utilities.tagging_service import TaggingService logger = logging.getLogger(__name__) COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile( r"^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$" ) def datetime2int(date): return int(time.mktime(date.timetuple())) class ComputeEnvironment(CloudFormationModel): def __init__( self, compute_environment_name, _type, state, compute_resources, service_role, region_name, ): self.name = compute_environment_name self.env_type = _type self.state = state self.compute_resources = compute_resources self.service_role = service_role self.arn = make_arn_for_compute_env( DEFAULT_ACCOUNT_ID, compute_environment_name, region_name ) self.instances = [] self.ecs_arn = None self.ecs_name = None def add_instance(self, instance): self.instances.append(instance) def set_ecs(self, arn, name): self.ecs_arn = arn self.ecs_name = name @property def physical_resource_id(self): return self.arn @staticmethod def cloudformation_name_type(): return "ComputeEnvironmentName" @staticmethod def cloudformation_type(): return "AWS::Batch::ComputeEnvironment" @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): backend = batch_backends[region_name] properties = cloudformation_json["Properties"] env = backend.create_compute_environment( resource_name, properties["Type"], properties.get("State", "ENABLED"), lowercase_first_key(properties["ComputeResources"]), properties["ServiceRole"], ) arn = env[1] return backend.get_compute_environment_by_arn(arn) class JobQueue(CloudFormationModel): def __init__( self, name, priority, state, environments, env_order_json, region_name ): self.name = name self.priority = priority self.state = state self.environments = environments self.env_order_json = env_order_json self.arn = make_arn_for_job_queue(DEFAULT_ACCOUNT_ID, name, region_name) self.status = "VALID" self.jobs = [] def describe(self): result = { "computeEnvironmentOrder": self.env_order_json, "jobQueueArn": self.arn, "jobQueueName": self.name, "priority": self.priority, "state": self.state, "status": self.status, } return result @property def physical_resource_id(self): return self.arn @staticmethod def cloudformation_name_type(): return "JobQueueName" @staticmethod def cloudformation_type(): return "AWS::Batch::JobQueue" @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): backend = batch_backends[region_name] properties = cloudformation_json["Properties"] compute_envs = [ lowercase_first_key(dict_item) for dict_item in properties["ComputeEnvironmentOrder"] ] queue = backend.create_job_queue( queue_name=resource_name, 
priority=properties["Priority"], state=properties.get("State", "ENABLED"), compute_env_order=compute_envs, ) arn = queue[1] return backend.get_job_queue_by_arn(arn) class JobDefinition(CloudFormationModel): def __init__( self, name, parameters, _type, container_properties, region_name, tags={}, revision=0, retry_strategy=0, ): self.name = name self.retries = retry_strategy self.type = _type self.revision = revision self._region = region_name self.container_properties = container_properties self.arn = None self.status = "ACTIVE" self.tagger = TaggingService() if parameters is None: parameters = {} self.parameters = parameters self._validate() self._update_arn() tags = self._format_tags(tags) errmsg = self.tagger.validate_tags(tags or []) if errmsg: raise ValidationError(errmsg) self.tagger.tag_resource(self.arn, tags or []) def _format_tags(self, tags): return [{"Key": k, "Value": v} for k, v in tags.items()] def _update_arn(self): self.revision += 1 self.arn = make_arn_for_task_def( DEFAULT_ACCOUNT_ID, self.name, self.revision, self._region ) def _validate(self): if self.type not in ("container",): raise ClientException('type must be one of "container"') if self.type != "container": raise NotImplementedError() if not isinstance(self.parameters, dict): raise ClientException("parameters must be a string to string map") if "image" not in self.container_properties: raise ClientException("containerProperties must contain image") if "memory" not in self.container_properties: raise ClientException("containerProperties must contain memory") if self.container_properties["memory"] < 4: raise ClientException("container memory limit must be greater than 4") if "vcpus" not in self.container_properties: raise ClientException("containerProperties must contain vcpus") if self.container_properties["vcpus"] < 1: raise ClientException("container vcpus limit must be greater than 0") def update(self, parameters, _type, container_properties, retry_strategy): if parameters is None: parameters = self.parameters if _type is None: _type = self.type if container_properties is None: container_properties = self.container_properties if retry_strategy is None: retry_strategy = self.retries return JobDefinition( self.name, parameters, _type, container_properties, region_name=self._region, revision=self.revision, retry_strategy=retry_strategy, ) def describe(self): result = { "jobDefinitionArn": self.arn, "jobDefinitionName": self.name, "parameters": self.parameters, "revision": self.revision, "status": self.status, "type": self.type, "tags": self.tagger.get_tag_dict_for_resource(self.arn), } if self.container_properties is not None: result["containerProperties"] = self.container_properties if self.retries is not None and self.retries > 0: result["retryStrategy"] = {"attempts": self.retries} return result @property def physical_resource_id(self): return self.arn @staticmethod def cloudformation_name_type(): return "JobDefinitionName" @staticmethod def cloudformation_type(): return "AWS::Batch::JobDefinition" @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): backend = batch_backends[region_name] properties = cloudformation_json["Properties"] res = backend.register_job_definition( def_name=resource_name, parameters=lowercase_first_key(properties.get("Parameters", {})), _type="container", tags=lowercase_first_key(properties.get("Tags", {})), retry_strategy=lowercase_first_key(properties["RetryStrategy"]), 
container_properties=lowercase_first_key(properties["ContainerProperties"]), ) arn = res[1] return backend.get_job_definition_by_arn(arn) class Job(threading.Thread, BaseModel, DockerModel):
Apache License 2.0
peopledoc/django-agnocomplete
agnocomplete/views.py
UserContextFormViewMixin.get_agnocomplete_context
python
def get_agnocomplete_context(self):
    return self.request.user
Return the view's current user. You may want to change this value by overriding this method.
https://github.com/peopledoc/django-agnocomplete/blob/1ef67a0a808cfe61c6d1ac5ec449ee1a0f0246e8/agnocomplete/views.py#L114-L120
from six import with_metaclass from abc import abstractmethod, ABCMeta from django.core.exceptions import PermissionDenied, SuspiciousOperation from django.http import Http404, JsonResponse from django.utils.encoding import force_text as text from django.utils.functional import cached_property from django.views.generic import View from .register import get_agnocomplete_registry from .exceptions import ( AuthenticationRequiredAgnocompleteException, ImproperlyConfiguredView ) from requests.exceptions import HTTPError, Timeout def get_error(exc): if isinstance(exc, HTTPError): return exc.response.status_code, text(exc.response.content) if isinstance(exc, Timeout): return 408, exc if isinstance(exc, Http404): return 404, exc if isinstance(exc, PermissionDenied): return 403, exc if isinstance(exc, SuspiciousOperation): return 400, exc return 500, exc class AgnocompleteJSONView(with_metaclass(ABCMeta, View)): @property def content_type(self): if 'HTTP_X_REQUESTED_WITH' in self.request.META: return "application/json;charset=utf-8" else: return "text/html" @abstractmethod def get_dataset(self, **kwargs): pass def get_extra_arguments(self): extra = filter(lambda x: x[0] != 'q', self.request.GET.items()) return dict(extra) def get(self, *args, **kwargs): try: dataset = self.get_dataset(**self.get_extra_arguments()) return JsonResponse( {'data': dataset}, content_type=self.content_type, ) except Exception as exc: status, message = get_error(exc) return JsonResponse( {"errors": [{ "title": "An error has occurred", "detail": "{}".format(message) }]}, content_type=self.content_type, status=status, ) class RegistryMixin(object): @cached_property def registry(self): return get_agnocomplete_registry() class UserContextFormViewMixin(object):
MIT License
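A minimal override sketch (the view class and the organisation attribute are hypothetical; assumes Django and django-agnocomplete are installed):

from django.views.generic import FormView
from agnocomplete.views import UserContextFormViewMixin

class OrgScopedFormView(UserContextFormViewMixin, FormView):
    def get_agnocomplete_context(self):
        # Scope the autocomplete context to the user's organisation instead of the user itself.
        return self.request.user.organisation  # hypothetical related object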
mcw0/dahuaconsole
net.py
Network.__init__
python
def __init__(self):
    super(Network, self).__init__()
    self.args = None
    """ If we don't have own udp server running in main app, will be False and we do not send anything """
    self.tcp_server = None
    self.console_attach = None
    self.DeviceClass = None
    self.DeviceType = None
    self.AuthCode = None
    self.ErrorCode = None
    self.ID = 0
    self.SessionID = 0
    self.header = None
    self.instance_serviceDB = {}
    self.multicall_query_args = []
    self.multicall_query = []
    self.multicall_return_check = None
    self.fuzzDB = {}
    self.RestoreEventHandler = {}
    self.params_tmp = {}
    self.attachParamsTMP = []
    self.RemoteServicesCache = {}
    self.RemoteMethodsCache = {}
    self.RemoteConfigCache = {}
    self.rhost = None
    self.rport = None
    self.proto = None
    self.events = None
    self.ssl = None
    self.relay_host = None
    self.timeout = None
    self.udp_server = None
    self.proto = None
    self.relay = None
    self.remote = None
    self.debug = None
    self.debugCalls = None
    self.event = threading.Event()
    self.socket_event = threading.Event()
    self.lock = threading.Lock()
    self.recv_stream_status = threading.Event()
    self.terminate = False
If we don't have our own UDP server running in the main app, this will be False and we do not send anything.
https://github.com/mcw0/dahuaconsole/blob/976dbaa6e5cbbe09413a9476eb67e5a9d7e4d585/net.py#L31-L86
import ast import ndjson import copy import inspect import _thread from utils import * from pwdmanager import PwdManager from relay import init_relay, DahuaHttp def dahua_proto(proto): headers = [ b'\xa0\x00', b'\xa0\x01', b'\xa0\x05', b'\xb0\x00', b'\xb0\x01', b'\xa3\x01', b'\xb3\x00', b'\xf6\x00', ] if proto[:2] in headers: return True return False class Network(object):
MIT License
seecode-audit/clocwalk
clocwalk/libs/core/update_mysql.py
Upgrade.__init__
python
def __init__(self, proxies=None, upgrade_interval_day='7d', http_timeout=15):
    self.http_timeout = int(http_timeout)
    self.cve_path = paths.CVE_PATH
    self.cve_cpe_db = paths.DB_FILE
    self.cpe_file = os.path.join(self.cve_path, 'nvdcpematch-1.0.json')
    interval_type = re.search(r'(\d+)(\w)', upgrade_interval_day)
    if interval_type and interval_type.group(2) in ('d', 'h'):
        if interval_type.group(2) == 'd':
            self.upgrade_interval = 60 * 60 * 24 * int(interval_type.group(1))
        elif interval_type.group(2) == 'h':
            self.upgrade_interval = 60 * 60 * int(interval_type.group(1))
    else:
        self.upgrade_interval = 60 * 60 * 24 * 7
    self.headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "en;q=0.9",
        "connection": "keep-alive",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108"
    }
    self.headers.update(conf['http']['headers'])
    self.pool = ThreadPool(10)
    logger.info('Proxies: {0}'.format(proxies))
    self.proxies = proxies
:param proxies:
:param upgrade_interval_day:
:param http_timeout:
https://github.com/seecode-audit/clocwalk/blob/9ae41a841c159212ee1da25d97c93f9daa3c1d5c/clocwalk/libs/core/update_mysql.py#L27-L57
import datetime import glob import gzip import json import os import re import shutil import time import gevent import requests from gevent.threadpool import ThreadPool from clocwalk.libs.core.data import conf from clocwalk.libs.core.data import kb from clocwalk.libs.core.data import logger from clocwalk.libs.core.data import paths from clocwalk.libs.core.mysql_helper import MySQLHelper from clocwalk.libs.detector.cvecpe import cpe_parse class Upgrade(object):
Apache License 2.0
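A standalone sketch of the interval parsing used in __init__ above (parse_interval is a hypothetical helper, not part of clocwalk):

import re

def parse_interval(spec, default=60 * 60 * 24 * 7):
    m = re.search(r'(\d+)(\w)', spec)
    if m and m.group(2) in ('d', 'h'):
        unit = 60 * 60 * 24 if m.group(2) == 'd' else 60 * 60
        return unit * int(m.group(1))
    return default  # fall back to 7 days, as the constructor does

assert parse_interval('7d') == 604800
assert parse_interval('12h') == 43200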
facebookresearch/crypten
crypten/communicator/communicator.py
Communicator.broadcast
python
def broadcast(self, tensor, src, async_op=False):
    raise NotImplementedError("broadcast is not implemented")
Broadcasts the tensor to all parties.
https://github.com/facebookresearch/crypten/blob/3f190af0ab2932fdbdcd3c53fdb33d27cb706684/crypten/communicator/communicator.py#L66-L68
import sys import timeit from crypten.config import cfg class Communicator: @classmethod def is_initialized(cls): raise NotImplementedError("is_initialized is not implemented") @classmethod def get(cls): raise NotImplementedError("get is not implemented") @classmethod def initialize(cls, **kwargs): raise NotImplementedError("initialize is not implemented") @classmethod def shutdown(cls): raise NotImplementedError("shutdown is not implemented") def send(self, tensor, dst): raise NotImplementedError("send is not implemented") def recv(self, tensor, src=None): raise NotImplementedError("recv is not implemented") def scatter(self, scatter_list, src, size=None, async_op=False): raise NotImplementedError("scatter is not implemented") def reduce(self, tensor, op=None, async_op=False): raise NotImplementedError("tensor is not implemented") def all_reduce(self, tensor, op=None, async_op=False): raise NotImplementedError("tensor is not implemented") def gather(self, tensor, dst, async_op=False): raise NotImplementedError("gather is not implemented") def all_gather(self, tensor, async_op=False): raise NotImplementedError("all_gather is not implemented")
MIT License
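A minimal sketch of satisfying the abstract interface (a hypothetical single-party LocalCommunicator, not CrypTen's real distributed communicator; assumes crypten is installed):

from crypten.communicator.communicator import Communicator

class LocalCommunicator(Communicator):
    def broadcast(self, tensor, src, async_op=False):
        # With a single party there is nobody to send to: broadcast is the identity.
        return tensor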
zachchristensen28/ta-opnsense
bin/ta_opnsense/aob_py3/jinja2/utils.py
object_type_repr
python
def object_type_repr(obj):
    if obj is None:
        return "None"
    elif obj is Ellipsis:
        return "Ellipsis"
    cls = type(obj)
    if cls.__module__ in ("__builtin__", "builtins"):
        name = cls.__name__
    else:
        name = cls.__module__ + "." + cls.__name__
    return "%s object" % name
Returns the name of the object's type. For some recognized singletons the name of the object is returned instead. (For example for `None` and `Ellipsis`).
https://github.com/zachchristensen28/ta-opnsense/blob/fc736f4c6f0fa7866b4f6d2dcf9761b6b693d6cf/bin/ta_opnsense/aob_py3/jinja2/utils.py#L147-L165
import json import os import re import warnings from collections import deque from random import choice from random import randrange from string import ascii_letters as _letters from string import digits as _digits from threading import Lock from markupsafe import escape from markupsafe import Markup from ._compat import abc from ._compat import string_types from ._compat import text_type from ._compat import url_quote missing = type("MissingType", (), {"__repr__": lambda x: "missing"})() internal_code = set() concat = u"".join _slash_escape = "\\/" not in json.dumps("/") def contextfunction(f): f.contextfunction = True return f def evalcontextfunction(f): f.evalcontextfunction = True return f def environmentfunction(f): f.environmentfunction = True return f def internalcode(f): internal_code.add(f.__code__) return f def is_undefined(obj): from .runtime import Undefined return isinstance(obj, Undefined) def consume(iterable): for _ in iterable: pass def clear_caches(): from .environment import _spontaneous_environments from .lexer import _lexer_cache _spontaneous_environments.clear() _lexer_cache.clear() def import_string(import_name, silent=False): try: if ":" in import_name: module, obj = import_name.split(":", 1) elif "." in import_name: module, _, obj = import_name.rpartition(".") else: return __import__(import_name) return getattr(__import__(module, None, None, [obj]), obj) except (ImportError, AttributeError): if not silent: raise def open_if_exists(filename, mode="rb"): if not os.path.isfile(filename): return None return open(filename, mode)
MIT License
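A quick behaviour sketch, assuming a Jinja2 2.x install where this helper lives in jinja2.utils:

from collections import deque
from jinja2.utils import object_type_repr

print(object_type_repr(None))      # 'None'
print(object_type_repr({}))        # 'dict object' (the builtins module name is hidden)
print(object_type_repr(deque()))   # 'collections.deque object'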
bandoche/pypinksign
pypinksign/pypinksign.py
pbkdf1
python
def pbkdf1(password: bytes, salt: bytes, c: int = 1200, dk_len: int = 20):
    dk_max_len = hashlib.sha1().digest_size
    if dk_len > dk_max_len:
        raise ValueError("derived key too long")
    if len(salt) != 8:
        raise ValueError('Salt should be 8 bytes')
    t = sha1(password + salt).digest()
    for _ in range(2, c + 1):
        t = sha1(t).digest()
    return t[:dk_len]
From PKCS#5 2.0 sect 5.1

PBKDF1(P, S, c, dkLen)

Options:
    Hash    underlying hash function

Input:
    P       password, an octet string
    S       salt, an eight-octet string
    c       iteration count, a positive integer
    dkLen   intended length in octets of derived key, a positive integer,
            at most 16 for MD2 or MD5 and 20 for SHA-1

Output:
    DK      derived key, a dkLen-octet string
https://github.com/bandoche/pypinksign/blob/ed1609a9bfdffbfff94cebbacae0984b90c03cad/pypinksign/pypinksign.py#L567-L589
import base64 import hashlib import logging import os import random from datetime import datetime from hashlib import sha1 from os.path import expanduser from sys import platform as _platform from cryptography import x509 from cryptography.exceptions import InvalidSignature, UnsupportedAlgorithm from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import padding, hashes, serialization from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers, RSAPrivateNumbers, rsa_crt_iqmp, rsa_crt_dmp1, rsa_crt_dmq1, RSAPublicKey, RSAPrivateKey from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC from cryptography.hazmat.primitives.serialization import pkcs12 from pyasn1.codec.der import decoder as der_decoder from pyasn1.codec.der import encoder as der_encoder from pyasn1.codec.der.encoder import encode from pyasn1.type import tag from pyasn1.type.namedtype import NamedTypes, NamedType from pyasn1.type.univ import Sequence, Integer, OctetString, ObjectIdentifier, Set, BitString, Null ID_SEED_CBC = (1, 2, 410, 200004, 1, 4) ID_SEED_CBC_WITH_SHA1 = (1, 2, 410, 200004, 1, 15) ID_PBES2 = (1, 2, 840, 113549, 1, 5, 13) ID_PKCS7_ENVELOPED_DATA = (1, 2, 840, 113549, 1, 7, 3) ID_PKCS1_ENCRYPTION = (1, 2, 840, 113549, 1, 1, 1) ID_KISA_NPKI_RAND_NUM = (1, 2, 410, 200004, 10, 1, 1, 3) class PinkSign: def __init__(self, pubkey_path: str = None, pubkey_data: bytes = None, prikey_path: str = None, prikey_data: bytes = None, prikey_password: bytes = None, p12_path: str = None, p12_data: bytes = None): self.pubkey_path = pubkey_path self.prikey_path = prikey_path self.prikey_data = prikey_data if prikey_password: if isinstance(prikey_password, str): logging.warning("Please use bytes for passphrase") prikey_password = str.encode(prikey_password) self.prikey_password = prikey_password self.p12_path = p12_path self.p12_data = p12_data self.pub_cert = None self.prikey: RSAPrivateKey = None self.pub_data: bytes = None self.pubkey: RSAPublicKey = None self.rand_num = None if p12_path is not None: self.load_p12() elif p12_data is not None: self.load_p12(p12_data=p12_data) else: if pubkey_path is not None: self.load_pubkey() elif pubkey_data is not None: self.load_pubkey(pubkey_data=pubkey_data) if prikey_path is not None and prikey_password is not None: self.load_prikey() return def load_pubkey(self, pubkey_path: str = None, pubkey_data: bytes = None) -> None: if not any([self.pubkey_path, pubkey_path, pubkey_data]): raise ValueError("Neither pubkey_path nor pubkey_data is exist.") if pubkey_data is not None: self.pub_data = pubkey_data else: if pubkey_path is not None: self.pubkey_path = pubkey_path self.pub_data = open(self.pubkey_path, 'rb').read() self.pub_cert = x509.load_der_x509_certificate(self.pub_data, default_backend()) self.pubkey = self.pub_cert.public_key() return def load_prikey(self, prikey_path: str = None, prikey_data: bytes = None, prikey_password: str = None) -> None: if self.pubkey is None: raise ValueError("public key file should be loaded before load private key.") if not any([self.prikey_path, prikey_path, self.prikey_data, prikey_data]): raise ValueError("prikey_path(prikey_data) is not defined.") if not any([self.prikey_password, prikey_password]): raise ValueError("prikey_password is not defined.") if prikey_path is not None: self.prikey_path = prikey_path if prikey_data is not None: 
self.prikey_data = prikey_data if prikey_password is not None: self.prikey_password = prikey_password if self.prikey_path is not None: d = open(self.prikey_path, 'rb').read() else: d = self.prikey_data der = der_decoder.decode(d)[0] algorithm_type = der[0][0].asTuple() private_key_decryption_key_functions = { ID_SEED_CBC_WITH_SHA1: self.get_private_key_decryption_key_for_seed_cbc_with_sha1, ID_SEED_CBC: self.get_private_key_decryption_key_for_seed_cbc, ID_PBES2: self.get_private_key_decryption_key_for_pbes2, } if algorithm_type not in private_key_decryption_key_functions.keys(): raise ValueError("prikey is not correct K-PKI private key file") k, iv = private_key_decryption_key_functions[algorithm_type](der) cipher_key = der[1].asOctets() prikey_data = seed_cbc_128_decrypt(k, cipher_key, iv) self._load_prikey_with_decrypted_data(decrypted_prikey_data=prikey_data) return def _load_prikey_with_decrypted_data(self, decrypted_prikey_data: bytes) -> None: der_pri = der_decoder.decode(decrypted_prikey_data) der_pri2 = der_decoder.decode(der_pri[0][2]) (n, e, d, p, q) = (der_pri2[0][1], der_pri2[0][2], der_pri2[0][3], der_pri2[0][4], der_pri2[0][5]) (n, e, d, p, q) = (int(n), int(e), int(d), int(p), int(q)) iqmp = rsa_crt_iqmp(p, q) dmp1 = rsa_crt_dmp1(d, p) dmq1 = rsa_crt_dmq1(d, q) pn = RSAPublicNumbers(n=n, e=e) self.prikey = RSAPrivateNumbers(p=p, q=q, d=d, dmp1=dmp1, dmq1=dmq1, iqmp=iqmp, public_numbers=pn).private_key(backend=default_backend()) if len(der_pri[0]) > 3: self._rand_num = der_pri[0][3][1][0] return def load_p12(self, p12_data: bytes = None) -> None: if p12_data is None: p12_data = open(self.p12_path, 'rb').read() pubkey_data, prikey_data = separate_p12_into_npki(p12_data, self.prikey_password) self.load_pubkey(pubkey_data=pubkey_data) self._load_prikey_with_decrypted_data(decrypted_prikey_data=prikey_data) return def cn(self) -> str: if self.pub_cert is None: raise ValueError("Public key should be loaded before fetching DN.") for dn in self.pub_cert.subject.rdns: if dn.rfc4514_string().startswith('CN='): return dn.rfc4514_string()[3:] return '' def issuer(self) -> str: if self.pub_cert is None: raise ValueError("Public key should be loaded before fetching issuer.") for dn in self.pub_cert.issuer.rdns: if dn.rfc4514_string().startswith('O='): return dn.rfc4514_string()[2:] def cert_class(self) -> str: if self.pub_cert is None: raise ValueError("Public key should be loaded before fetching cert class.") for dn in self.pub_cert.issuer.rdns: if dn.rfc4514_string().startswith('CN='): return dn.rfc4514_string()[3:] def cert_type_oid(self) -> str: if self.pub_cert is None: raise ValueError("Public key should be loaded before fetching cert type.") for ext in self.pub_cert.extensions: if ext.oid.dotted_string == '2.5.29.32': return ext.value[0].policy_identifier.dotted_string def valid_date(self) -> (datetime, datetime): if self.pub_cert is None: raise ValueError("Public key should be loaded before fetching valid date.") return self.pub_cert.not_valid_before, self.pub_cert.not_valid_after def serialnum(self) -> int: if self.pub_cert is None: raise ValueError("Public key should be loaded before fetching serial number.") return self.pub_cert.serial_number def sign(self, msg: bytes, algorithm=hashes.SHA256(), padding_=PKCS1v15()): if self.prikey is None: raise ValueError("Private key is required for signing.") return self.prikey.sign(data=msg, padding=padding_, algorithm=algorithm) def verify(self, signature: bytes, msg: bytes, algorithm=hashes.SHA256(), padding_=PKCS1v15()) -> bool: if 
self.pubkey is None: raise ValueError("Public key is required for verification.") try: self.pubkey.verify(data=msg, signature=signature, padding=padding_, algorithm=algorithm) return True except InvalidSignature: return False except Exception as e: raise e def decrypt(self, msg, padding_=PKCS1v15()): if self.prikey is None: raise ValueError("Private key is required for decryption.") return self.prikey.decrypt(ciphertext=msg, padding=padding_) def encrypt(self, msg, padding_=PKCS1v15()): if self.pubkey is None: raise ValueError("Public key is required for encryption.") return self.pubkey.encrypt(msg, padding=padding_) def pkcs7_signed_msg(self, msg: bytes): signed = self.sign(msg) owner_cert_pub = der_decoder.decode(self.pub_data)[0] oi_pkcs7_signed = ObjectIdentifier((1, 2, 840, 113549, 1, 7, 2)) oi_pkcs7_data = ObjectIdentifier((1, 2, 840, 113549, 1, 7, 1)) oi_sha256 = ObjectIdentifier((2, 16, 840, 1, 101, 3, 4, 2, 1)) oi_pkcs7_rsa_enc = ObjectIdentifier((1, 2, 840, 113549, 1, 1, 1)) der = Sequence().setComponentByPosition(0, oi_pkcs7_signed) data = Sequence() data = data.setComponentByPosition(0, Integer(1)) data = data.setComponentByPosition( 1, Set().setComponentByPosition( 0, Sequence().setComponentByPosition( 0, oi_sha256).setComponentByPosition( 1, Null('')))) data = data.setComponentByPosition( 2, Sequence().setComponentByPosition( 0, oi_pkcs7_data).setComponentByPosition( 1, Sequence().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)). setComponentByPosition(0, OctetString(hexValue=msg.hex())))) data = data.setComponentByPosition(3, Sequence().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).setComponentByPosition(0, owner_cert_pub)) data4001 = Sequence().setComponentByPosition(0, owner_cert_pub[0][3]) data4001 = data4001.setComponentByPosition(1, owner_cert_pub[0][1]) data4002 = Sequence().setComponentByPosition(0, oi_sha256).setComponentByPosition(1, Null('')) data4003 = Sequence().setComponentByPosition(0, oi_pkcs7_rsa_enc).setComponentByPosition(1, Null('')) data4004 = OctetString(hexValue=signed.hex()) data = data.setComponentByPosition( 4, Set().setComponentByPosition( 0, Sequence().setComponentByPosition( 0, Integer(1)).setComponentByPosition( 1, data4001).setComponentByPosition( 2, data4002).setComponentByPosition( 3, data4003).setComponentByPosition( 4, data4004))) der = der.setComponentByPosition(1, Sequence().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).setComponentByPosition(0, data)) return der_encoder.encode(der) def get_private_key_decryption_key_for_seed_cbc_with_sha1(self, der: Sequence) -> (bytes, bytes): salt = der[0][1][0].asOctets() iter_cnt = int(der[0][1][1]) dk = pbkdf1(self.prikey_password, salt, iter_cnt, 20) k = dk[:16] div = hashlib.sha1(dk[16:20]).digest() iv = div[:16] return k, iv def get_private_key_decryption_key_for_seed_cbc(self, der: Sequence) -> (bytes, bytes): salt = der[0][1][0].asOctets() iter_cnt = int(der[0][1][1]) dk = pbkdf1(self.prikey_password, salt, iter_cnt, 20) k = dk[:16] iv = b"0123456789012345" return k, iv def get_private_key_decryption_key_for_pbes2(self, der: Sequence) -> (bytes, bytes): salt = der[0][1][0][1][0].asOctets() iter_cnt = int(der[0][1][0][1][1]) kdf = PBKDF2HMAC( algorithm=hashes.SHA1(), length=16, salt=salt, iterations=iter_cnt, backend=default_backend(), ) k = kdf.derive(self.prikey_password) iv = der[0][1][1][1].asOctets() return k, iv def get_npki_path(): if _platform == "linux" or _platform == "linux2": path = 
expanduser("~/NPKI/") elif _platform == "darwin": suspect = ["~/Documents/NPKI/", "~/NPKI/", "~/Library/Preferences/NPKI/"] for p in suspect: path = expanduser(p) if os.path.isdir(path): return path raise ValueError("can't find certificate folder") elif _platform == "win32": suspect = ["C:/Program Files/NPKI/", "~/AppData/LocalLow/NPKI/"] for p in suspect: path = expanduser(p) if os.path.isdir(path): return path raise ValueError("can't find certificate folder") else: path = expanduser("~/NPKI/") return path def choose_cert(base_path: str = None, cn: str = None, pw: str = None): cert_list = [] if base_path is not None: path = base_path else: path = get_npki_path() for root, dirs, files in os.walk(path): if root[-5:] == "/USER": for cert_dir in dirs: if cert_dir[:3] == "cn=": cert_path = "%s/%s" % (root, cert_dir) cert = PinkSign(pubkey_path="%s/signCert.der" % cert_path) cert.prikey_path = "%s/signPri.key" % cert_path if cn is not None: if cn in cert.cn(): if pw is not None: cert.load_prikey(prikey_path="%s/signPri.key" % cert_path, prikey_password=pw) return cert cert_list.append(cert) i = 1 for cert in cert_list: (cn, (valid_from, valid_until), issuer) = (cert.cn(), cert.valid_date(), cert.issuer()) print("[%d] %s (%s ~ %s) issued by %s" % (i, cn, valid_from, valid_until, issuer)) i += 1 i = int(input("Choose your certifiacte: ")) return cert_list[i - 1] def seed_cbc_128_encrypt(key: bytes, plaintext: bytes, iv: bytes = b'0123456789012345') -> bytes: try: return seed_cbc_128_encrypt_openssl(key, plaintext, iv) except UnsupportedAlgorithm: return seed_cbc_128_encrypt_pure(key, plaintext, iv) def seed_cbc_128_decrypt(key: bytes, ciphertext: bytes, iv: bytes = b'0123456789012345') -> bytes: try: return seed_cbc_128_decrypt_openssl(key, ciphertext, iv) except UnsupportedAlgorithm: return seed_cbc_128_decrypt_pure(key, ciphertext, iv) def seed_cbc_128_encrypt_openssl(key: bytes, plaintext: bytes, iv: bytes = b'0123456789012345') -> bytes: backend = default_backend() cipher = Cipher(algorithms.SEED(key), modes.CBC(iv), backend=backend) encryptor = cipher.encryptor() padder = padding.PKCS7(128).padder() padded_text = padder.update(plaintext) + padder.finalize() encrypted_text = encryptor.update(padded_text) return encrypted_text def byte_xor(ba1, ba2): return bytes([_a ^ _b for _a, _b in zip(ba1, ba2)]) def seed_cbc_128_encrypt_pure(key: bytes, plaintext: bytes, iv: bytes = b'0123456789012345') -> bytes: from . import process_block padder = padding.PKCS7(128).padder() padded_text = padder.update(plaintext) + padder.finalize() n = 16 chunks = [padded_text[i:i + n] for i in range(0, len(padded_text), n)] vector = iv result = b'' for ch in chunks: new_ch = byte_xor(ch, vector) result_ba = process_block(True, key, new_ch) vector = result_ba result += result_ba return result def seed_cbc_128_decrypt_openssl(key: bytes, ciphertext: bytes, iv: bytes = b'0123456789012345') -> bytes: backend = default_backend() cipher = Cipher(algorithms.SEED(key), modes.CBC(iv), backend=backend) decryptor = cipher.decryptor() decrypted_text = decryptor.update(ciphertext) unpadder = padding.PKCS7(128).unpadder() unpadded_text = unpadder.update(decrypted_text) + unpadder.finalize() return unpadded_text def seed_cbc_128_decrypt_pure(key: bytes, ciphertext: bytes, iv: bytes = b'0123456789012345') -> bytes: from . 
import process_block n = 16 chunks = [ciphertext[i:i + n] for i in range(0, len(ciphertext), n)] vector = iv result = b'' for ch in chunks: dec = process_block(False, key, ch) result += byte_xor(dec, vector) vector = ch unpadder = padding.PKCS7(128).unpadder() unpadded_text = unpadder.update(result) + unpadder.finalize() return unpadded_text def seed_generator(size: int) -> bytes: return bytes([random.choice(range(255)) + 1 for _ in range(size)])
MIT License
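A minimal usage sketch of pbkdf1 above (assumes pypinksign is installed; the password and salt are made-up test values):

from pypinksign.pypinksign import pbkdf1

dk = pbkdf1(b"password", b"saltsalt", c=1200, dk_len=16)  # salt must be exactly 8 bytes
assert len(dk) == 16                                      # dk_len may not exceed 20 (SHA-1 digest size)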
dedsecinside/awesome-scripts
APIs/Telegram API/telethon/tl/custom/inlinebuilder.py
InlineBuilder._message
python
async def _message(
        self, *,
        text=None, parse_mode=(), link_preview=True,
        geo=None, period=60,
        contact=None,
        game=False,
        buttons=None
):
Use this method to edit_preview.

Args:
    self: (todo): write your description
    text: (str): write your description
    parse_mode: (todo): write your description
    link_preview: (str): write your description
    geo: (str): write your description
    period: (str): write your description
    contact: (str): write your description
    game: (todo): write your description
    buttons: (str): write your description
https://github.com/dedsecinside/awesome-scripts/blob/856835e5ff5f8a6af2d74bb25800c620feb712e3/APIs/Telegram API/telethon/tl/custom/inlinebuilder.py#L278-L296
import hashlib from .. import functions, types from ... import utils class InlineBuilder: def __init__(self, client): self._client = client async def article( self, title, description=None, *, url=None, thumb=None, content=None, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None ): result = types.InputBotInlineResult( id=id or '', type='article', send_message=await self._message( text=text, parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons ), title=title, description=description, url=url, thumb=thumb, content=content ) if id is None: result.id = hashlib.sha256(bytes(result)).hexdigest() return result async def photo( self, file, *, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None ): try: fh = utils.get_input_photo(file) except TypeError: _, media, _ = await self._client._file_to_media( file, allow_cache=True, as_image=True ) if isinstance(media, types.InputPhoto): fh = media else: r = await self._client(functions.messages.UploadMediaRequest( types.InputPeerSelf(), media=media )) fh = utils.get_input_photo(r.photo) result = types.InputBotInlineResultPhoto( id=id or '', type='photo', photo=fh, send_message=await self._message( text=text or '', parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons ) ) if id is None: result.id = hashlib.sha256(bytes(result)).hexdigest() return result async def document( self, file, title=None, *, description=None, type=None, mime_type=None, attributes=None, force_document=False, voice_note=False, video_note=False, use_cache=True, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None ): if type is None: if voice_note: type = 'voice' else: type = 'document' try: fh = utils.get_input_document(file) except TypeError: _, media, _ = await self._client._file_to_media( file, mime_type=mime_type, attributes=attributes, force_document=True, voice_note=voice_note, video_note=video_note, allow_cache=use_cache ) if isinstance(media, types.InputDocument): fh = media else: r = await self._client(functions.messages.UploadMediaRequest( types.InputPeerSelf(), media=media )) fh = utils.get_input_document(r.document) result = types.InputBotInlineResultDocument( id=id or '', type=type, document=fh, send_message=await self._message( text=text or '', parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons ), title=title, description=description ) if id is None: result.id = hashlib.sha256(bytes(result)).hexdigest() return result async def game( self, short_name, *, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None ): result = types.InputBotInlineResultGame( id=id or '', short_name=short_name, send_message=await self._message( text=text, parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons ) ) if id is None: result.id = hashlib.sha256(bytes(result)).hexdigest() return result
MIT License
hazyresearch/pdftotree
pdftotree/utils/pdf/pdf_utils.py
_allowed_char
python
def _allowed_char(c):
    c = ord(c)
    if c < 0:
        return False
    if c < 128:
        return _ascii_allowed[c]
    return True
Returns whether the given unicode char is allowed in output
https://github.com/hazyresearch/pdftotree/blob/0686a1845c7901aa975544a9107fc10594523986/pdftotree/utils/pdf/pdf_utils.py#L249-L260
import collections import re import string from collections import Counter from typing import List, NamedTuple, Optional, Tuple, Union from pdfminer.converter import PDFPageAggregator from pdfminer.layout import ( LTAnno, LTChar, LTComponent, LTContainer, LTCurve, LTFigure, LTLayoutContainer, LTLine, LTPage, LTTextContainer, LTTextLine, ) from pdfminer.utils import INF, apply_matrix_pt from pdftotree.utils.img_utils import normalize_bbox, normalize_pts class PDFElems(NamedTuple): mentions: List[LTTextLine] segments: List[LTLine] curves: List[LTCurve] figures: List[LTFigure] layout: LTPage chars: List[Union[LTChar, LTAnno]] class CustomPDFPageAggregator(PDFPageAggregator): line_only_shape = re.compile("ml+h?") def paint_path(self, gstate, stroke, fill, evenodd, path): shape = "".join(x[0] for x in path) prev_split = 0 for i in range(len(shape)): if shape[i] == "m" and prev_split != i: self.paint_single_path( gstate, stroke, fill, evenodd, path[prev_split:i] ) prev_split = i if shape[i] == "h": self.paint_single_path( gstate, stroke, fill, evenodd, path[prev_split : i + 1] ) prev_split = i + 1 if prev_split < len(shape): self.paint_single_path(gstate, stroke, fill, evenodd, path[prev_split:]) def paint_single_path(self, gstate, stroke, fill, evenodd, path): if len(path) < 2: return shape = "".join(x[0] for x in path) pts = [] for p in path: for i in range(1, len(p), 2): pts.append(apply_matrix_pt(self.ctm, (p[i], p[i + 1]))) if self.line_only_shape.match(shape): has_slope = False for i in range(len(pts) - 1): if pts[i][0] != pts[i + 1][0] and pts[i][1] != pts[i + 1][1]: has_slope = True break if not has_slope: for i in range(len(pts) - 1): self.cur_item.add(LTLine(gstate.linewidth, pts[i], pts[i + 1])) if shape.endswith("h"): self.cur_item.add(LTLine(gstate.linewidth, pts[0], pts[-1])) return self.cur_item.add(LTCurve(gstate.linewidth, pts)) def normalize_pdf(self, layout: LTPage, scaler) -> Tuple[PDFElems, Counter]: chars = [] mentions: List[LTTextContainer] = [] height = scaler * layout.height font_size_counter = collections.Counter() pts_thres = 2.0 * scaler segments = [] curves = [] figures = [] container: LTContainer = None _font = None def processor(m, parent): if isinstance(m, LTContainer): for child in m: processor(child, m) if isinstance(m, LTComponent): m.set_bbox(normalize_bbox(m.bbox, height, scaler)) if isinstance(m, LTCurve): m.pts = normalize_pts(m.pts, height, scaler) if isinstance(m, LTLine) and max(m.width, m.height) > pts_thres: segments.append(m) else: curves.append(m) elif isinstance(m, LTFigure): figures.append(m) elif isinstance(m, LTChar): if not isinstance(parent, LTTextLine): nonlocal _font nonlocal container font = (m.fontname, m.size) dummy_bbox = (+INF, +INF, -INF, -INF) if font != _font: if _font is not None: layout_container = LTLayoutContainer(dummy_bbox) for textline in layout_container.group_objects( self.laparams, container ): cleaned_textline = _clean_textline(textline) if cleaned_textline is not None: mentions.append(cleaned_textline) container = LTContainer(dummy_bbox) _font = font container.add(m) chars.append(m) font_size = _font_size_of(m) font_size_counter[font_size] += 1 elif isinstance(m, LTTextLine): cleaned_textline = _clean_textline(m) if cleaned_textline is not None: mentions.append(cleaned_textline) elif isinstance(m, LTAnno): chars.append(m) return processor(layout, None) for m in mentions: alphanum_c = next((c for c in m if c.get_text().isalnum()), None) if alphanum_c: m.set_bbox((m.x0, alphanum_c.y0, m.x1, alphanum_c.y1)) elems = 
PDFElems(mentions, segments, curves, figures, layout, chars) return elems, font_size_counter def _print_dict(elem_dict): for key, value in sorted(elem_dict.iteritems()): if isinstance(value, collections.Iterable): print(key, len(value)) else: print(key, value) def _font_size_of(ch): if isinstance(ch, LTChar): return max(map(abs, ch.matrix[:4])) return -1 def _clean_textline(item: LTTextLine) -> Optional[LTTextLine]: clean_text = keep_allowed_chars(item.get_text()).strip() if clean_text: item.clean_text = clean_text item.font_name, item.font_size = _font_of_mention(item) return item else: return None def _font_of_mention(m): for ch in m: if isinstance(ch, LTChar) and ch.get_text().isalnum(): return (ch.fontname, _font_size_of(ch)) return (None, 0) _ascii_allowed = [False] * 128 _forbidden_chars = "\n\t" for c in string.printable: _ascii_allowed[ord(c)] = True for c in _forbidden_chars: _ascii_allowed[ord(c)] = False
MIT License
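A small sketch of how the predicate filters text (assumes pdftotree is installed; _allowed_char is a private helper, so the import path may change between versions):

from pdftotree.utils.pdf.pdf_utils import _allowed_char

text = "col1\tcol2\nRésumé"
cleaned = "".join(c for c in text if _allowed_char(c))
# Tabs and newlines are dropped; printable ASCII and non-ASCII characters such as 'é' are kept.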
unofficial-memsource/memsource-cli-client
memsource_cli/models/sdl_xlf_settings_dto.py
SdlXlfSettingsDto.save_confirmed_segments
python
def save_confirmed_segments(self, save_confirmed_segments):
    self._save_confirmed_segments = save_confirmed_segments
Sets the save_confirmed_segments of this SdlXlfSettingsDto.
Default: true  # noqa: E501

:param save_confirmed_segments: The save_confirmed_segments of this SdlXlfSettingsDto.  # noqa: E501
:type: bool
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/sdl_xlf_settings_dto.py#L252-L261
import pprint import re import six class SdlXlfSettingsDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'skip_import_rules': 'str', 'import_as_confirmed_rules': 'str', 'import_as_locked_rules': 'str', 'export_attrs_when_confirmed_and_locked': 'str', 'export_attrs_when_confirmed_and_not_locked': 'str', 'export_attrs_when_not_confirmed_and_locked': 'str', 'export_attrs_when_not_confirmed_and_not_locked': 'str', 'save_confirmed_segments': 'bool' } attribute_map = { 'skip_import_rules': 'skipImportRules', 'import_as_confirmed_rules': 'importAsConfirmedRules', 'import_as_locked_rules': 'importAsLockedRules', 'export_attrs_when_confirmed_and_locked': 'exportAttrsWhenConfirmedAndLocked', 'export_attrs_when_confirmed_and_not_locked': 'exportAttrsWhenConfirmedAndNotLocked', 'export_attrs_when_not_confirmed_and_locked': 'exportAttrsWhenNotConfirmedAndLocked', 'export_attrs_when_not_confirmed_and_not_locked': 'exportAttrsWhenNotConfirmedAndNotLocked', 'save_confirmed_segments': 'saveConfirmedSegments' } def __init__(self, skip_import_rules=None, import_as_confirmed_rules=None, import_as_locked_rules=None, export_attrs_when_confirmed_and_locked=None, export_attrs_when_confirmed_and_not_locked=None, export_attrs_when_not_confirmed_and_locked=None, export_attrs_when_not_confirmed_and_not_locked=None, save_confirmed_segments=None): self._skip_import_rules = None self._import_as_confirmed_rules = None self._import_as_locked_rules = None self._export_attrs_when_confirmed_and_locked = None self._export_attrs_when_confirmed_and_not_locked = None self._export_attrs_when_not_confirmed_and_locked = None self._export_attrs_when_not_confirmed_and_not_locked = None self._save_confirmed_segments = None self.discriminator = None if skip_import_rules is not None: self.skip_import_rules = skip_import_rules if import_as_confirmed_rules is not None: self.import_as_confirmed_rules = import_as_confirmed_rules if import_as_locked_rules is not None: self.import_as_locked_rules = import_as_locked_rules if export_attrs_when_confirmed_and_locked is not None: self.export_attrs_when_confirmed_and_locked = export_attrs_when_confirmed_and_locked if export_attrs_when_confirmed_and_not_locked is not None: self.export_attrs_when_confirmed_and_not_locked = export_attrs_when_confirmed_and_not_locked if export_attrs_when_not_confirmed_and_locked is not None: self.export_attrs_when_not_confirmed_and_locked = export_attrs_when_not_confirmed_and_locked if export_attrs_when_not_confirmed_and_not_locked is not None: self.export_attrs_when_not_confirmed_and_not_locked = export_attrs_when_not_confirmed_and_not_locked if save_confirmed_segments is not None: self.save_confirmed_segments = save_confirmed_segments @property def skip_import_rules(self): return self._skip_import_rules @skip_import_rules.setter def skip_import_rules(self, skip_import_rules): self._skip_import_rules = skip_import_rules @property def import_as_confirmed_rules(self): return self._import_as_confirmed_rules @import_as_confirmed_rules.setter def import_as_confirmed_rules(self, import_as_confirmed_rules): self._import_as_confirmed_rules = import_as_confirmed_rules @property def import_as_locked_rules(self): return self._import_as_locked_rules @import_as_locked_rules.setter def import_as_locked_rules(self, import_as_locked_rules): self._import_as_locked_rules = import_as_locked_rules @property def 
export_attrs_when_confirmed_and_locked(self): return self._export_attrs_when_confirmed_and_locked @export_attrs_when_confirmed_and_locked.setter def export_attrs_when_confirmed_and_locked(self, export_attrs_when_confirmed_and_locked): self._export_attrs_when_confirmed_and_locked = export_attrs_when_confirmed_and_locked @property def export_attrs_when_confirmed_and_not_locked(self): return self._export_attrs_when_confirmed_and_not_locked @export_attrs_when_confirmed_and_not_locked.setter def export_attrs_when_confirmed_and_not_locked(self, export_attrs_when_confirmed_and_not_locked): self._export_attrs_when_confirmed_and_not_locked = export_attrs_when_confirmed_and_not_locked @property def export_attrs_when_not_confirmed_and_locked(self): return self._export_attrs_when_not_confirmed_and_locked @export_attrs_when_not_confirmed_and_locked.setter def export_attrs_when_not_confirmed_and_locked(self, export_attrs_when_not_confirmed_and_locked): self._export_attrs_when_not_confirmed_and_locked = export_attrs_when_not_confirmed_and_locked @property def export_attrs_when_not_confirmed_and_not_locked(self): return self._export_attrs_when_not_confirmed_and_not_locked @export_attrs_when_not_confirmed_and_not_locked.setter def export_attrs_when_not_confirmed_and_not_locked(self, export_attrs_when_not_confirmed_and_not_locked): self._export_attrs_when_not_confirmed_and_not_locked = export_attrs_when_not_confirmed_and_not_locked @property def save_confirmed_segments(self): return self._save_confirmed_segments @save_confirmed_segments.setter
Apache License 2.0
microsoft/restler-fuzzer
restler/engine/core/driver.py
apply_checkers
python
def apply_checkers(checkers, renderings, global_lock):
    for checker in checkers:
        try:
            if checker.enabled:
                RAW_LOGGING(f"Checker: {checker.__class__.__name__} kicks in\n")
                checker.apply(renderings, global_lock)
                RAW_LOGGING(f"Checker: {checker.__class__.__name__} kicks out\n")
        except Exception as error:
            print(f"Exception {error!s} applying checker {checker}")
            raise
Calls each enabled checker from a list of Checker objects

@param checkers: A list of checkers to apply
@type  checkers: List[Checker]
@param renderings: Object containing the rendered sequence information
@type  renderings: RenderedSequence
@param global_lock: Lock object used for synchronization of more than one fuzzing job.
@type  global_lock: thread.Lock

@return: None
@rtype : None
https://github.com/microsoft/restler-fuzzer/blob/d74a267467a2d43fb37c8a16754d0b28e80b649a/restler/engine/core/driver.py#L132-L154
from __future__ import print_function import sys, os import copy import time import random import inspect import itertools import functools import multiprocessing from multiprocessing.dummy import Pool as ThreadPool from collections import deque import re from restler_settings import Settings import utils.logger as logger import utils.saver as saver import utils.formatting as formatting import engine.dependencies as dependencies import engine.core.sequences as sequences import engine.core.requests as requests import engine.core.fuzzing_monitor as fuzzing_monitor from engine.core.fuzzing_requests import FuzzingRequestCollection from engine.core.requests import GrammarRequestCollection from engine.core.requests import FailureInformation from engine.core.request_utilities import execute_token_refresh_cmd from engine.core.request_utilities import get_hostname_from_line from engine.core.fuzzing_monitor import Monitor from engine.errors import TimeOutException from engine.errors import ExhaustSeqCollectionException from engine.errors import InvalidDictionaryException from engine.transport_layer import messaging from utils.logger import raw_network_logging as RAW_LOGGING def validate_dependencies(consumer_req, producer_seq): producer_requests = [] for req in producer_seq: producer_requests.extend(req.produces) return consumer_req.consumes <= set(producer_requests) def extend(seq_collection, fuzzing_requests, lock): prev_len = len(seq_collection) extended_requests = [] Monitor().current_fuzzing_generation += 1 for req in fuzzing_requests: for i in range(prev_len): seq = seq_collection[i] if not validate_dependencies(req, seq) and not Settings().ignore_dependencies: continue extended_requests.append(req) req_copy = copy.copy(req) req_copy._current_combination_id = 0 if seq.is_empty_sequence(): new_seq = sequences.Sequence(req_copy) else: new_seq = seq + sequences.Sequence(req_copy) seq_collection.append(new_seq) if Settings().fuzzing_mode in ['bfs-fast', 'bfs-minimal']: break Monitor().current_fuzzing_generation -= 1 if Settings().fuzzing_mode == 'random-walk': if len(seq_collection) > 0: rand_int = random.randint(prev_len, len(seq_collection) - 1) return seq_collection[rand_int: rand_int + 1], extended_requests[rand_int: rand_int + 1] else: return [], [] return seq_collection[prev_len:], extended_requests
MIT License
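A duck-typed sketch of the contract apply_checkers relies on (NoOpChecker is hypothetical; inside RESTler, real checkers subclass the engine's checker base class and run within an initialized engine):

class NoOpChecker:
    # apply_checkers only needs an `enabled` flag and an `apply(renderings, global_lock)` method.
    enabled = True

    def apply(self, renderings, global_lock):
        # Inspect the rendered sequence here and record any violations found.
        pass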
canerturkmen/hawkeslib
hawkeslib/model/uv_bayes.py
BayesianUVExpHawkesProcess.log_posterior
python
def log_posterior(self, t, T=None):
    t, T = self._prep_t_T(t, T)
    mu, alpha, theta = self.get_params()
    return self._log_posterior(t, T)([mu, alpha, theta])
Get the log unnormalized posterior for parameters already fit, under observed timestamps ``t``.

:param numpy.array[float] t: Observation timestamps of the process up to time T. 1-d array of timestamps. must be sorted (asc)
:param T: (optional) maximum time
:type T: float or None

:rtype: float
:return: the log unnormalized posterior (log potential)
https://github.com/canerturkmen/hawkeslib/blob/37f7b0c247568bb5ae96eb83d89a2d9bbbc4d43c/hawkeslib/model/uv_bayes.py#L194-L208
import numpy as np import numdifftools as nd from hawkeslib.util.multitrace import MultiTrace from .model import BayesianPointProcessMixin from .c.c_uv_exp import uv_exp_ll, uv_exp_ll_grad from .uv_exp import UnivariateExpHawkesProcess from scipy.stats import gamma, beta from scipy.optimize import minimize class BayesianUVExpHawkesProcess(UnivariateExpHawkesProcess, BayesianPointProcessMixin): def __init__(self, mu_hyp, alpha_hyp, theta_hyp): super(BayesianUVExpHawkesProcess, self).__init__() self.mu_hyp = mu_hyp self.alpha_hyp = alpha_hyp self.theta_hyp = theta_hyp self._log_posterior = lambda t, T: self._get_log_posterior_pot_grad_fns(t, T, mu_hyp, alpha_hyp, theta_hyp)[0] self._log_posterior_grad = lambda t, T: self._get_log_posterior_pot_grad_fns(t, T, mu_hyp, alpha_hyp, theta_hyp)[1] @classmethod def _get_log_posterior_pot_grad_fns(cls, t, T, mu_hyp, alpha_hyp, theta_hyp): t, T = cls._prep_t_T(t, T) def f0(x): mu, a, th = x[0], x[1], x[2] res = uv_exp_ll(t, mu, a, th, T) res += gamma.logpdf(mu, mu_hyp[0], scale=mu_hyp[1]) + gamma.logpdf(th, theta_hyp[0], scale=theta_hyp[1]) + beta.logpdf(a, alpha_hyp[0], alpha_hyp[1]) return res def g0(x): mu, a, th = x[0], x[1], x[2] res = uv_exp_ll_grad(t, mu, a, th, T) res[0] += (mu_hyp[0] - 1) / mu - 1. / mu_hyp[1] res[1] += (alpha_hyp[0] - 1) / a - (alpha_hyp[1] - 1) / (1 - a) res[2] += (theta_hyp[0] - 1) / th - 1. / theta_hyp[1] return res return f0, g0 def _fit_grad_desc(self, t, T=None, nr_restarts=5): t, T = self._prep_t_T(t, T) N = len(t) ress = [] f = self._log_posterior(t, T) g = self._log_posterior_grad(t, T) best_minres = None best_ll = np.inf for epoch in range(nr_restarts): mu0 = np.random.gamma(self.mu_hyp[0], scale=self.mu_hyp[1]) th0 = np.random.gamma(self.theta_hyp[0], scale=self.theta_hyp[1]) a0 = np.random.beta(self.alpha_hyp[0], self.alpha_hyp[1]) minres = minimize(lambda x: -f(x), x0=np.array([mu0, a0, th0]), jac=lambda x: -g(x), bounds=[(1e-5, None), (1e-5, 1), (1e-5, None)], method="L-BFGS-B", options={"disp": False, "ftol": 1e-8, "gtol": 1e-8}) ress.append(minres) mu, a, _ = minres.x if minres.fun < best_ll: best_ll = minres.fun best_minres = minres return best_minres def marginal_likelihood(self, t, T = None): t, T = self._prep_t_T(t, T) f = self._log_posterior(t, T) g = self._log_posterior_grad(t, T) xopt = np.array(self.get_params()) H = nd.Jacobian(g)(xopt) return f(xopt) + 1.5 * np.log(2 * np.pi) - .5 * np.linalg.slogdet(H)[1] def log_posterior_with_params(self, t, mu, alpha, theta, T=None): t, T = self._prep_t_T(t, T) return self._log_posterior(t, T)([mu, alpha, theta])
MIT License
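A minimal call sketch (assumes hawkeslib is installed; the timestamps and hyperparameters are made-up):

import numpy as np
from hawkeslib.model.uv_bayes import BayesianUVExpHawkesProcess

t = np.sort(np.random.rand(100) * 10.)   # sorted event times on [0, 10]
bp = BayesianUVExpHawkesProcess(mu_hyp=(1., 1.), alpha_hyp=(1., 1.), theta_hyp=(1., 1.))
# log_posterior_with_params avoids fitting first; log_posterior itself uses the fitted parameters.
lp = bp.log_posterior_with_params(t, mu=0.5, alpha=0.2, theta=1.0, T=10.)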
yingzhangdut/cross-modal-projection-learning
evaluation/retrieval_eval.py
average_precision_at_k
python
def average_precision_at_k(rank, plabels=None, glabels=None, k=50):
    n_probe, n_gallery = rank.shape
    match = 0
    average_precision = 1.0 * np.zeros_like(plabels)
    for i in range(n_probe):
        relevant_size = sum(glabels == plabels[i])
        hit_index = np.where(glabels[rank[i, :k]] == plabels[i])
        precision = 1.0 * np.zeros_like(hit_index[0])
        for j in range(hit_index[0].shape[0]):
            hitid = max(1, hit_index[0][j])
            precision[j] = sum(glabels[rank[i, :hitid]] == plabels[i]) * 1.0 / (hit_index[0][j] + 1)
        average_precision[i] = np.sum(precision) * 1.0 / relevant_size
    score = np.mean(average_precision)
    return score
Compute AP@K:
    We report the AP@K, the percent of top-K scoring images whose class matches
    that of the text query, averaged over all the test classes.
---------------------------------------------------
Inputs:
    distmat : numpy.ndarray
        The distance matrix. ``distmat[i, j]`` is the distance between i-th probe
        sample and j-th gallery sample.
    glabels : numpy.ndarray or None, optional
    plabels : numpy.ndarray or None, optional
---------------------------------------------------
Outputs:
    out : numpy.ndarray,
        The AP@K accuracy
---------------------------------------------------
https://github.com/yingzhangdut/cross-modal-projection-learning/blob/447d6427528ba2ef721e8c0e2f745578686bca64/evaluation/retrieval_eval.py#L29-L59
import numpy as np


def recall_at_k(rank, plabels=None, glabels=None, k=1):
    n_probe, n_gallery = rank.shape
    match = 0
    for i in range(n_probe):
        match += int(sum(glabels[rank[i, :k]] == plabels[i]) > 0)
    score = match * 1.0 / n_probe
    return score
MIT License
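An illustrative call of the two metrics above on a toy retrieval problem. The import path mirrors the record's evaluation/retrieval_eval.py layout and is an assumption; the labels and distances are made up.

import numpy as np
from evaluation.retrieval_eval import average_precision_at_k, recall_at_k  # path assumed from the record

# Toy setup: 3 probes, 4 gallery items, random distances.
distmat = np.random.rand(3, 4)
rank = np.argsort(distmat, axis=1)    # gallery indices sorted by increasing distance, per probe
plabels = np.array([0, 1, 2])         # probe class labels
glabels = np.array([0, 1, 2, 0])      # gallery class labels

print(recall_at_k(rank, plabels=plabels, glabels=glabels, k=1))
print(average_precision_at_k(rank, plabels=plabels, glabels=glabels, k=4))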
windelbouwman/ppci
ppci/lang/c3/codegenerator.py
CodeGenerator.gen
python
def gen(self, context):
    self.context = context
    ir_module = ir.Module("c3_code", debug_db=self.debug_db)
    self.builder = irutils.Builder()
    self.builder.module = ir_module
    for module in context.modules:
        self.gen_globals(module)
    for module in context.modules:
        self.gen_module(module)
    return ir_module
Generate code for a whole context
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/lang/c3/codegenerator.py#L33-L48
import logging

from ... import ir
from ... import irutils
from ...binutils import debuginfo
from . import astnodes as ast
from .scope import SemanticError


class CodeGenerator:
    logger = logging.getLogger("c3cgen")

    def __init__(self, diag):
        self.builder = irutils.Builder()
        self.diag = diag
        self.context = None
        self.debug_db = debuginfo.DebugDb()
        self.module_ok = False
BSD 2-Clause Simplified License
fastats/fastats
fastats/scaling/scaling.py
demean
python
def demean(A):
    assert A.ndim > 1
    n = A.shape[1]
    res = empty_like(A, dtype=np_float64)
    for i in range(n):
        data_i = A[:, i]
        res[:, i] = data_i - mean(data_i)
    return res
Subtract the mean from the supplied data column-wise.
https://github.com/fastats/fastats/blob/5915423714b32ed7e953e1e3a311fe50c3f30943/fastats/scaling/scaling.py#L103-L116
from numba import prange from numpy import empty_like, mean, std, sqrt, argsort, ones, nonzero, empty from numpy import float64 as np_float64 from numpy import int32 as np_int32 from numpy import max as np_max from numpy import min as np_min def scale(A): return A def standard(A, ddof=0): assert A.ndim > 1 if ddof not in (0, 1): raise ValueError('ddof must be either 0 or 1') n = A.shape[1] res = empty_like(A, dtype=np_float64) for i in range(n): data_i = A[:, i] res[:, i] = (data_i - mean(data_i)) / std(data_i) if ddof == 1: m = A.shape[0] res *= sqrt((m - 1) / m) return res def min_max(A): assert A.ndim > 1 n = A.shape[1] res = empty_like(A, dtype=np_float64) for i in range(n): data_i = A[:, i] data_min = np_min(data_i) res[:, i] = (data_i - data_min) / (np_max(data_i) - data_min) return res def rank(A): assert A.ndim > 1 A = A.astype(np_float64) res = empty_like(A) m, n = A.shape for i in range(n): data_i = A[:, i] data_i_std = empty_like(data_i, dtype=np_int32) sort_order = argsort(data_i) data_i = data_i[sort_order] obs = ones(m, dtype=np_int32) for j in range(m): idx = sort_order[j] data_i_std[idx] = j if j > 0: if data_i[j] == data_i[j - 1]: obs[j] = 0 dense = obs.cumsum()[data_i_std] non_zero_indices = nonzero(obs)[0] count = empty(len(non_zero_indices) + 1) count[:-1] = non_zero_indices count[-1] = m res[:, i] = (count[dense] + count[dense - 1] + 1) / 2.0 return res
MIT License
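A short usage sketch for demean on a small array; the import path mirrors the record's fastats/scaling/scaling.py location and is an assumption.

import numpy as np
from fastats.scaling.scaling import demean  # path assumed from the record

A = np.array([[1.0, 10.0],
              [2.0, 20.0],
              [3.0, 30.0]])

out = demean(A)
print(out)               # each column shifted by its own mean
print(out.mean(axis=0))  # approximately [0., 0.]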
google/mobly
mobly/test_runner.py
TestRunner.time_elapsed_sec
python
def time_elapsed_sec(self):
    if self._start_counter is None or self._end_counter is None:
        return None
    return self._end_counter - self._start_counter
The total time elapsed for a test run in seconds. This value is None until the test run has completed.
https://github.com/google/mobly/blob/542a78a7198256d172f56546ab8a6493166b3d9b/mobly/test_runner.py#L268-L275
import argparse import contextlib import logging import os import sys import time from mobly import base_test from mobly import config_parser from mobly import logger from mobly import records from mobly import signals from mobly import utils class Error(Exception): pass def main(argv=None): args = parse_mobly_cli_args(argv) test_class = _find_test_class() if args.list_tests: _print_test_names(test_class) sys.exit(0) test_configs = config_parser.load_test_config_file(args.config, args.test_bed) tests = None if args.tests: tests = args.tests ok = True for config in test_configs: runner = TestRunner(log_dir=config.log_path, testbed_name=config.testbed_name) with runner.mobly_logger(): runner.add_test_class(config, test_class, tests) try: runner.run() ok = runner.results.is_all_pass and ok except signals.TestAbortAll: pass except Exception: logging.exception('Exception when executing %s.', config.testbed_name) ok = False if not ok: sys.exit(1) def parse_mobly_cli_args(argv): parser = argparse.ArgumentParser(description='Mobly Test Executable.') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-c', '--config', type=str, metavar='<PATH>', help='Path to the test configuration file.') group.add_argument( '-l', '--list_tests', action='store_true', help='Print the names of the tests defined in a script without ' 'executing them.') parser.add_argument('--tests', '--test_case', nargs='+', type=str, metavar='[test_a test_b...]', help='A list of tests in the test class to execute.') parser.add_argument('-tb', '--test_bed', nargs='+', type=str, metavar='[<TEST BED NAME1> <TEST BED NAME2> ...]', help='Specify which test beds to run tests on.') if not argv: argv = sys.argv[1:] return parser.parse_known_args(argv)[0] def _find_test_class(): try: return utils.find_subclass_in_module(base_test.BaseTestClass, sys.modules['__main__']) except ValueError: logging.exception('Exactly one subclass of `base_test.BaseTestClass`' ' should be in the main file.') sys.exit(1) def _print_test_names(test_class): cls = test_class(config_parser.TestRunConfig()) test_names = [] try: cls.setup_generated_tests() test_names = cls.get_existing_test_names() except Exception: logging.exception('Failed to retrieve generated tests.') finally: cls._controller_manager.unregister_controllers() print('==========> %s <==========' % cls.TAG) for name in test_names: print(name) class TestRunner: class _TestRunInfo: def __init__(self, config, test_class, tests=None, test_class_name_suffix=None): self.config = config self.test_class = test_class self.test_class_name_suffix = test_class_name_suffix self.tests = tests class _TestRunMetaData: def __init__(self, log_dir, testbed_name): self._log_dir = log_dir self._testbed_name = testbed_name self._logger_start_time = None self._start_counter = None self._end_counter = None self.root_output_path = log_dir def generate_test_run_log_path(self): self._logger_start_time = logger.get_log_file_timestamp() self.root_output_path = os.path.join(self._log_dir, self._testbed_name, self._logger_start_time) return self.root_output_path def set_start_point(self): self._start_counter = time.perf_counter() def set_end_point(self): self._end_counter = time.perf_counter() @property def run_id(self): return f'{self._testbed_name}@{self._logger_start_time}' @property
Apache License 2.0
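Running the real TestRunner requires a Mobly config and a test class, so the following is only a self-contained sketch of the time.perf_counter start/end pattern that time_elapsed_sec relies on; ElapsedTimer is a hypothetical stand-in and not part of Mobly.

import time


class ElapsedTimer:
    """Hypothetical stand-in mirroring the start/end counter pattern used above."""

    def __init__(self):
        self._start_counter = None
        self._end_counter = None

    def set_start_point(self):
        self._start_counter = time.perf_counter()

    def set_end_point(self):
        self._end_counter = time.perf_counter()

    @property
    def time_elapsed_sec(self):
        # None until both counters have been recorded, matching the property above.
        if self._start_counter is None or self._end_counter is None:
            return None
        return self._end_counter - self._start_counter


timer = ElapsedTimer()
print(timer.time_elapsed_sec)   # None: the run has not completed yet
timer.set_start_point()
time.sleep(0.1)
timer.set_end_point()
print(timer.time_elapsed_sec)   # roughly 0.1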
johndmcmaster/uvscada
uvscada/k2750.py
K2700.tim_int
python
def tim_int(self):
    return float(self.instrument.ask('TRIGger:TIMer?'))
Query timer interval
https://github.com/johndmcmaster/uvscada/blob/0b2bc4af49c51060d385da003fda13540cf9af4e/uvscada/k2750.py#L64-L66
from pymeasure.instruments.keithley import Keithley2700
from pymeasure.adapters import PrologixAdapter
import re
import time
import glob

volt_dc_re = re.compile("(.*)VDC,(.*)SECS,(.*)RDNG#")
curr_dc_re = re.compile("(.*)ADC,(.*)SECS,(.*)RDNG#")
res_re = re.compile("(.*),(.*)SECS,(.*)RDNG#")


class K2700(object):
    def __init__(self, port=None, clr=True, ident=True):
        if port is None:
            devices = glob.glob("/dev/serial/by-id/usb-Prologix_Prologix_GPIB-USB_Controller_*")
            assert len(devices), "No GPIB found"
            port = devices[0]
        self.adapter = PrologixAdapter(port)
        self.instrument = Keithley2700(self.adapter.gpib(5))
        self.func = None
        self.vendor = None
        self.model = None
        self.sn = None
        if ident:
            vendor, model = self.ident()
            if (vendor, model) != ('KEITHLEY INSTRUMENTS INC.', 'MODEL 2750') and (vendor, model) != ('KEITHLEY INSTRUMENTS INC.', 'MODEL 2700'):
                raise ValueError('Bad instrument: %s, %s' % (vendor, model))

    def ident(self):
        return self.ident_ex()[0:2]

    def ident_ex(self):
        tmp = self.instrument.ask("*IDN?")
        ret = tmp.split(',')
        self.vendor = ret[0]
        self.model = ret[1]
        sn = ret[2]
        fw = ret[3]
        return (self.vendor, self.model, sn, fw)

    def card_sn(self):
        return self.gpib.ask("SYSTem:CARD1:SNUMber?")
BSD 2-Clause Simplified License
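A hedged usage sketch: it assumes the uvscada package is importable as laid out in the record and that a Prologix GPIB-USB adapter with the meter at GPIB address 5 is actually attached, which is what the K2700 constructor in the context expects.

from uvscada.k2750 import K2700  # path assumed from the record

k = K2700()  # with port=None the constructor auto-detects /dev/serial/by-id/usb-Prologix_*
print("trigger timer interval: %s s" % k.tim_int())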
devopshq/vspheretools
pysphere/ZSI/ServiceContainer.py
SimpleWSResource.getNode
python
def getNode(self, post):
    return self._nodes.getNode(post)
post -- POST HTTP value
https://github.com/devopshq/vspheretools/blob/10890423bfbba976e3ddee61204e9eed4b73fe92/pysphere/ZSI/ServiceContainer.py#L476-L479
import urlparse, sys, thread,re from BaseHTTPServer import HTTPServer from pysphere.ZSI import ParseException, FaultFromException, FaultFromZSIException, Fault from pysphere.ZSI import _get_element_nsuri_name, resolvers from pysphere.ZSI import _get_idstr from pysphere.ZSI.address import Address from pysphere.ZSI.parse import ParsedSoap from pysphere.ZSI.writer import SoapWriter from pysphere.ZSI.dispatch import SOAPRequestHandler as BaseSOAPRequestHandler class NoSuchService(Exception): pass class UnknownRequestException(Exception): pass class PostNotSpecified(Exception): pass class SOAPActionNotSpecified(Exception): pass class WSActionException(Exception): pass class WSActionNotSpecified(WSActionException): pass class NotAuthorized(Exception): pass class ServiceAlreadyPresent(Exception): pass class SOAPContext: def __init__(self, container, xmldata, ps, connection, httpheaders, soapaction): self.container = container self.xmldata = xmldata self.parsedsoap = ps self.connection = connection self.httpheaders= httpheaders self.soapaction = soapaction _contexts = dict() def GetSOAPContext(): global _contexts return _contexts[thread.get_ident()] def _Dispatch(ps, server, SendResponse, SendFault, post, action, nsdict={}, **kw): localURL = 'http://%s:%d%s' %(server.server_name,server.server_port,post) address = action service = server.getNode(post) isWSResource = False if isinstance(service, WSAResource): isWSResource = True service.setServiceURL(localURL) address = Address() try: address.parse(ps) except Exception, e: return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw) if action and action != address.getAction(): e = WSActionException('SOAP Action("%s") must match WS-Action("%s") if specified.' %(action,address.getAction())) return SendFault(FaultFromException(e, 0, None), **kw) action = address.getAction() if isinstance(service, ServiceInterface) is False: e = NoSuchService('no service at POST(%s) in container: %s' %(post,server)) return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw) if not service.authorize(None, post, action): return SendFault(Fault(Fault.Server, "Not authorized"), code=401) try: method = service.getOperation(ps, address) except Exception, e: return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw) try: if isWSResource is True: _,result = method(ps, address) else: _,result = method(ps) except Exception, e: return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw) service.verify(ps) if result is None: return SendResponse('', **kw) sw = SoapWriter(nsdict=nsdict) try: sw.serialize(result) except Exception, e: return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw) if isWSResource is True: action = service.getResponseAction(ps, action) addressRsp = Address(action=action) try: addressRsp.setResponseFromWSAddress(address, localURL) addressRsp.serialize(sw) except Exception, e: return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw) service.sign(sw) try: soapdata = str(sw) return SendResponse(soapdata, **kw) except Exception, e: return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw) def AsServer(port=80, services=()): address = ('', port) sc = ServiceContainer(address, services) sc.serve_forever() class ServiceInterface: soapAction = {} wsAction = {} root = {} def __init__(self, post): self.post = post def authorize(self, auth_info, post, action): return 1 def __str__(self): return '%s(%s) POST(%s)' %(self.__class__.__name__, _get_idstr(self), self.post) def sign(self, sw): return def 
verify(self, ps): return def getPost(self): return self.post def getOperation(self, ps, action): opName = self.getOperationName(ps, action) return getattr(self, opName) def getOperationName(self, ps, action): method = self.root.get(_get_element_nsuri_name(ps.body_root)) or self.soapAction.get(action) if method is None: raise UnknownRequestException( 'failed to map request to a method: action(%s), root%s' %(action,_get_element_nsuri_name(ps.body_root))) return method class ServiceSOAPBinding(ServiceInterface): def __init__(self, post): ServiceInterface.__init__(self, post) def __call___(self, action, ps): return self.getOperation(ps, action)(ps) class WSAResource(ServiceSOAPBinding): encoding = "UTF-8" def __init__(self, post): assert isinstance(self.soapAction, dict), "soapAction must be a dict" assert isinstance(self.wsAction, dict), "wsAction must be a dict" ServiceSOAPBinding.__init__(self, post) def __call___(self, action, ps, address): return self.getOperation(ps, action)(ps, address) def getServiceURL(self): return self._url def setServiceURL(self, url): self._url = url def getOperation(self, ps, address): action = address.getAction() opName = self.getOperationName(ps, action) return getattr(self, opName) def getResponseAction(self, ps, action): opName = self.getOperationName(ps, action) if not opName in self.wsAction: raise WSActionNotSpecified('wsAction dictionary missing key(%s)' % opName) return self.wsAction[opName] def do_POST(self): global _contexts soapAction = self.headers.getheader('SOAPAction') post = self.path if not post: raise PostNotSpecified('HTTP POST not specified in request') if soapAction: soapAction = soapAction.strip('\'"') post = post.strip('\'"') try: ct = self.headers['content-type'] if ct.startswith('multipart/'): cid = resolvers.MIMEResolver(ct, self.rfile) xml = cid.GetSOAPPart() ps = ParsedSoap(xml, resolver=cid.Resolve) else: length = int(self.headers['content-length']) ps = ParsedSoap(self.rfile.read(length)) except ParseException, e: self.send_fault(FaultFromZSIException(e)) except Exception, e: self.send_fault(FaultFromException(e, 1, sys.exc_info()[2])) else: thread_id = thread.get_ident() _contexts[thread_id] = SOAPContext(self.server, xml, ps, self.connection, self.headers, soapAction) try: _Dispatch(ps, self.server, self.send_xml, self.send_fault, post=post, action=soapAction) except Exception, e: self.send_fault(FaultFromException(e, 0, sys.exc_info()[2])) if thread_id in _contexts: del _contexts[thread_id] class SOAPRequestHandler(BaseSOAPRequestHandler): def do_POST(self): soapAction = self.headers.getheader('SOAPAction') post = self.path if not post: raise PostNotSpecified('HTTP POST not specified in request') if soapAction: soapAction = soapAction.strip('\'"') post = post.strip('\'"') try: ct = self.headers['content-type'] if ct.startswith('multipart/'): cid = resolvers.MIMEResolver(ct, self.rfile) xml = cid.GetSOAPPart() ps = ParsedSoap(xml, resolver=cid.Resolve) else: length = int(self.headers['content-length']) xml = self.rfile.read(length) ps = ParsedSoap(xml) except ParseException, e: self.send_fault(FaultFromZSIException(e)) except Exception, e: self.send_fault(FaultFromException(e, 1, sys.exc_info()[2])) else: thread_id = thread.get_ident() _contexts[thread_id] = SOAPContext(self.server, xml, ps, self.connection, self.headers, soapAction) try: _Dispatch(ps, self.server, self.send_xml, self.send_fault, post=post, action=soapAction) except Exception, e: self.send_fault(FaultFromException(e, 0, sys.exc_info()[2])) if thread_id in 
_contexts: del _contexts[thread_id] def do_GET(self): if self.path.lower().endswith("?wsdl"): service_path = self.path[:-5] service = self.server.getNode(service_path) if hasattr(service, "_wsdl"): wsdl = service._wsdl proto = 'http' if hasattr(self.server,'proto'): proto = self.server.proto serviceUrl = '%s://%s:%d%s' % (proto, self.server.server_name, self.server.server_port, service_path) soapAddress = '<soap:address location="%s"/>' % serviceUrl wsdlre = re.compile('\<soap:address[^\>]*>',re.IGNORECASE) wsdl = re.sub(wsdlre,soapAddress,wsdl) self.send_xml(wsdl) else: self.send_error(404, "WSDL not available for that service [%s]." % self.path) else: self.send_error(404, "Service not found [%s]." % self.path) class ServiceContainer(HTTPServer): class NodeTree: def __init__(self): self.__dict = {} def __str__(self): return str(self.__dict) def listNodes(self): print list(self.__dict.iterkeys()) def getNode(self, url): path = urlparse.urlsplit(url)[2] if path.startswith("/"): path = path[1:] if path in self.__dict: return self.__dict[path] else: raise NoSuchService('No service(%s) in ServiceContainer' % path) def setNode(self, service, url): path = urlparse.urlsplit(url)[2] if path.startswith("/"): path = path[1:] if not isinstance(service, ServiceSOAPBinding): raise TypeError('A Service must implement class ServiceSOAPBinding') if path in self.__dict: raise ServiceAlreadyPresent('Service(%s) already in ServiceContainer' % path) else: self.__dict[path] = service def removeNode(self, url): path = urlparse.urlsplit(url)[2] if path.startswith("/"): path = path[1:] if path in self.__dict: node = self.__dict[path] del self.__dict[path] return node else: raise NoSuchService('No service(%s) in ServiceContainer' % path) def __init__(self, server_address, services=[], RequestHandlerClass=SOAPRequestHandler): HTTPServer.__init__(self, server_address, RequestHandlerClass) self._nodes = self.NodeTree() [self.setNode(s) for s in services] def __str__(self): return '%s(%s) nodes( %s )' %(self.__class__, _get_idstr(self), str(self._nodes)) def __call__(self, ps, post, action, address=None): method = self.getCallBack(ps, post, action) if (isinstance(method.im_self, WSAResource) or isinstance(method.im_self, SimpleWSResource)): return method(ps, address) return method(ps) def setNode(self, service, url=None): if url is None: url = service.getPost() self._nodes.setNode(service, url) def getNode(self, url): return self._nodes.getNode(url) def removeNode(self, url): self._nodes.removeNode(url) class SimpleWSResource(ServiceSOAPBinding):
MIT License
user-cont/conu
conu/backend/docker/container.py
DockerContainer.execute
python
def execute(self, command, blocking=True, exec_create_kwargs=None, exec_start_kwargs=None):
    logger.info("running command %s", command)

    exec_create_kwargs = exec_create_kwargs or {}
    exec_start_kwargs = exec_start_kwargs or {}
    exec_start_kwargs["stream"] = True
    exec_i = self.d.exec_create(self.get_id(), command, **exec_create_kwargs)
    output = self.d.exec_start(exec_i, **exec_start_kwargs)

    if blocking:
        response = []
        for line in output:
            response.append(line)
            logger.info("%s", line.decode("utf-8").strip("\n\r"))

        e_inspect = self.d.exec_inspect(exec_i)
        exit_code = e_inspect["ExitCode"]
        if exit_code:
            logger.error("command failed")
            logger.info("exec metadata: %s", e_inspect)
            raise ConuException("failed to execute command %s, exit code %s" % (
                command, exit_code))
        return response
    return output
Execute a command in this container -- the container needs to be running.

If the command fails, a ConuException is thrown.

This is a blocking call by default and writes output of the command to logger
using the INFO level -- this behavior can be changed if you set the argument `blocking`
to `False`.

If not blocking, you should consume the returned iterator in order to see logs or know
when the command finished:

::

    for line in container.execute(["ping", "-c", "4", "8.8.8.8"], blocking=False):
        print(line)
    print("command finished")

:param command: list of str, command to execute in the container
:param blocking: bool, if True blocks until the command finishes
:param exec_create_kwargs: dict, params to pass to exec_create()
:param exec_start_kwargs: dict, params to pass to exec_start()
:return: iterator if non-blocking or list of bytes if blocking
https://github.com/user-cont/conu/blob/0d8962560f6f7f17fe1be0d434a4809e2a0ea51d/conu/backend/docker/container.py#L449-L496
from __future__ import print_function, unicode_literals import functools import logging import shutil import subprocess from tempfile import mkdtemp from docker.errors import NotFound from docker.types import Healthcheck from conu.apidefs.container import Container from conu.apidefs.image import Image from conu.apidefs.metadata import ContainerStatus from conu.backend.docker.container_parameters import DockerContainerParameters from conu.apidefs.filesystem import Filesystem from conu.apidefs.metadata import ContainerMetadata from conu.backend.docker.client import get_client from conu.backend.docker.utils import inspect_to_container_metadata from conu.exceptions import ConuException from conu.utils import check_port, run_cmd, export_docker_container_to_directory, graceful_get from conu.utils.probes import Probe from conu.backend.docker.constants import CONU_ARTIFACT_TAG logger = logging.getLogger(__name__) class DockerRunBuilder(object): def __init__(self, command=None, additional_opts=None): self.binary = ["docker"] self.global_options = [] self.command = ["run"] self.options = additional_opts or [] self.image_name = None self.arguments = command or [] def __str__(self): return str(self.build()) def build(self): return self.binary + self.global_options + self.command + self.options + ["-l", CONU_ARTIFACT_TAG] + [self.image_name] + self.arguments def get_parameters(self): import argparse parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-i", "--interactive", action="store_true", dest="stdin_open") parser.add_argument("-d", "--detach", action="store_true", dest="detach") parser.add_argument("-t", "--tty", action="store_true", dest="tty") parser.add_argument("--init", action="store_true", dest="init") parser.add_argument("--privileged", action="store_true", dest="privileged") parser.add_argument("-P", "--publish-all", action="store_true", dest="publish_all_ports") parser.add_argument("--read-only", action="store_true", dest="read_only") parser.add_argument("--rm", action="store_true", dest="remove") parser.add_argument("--entrypoint", action="store", dest="entrypoint") parser.add_argument("-h", "--hostname", action="store", dest="hostname") parser.add_argument("--name", action="store", dest="name") parser.add_argument("--ipc", action="store", dest="ipc_mode") parser.add_argument("--isolation", action="store", dest="isolation") parser.add_argument("--mac-address", action="store", dest="mac_address") parser.add_argument("-m", "--memory", action="store", dest="mem_limit") parser.add_argument("--network", action="store", dest="network") parser.add_argument("--platform", action="store", dest="platform") parser.add_argument("--runtime", action="store", dest="runtime") parser.add_argument("--stop-signal", action="store", dest="stop_signal") parser.add_argument("-u", "--user", action="store", dest="user") parser.add_argument("-w", "--workdir", action="store", dest="working_dir") parser.add_argument("--pids-limit", action="store", dest="pids_limit", type=int) parser.add_argument("-e", "--env", action="append", dest="env_variables") parser.add_argument("--cap-add", action="append", dest="cap_add") parser.add_argument("--cap-drop", action="append", dest="cap_drop") parser.add_argument("--device", action="append", dest="devices") parser.add_argument("--dns", action="append", dest="dns") parser.add_argument("--group-add", action="append", dest="group_add") parser.add_argument("--mount", action="append", dest="mounts") parser.add_argument("-v", "--volume", action="append", 
dest="volumes") parser.add_argument("-l", "--label", action="append", dest="labels") parser.add_argument("-p", "--publish", action="append", dest="port_mappings") parser.add_argument("--health-cmd", action="store", dest="health_cmd") parser.add_argument("--health-interval", action="store", dest="health_interval", type=int) parser.add_argument("--health-retries", action="store", dest="health_retries", type=int) parser.add_argument("--health-timeout", action="store", dest="health_timeout", type=int) parser.add_argument("--no-healthcheck", action="store_true", dest="no_healthcheck") args, _ = parser.parse_known_args(args=self.options) command = self.arguments options_dict = vars(args) if not options_dict.pop("no_healthcheck", None): options_dict["healthcheck"] = Healthcheck( test=options_dict.pop("health_cmd", None), interval=options_dict.pop("health_interval", None), timeout=options_dict.pop("health_timeout", None), retries=options_dict.pop("health_retries", None) ) else: options_dict['healthcheck'] = None with_dictionary_parameter = {'labels': '='} for name, separator in with_dictionary_parameter.items(): if options_dict[name] is not None: dictionary = {} for item in options_dict[name]: try: key, value = item.split(separator) dictionary[key] = value except ValueError: dictionary = options_dict[name] raise ConuException('Wrong format of dictionary: {name}'.format(name=name)) break options_dict[name] = dictionary if options_dict['port_mappings'] is not None: dictionary = {} for port_string in options_dict['port_mappings']: colon_count = port_string.count(':') if colon_count == 2: split_array = port_string.split(':') if split_array[1] == '': dictionary[split_array[2]] = (split_array[0], None) else: dictionary[split_array[2]] = (split_array[0], int(split_array[1])) elif colon_count == 1: split_array = port_string.split(':') dictionary[split_array[1]] = int(split_array[0]) elif colon_count == 0: dictionary[port_string] = None else: raise ConuException('Wrong format of port mappings') options_dict['port_mappings'] = dictionary container_parameters = DockerContainerParameters(cap_add=options_dict['cap_add'], cap_drop=options_dict['cap_drop'], command=command, detach=options_dict['detach'], devices=options_dict['devices'], dns=options_dict['dns'], entrypoint=options_dict['entrypoint'], env_variables=options_dict['env_variables'], group_add=options_dict['group_add'], healthcheck=options_dict['healthcheck'], hostname=options_dict['hostname'], init=options_dict['init'], ipc_mode=options_dict['ipc_mode'], isolation=options_dict['isolation'], labels=options_dict['labels'], mac_address=options_dict['mac_address'], mem_limit=options_dict['mem_limit'], mounts=options_dict['mounts'], name=options_dict['name'], network=options_dict['network'], pids_limit=options_dict['pids_limit'], platform=options_dict['platform'], port_mappings=options_dict['port_mappings'], privileged=options_dict['privileged'], publish_all_ports=options_dict['publish_all_ports'], read_only=options_dict['read_only'], remove=options_dict['remove'], runtime=options_dict['runtime'], stdin_open=options_dict['stdin_open'], stop_signal=options_dict['stop_signal'], tty=options_dict['tty'], user=options_dict['user'], volumes=options_dict['volumes'], working_dir=options_dict['working_dir'] ) return container_parameters class DockerContainerViaExportFS(Filesystem): def __init__(self, container, mount_point=None): super(DockerContainerViaExportFS, self).__init__(container, mount_point=mount_point) self.container = container @property def 
mount_point(self): if self._mount_point is None: self._mount_point = mkdtemp(prefix="conu", dir="/var/tmp") self.mount_point_provided = False return self._mount_point def __enter__(self): client = get_client() export_docker_container_to_directory(client, self.container, self.mount_point) return super(DockerContainerViaExportFS, self).__enter__() def __exit__(self, exc_type, exc_val, exc_tb): if not self.mount_point_provided: run_cmd(["chmod", "-R", "u+w", self.mount_point]) shutil.rmtree(self.mount_point) class DockerContainer(Container): def __init__(self, image, container_id, name=None, popen_instance=None): self.d = get_client() super(DockerContainer, self).__init__(image, container_id, name) self.popen_instance = popen_instance self._inspect_data = None self.metadata = ContainerMetadata() def __repr__(self): return "DockerContainer(image=%s, id=%s)" % (self.image, self.get_id()) def __str__(self): return self.get_id() def get_id(self): if self._id is None: self._id = self.inspect(refresh=False)["Id"] return self._id def inspect(self, refresh=True): if refresh or not self._inspect_data: ident = self._id or self.name if not ident: raise ConuException("This container does not have a valid identifier.") self._inspect_data = self.d.inspect_container(ident) return self._inspect_data def is_running(self): try: return self.inspect(refresh=True)["State"]["Running"] except NotFound: return False def get_IPv4s(self): self.get_metadata() return self.metadata.ipv4_addresses def get_IPv6s(self): self.get_metadata() return self.metadata.ipv6_addresses def get_ports(self): ports = [] container_ports = self.inspect(refresh=True)["NetworkSettings"]["Ports"] if not container_ports: return ports for p in container_ports: ports.append(p.split("/")[0]) return ports def is_port_open(self, port, timeout=2): addresses = self.get_IPv4s() if not addresses: return False return check_port(port, host=addresses[0], timeout=timeout) def get_port_mappings(self, port=None): port_mappings = self.inspect(refresh=True)["NetworkSettings"]["Ports"] if not port: return port_mappings if str(port) not in self.get_ports(): return [] for p in port_mappings: if p.split("/")[0] == str(port): return port_mappings[p] def get_image_name(self): metadata = self.inspect() if "Config" in metadata: return metadata["Config"].get("Image", None) return None def wait_for_port(self, port, timeout=10, **probe_kwargs): Probe(timeout=timeout, fnc=functools.partial(self.is_port_open, port), **probe_kwargs).run() def copy_to(self, src, dest): logger.debug("copying %s from host to container at %s", src, dest) cmd = ["docker", "cp", src, "%s:%s" % (self.get_id(), dest)] run_cmd(cmd) def copy_from(self, src, dest): logger.debug("copying %s from host to container at %s", src, dest) cmd = ["docker", "cp", "%s:%s" % (self.get_id(), src), dest] run_cmd(cmd) def start(self): self.d.start(self.get_id())
MIT License
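A usage sketch for execute() above. Obtaining the container through DockerBackend, ImageClass and run_via_binary follows conu's documented 0.x entry points and is an assumption here, as are the image name, the keep-alive command and the cleanup calls.

from conu import DockerBackend, DockerRunBuilder  # entry points assumed from conu's documented 0.x API

with DockerBackend() as backend:
    image = backend.ImageClass("registry.fedoraproject.org/fedora", tag="31")
    # Keep the container running so execute() has a live target.
    container = image.run_via_binary(DockerRunBuilder(command=["sleep", "infinity"]))
    try:
        # Blocking call: returns the collected output lines, raises ConuException on non-zero exit.
        for line in container.execute(["ls", "-1", "/"]):
            print(line.decode("utf-8").rstrip())

        # Non-blocking call: consume the iterator to stream output and learn when the command finished.
        for line in container.execute(["echo", "hello"], blocking=False):
            print(line)
        print("command finished")
    finally:
        container.stop()
        container.delete()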
continuumio/pykit
pykit/transform/inline.py
assert_inlinable
python
def assert_inlinable(func, call, callee, uses):
    if not isinstance(callee, Function):
        return CompileError("Cannot inline external function: %s" % (callee,))

    yields = findallops(callee, 'yield')
    if yields:
        for use in uses[call]:
            if use.opcode not in ('iter', 'next'):
                return CompileError(
                    "Cannot inline generator with use %s" % (use,))
        if len(uses[call]) != 2:
            return CompileError("Can only")

        loops = loop_detection.find_natural_loops(func)
Verify that a function call can be inlined.

We can inline generators if they are consumed in a single loop:

    - iter(g) must be in a loop header
    - next(g) must be in the loop body

:return: None if inlineable, or an exception with a message
https://github.com/continuumio/pykit/blob/1730d7b831e0cf12a641ac23b5cf03e17e0dc550/pykit/transform/inline.py#L63-L86
from pykit.error import CompileError
from pykit.analysis import loop_detection
from pykit.ir import Function, Builder, findallops, copy_function, verify
from pykit.transform import ret as ret_normalization


def rewrite_return(func):
    ret_normalization.run(func)
    [ret] = findallops(func, 'ret')
    [value] = ret.args
    ret.delete()
    return value


def inline(func, call, uses=None):
    callee = call.args[0]
    builder = Builder(func)
    builder.position_before(call)
    inline_header, inline_exit = builder.splitblock()
    new_callee = copy_function(callee, temper=func.temp)
    result = rewrite_return(new_callee)

    for funcarg, arg in zip(new_callee.args, call.args[1]):
        funcarg.replace_uses(arg)

    after = inline_header
    for block in new_callee.blocks:
        block.parent = None
        func.add_block(block, after=after)
        after = block

    builder.jump(new_callee.startblock)
    with builder.at_end(new_callee.exitblock):
        builder.jump(inline_exit)

    if result is not None:
        result.unlink()
        result.result = call.result
        call.replace(result)
    else:
        call.delete()

    func.reset_uses()
    verify(func)
BSD 3-Clause New or Revised License
jbasko/configmanager
configmanager/item_types.py
_ItemType.includes
python
def includes(self, obj):
    if self.builtin_types:
        return isinstance(obj, self.builtin_types)
Returns: ``True`` if ``obj`` belongs to this type.
https://github.com/jbasko/configmanager/blob/6547224f11ae643aeaa6b141180a2149cd432032/configmanager/item_types.py#L23-L29
import six
from builtins import str as text

from .utils import not_set


class _ItemType(object):
    aliases = ()
    builtin_types = ()

    def serialize(self, instance, **kwargs):
        return instance

    def deserialize(self, payload, **kwargs):
        if payload is None or payload is not_set:
            return payload
        if self.builtin_types:
            return self.builtin_types[0](payload)
        else:
            return payload
MIT License
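A small sketch of how includes() behaves once builtin_types is populated; _IntType is a hypothetical subclass written for illustration, and the import path follows the record's configmanager/item_types.py layout as an assumption.

from configmanager.item_types import _ItemType  # path assumed from the record


class _IntType(_ItemType):
    # Hypothetical concrete item type; real item types ship with configmanager itself.
    aliases = ('int', 'integer')
    builtin_types = (int,)


int_type = _IntType()
print(int_type.includes(5))      # True: 5 is an instance of int
print(int_type.includes("5"))    # False: a str does not belong to this type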
nii-cloud/dodai-compute
nova/virt/xenapi/vm_utils.py
find_iso_sr
python
def find_iso_sr(session):
    host = session.get_xenapi_host()
    sr_refs = session.get_xenapi().SR.get_all()
    for sr_ref in sr_refs:
        sr_rec = session.get_xenapi().SR.get_record(sr_ref)

        LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
        if not sr_rec['content_type'] == 'iso':
            LOG.debug(_("ISO: not iso content"))
            continue
        if not 'i18n-key' in sr_rec['other_config']:
            LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
            continue
        if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
            LOG.debug(_("ISO: iso content_type, i18n-key value not "
                        "'local-storage-iso'"))
            continue

        LOG.debug(_("ISO: SR MATCHing our criteria"))
        for pbd_ref in sr_rec['PBDs']:
            LOG.debug(_("ISO: ISO, looking to see if it is host local"))
            pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
            pbd_rec_host = pbd_rec['host']
            LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, have %(host)s") % locals())
            if pbd_rec_host == host:
                LOG.debug(_("ISO: SR with local PBD"))
                return sr_ref
    return None
Return the storage repository to hold ISO images
https://github.com/nii-cloud/dodai-compute/blob/d9bea632913c0ddc6f59c6120f60daea369d09cc/nova/virt/xenapi/vm_utils.py#L960-L989
import json import os import pickle import re import sys import tempfile import time import urllib import uuid from xml.dom import minidom from nova import db from nova import exception from nova import flags from nova.image import glance from nova import log as logging from nova import utils from nova.compute import instance_types from nova.compute import power_state from nova.virt import disk from nova.virt import images from nova.virt.xenapi import HelperBase from nova.virt.xenapi.volume_utils import StorageError LOG = logging.getLogger("nova.virt.xenapi.vm_utils") FLAGS = flags.FLAGS flags.DEFINE_string('default_os_type', 'linux', 'Default OS type') flags.DEFINE_integer('block_device_creation_timeout', 10, 'time to wait for a block device to be created') flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024, 'maximum size in bytes of kernel or ramdisk images') XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, 'Paused': power_state.PAUSED, 'Suspended': power_state.SUSPENDED, 'Crashed': power_state.CRASHED} SECTOR_SIZE = 512 MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE KERNEL_DIR = '/boot/guest' class ImageType: KERNEL = 0 RAMDISK = 1 DISK = 2 DISK_RAW = 3 DISK_VHD = 4 DISK_ISO = 5 _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO) KERNEL_STR = "kernel" RAMDISK_STR = "ramdisk" DISK_STR = "os" DISK_RAW_STR = "os_raw" DISK_VHD_STR = "vhd" DISK_ISO_STR = "iso" _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR, DISK_ISO_STR) @classmethod def to_string(cls, image_type): return dict(zip(ImageType._ids, ImageType._strs)).get(image_type) @classmethod def from_string(cls, image_type_str): return dict(zip(ImageType._strs, ImageType._ids)).get(image_type_str) class VMHelper(HelperBase): @classmethod def create_vm(cls, session, instance, kernel, ramdisk, use_pv_kernel=False): inst_type_id = instance.instance_type_id instance_type = instance_types.get_instance_type(inst_type_id) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) rec = { 'actions_after_crash': 'destroy', 'actions_after_reboot': 'restart', 'actions_after_shutdown': 'destroy', 'affinity': '', 'blocked_operations': {}, 'ha_always_run': False, 'ha_restart_priority': '', 'HVM_boot_params': {}, 'HVM_boot_policy': '', 'is_a_template': False, 'memory_dynamic_min': mem, 'memory_dynamic_max': mem, 'memory_static_min': '0', 'memory_static_max': mem, 'memory_target': mem, 'name_description': '', 'name_label': instance.name, 'other_config': {'allowvssprovider': False}, 'other_config': {}, 'PCI_bus': '', 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': 'true', 'timeoffset': '0'}, 'PV_args': '', 'PV_bootloader': '', 'PV_bootloader_args': '', 'PV_kernel': '', 'PV_legacy_args': '', 'PV_ramdisk': '', 'recommendations': '', 'tags': [], 'user_version': '0', 'VCPUs_at_startup': vcpus, 'VCPUs_max': vcpus, 'VCPUs_params': {}, 'xenstore_data': {}} if use_pv_kernel: rec['platform']['nx'] = 'false' if instance.kernel_id: rec['PV_args'] = 'root=/dev/xvda1' rec['PV_kernel'] = kernel rec['PV_ramdisk'] = ramdisk else: rec['PV_bootloader'] = 'pygrub' else: rec['platform']['nx'] = 'true' rec['HVM_boot_params'] = {'order': 'dc'} rec['HVM_boot_policy'] = 'BIOS order' LOG.debug(_('Created VM %s...'), instance.name) vm_ref = session.call_xenapi('VM.create', rec) instance_name = instance.name LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals()) return vm_ref @classmethod def 
ensure_free_mem(cls, session, instance): inst_type_id = instance.instance_type_id instance_type = instance_types.get_instance_type(inst_type_id) mem = long(instance_type['memory_mb']) * 1024 * 1024 host = session.get_xenapi_host() host_free_mem = long(session.get_xenapi().host. compute_free_memory(host)) return host_free_mem >= mem @classmethod def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): vbd_rec = {} vbd_rec['VM'] = vm_ref vbd_rec['VDI'] = vdi_ref vbd_rec['userdevice'] = str(userdevice) vbd_rec['bootable'] = bootable vbd_rec['mode'] = 'RW' vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] LOG.debug(_('Creating VBD for VM %(vm_ref)s,' ' VDI %(vdi_ref)s ... ') % locals()) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,' ' VDI %(vdi_ref)s.') % locals()) return vbd_ref @classmethod def create_cd_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): vbd_rec = {} vbd_rec['VM'] = vm_ref vbd_rec['VDI'] = vdi_ref vbd_rec['userdevice'] = str(userdevice) vbd_rec['bootable'] = bootable vbd_rec['mode'] = 'RO' vbd_rec['type'] = 'CD' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] LOG.debug(_('Creating a CDROM-specific VBD for VM %(vm_ref)s,' ' VDI %(vdi_ref)s ... ') % locals()) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) LOG.debug(_('Created a CDROM-specific VBD %(vbd_ref)s ' ' for VM %(vm_ref)s, VDI %(vdi_ref)s.') % locals()) return vbd_ref @classmethod def find_vbd_by_number(cls, session, vm_ref, number): vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref) if vbd_refs: for vbd_ref in vbd_refs: try: vbd_rec = session.get_xenapi().VBD.get_record(vbd_ref) if vbd_rec['userdevice'] == str(number): return vbd_ref except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('VBD not found in instance %s') % vm_ref) @classmethod def unplug_vbd(cls, session, vbd_ref): try: vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref) except cls.XenAPI.Failure, exc: LOG.exception(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': raise StorageError(_('Unable to unplug VBD %s') % vbd_ref) @classmethod def destroy_vbd(cls, session, vbd_ref): try: task = session.call_xenapi('Async.VBD.destroy', vbd_ref) session.wait_for_task(task) except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('Unable to destroy VBD %s') % vbd_ref) @classmethod def destroy_vdi(cls, session, vdi_ref): try: task = session.call_xenapi('Async.VDI.destroy', vdi_ref) session.wait_for_task(task) except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('Unable to destroy VDI %s') % vdi_ref) @classmethod def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): vdi_ref = session.get_xenapi().VDI.create( {'name_label': name_label, 'name_description': '', 'SR': sr_ref, 'virtual_size': str(virtual_size), 'type': 'User', 'sharable': False, 'read_only': read_only, 'xenstore_data': {}, 'other_config': {}, 'sm_config': {}, 'tags': []}) LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,' ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.') % locals()) return vdi_ref @classmethod def get_vdi_for_vm_safely(cls, session, vm_ref): vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref) for vbd in 
vbd_refs: vbd_rec = session.get_xenapi().VBD.get_record(vbd) if vbd_rec['userdevice'] == '0': vdi_rec = session.get_xenapi().VDI.get_record(vbd_rec['VDI']) return vbd_rec['VDI'], vdi_rec raise exception.Error(_("No primary VDI found for" "%(vm_ref)s") % locals()) @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...") % locals()) vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref) sr_ref = vm_vdi_rec["SR"] original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref) task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) template_vm_ref = session.wait_for_task(task, instance_id) template_vdi_rec = cls.get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] LOG.debug(_('Created snapshot %(template_vm_ref)s from' ' VM %(vm_ref)s.') % locals()) parent_uuid = wait_for_vhd_coalesce( session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) template_vdi_uuids = {'image': parent_uuid, 'snap': template_vdi_uuid} return template_vm_ref, template_vdi_uuids @classmethod def get_sr_path(cls, session): sr_ref = safe_find_sr(session) sr_rec = session.get_xenapi().SR.get_record(sr_ref) sr_uuid = sr_rec["uuid"] return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) @classmethod def upload_image(cls, context, session, instance, vdi_uuids, image_id): logging.debug(_("Asking xapi to upload %(vdi_uuids)s as" " ID %(image_id)s") % locals()) os_type = instance.os_type or FLAGS.default_os_type glance_host, glance_port = glance.pick_glance_api_server() params = {'vdi_uuids': vdi_uuids, 'image_id': image_id, 'glance_host': glance_host, 'glance_port': glance_port, 'sr_path': cls.get_sr_path(session), 'os_type': os_type, 'auth_token': getattr(context, 'auth_token', None)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'upload_vhd', kwargs) session.wait_for_task(task, instance.id) @classmethod def fetch_blank_disk(cls, session, instance_type_id): one_gig = 1024 * 1024 * 1024 req_type = instance_types.get_instance_type(instance_type_id) req_size = req_type['local_gb'] LOG.debug("Creating blank HD of size %(req_size)d gigs" % locals()) vdi_size = one_gig * req_size LOG.debug("ISO vm create: Looking for the SR") sr_ref = safe_find_sr(session) vdi_ref = cls.create_vdi(session, sr_ref, 'blank HD', vdi_size, False) return vdi_ref @classmethod def fetch_image(cls, context, session, instance, image, user_id, project_id, image_type): if image_type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd(context, session, instance, image, image_type) else: return cls._fetch_image_glance_disk(context, session, instance, image, image_type) @classmethod def _fetch_image_glance_vhd(cls, context, session, instance, image, image_type): instance_id = instance.id LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) sr_ref = safe_find_sr(session) uuid_stack = [str(uuid.uuid4()) for i in xrange(2)] glance_host, glance_port = glance.pick_glance_api_server() params = {'image_id': image, 'glance_host': glance_host, 'glance_port': glance_port, 'uuid_stack': uuid_stack, 'sr_path': cls.get_sr_path(session), 'auth_token': getattr(context, 'auth_token', None)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) result = session.wait_for_task(task, instance_id) vdis = json.loads(result) for vdi in vdis: LOG.debug(_("xapi 'download_vhd' returned VDI of " "type '%(vdi_type)s' with 
UUID '%(vdi_uuid)s'" % vdi)) cls.scan_sr(session, instance_id, sr_ref) os_vdi_uuid = vdis[0]['vdi_uuid'] vdi_ref = session.get_xenapi().VDI.get_by_uuid(os_vdi_uuid) primary_name_label = get_name_label_for_image(image) session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label) cls._check_vdi_size(context, session, instance, os_vdi_uuid) return vdis @classmethod def _get_vdi_chain_size(cls, context, session, vdi_uuid): size_bytes = 0 for vdi_rec in walk_vdi_chain(session, vdi_uuid): cur_vdi_uuid = vdi_rec['uuid'] vdi_size_bytes = int(vdi_rec['physical_utilisation']) LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=' '%(vdi_size_bytes)d' % locals())) size_bytes += vdi_size_bytes return size_bytes @classmethod def _check_vdi_size(cls, context, session, instance, vdi_uuid): size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid) instance_type_id = instance['instance_type_id'] instance_type = db.instance_type_get(context, instance_type_id) allowed_size_gb = instance_type['local_gb'] allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes=" "%(allowed_size_bytes)d") % locals()) if size_bytes > allowed_size_bytes: LOG.info(_("Image size %(size_bytes)d exceeded" " instance_type allowed size " "%(allowed_size_bytes)d") % locals()) raise exception.ImageTooLarge() @classmethod def _fetch_image_glance_disk(cls, context, session, instance, image, image_type): instance_id = instance.id LOG.debug(_("Fetching image %(image)s") % locals()) LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type)) if image_type == ImageType.DISK_ISO: sr_ref = safe_find_iso_sr(session) LOG.debug(_("ISO: Found sr possibly containing the ISO image")) else: sr_ref = safe_find_sr(session) glance_client, image_id = glance.get_glance_client(context, image) glance_client.set_auth_token(getattr(context, 'auth_token', None)) meta, image_file = glance_client.get_image(image_id) virtual_size = int(meta['size']) vdi_size = virtual_size LOG.debug(_("Size for image %(image)s:" + "%(virtual_size)d") % locals()) if image_type == ImageType.DISK: vdi_size += MBR_SIZE_BYTES elif image_type in (ImageType.KERNEL, ImageType.RAMDISK) and vdi_size > FLAGS.max_kernel_ramdisk_size: max_size = FLAGS.max_kernel_ramdisk_size raise exception.Error( _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, " "max %(max_size)d bytes") % locals()) name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) try: filename = None vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) with_vdi_attached_here(session, vdi_ref, False, lambda dev: _stream_disk(dev, image_type, virtual_size, image_file)) if image_type in (ImageType.KERNEL, ImageType.RAMDISK): LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref) fn = "copy_kernel_vdi" args = {} args['vdi-ref'] = vdi_ref args['image-size'] = str(vdi_size) task = session.async_call_plugin('glance', fn, args) filename = session.wait_for_task(task, instance_id) session.get_xenapi().VDI.destroy(vdi_ref) LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) return [dict(vdi_type=ImageType.to_string(image_type), vdi_uuid=None, file=filename)] else: return [dict(vdi_type=ImageType.to_string(image_type), vdi_uuid=vdi_uuid, file=None)] except (cls.XenAPI.Failure, IOError, OSError) as e: LOG.exception(_("instance %s: Failed to fetch glance image"), instance_id, exc_info=sys.exc_info()) e.args = e.args + ([dict(vdi_type=ImageType. 
to_string(image_type), vdi_uuid=vdi_uuid, file=filename)],) raise e @classmethod def determine_disk_image_type(cls, instance, context): def log_disk_format(image_type): pretty_format = {ImageType.KERNEL: 'KERNEL', ImageType.RAMDISK: 'RAMDISK', ImageType.DISK: 'DISK', ImageType.DISK_RAW: 'DISK_RAW', ImageType.DISK_VHD: 'DISK_VHD', ImageType.DISK_ISO: 'DISK_ISO'} disk_format = pretty_format[image_type] image_ref = instance.image_ref instance_id = instance.id LOG.debug(_("Detected %(disk_format)s format for image " "%(image_ref)s, instance %(instance_id)s") % locals()) def determine_from_glance(): glance_disk_format2nova_type = { 'ami': ImageType.DISK, 'aki': ImageType.KERNEL, 'ari': ImageType.RAMDISK, 'raw': ImageType.DISK_RAW, 'vhd': ImageType.DISK_VHD, 'iso': ImageType.DISK_ISO} image_ref = instance.image_ref glance_client, image_id = glance.get_glance_client(context, image_ref) meta = glance_client.get_image_meta(image_id) disk_format = meta['disk_format'] try: return glance_disk_format2nova_type[disk_format] except KeyError: raise exception.InvalidDiskFormat(disk_format=disk_format) def determine_from_instance(): if instance.kernel_id: return ImageType.DISK else: return ImageType.DISK_RAW image_type = determine_from_glance() log_disk_format(image_type) return image_type @classmethod def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, os_type): LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) if disk_image_type == ImageType.DISK_VHD: if os_type == 'windows': is_pv = False else: is_pv = True elif disk_image_type == ImageType.DISK_RAW: is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv) elif disk_image_type == ImageType.DISK: is_pv = True elif disk_image_type == ImageType.DISK_ISO: is_pv = False else: raise exception.Error(_("Unknown image format %(disk_image_type)s") % locals()) return is_pv @classmethod def lookup(cls, session, name_label): vm_refs = session.get_xenapi().VM.get_by_name_label(name_label) n = len(vm_refs) if n == 0: return None elif n > 1: raise exception.InstanceExists(name=name_label) else: return vm_refs[0] @classmethod def lookup_vm_vdis(cls, session, vm_ref): vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref) vdi_refs = [] if vbd_refs: for vbd_ref in vbd_refs: try: vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref) record = session.get_xenapi().VDI.get_record(vdi_ref) LOG.debug(_('VDI %s is still available'), record['uuid']) except cls.XenAPI.Failure, exc: LOG.exception(exc) else: vdi_refs.append(vdi_ref) if len(vdi_refs) > 0: return vdi_refs else: return None @classmethod def preconfigure_instance(cls, session, instance, vdi_ref, network_info): mount_required = False key, net, metadata = _prepare_injectables(instance, network_info) mount_required = key or net or metadata if not mount_required: return with_vdi_attached_here(session, vdi_ref, False, lambda dev: _mounted_processing(dev, key, net, metadata)) @classmethod def lookup_kernel_ramdisk(cls, session, vm): vm_rec = session.get_xenapi().VM.get_record(vm) if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec: return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk']) else: return (None, None) @classmethod def compile_info(cls, record): LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"), record['power_state']) LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"), XENAPI_POWER_STATE[record['power_state']]) return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, 'num_cpu': 
record['VCPUs_max'], 'cpu_time': 0} @classmethod def compile_diagnostics(cls, session, record): try: host = session.get_xenapi_host() host_ip = session.get_xenapi().host.get_record(host)["address"] except (cls.XenAPI.Failure, KeyError) as e: return {"Unable to retrieve diagnostics": e} try: diags = {} xml = get_rrd(host_ip, record["uuid"]) if xml: rrd = minidom.parseString(xml) for i, node in enumerate(rrd.firstChild.childNodes): if i >= 3 and i <= 11: ref = node.childNodes if len(ref) > 6: diags[ref[0].firstChild.data] = ref[6].firstChild.data return diags except cls.XenAPI.Failure as e: return {"Unable to retrieve diagnostics": e} @classmethod def scan_sr(cls, session, instance_id=None, sr_ref=None): if sr_ref: LOG.debug(_("Re-scanning SR %s"), sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) session.wait_for_task(task, instance_id) @classmethod def scan_default_sr(cls, session): sr_ref = find_sr(session) session.call_xenapi('SR.scan', sr_ref) def get_rrd(host, vm_uuid): try: xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % ( FLAGS.xenapi_connection_username, FLAGS.xenapi_connection_password, host, vm_uuid)) return xml.read() except IOError: return None def get_vhd_parent(session, vdi_rec): if 'vhd-parent' in vdi_rec['sm_config']: parent_uuid = vdi_rec['sm_config']['vhd-parent'] parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid) parent_rec = session.get_xenapi().VDI.get_record(parent_ref) vdi_uuid = vdi_rec['uuid'] LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_ref)s") % locals()) return parent_ref, parent_rec else: return None def get_vhd_parent_uuid(session, vdi_ref): vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) ret = get_vhd_parent(session, vdi_rec) if ret: parent_ref, parent_rec = ret return parent_rec["uuid"] else: return None def walk_vdi_chain(session, vdi_uuid): while True: vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) yield vdi_rec parent_uuid = vdi_rec['sm_config'].get('vhd-parent') if parent_uuid: vdi_uuid = parent_uuid else: break def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts attempts = {'counter': 0} def _poll_vhds(): attempts['counter'] += 1 if attempts['counter'] > max_attempts: counter = attempts['counter'] msg = (_("VHD coalesce attempts exceeded (%(counter)d >" " %(max_attempts)d), giving up...") % locals()) raise exception.Error(msg) VMHelper.scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent" " %(original_parent_uuid)s, waiting for coalesce...") % locals()) else: raise utils.LoopingCallDone(parent_uuid) loop = utils.LoopingCall(_poll_vhds) loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True) parent_uuid = loop.wait() return parent_uuid def get_vdi_for_vm_safely(session, vm_ref): vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) if vdi_refs is None: raise Exception(_("No VDIs found for VM %s") % vm_ref) else: num_vdis = len(vdi_refs) if num_vdis != 1: raise exception.Error(_("Unexpected number of VDIs" "(%(num_vdis)s) found" " for VM %(vm_ref)s") % locals()) vdi_ref = vdi_refs[0] vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) return vdi_ref, vdi_rec def safe_find_sr(session): sr_ref = find_sr(session) if sr_ref is None: raise exception.StorageRepositoryNotFound() return sr_ref def 
find_sr(session): host = session.get_xenapi_host() sr_refs = session.get_xenapi().SR.get_all() for sr_ref in sr_refs: sr_rec = session.get_xenapi().SR.get_record(sr_ref) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue for pbd_ref in sr_rec['PBDs']: pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref) if pbd_rec['host'] == host: return sr_ref return None def safe_find_iso_sr(session): sr_ref = find_iso_sr(session) if sr_ref is None: raise exception.NotFound(_('Cannot find SR of content-type ISO')) return sr_ref
Apache License 2.0
yuchdev/code_generator
cpp_generator.py
CppLanguageElement.render_to_string
python
def render_to_string(self, cpp):
    raise NotImplementedError('CppLanguageElement is an abstract class')
@param: cpp - handle that supports code generation interface (see code_generator.py)
Typically it is passed to all child elements so that they render their content
https://github.com/yuchdev/code_generator/blob/f6391b53f83a99b24e76277f6108eb5fe176699e/cpp_generator.py#L139-L144
__doc__ = """The module encapsutates C++ code generation logics for main C++ language primitives: classes, methods and functions, variables, enums. Every C++ element could render its current state to a string that could be evaluated as a legal C++ construction. Some elements could be rendered to a pair of representations (i.e. declaration and definition) Example: # Python code cpp_class = CppClass(name = 'MyClass', is_struct = True) cpp_class.add_variable(CppVariable(name = "m_var", type = 'size_t', is_static = True, is_const = True, initialization_value = 255)) // Generated C++ declaration struct MyClass { static const size_t m_var; } // Generated C++ definition const size_t MyClass::m_var = 255; That module uses and highly depends on code_generator.py as it uses code generating and formatting primitives implemented there. The main object referenced from code_generator.py is CppFile, which is passed as a parameter to render_to_string(cpp) Python method It could also be used for composing more complicated C++ code, that does not supported by cpp_generator It support: - functional calls: cpp('int a = 10;') - 'with' semantic: with cpp.block('class MyClass', ';') class_definition(cpp) - append code to the last string without EOL: cpp.append(', p = NULL);') - empty lines: cpp.newline(2) For detailed information see code_generator.py documentation. """ from textwrap import dedent class CppDeclaration(object): def __init__(self, cpp_element): self.cpp_element = cpp_element def render_to_string(self, cpp): self.cpp_element.render_to_string_declaration(cpp) class CppImplementation(object): def __init__(self, cpp_element): self.cpp_element = cpp_element def render_to_string(self, cpp): self.cpp_element.render_to_string_implementation(cpp) class CppLanguageElement(object): availablePropertiesNames = {'name', 'ref_to_parent'} def __init__(self, properties): self.name = properties.get('name') self.ref_to_parent = properties.get('ref_to_parent') def check_input_properties_names(self, input_property_names): unknown_properties = input_property_names.difference(self.availablePropertiesNames) if unknown_properties: raise AttributeError( f'Error: try to initialize {self.__class__.__name__} with unknown property: {repr(unknown_properties)}') def init_class_properties(self, current_class_properties, input_properties_dict, default_property_value=None): for propertyName in current_class_properties: if propertyName not in CppLanguageElement.availablePropertiesNames: setattr(self, propertyName, default_property_value) for (propertyName, propertyValue) in input_properties_dict.items(): if propertyName not in CppLanguageElement.availablePropertiesNames: setattr(self, propertyName, propertyValue)
MIT License
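A hedged usage sketch for the abstract hook above: a toy concrete element overrides render_to_string(), and a toy handle stands in for the CppFile object from code_generator.py (only the callable interface described in the module docstring is emulated). ToyCppFile and CppComment are invented names, not part of the library, and the sketch assumes the CppLanguageElement base class from this record is importable.

class ToyCppFile:
    # Stand-in for code_generator.CppFile: cpp('...') appends one line of output.
    def __init__(self):
        self.lines = []

    def __call__(self, text):
        self.lines.append(text)

class CppComment(CppLanguageElement):
    # Minimal concrete element: renders a single C++ comment line.
    def __init__(self, text, **properties):
        super().__init__(properties)
        self.text = text

    def render_to_string(self, cpp):
        cpp('// {0}'.format(self.text))

cpp = ToyCppFile()
CppComment('generated code', name='note').render_to_string(cpp)
print(cpp.lines)  # ['// generated code']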
andela/troupon
troupon/deals/tests/test_views.py
CreateDeal.create_deal
python
def create_deal(self):
    merchant = self.create_merchant()
    price = 5000
    original_price = 6000
    currency = 1
    country = 2
    location = 84
    quorum = 0
    disclaimer = ''
    description = 'Holiday for two to the luxurious Masai Mara.'
    title = 'Masai Mara Holiday'
    address = 'Masai Mara'
    max_quantity_available = 20
    active = True
    advertiser_id = merchant.advertiser_ptr.id
    date_end = "2020-09-09"
    category = Category.objects.create(name="Travel N Hotels",
                                       slug="masai-mara-holiday")
    advertiser = Advertiser.objects.get(id=advertiser_id)
    deal = Deal(
        price=price, original_price=original_price, currency=currency,
        country=country, location=location, category=category,
        quorum=quorum, disclaimer=disclaimer, description=description,
        address=address, max_quantity_available=max_quantity_available,
        date_end=date_end, active=active, title=title,
        advertiser=advertiser, duration=20
    )
    deal.save()
Create the test deal
https://github.com/andela/troupon/blob/3704cbe6e69ba3e4c53401d3bbc339208e9ebccd/troupon/deals/tests/test_views.py#L116-L147
import unittest from django.contrib.auth.models import User from django.test import LiveServerTestCase from selenium import webdriver from accounts.models import UserProfile from deals.models import Advertiser, Category, Deal from merchant.models import Merchant TEST_USER_EMAIL = 'testuser@myemail.com' TEST_USER_PASSWORD = 'testpassword' TEST_SEARCH_TERM = "Holiday" TEST_SEARCH_LOCATION = "Nairobi" xpath_search_term = "//div[@class='custom-input-group']/input[@id='search']" xpath_search_location = "//div[@class='custom-input-group']/select" xpath_search_button = "//button[@class='btn-action']" xpath_search_results_title = "//h1[@class='title']" xpath_search_results_desc = "//p[@class='description']" xpath_search_results_deal = "//div[@class='packery-grid deal-grid']" "/div[@class='grid-item card']/form[@class='overlay row']" xpath_deals_page_title = "//h1[@class='title']" xpath_deals_first_deal = "//div[@class='grid-item card'][1]/form[@class='overlay row']" xpath_more_details_button = "//div[@class='grid-item card'][1]" "/form[@class='overlay row']/div[@class='row']/div/a[@class='btn-action']" xpath_deal_specs = "//div[@class='deals-specs']" class HomepageViewTests(LiveServerTestCase): @classmethod def setUpClass(cls): cls.driver = webdriver.Chrome() super(HomepageViewTests, cls).setUpClass() def setUp(self,): self.driver = HomepageViewTests.driver super(HomepageViewTests, self).setUp() def test_title(self,): self.driver.get(self.live_server_url + '/') self.assertIn("Troupon - Get Some", self.driver.title) def test_can_subscribe(self,): self.driver.get("%s" % (self.live_server_url)) self.assertTrue("driver.find_element_by_id('subscriberEmail')") def test_about_us_present(self,): self.driver.get(self.live_server_url + '/') body = self.driver.find_element_by_tag_name('body') self.assertIn("About", body.text) def tearDown(self,): super(HomepageViewTests, self).tearDown() @classmethod def tearDownClass(cls): cls.driver.quit() super(HomepageViewTests, cls).tearDownClass() class CreateDeal(object): def create_user(self): User.objects.create_user(username='mytestuser', email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD) def login_user(self): self.driver.get( '%s%s' % (self.live_server_url, "/login/") ) self.driver.find_element_by_id('email').send_keys(TEST_USER_EMAIL) self.driver.find_element_by_id( 'password').send_keys(TEST_USER_PASSWORD) self.driver.find_element_by_id("loginBtn").click() def create_user_profile(self): user_object = User.objects.all()[:1].get() user_profile = UserProfile.objects.create( user=user_object, occupation='Travel Agent', intlnumber='0705123456') return user_profile def create_merchant(self): merchant = Merchant.objects.create( userprofile=self.create_user_profile(), intlnumber='0705123456', enabled=True, approved=True, trusted=True) return merchant
MIT License
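A hypothetical usage sketch, not taken from the repository: a Selenium test case mixes in the CreateDeal helper so each test starts with a user and the Deal seeded by create_deal() above. It relies on the imports already present in the record's test module (webdriver, LiveServerTestCase); the test class name, the '/deals/' URL and the assertion are assumptions chosen for illustration.

class DealListingTests(CreateDeal, LiveServerTestCase):

    @classmethod
    def setUpClass(cls):
        cls.driver = webdriver.Chrome()
        super(DealListingTests, cls).setUpClass()

    def setUp(self):
        self.driver = DealListingTests.driver
        self.create_user()   # helper from the CreateDeal mixin
        self.create_deal()   # seeds the 'Masai Mara Holiday' deal defined above
        super(DealListingTests, self).setUp()

    def test_deal_is_listed(self):
        self.driver.get(self.live_server_url + '/deals/')
        body = self.driver.find_element_by_tag_name('body')
        self.assertIn('Masai Mara Holiday', body.text)

    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
        super(DealListingTests, cls).tearDownClass()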
romansalin/django-seo2
djangoseo/backends.py
MetadataBaseModel._resolve_template
python
def _resolve_template(value, model_instance=None, context=None):
    if isinstance(value, string_types) and "{" in value:
        if context is None:
            context = Context()
        if model_instance is not None:
            context[model_instance._meta.model_name] = model_instance
        value = Template(value).render(context)
    return value
Resolves any template references in the given value.
https://github.com/romansalin/django-seo2/blob/f788699a88e286ab9a698759d9b42f57852865d8/djangoseo/backends.py#L75-L83
from __future__ import unicode_literals import collections from django.utils.translation import ugettext_lazy as _ from django.db.utils import IntegrityError from django.conf import settings from django.db import models from django.contrib.sites.models import Site from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.fields import GenericForeignKey from django.template import Template, Context from django.utils.encoding import python_2_unicode_compatible from six import string_types, with_metaclass from djangoseo.utils import resolve_to_name, NotSet, Literal RESERVED_FIELD_NAMES = ('_metadata', '_path', '_content_type', '_object_id', '_content_object', '_view', '_site', 'objects', '_resolve_value', '_set_context', 'id', 'pk') backend_registry = collections.OrderedDict() class MetadataBaseModel(models.Model): class Meta: abstract = True def __init__(self, *args, **kwargs): super(MetadataBaseModel, self).__init__(*args, **kwargs) self._metadata = self.__class__._metadata() def _resolve_value(self, name): name = str(name) if name in self._metadata._meta.elements: element = self._metadata._meta.elements[name] if element.editable: value = getattr(self, name) if value: return value populate_from = element.populate_from if isinstance(populate_from, collections.Callable): return populate_from(self, **self._populate_from_kwargs()) elif isinstance(populate_from, Literal): return populate_from.value elif populate_from is not NotSet: return self._resolve_value(populate_from) try: value = getattr(self._metadata, name) except AttributeError: pass else: if isinstance(value, collections.Callable): if getattr(value, '__self__', None): return value(self) else: return value(self._metadata, obj=self) return value def _populate_from_kwargs(self): return {} @staticmethod
MIT License
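A standalone sketch of the mechanism _resolve_template() relies on: a metadata value is only treated as a Django template when it contains "{", and the model instance is exposed in the rendering context under its model name. The inline settings.configure() call and the dict standing in for a model instance are only there so the snippet runs outside a Django project.

import django
from django.conf import settings

settings.configure(TEMPLATES=[{'BACKEND': 'django.template.backends.django.DjangoTemplates'}])
django.setup()

from django.template import Template, Context

value = 'Buy {{ product.name }} today'
if '{' in value:  # same guard used by _resolve_template
    context = Context({'product': {'name': 'Acme Widget'}})  # stands in for model_instance
    value = Template(value).render(context)

print(value)  # Buy Acme Widget today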
interpretml/interpret-community
python/interpret_community/mimic/mimic_explainer.py
MimicExplainer._get_teacher_model_predictions
python
def _get_teacher_model_predictions(self, evaluation_examples):
    transformed_evaluation_examples = self._get_transformed_data(evaluation_examples)
    return self.model.predict(transformed_evaluation_examples)
Return the predictions given by the teacher model.

:param evaluation_examples: A matrix of feature vector examples (# examples x # features) on which to explain the model's output. If specified, computes feature importance through aggregation.
:type evaluation_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
:return: predictions of the teacher model.
:rtype: numpy.array
https://github.com/interpretml/interpret-community/blob/c8ce475a8224dd401bfa5358dbad1fd6b224a695/python/interpret_community/mimic/mimic_explainer.py#L355-L365
import numpy as np from scipy.sparse import issparse from sklearn.metrics import accuracy_score, r2_score from ..common.explanation_utils import _order_imp from ..common.exception import ScenarioNotSupportedException from ..common.model_wrapper import _wrap_model from .._internal.raw_explain.raw_explain_utils import get_datamapper_and_transformed_data, transform_with_datamapper from ..common.blackbox_explainer import BlackBoxExplainer from .model_distill import _model_distill, _inverse_soft_logit from .models import LGBMExplainableModel from ..explanation.explanation import _create_local_explanation, _create_global_explanation, _aggregate_global_from_local_explanation, _aggregate_streamed_local_explanations, _create_raw_feats_global_explanation, _create_raw_feats_local_explanation, _get_raw_explainer_create_explanation_kwargs from ..dataset.decorator import tabular_decorator, init_tabular_decorator from ..dataset.dataset_wrapper import DatasetWrapper from ..common.constants import ExplainParams, ExplainType, ModelTask, ShapValuesOutput, MimicSerializationConstants, ExplainableModelType, LightGBMParams, Defaults, Extension, ResetIndex import logging import json import warnings with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'Starting from version 2.2.1', UserWarning) class MimicExplainer(BlackBoxExplainer): available_explanations = [Extension.GLOBAL, Extension.LOCAL] explainer_type = Extension.BLACKBOX """The Mimic Explainer for explaining black box models or functions. :param model: The black box model or function (if is_function is True) to be explained. Also known as the teacher model. A model that implements sklearn.predict or sklearn.predict_proba or function that accepts a 2d ndarray. :type model: object :param initialization_examples: A matrix of feature vector examples (# examples x # features) for initializing the explainer. :type initialization_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix :param explainable_model: The uninitialized surrogate model used to explain the black box model. Also known as the student model. :type explainable_model: interpret_community.mimic.models.BaseExplainableModel :param explainable_model_args: An optional map of arguments to pass to the explainable model for initialization. :type explainable_model_args: dict :param is_function: Default is False. Set to True if passing function instead of model. :type is_function: bool :param augment_data: If True, oversamples the initialization examples to improve surrogate model accuracy to fit teacher model. Useful for high-dimensional data where the number of rows is less than the number of columns. :type augment_data: bool :param max_num_of_augmentations: Maximum number of times we can increase the input data size. :type max_num_of_augmentations: int :param explain_subset: List of feature indices. If specified, only selects a subset of the features in the evaluation dataset for explanation. Note for mimic explainer this will not affect the execution time of getting the global explanation. This argument is not supported when transformations are set. :type explain_subset: list[int] :param features: A list of feature names. :type features: list[str] :param classes: Class names as a list of strings. The order of the class names should match that of the model output. Only required if explaining classifier. :type classes: list[str] :param transformations: sklearn.compose.ColumnTransformer or a list of tuples describing the column name and transformer. 
When transformations are provided, explanations are of the features before the transformation. The format for a list of transformations is same as the one here: https://github.com/scikit-learn-contrib/sklearn-pandas. If you are using a transformation that is not in the list of sklearn.preprocessing transformations that are supported by the `interpret-community <https://github.com/interpretml/interpret-community>`_ package, then this parameter cannot take a list of more than one column as input for the transformation. You can use the following sklearn.preprocessing transformations with a list of columns since these are already one to many or one to one: Binarizer, KBinsDiscretizer, KernelCenterer, LabelEncoder, MaxAbsScaler, MinMaxScaler, Normalizer, OneHotEncoder, OrdinalEncoder, PowerTransformer, QuantileTransformer, RobustScaler, StandardScaler. Examples for transformations that work:: [ (["col1", "col2"], sklearn_one_hot_encoder), (["col3"], None) #col3 passes as is ] [ (["col1"], my_own_transformer), (["col2"], my_own_transformer), ] An example of a transformation that would raise an error since it cannot be interpreted as one to many:: [ (["col1", "col2"], my_own_transformer) ] The last example would not work since the interpret-community package can't determine whether my_own_transformer gives a many to many or one to many mapping when taking a sequence of columns. :type transformations: sklearn.compose.ColumnTransformer or list[tuple] :param shap_values_output: The shap values output from the explainer. Only applies to tree-based models that are in terms of raw feature values instead of probabilities. Can be default, probability or teacher_probability. If probability or teacher_probability are specified, we approximate the feature importance values as probabilities instead of using the default values. If teacher probability is specified, we use the probabilities from the teacher model as opposed to the surrogate model. :type shap_values_output: interpret_community.common.constants.ShapValuesOutput :param categorical_features: Categorical feature names or indexes. If names are passed, they will be converted into indexes first. Note if pandas indexes are categorical, you can either pass the name of the index or the index as if the pandas index was inserted at the end of the input dataframe. :type categorical_features: Union[list[str], list[int]] :param allow_all_transformations: Allow many to many and many to one transformations :type allow_all_transformations: bool :param model_task: Optional parameter to specify whether the model is a classification or regression model. In most cases, the type of the model can be inferred based on the shape of the output, where a classifier has a predict_proba method and outputs a 2 dimensional array, while a regressor has a predict method and outputs a 1 dimensional array. :type model_task: str :param reset_index: Uses the pandas DataFrame index column as part of the features when training the surrogate model. 
:type reset_index: str """ @init_tabular_decorator def __init__(self, model, initialization_examples, explainable_model, explainable_model_args=None, is_function=False, augment_data=True, max_num_of_augmentations=10, explain_subset=None, features=None, classes=None, transformations=None, allow_all_transformations=False, shap_values_output=ShapValuesOutput.DEFAULT, categorical_features=None, model_task=ModelTask.Unknown, reset_index=ResetIndex.Ignore, **kwargs): if transformations is not None and explain_subset is not None: raise ValueError("explain_subset not supported with transformations") self.reset_index = reset_index self._datamapper = None if transformations is not None: self._datamapper, new_initialization_examples = get_datamapper_and_transformed_data( examples=initialization_examples, transformations=transformations, allow_all_transformations=allow_all_transformations) initialization_examples._clear() initialization_examples = new_initialization_examples if reset_index != ResetIndex.Ignore: initialization_examples.reset_index() wrapped_model, eval_ml_domain = _wrap_model(model, initialization_examples, model_task, is_function) super(MimicExplainer, self).__init__(wrapped_model, is_function=is_function, model_task=eval_ml_domain, **kwargs) if explainable_model_args is None: explainable_model_args = {} if categorical_features is None: categorical_features = [] self._logger.debug('Initializing MimicExplainer') self._init_features = initialization_examples.get_features(features=features) self.features = features if augment_data: initialization_examples.augment_data(max_num_of_augmentations=max_num_of_augmentations) original_training_data = initialization_examples.typed_dataset else: original_training_data = initialization_examples.original_dataset_with_type if reset_index == ResetIndex.ResetTeacher: initialization_examples.set_index() if not all(isinstance(categorical_feature, int) for categorical_feature in categorical_features): categorical_features = initialization_examples.get_column_indexes(self._init_features, categorical_features) self._timestamp_featurizer = initialization_examples.timestamp_featurizer() is_tree_model = explainable_model.explainable_model_type() == ExplainableModelType.TREE_EXPLAINABLE_MODEL_TYPE if is_tree_model and self._supports_categoricals(explainable_model): self._column_indexer = initialization_examples.string_index(columns=categorical_features) self._one_hot_encoder = None if categorical_features: explainable_model_args[LightGBMParams.CATEGORICAL_FEATURE] = categorical_features else: self._column_indexer = initialization_examples.string_index(columns=categorical_features) self._one_hot_encoder = initialization_examples.one_hot_encode(columns=categorical_features) self.classes = classes self.explain_subset = explain_subset self.transformations = transformations self._shap_values_output = shap_values_output training_data = initialization_examples.dataset self.initialization_examples = initialization_examples if str(type(training_data)).endswith(".DenseData'>"): training_data = training_data.data explainable_model_args[ExplainParams.CLASSIFICATION] = self.predict_proba_flag if self._supports_shap_values_output(explainable_model): explainable_model_args[ExplainParams.SHAP_VALUES_OUTPUT] = shap_values_output self.surrogate_model = _model_distill(self.function, explainable_model, training_data, original_training_data, explainable_model_args) self._method = self.surrogate_model._method self._original_eval_examples = None self._allow_all_transformations = 
allow_all_transformations def _get_transformed_data(self, evaluation_examples): if self.transformations is not None: _, transformed_evaluation_examples = get_datamapper_and_transformed_data( examples=evaluation_examples, transformations=self.transformations, allow_all_transformations=self._allow_all_transformations) else: transformed_evaluation_examples = evaluation_examples return transformed_evaluation_examples def _get_surrogate_model_predictions(self, evaluation_examples): transformed_evaluation_examples = self._get_transformed_data(evaluation_examples) if self.classes is not None and len(self.classes) == 2: index_predictions = _inverse_soft_logit(self.surrogate_model.predict(transformed_evaluation_examples)) actual_predictions = [] for index in index_predictions: actual_predictions.append(self.classes[index]) return np.array(actual_predictions) else: return self.surrogate_model.predict(transformed_evaluation_examples)
MIT License
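A hedged usage sketch: the dataset, the teacher model and the surrogate choice below are assumptions, not taken from the record, and the accessor on the explanation object follows the public interpret-community API. It fits a scikit-learn classifier, wraps it in MimicExplainer with the LGBM surrogate, and then calls the helper above to get the teacher's predictions alongside a global explanation.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from interpret_community.mimic.mimic_explainer import MimicExplainer
from interpret_community.mimic.models import LGBMExplainableModel

x, y = load_iris(return_X_y=True)
teacher = LogisticRegression(max_iter=1000).fit(x, y)

explainer = MimicExplainer(teacher, x, LGBMExplainableModel,
                           augment_data=False, classes=[0, 1, 2])

teacher_preds = explainer._get_teacher_model_predictions(x)  # teacher output on the evaluation set
print(teacher_preds[:5])

global_explanation = explainer.explain_global(x)  # public API; explanation is computed via the surrogate
print(global_explanation.get_ranked_global_values()[:3])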
openkmip/pykmip
kmip/core/messages/payloads/encrypt.py
EncryptResponsePayload.read
python
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(EncryptResponsePayload, self).read( input_stream, kmip_version=kmip_version ) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream): self._unique_identifier = primitives.TextString( tag=enums.Tags.UNIQUE_IDENTIFIER ) self._unique_identifier.read( local_stream, kmip_version=kmip_version ) else: raise ValueError( "invalid payload missing the unique identifier attribute" ) if self.is_tag_next(enums.Tags.DATA, local_stream): self._data = primitives.ByteString(tag=enums.Tags.DATA) self._data.read( local_stream, kmip_version=kmip_version ) else: raise ValueError("invalid payload missing the data attribute") if self.is_tag_next(enums.Tags.IV_COUNTER_NONCE, local_stream): self._iv_counter_nonce = primitives.ByteString( tag=enums.Tags.IV_COUNTER_NONCE ) self._iv_counter_nonce.read( local_stream, kmip_version=kmip_version ) if kmip_version >= enums.KMIPVersion.KMIP_1_4: if self.is_tag_next( enums.Tags.AUTHENTICATED_ENCRYPTION_TAG, local_stream ): self._auth_tag = primitives.ByteString( tag=enums.Tags.AUTHENTICATED_ENCRYPTION_TAG ) self._auth_tag.read( local_stream, kmip_version=kmip_version ) self.is_oversized(local_stream)
Read the data encoding the Encrypt response payload and decode it into its constituent parts.

Args:
    input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object.
    kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.

Raises:
    ValueError: Raised if the unique_identifier or data attributes are missing from the encoded payload.
https://github.com/openkmip/pykmip/blob/c0c980395660ea1b1a8009e97f17ab32d1100233/kmip/core/messages/payloads/encrypt.py#L474-L540
import six from kmip.core import attributes from kmip.core import enums from kmip.core import primitives from kmip.core import utils from kmip.core.messages.payloads import base class EncryptRequestPayload(base.RequestPayload): def __init__(self, unique_identifier=None, cryptographic_parameters=None, data=None, iv_counter_nonce=None, auth_additional_data=None): super(EncryptRequestPayload, self).__init__() self._unique_identifier = None self._cryptographic_parameters = None self._data = None self._iv_counter_nonce = None self._auth_additional_data = None self.unique_identifier = unique_identifier self.cryptographic_parameters = cryptographic_parameters self.data = data self.iv_counter_nonce = iv_counter_nonce self.auth_additional_data = auth_additional_data @property def unique_identifier(self): if self._unique_identifier: return self._unique_identifier.value else: return None @unique_identifier.setter def unique_identifier(self, value): if value is None: self._unique_identifier = None elif isinstance(value, six.string_types): self._unique_identifier = primitives.TextString( value=value, tag=enums.Tags.UNIQUE_IDENTIFIER ) else: raise TypeError("unique identifier must be a string") @property def cryptographic_parameters(self): return self._cryptographic_parameters @cryptographic_parameters.setter def cryptographic_parameters(self, value): if value is None: self._cryptographic_parameters = None elif isinstance(value, attributes.CryptographicParameters): self._cryptographic_parameters = value else: raise TypeError( "cryptographic parameters must be a CryptographicParameters " "struct" ) @property def data(self): if self._data: return self._data.value else: return None @data.setter def data(self, value): if value is None: self._data = None elif isinstance(value, six.binary_type): self._data = primitives.ByteString( value=value, tag=enums.Tags.DATA ) else: raise TypeError("data must be bytes") @property def iv_counter_nonce(self): if self._iv_counter_nonce: return self._iv_counter_nonce.value else: return None @iv_counter_nonce.setter def iv_counter_nonce(self, value): if value is None: self._iv_counter_nonce = None elif isinstance(value, six.binary_type): self._iv_counter_nonce = primitives.ByteString( value=value, tag=enums.Tags.IV_COUNTER_NONCE ) else: raise TypeError("IV/counter/nonce must be bytes") @property def auth_additional_data(self): if self._auth_additional_data: return self._auth_additional_data.value else: return None @auth_additional_data.setter def auth_additional_data(self, value): if value is None: self._auth_additional_data = None elif isinstance(value, six.binary_type): self._auth_additional_data = primitives.ByteString( value=value, tag=enums.Tags.AUTHENTICATED_ENCRYPTION_ADDITIONAL_DATA ) else: raise TypeError("authenticated additional data must be bytes") def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(EncryptRequestPayload, self).read( input_stream, kmip_version=kmip_version ) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream): self._unique_identifier = primitives.TextString( tag=enums.Tags.UNIQUE_IDENTIFIER ) self._unique_identifier.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next( enums.Tags.CRYPTOGRAPHIC_PARAMETERS, local_stream ): self._cryptographic_parameters = attributes.CryptographicParameters() self._cryptographic_parameters.read( local_stream, kmip_version=kmip_version ) if self.is_tag_next(enums.Tags.DATA, local_stream): self._data 
= primitives.ByteString(tag=enums.Tags.DATA) self._data.read(local_stream, kmip_version=kmip_version) else: raise ValueError("invalid payload missing the data attribute") if self.is_tag_next(enums.Tags.IV_COUNTER_NONCE, local_stream): self._iv_counter_nonce = primitives.ByteString( tag=enums.Tags.IV_COUNTER_NONCE ) self._iv_counter_nonce.read( local_stream, kmip_version=kmip_version ) if kmip_version >= enums.KMIPVersion.KMIP_1_4: if self.is_tag_next( enums.Tags.AUTHENTICATED_ENCRYPTION_ADDITIONAL_DATA, local_stream ): self._auth_additional_data = primitives.ByteString( tag=enums.Tags.AUTHENTICATED_ENCRYPTION_ADDITIONAL_DATA ) self._auth_additional_data.read( local_stream, kmip_version=kmip_version ) self.is_oversized(local_stream) def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): local_stream = utils.BytearrayStream() if self._unique_identifier: self._unique_identifier.write( local_stream, kmip_version=kmip_version ) if self._cryptographic_parameters: self._cryptographic_parameters.write( local_stream, kmip_version=kmip_version ) if self._data: self._data.write(local_stream, kmip_version=kmip_version) else: raise ValueError("invalid payload missing the data attribute") if self._iv_counter_nonce: self._iv_counter_nonce.write( local_stream, kmip_version=kmip_version ) if kmip_version >= enums.KMIPVersion.KMIP_1_4: if self._auth_additional_data: self._auth_additional_data.write( local_stream, kmip_version=kmip_version ) self.length = local_stream.length() super(EncryptRequestPayload, self).write( output_stream, kmip_version=kmip_version ) output_stream.write(local_stream.buffer) def __eq__(self, other): if isinstance(other, EncryptRequestPayload): if self.unique_identifier != other.unique_identifier: return False elif self.cryptographic_parameters != other.cryptographic_parameters: return False elif self.data != other.data: return False elif self.iv_counter_nonce != other.iv_counter_nonce: return False elif self.auth_additional_data != other.auth_additional_data: return False else: return True else: return NotImplemented def __ne__(self, other): if isinstance(other, EncryptRequestPayload): return not (self == other) else: return NotImplemented def __repr__(self): args = ", ".join([ "unique_identifier='{0}'".format(self.unique_identifier), "cryptographic_parameters={0}".format( repr(self.cryptographic_parameters) ), "data={0}".format(self.data), "iv_counter_nonce={0}".format(self.iv_counter_nonce), "auth_additional_data={0}".format(self.auth_additional_data) ]) return "EncryptRequestPayload({0})".format(args) def __str__(self): return str({ 'unique_identifier': self.unique_identifier, 'cryptographic_parameters': self.cryptographic_parameters, 'data': self.data, 'iv_counter_nonce': self.iv_counter_nonce, 'auth_additional_data': self.auth_additional_data }) class EncryptResponsePayload(base.ResponsePayload): def __init__(self, unique_identifier=None, data=None, iv_counter_nonce=None, auth_tag=None): super(EncryptResponsePayload, self).__init__() self._unique_identifier = None self._data = None self._iv_counter_nonce = None self._auth_tag = None self.unique_identifier = unique_identifier self.data = data self.iv_counter_nonce = iv_counter_nonce self.auth_tag = auth_tag @property def unique_identifier(self): if self._unique_identifier: return self._unique_identifier.value else: return None @unique_identifier.setter def unique_identifier(self, value): if value is None: self._unique_identifier = None elif isinstance(value, six.string_types): self._unique_identifier = 
primitives.TextString( value=value, tag=enums.Tags.UNIQUE_IDENTIFIER ) else: raise TypeError("unique identifier must be a string") @property def data(self): if self._data: return self._data.value else: return None @data.setter def data(self, value): if value is None: self._data = None elif isinstance(value, six.binary_type): self._data = primitives.ByteString( value=value, tag=enums.Tags.DATA ) else: raise TypeError("data must be bytes") @property def iv_counter_nonce(self): if self._iv_counter_nonce: return self._iv_counter_nonce.value else: return None @iv_counter_nonce.setter def iv_counter_nonce(self, value): if value is None: self._iv_counter_nonce = None elif isinstance(value, six.binary_type): self._iv_counter_nonce = primitives.ByteString( value=value, tag=enums.Tags.IV_COUNTER_NONCE ) else: raise TypeError("IV/counter/nonce must be bytes") @property def auth_tag(self): if self._auth_tag: return self._auth_tag.value else: return None @auth_tag.setter def auth_tag(self, value): if value is None: self._auth_tag = None elif isinstance(value, six.binary_type): self._auth_tag = primitives.ByteString( value=value, tag=enums.Tags.AUTHENTICATED_ENCRYPTION_TAG ) else: raise TypeError("authenticated encryption tag must be bytes")
Apache License 2.0
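A round-trip sketch built from pieces visible in this record: encode an EncryptResponsePayload with write() and decode it again with the read() method above. The unique identifier and byte values are invented, and the write() method on the response payload is assumed to mirror the request payload's write() shown in the context.

from kmip.core import enums, utils
from kmip.core.messages.payloads import encrypt

payload = encrypt.EncryptResponsePayload(
    unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',  # made-up identifier
    data=b'\x01\x02\x03',
    iv_counter_nonce=b'\x00' * 8,
)

encoding = utils.BytearrayStream()
payload.write(encoding, kmip_version=enums.KMIPVersion.KMIP_1_4)

decoded = encrypt.EncryptResponsePayload()
decoded.read(utils.BytearrayStream(encoding.buffer),
             kmip_version=enums.KMIPVersion.KMIP_1_4)
print(decoded.unique_identifier, decoded.data)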
mandiant/capa
capa/features/extractors/ida/insn.py
extract_insn_number_features
python
def extract_insn_number_features(f, bb, insn):
    if idaapi.is_ret_insn(insn):
        return
    if capa.features.extractors.ida.helpers.is_sp_modified(insn):
        return
    for op in capa.features.extractors.ida.helpers.get_insn_ops(insn, target_ops=(idaapi.o_imm, idaapi.o_mem)):
        if capa.features.extractors.ida.helpers.is_op_offset(insn, op):
            continue
        if op.type == idaapi.o_imm:
            const = capa.features.extractors.ida.helpers.mask_op_val(op)
        else:
            const = op.addr
        yield Number(const), insn.ea
        yield Number(const, bitness=get_bitness(f.ctx)), insn.ea
parse instruction number features

args:
    f (IDA func_t)
    bb (IDA BasicBlock)
    insn (IDA insn_t)

example:
    push 3136B0h ; dwControlCode
https://github.com/mandiant/capa/blob/23a0aec1e6de99363023d327d1746b10968658c1/capa/features/extractors/ida/insn.py#L119-L152
import idc import idaapi import idautils import capa.features.extractors.helpers import capa.features.extractors.ida.helpers from capa.features.insn import API, Number, Offset, Mnemonic from capa.features.common import ( BITNESS_X32, BITNESS_X64, MAX_BYTES_FEATURE_SIZE, THUNK_CHAIN_DEPTH_DELTA, Bytes, String, Characteristic, ) SECURITY_COOKIE_BYTES_DELTA = 0x40 def get_bitness(ctx): if "bitness" not in ctx: info = idaapi.get_inf_structure() if info.is_64bit(): ctx["bitness"] = BITNESS_X64 elif info.is_32bit(): ctx["bitness"] = BITNESS_X32 else: raise ValueError("unexpected bitness") return ctx["bitness"] def get_imports(ctx): if "imports_cache" not in ctx: ctx["imports_cache"] = capa.features.extractors.ida.helpers.get_file_imports() return ctx["imports_cache"] def check_for_api_call(ctx, insn): info = () ref = insn.ea for _ in range(THUNK_CHAIN_DEPTH_DELTA): try: ref = tuple(idautils.CodeRefsFrom(ref, False))[0] except IndexError: try: ref = tuple(idautils.DataRefsFrom(ref))[0] except IndexError: break info = get_imports(ctx).get(ref, ()) if info: break f = idaapi.get_func(ref) if not f or not (f.flags & idaapi.FUNC_THUNK): break if info: yield "%s.%s" % (info[0], info[1]) def extract_insn_api_features(f, bb, insn): if not insn.get_canon_mnem() in ("call", "jmp"): return for api in check_for_api_call(f.ctx, insn): dll, _, symbol = api.rpartition(".") for name in capa.features.extractors.helpers.generate_symbols(dll, symbol): yield API(name), insn.ea targets = tuple(idautils.CodeRefsFrom(insn.ea, False)) if not targets: return target = targets[0] target_func = idaapi.get_func(target) if not target_func or target_func.start_ea != target: return if target_func.flags & idaapi.FUNC_LIB: name = idaapi.get_name(target_func.start_ea) yield API(name), insn.ea
Apache License 2.0
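A standalone sketch, outside IDA, of the pairing that the generator above yields: each immediate operand produces both a plain Number feature and a bitness-qualified one, so rules can match either form. The constant and virtual address are invented; only names already imported in this record (Number, BITNESS_X32) are used.

from capa.features.insn import Number
from capa.features.common import BITNESS_X32

const, va = 0x3136B0, 0x401000  # e.g. push 3136B0h at 0x401000
features = [
    (Number(const), va),                       # matches regardless of bitness
    (Number(const, bitness=BITNESS_X32), va),  # matches 32-bit rules only
]
for feature, addr in features:
    print(hex(addr), feature)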
ericssonresearch/calvin-base
calvin/runtime/north/authentication/authentication_retrieval_point.py
FileAuthenticationRetrievalPoint.delete_users_db
python
def delete_users_db(self):
    _log.debug("delete_users_db")
    os.remove(os.path.join(self.path, "users.json"))
    self.users_db = None
Delete the policy named policy_id
https://github.com/ericssonresearch/calvin-base/blob/bc4645c2061c30ca305a660e48dc86e3317f5b6f/calvin/runtime/north/authentication/authentication_retrieval_point.py#L169-L173
from abc import ABCMeta, abstractmethod import os import glob import json from calvin.utilities import calvinuuid from calvin.utilities.calvinlogger import get_logger from calvin.utilities import calvinconfig from passlib.hash import pbkdf2_sha256 _log = get_logger(__name__) _conf = calvinconfig.get() _sec_conf = _conf class AuthenticationRetrievalPoint(object): __metaclass__ = ABCMeta @abstractmethod def get_users_db(self): return @abstractmethod def create_users_db(self, data): return @abstractmethod def update_users_db(self, data): return @abstractmethod def delete_users_db(self): return class FileAuthenticationRetrievalPoint(object): def __init__(self, path): _log.debug("FileAuthenticationRetrievalPoint::__init__") self.path = os.path.expanduser(path) self.users_db=self.get_users_db() self.groups_db=self.get_groups_db() if not os.path.exists(self.path): try: os.makedirs(self.path) except OSError as exc: _log.error("Failed to create path, path={}".format(path)) if exc.errno != errno.EEXIST: raise def get_users_db(self): _log.debug("get_users_db") try: self.check_stored_users_db_for_unhashed_passwords() except Exception as err: _log.error("Failed to check for unhashed passwords in users file, err={}".format(err)) return None try: users_db_path = os.path.join(self.path,'users.json') with open(users_db_path,'rt') as data: users_db = json.load(data) except Exception as err: _log.error("No users.json file can be found at path={}, err={}".format(users_db_path, err)) return None return users_db def create_users_db(self, data): _log.debug("create_users_db") hashed_data = self.hash_passwords(data) with open(os.path.join(self.path, "users.json"), "w") as file: json.dump(hashed_data, file) def hash_passwords(self, data): import time _log.debug("hash_passwords\n\tdata={}".format(data)) updates_made = False start = time.time() for username in data: user_data = data[username] try: is_pbkdf2 = pbkdf2_sha256.identify(user_data['password']) except Exception as err: _log.error("Failed to identify if password is PBKDF2 or not:" "\n\tusername={}" "\n\tuser_data={}" "\n\terr={}".format(username, user_data, err)) raise if ('password' in user_data) and not (is_pbkdf2): try: hash = pbkdf2_sha256.encrypt(user_data['password'], rounds=200000, salt_size=16) except Exception as err: _log.error("Failed to calculate PBKDF2 of password, err={}".format(err)) raise user_data['password']=hash updates_made = True _log.debug("hashed_passwords" "\n\tdata={}" "\n\ttime it took to hash passwords={}".format(data, time.time()-start)) return updates_made def update_users_db(self, data): _log.debug("update_users_db" "\n\tdata={}".format(data)) try: self.hash_passwords(data) except Exception as err: _log.error("Failed to hash passwords, err={}".format(err)) raise file_path = os.path.join(self.path, "users.json") if os.path.isfile(file_path): with open(file_path, "w") as file: json.dump(data, file) else: _log.error("update_users_db: file does not exist, file_path={}".format(file_path)) raise IOError self.users_db = data def check_stored_users_db_for_unhashed_passwords(self): _log.debug("check_stored_users_db_for_unhashed_passwords") file_path = os.path.join(self.path, "users.json") if os.path.isfile(file_path): try: with open(file_path, "r+") as file: data = json.load(file) if self.hash_passwords(data): file.seek(0) json.dump(data, file) file.truncate() except Exception as exc: _log.exception("Failed to open users.json, exc={}".format(exc)) else: _log.error("No users.json file, looking at {}".format(file_path)) raise IOError
Apache License 2.0
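A standalone sketch of the hashing step that hash_passwords() performs before the users database is written, and that delete_users_db() above later removes as users.json (note the record's docstring still talks about policies). Only passlib is needed; the plaintext value is invented.

from passlib.hash import pbkdf2_sha256

stored = 'correct horse battery staple'      # plaintext, as it might appear in users.json
if not pbkdf2_sha256.identify(stored):       # same check used by hash_passwords()
    stored = pbkdf2_sha256.encrypt(stored, rounds=200000, salt_size=16)

print(stored.startswith('$pbkdf2-sha256$'))                          # True
print(pbkdf2_sha256.verify('correct horse battery staple', stored))  # True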
kubevirt/client-python
kubevirt/models/v1_volume.py
V1Volume.host_disk
python
def host_disk(self, host_disk):
    self._host_disk = host_disk
Sets the host_disk of this V1Volume. HostDisk represents a disk created on the cluster level :param host_disk: The host_disk of this V1Volume. :type: V1HostDisk
https://github.com/kubevirt/client-python/blob/c9f9d3bc429f783076982b46b194d5f7669eab1b/kubevirt/models/v1_volume.py#L339-L348
from pprint import pformat from six import iteritems import re class V1Volume(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'cloud_init_config_drive': 'V1CloudInitConfigDriveSource', 'cloud_init_no_cloud': 'V1CloudInitNoCloudSource', 'config_map': 'V1ConfigMapVolumeSource', 'container_disk': 'V1ContainerDiskSource', 'data_volume': 'V1DataVolumeSource', 'downward_api': 'V1DownwardAPIVolumeSource', 'downward_metrics': 'V1DownwardMetricsVolumeSource', 'empty_disk': 'V1EmptyDiskSource', 'ephemeral': 'V1EphemeralVolumeSource', 'host_disk': 'V1HostDisk', 'name': 'str', 'persistent_volume_claim': 'V1PersistentVolumeClaimVolumeSource', 'secret': 'V1SecretVolumeSource', 'service_account': 'V1ServiceAccountVolumeSource', 'sysprep': 'V1SysprepSource' } attribute_map = { 'cloud_init_config_drive': 'cloudInitConfigDrive', 'cloud_init_no_cloud': 'cloudInitNoCloud', 'config_map': 'configMap', 'container_disk': 'containerDisk', 'data_volume': 'dataVolume', 'downward_api': 'downwardAPI', 'downward_metrics': 'downwardMetrics', 'empty_disk': 'emptyDisk', 'ephemeral': 'ephemeral', 'host_disk': 'hostDisk', 'name': 'name', 'persistent_volume_claim': 'persistentVolumeClaim', 'secret': 'secret', 'service_account': 'serviceAccount', 'sysprep': 'sysprep' } def __init__(self, cloud_init_config_drive=None, cloud_init_no_cloud=None, config_map=None, container_disk=None, data_volume=None, downward_api=None, downward_metrics=None, empty_disk=None, ephemeral=None, host_disk=None, name=None, persistent_volume_claim=None, secret=None, service_account=None, sysprep=None): self._cloud_init_config_drive = None self._cloud_init_no_cloud = None self._config_map = None self._container_disk = None self._data_volume = None self._downward_api = None self._downward_metrics = None self._empty_disk = None self._ephemeral = None self._host_disk = None self._name = None self._persistent_volume_claim = None self._secret = None self._service_account = None self._sysprep = None if cloud_init_config_drive is not None: self.cloud_init_config_drive = cloud_init_config_drive if cloud_init_no_cloud is not None: self.cloud_init_no_cloud = cloud_init_no_cloud if config_map is not None: self.config_map = config_map if container_disk is not None: self.container_disk = container_disk if data_volume is not None: self.data_volume = data_volume if downward_api is not None: self.downward_api = downward_api if downward_metrics is not None: self.downward_metrics = downward_metrics if empty_disk is not None: self.empty_disk = empty_disk if ephemeral is not None: self.ephemeral = ephemeral if host_disk is not None: self.host_disk = host_disk self.name = name if persistent_volume_claim is not None: self.persistent_volume_claim = persistent_volume_claim if secret is not None: self.secret = secret if service_account is not None: self.service_account = service_account if sysprep is not None: self.sysprep = sysprep @property def cloud_init_config_drive(self): return self._cloud_init_config_drive @cloud_init_config_drive.setter def cloud_init_config_drive(self, cloud_init_config_drive): self._cloud_init_config_drive = cloud_init_config_drive @property def cloud_init_no_cloud(self): return self._cloud_init_no_cloud @cloud_init_no_cloud.setter def cloud_init_no_cloud(self, cloud_init_no_cloud): self._cloud_init_no_cloud = cloud_init_no_cloud @property def config_map(self): return self._config_map 
@config_map.setter def config_map(self, config_map): self._config_map = config_map @property def container_disk(self): return self._container_disk @container_disk.setter def container_disk(self, container_disk): self._container_disk = container_disk @property def data_volume(self): return self._data_volume @data_volume.setter def data_volume(self, data_volume): self._data_volume = data_volume @property def downward_api(self): return self._downward_api @downward_api.setter def downward_api(self, downward_api): self._downward_api = downward_api @property def downward_metrics(self): return self._downward_metrics @downward_metrics.setter def downward_metrics(self, downward_metrics): self._downward_metrics = downward_metrics @property def empty_disk(self): return self._empty_disk @empty_disk.setter def empty_disk(self, empty_disk): self._empty_disk = empty_disk @property def ephemeral(self): return self._ephemeral @ephemeral.setter def ephemeral(self, ephemeral): self._ephemeral = ephemeral @property def host_disk(self): return self._host_disk @host_disk.setter
Apache License 2.0
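A hedged usage sketch for the setter above: build a hostDisk-backed V1Volume. The V1HostDisk field names (path, type) come from the KubeVirt API rather than this record, and the top-level imports assume the generated package re-exports its models, so verify both against the installed client.

from kubevirt import V1HostDisk, V1Volume

volume = V1Volume(
    name='scratch',
    host_disk=V1HostDisk(path='/var/lib/kubevirt/scratch.img', type='DiskOrCreate'),
)

print(volume.name)            # scratch
print(volume.host_disk.path)  # /var/lib/kubevirt/scratch.img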
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/server/initial_setup.py
get_god_account
python
def get_god_account():
    try:
        god_account = AccountDB.objects.get(id=1)
    except AccountDB.DoesNotExist:
        raise AccountDB.DoesNotExist(ERROR_NO_SUPERUSER)
    return god_account
Creates the god user and don't take no for an answer.
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/server/initial_setup.py#L46-L55
import time from django.conf import settings from django.utils.translation import ugettext as _ from evennia.accounts.models import AccountDB from evennia.server.models import ServerConfig from evennia.utils import create, logger ERROR_NO_SUPERUSER = """ No superuser exists yet. The superuser is the 'owner' account on the Evennia server. Create a new superuser using the command evennia createsuperuser Follow the prompts, then restart the server. """ LIMBO_DESC = _( """ Welcome to your new |wEvennia|n-based game! Visit http://www.evennia.com if you need help, want to contribute, report issues or just join the community. As Account #1 you can create a demo/tutorial area with |w@batchcommand tutorial_world.build|n. """ ) WARNING_POSTGRESQL_FIX = """ PostgreSQL-psycopg2 compatibility fix: The in-game channels {chan1}, {chan2} and {chan3} were created, but the superuser was not yet connected to them. Please use in game commands to connect Account #1 to those channels when first logging in. """
MIT License
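A hedged sketch of how a setup step consumes get_god_account(); it is meant to run inside an initialized Evennia environment (for example `evennia shell`), since the query needs Django and the game database to be configured.

from evennia.accounts.models import AccountDB
from evennia.server import initial_setup

try:
    god = initial_setup.get_god_account()
    print('superuser id:', god.id)
except AccountDB.DoesNotExist as exc:
    print(exc)  # surfaces the ERROR_NO_SUPERUSER guidance text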
pytorchlightning/lightning-bolts
pl_bolts/datamodules/vision_datamodule.py
VisionDataModule.default_transforms
python
def default_transforms(self) -> Callable:
Default transform for the dataset.
https://github.com/pytorchlightning/lightning-bolts/blob/f4f6d53a039c521f3441750fa5297c7694320119/pl_bolts/datamodules/vision_datamodule.py#L108-L109
import os from abc import abstractmethod from typing import Any, Callable, List, Optional, Union import torch from pytorch_lightning import LightningDataModule from torch.utils.data import DataLoader, Dataset, random_split class VisionDataModule(LightningDataModule): EXTRA_ARGS: dict = {} name: str = "" dataset_cls: type dims: tuple def __init__( self, data_dir: Optional[str] = None, val_split: Union[int, float] = 0.2, num_workers: int = 0, normalize: bool = False, batch_size: int = 32, seed: int = 42, shuffle: bool = True, pin_memory: bool = True, drop_last: bool = False, *args: Any, **kwargs: Any, ) -> None: super().__init__(*args, **kwargs) self.data_dir = data_dir if data_dir is not None else os.getcwd() self.val_split = val_split self.num_workers = num_workers self.normalize = normalize self.batch_size = batch_size self.seed = seed self.shuffle = shuffle self.pin_memory = pin_memory self.drop_last = drop_last def prepare_data(self, *args: Any, **kwargs: Any) -> None: self.dataset_cls(self.data_dir, train=True, download=True) self.dataset_cls(self.data_dir, train=False, download=True) def setup(self, stage: Optional[str] = None) -> None: if stage == "fit" or stage is None: train_transforms = self.default_transforms() if self.train_transforms is None else self.train_transforms val_transforms = self.default_transforms() if self.val_transforms is None else self.val_transforms dataset_train = self.dataset_cls(self.data_dir, train=True, transform=train_transforms, **self.EXTRA_ARGS) dataset_val = self.dataset_cls(self.data_dir, train=True, transform=val_transforms, **self.EXTRA_ARGS) self.dataset_train = self._split_dataset(dataset_train) self.dataset_val = self._split_dataset(dataset_val, train=False) if stage == "test" or stage is None: test_transforms = self.default_transforms() if self.test_transforms is None else self.test_transforms self.dataset_test = self.dataset_cls( self.data_dir, train=False, transform=test_transforms, **self.EXTRA_ARGS ) def _split_dataset(self, dataset: Dataset, train: bool = True) -> Dataset: len_dataset = len(dataset) splits = self._get_splits(len_dataset) dataset_train, dataset_val = random_split(dataset, splits, generator=torch.Generator().manual_seed(self.seed)) if train: return dataset_train return dataset_val def _get_splits(self, len_dataset: int) -> List[int]: if isinstance(self.val_split, int): train_len = len_dataset - self.val_split splits = [train_len, self.val_split] elif isinstance(self.val_split, float): val_len = int(self.val_split * len_dataset) train_len = len_dataset - val_len splits = [train_len, val_len] else: raise ValueError(f"Unsupported type {type(self.val_split)}") return splits @abstractmethod
Apache License 2.0
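A hedged sketch of filling in the abstract hook above: a concrete datamodule only needs to name a torchvision dataset class and return a transform from default_transforms(). The FashionMNIST choice and the normalization constants are arbitrary example values, not from the record.

from typing import Callable
from torchvision import transforms as T
from torchvision.datasets import FashionMNIST
from pl_bolts.datamodules.vision_datamodule import VisionDataModule

class FashionMNISTDataModule(VisionDataModule):
    name = 'fashion_mnist'
    dataset_cls = FashionMNIST
    dims = (1, 28, 28)

    def default_transforms(self) -> Callable:
        # Used for train/val/test whenever no explicit transform is supplied.
        if self.normalize:
            return T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])
        return T.ToTensor()

dm = FashionMNISTDataModule('.', val_split=0.1, batch_size=64)
dm.prepare_data()      # downloads FashionMNIST on first run
dm.setup('fit')
print(len(dm.dataset_train), len(dm.dataset_val))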
packit/ogr
ogr/abstract.py
Comment.edited
python
def edited(self) -> datetime.datetime:
    return self._edited
Datetime of last edit of the comment.
https://github.com/packit/ogr/blob/2f2eec1a71b58efff0dc43cdbca28a4b8de8c38a/ogr/abstract.py#L188-L190
import datetime import functools import warnings from enum import Enum, IntEnum from typing import ( Optional, Match, List, Dict, Set, TypeVar, Any, Sequence, Union, Callable, ) from urllib.request import urlopen import github import gitlab import requests from ogr.deprecation import deprecate_and_set_removal from ogr.exceptions import ( OgrException, GitlabAPIException, GithubAPIException, OgrNetworkError, ) from ogr.parsing import parse_git_repo try: from functools import cached_property as _cached_property except ImportError: from functools import lru_cache def _cached_property(func): return property(lru_cache()(func)) AnyComment = TypeVar("AnyComment", bound="Comment") def catch_common_exceptions(function: Callable) -> Any: @functools.wraps(function) def wrapper(*args, **kwargs): try: return function(*args, **kwargs) except github.BadCredentialsException as ex: raise GithubAPIException("Invalid Github credentials") from ex except gitlab.GitlabAuthenticationError as ex: raise GitlabAPIException("Invalid Gitlab credentials") from ex except requests.exceptions.ConnectionError as ex: raise OgrNetworkError( "Could not perform the request due to a network error" ) from ex return wrapper class CatchCommonErrors(type): def __new__(cls, name, bases, namespace): for key, value in namespace.items(): if isinstance(value, staticmethod): namespace[key] = staticmethod(catch_common_exceptions(value.__func__)) elif isinstance(value, classmethod): namespace[key] = classmethod(catch_common_exceptions(value.__func__)) elif callable(namespace[key]): namespace[key] = catch_common_exceptions(namespace[key]) return super().__new__(cls, name, bases, namespace) class OgrAbstractClass(metaclass=CatchCommonErrors): def __repr__(self) -> str: return f"<{str(self)}>" class Reaction(OgrAbstractClass): def __init__(self, raw_reaction: Any) -> None: self._raw_reaction = raw_reaction def __str__(self): return f"Reaction(raw_reaction={self._raw_reaction})" def delete(self) -> None: raise NotImplementedError() class Comment(OgrAbstractClass): def __init__( self, raw_comment: Optional[Any] = None, parent: Optional[Any] = None, body: Optional[str] = None, author: Optional[str] = None, created: Optional[datetime.datetime] = None, edited: Optional[datetime.datetime] = None, ) -> None: if raw_comment: self._from_raw_comment(raw_comment) elif body and author: self._body = body self._author = author self._created = created self._edited = edited else: raise ValueError("cannot construct comment without body and author") self._parent = parent def __str__(self) -> str: body = f"{self.body[:10]}..." if self.body is not None else "None" return ( f"Comment(" f"comment='{body}', " f"author='{self.author}', " f"created='{self.created}', " f"edited='{self.edited}')" ) def _from_raw_comment(self, raw_comment: Any) -> None: raise NotImplementedError() @property def comment(self) -> str: warnings.warn( "Using deprecated property, that will be removed in 0.14.0" " (or 1.0.0 if it comes sooner). Please use body. " ) return self.body @property def body(self) -> str: return self._body @body.setter def body(self, new_body: str) -> None: self._body = new_body @property def author(self) -> str: return self._author @property def created(self) -> datetime.datetime: return self._created @property
MIT License
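A sketch that exercises only the base class from this record: a Comment built from plain values (no forge backend), showing the created and edited timestamps. The body, author and dates are invented.

import datetime
from ogr.abstract import Comment

comment = Comment(
    body='LGTM, needs a rebase',
    author='packit-bot',
    created=datetime.datetime(2021, 6, 1, 12, 0),
    edited=datetime.datetime(2021, 6, 2, 9, 30),
)

print(comment.edited)  # 2021-06-02 09:30:00
print(comment)         # Comment(comment='LGTM, need...', author='packit-bot', ...)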
testproject-io/python-sdk
src/testproject/classes/web_driver_wait.py
TestProjectWebDriverWait.get_report_details
python
def get_report_details(self, method):
    step_name = " ".join(method.__class__.__name__.split("_"))
    attributes_dict = {
        attribute: json.dumps(getattr(method, attribute))
        for attribute in self.get_user_attributes(method)
    }
    return step_name, attributes_dict
Returns the inferred report details.

Attributes:
    method: a callable expected condition class.

Examples:
    Assuming the method sent to the WebDriverWait's wait function is title_is(title="some title")...
    The method class's name is 'title_is', so the returned step_name will be 'title is'.
    The method attributes dict will be {"title": "some title"}.

Returns:
    step_name (str): The method class's name, with underscores replaced by spaces.
    attributes_dict (dict): All the method's attributes and their values.
https://github.com/testproject-io/python-sdk/blob/efae0de04936bacc5358e3cdc3b86fe6c14434fb/src/testproject/classes/web_driver_wait.py#L105-L125
import os import json from selenium.common.exceptions import TimeoutException from selenium.webdriver.support.wait import WebDriverWait from src.testproject.classes import DriverStepSettings, StepSettings class TestProjectWebDriverWait(WebDriverWait): def __init__(self, driver, timeout): super().__init__(driver, timeout) self._driver = driver def until(self, method, message=""): return self.execute("until", method, message) def until_not(self, method, message=""): return self.execute("until_not", method, message) def execute(self, function_name, method, message): timeout_exception = None result = None step_helper = self.driver.command_executor.step_helper step_settings = self.driver.command_executor.settings reports_disabled = self._driver.command_executor.disable_reports self._driver.report().disable_reports(True) step_helper.handle_timeout(timeout=step_settings.timeout) step_helper.handle_sleep( sleep_timing_type=step_settings.sleep_timing_type, sleep_time=step_settings.sleep_time, ) with DriverStepSettings(self._driver, StepSettings()): try: result = getattr(super(), function_name)(method, message) passed = True if result else False except TimeoutException as e: passed = False timeout_exception = e step_helper.handle_sleep( sleep_timing_type=step_settings.sleep_timing_type, sleep_time=step_settings.sleep_time, step_executed=True, ) passed, step_message = step_helper.handle_step_result( step_result=passed, invert_result=step_settings.invert_result, always_pass=step_settings.always_pass, ) screenshot = step_helper.take_screenshot(step_settings.screenshot_condition, passed) self._driver.report().disable_reports(reports_disabled) function_name = " ".join(function_name.split("_")) step_name, step_attributes = self.get_report_details(method) self._driver.report().step( description=f"Wait {function_name} {step_name}", message=f"{step_message}{os.linesep}", passed=passed, inputs=step_attributes, screenshot=screenshot, ) if not result and step_settings.always_pass: return True if timeout_exception: raise timeout_exception return result
Apache License 2.0
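A standalone sketch of the same inference applied to a real selenium expected condition; vars() stands in for the SDK's get_user_attributes() helper, which is not shown in this record.

import json
from selenium.webdriver.support import expected_conditions as EC

condition = EC.title_is('some title')
step_name = ' '.join(condition.__class__.__name__.split('_'))
attributes = {name: json.dumps(value) for name, value in vars(condition).items()}

print(step_name)   # title is
print(attributes)  # {'title': '"some title"'}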
hackatbrown/2015.hackatbrown.org
hack-at-brown-2015/cssutils/css/cssvariablesrule.py
CSSVariablesRule._setCssText
python
def _setCssText(self, cssText): super(CSSVariablesRule, self)._setCssText(cssText) tokenizer = self._tokenize2(cssText) attoken = self._nexttoken(tokenizer, None) if self._type(attoken) != self._prods.VARIABLES_SYM: self._log.error(u'CSSVariablesRule: No CSSVariablesRule found: %s' % self._valuestr(cssText), error=xml.dom.InvalidModificationErr) else: newVariables = CSSVariablesDeclaration(parentRule=self) ok = True beforetokens, brace = self._tokensupto2(tokenizer, blockstartonly=True, separateEnd=True) if self._tokenvalue(brace) != u'{': ok = False self._log.error(u'CSSVariablesRule: No start { of variable ' u'declaration found: %r' % self._valuestr(cssText), brace) new = {'wellformed': True} newseq = self._tempSeq() beforewellformed, expected = self._parse(expected=':', seq=newseq, tokenizer=self._tokenize2(beforetokens), productions={}) ok = ok and beforewellformed and new['wellformed'] variablestokens, braceorEOFtoken = self._tokensupto2(tokenizer, blockendonly=True, separateEnd=True) val, type_ = self._tokenvalue(braceorEOFtoken), self._type(braceorEOFtoken) if val != u'}' and type_ != 'EOF': ok = False self._log.error(u'CSSVariablesRule: No "}" after variables ' u'declaration found: %r' % self._valuestr(cssText)) nonetoken = self._nexttoken(tokenizer) if nonetoken: ok = False self._log.error(u'CSSVariablesRule: Trailing content found.', token=nonetoken) if 'EOF' == type_: variablestokens.append(braceorEOFtoken) newVariables.cssText = variablestokens if ok: self._setSeq(newseq) self.variables = newVariables
:exceptions:
    - :exc:`~xml.dom.SyntaxErr`:
      Raised if the specified CSS string value has a syntax error and is unparsable.
    - :exc:`~xml.dom.InvalidModificationErr`:
      Raised if the specified CSS string value represents a different type of rule than the current one.
    - :exc:`~xml.dom.HierarchyRequestErr`:
      Raised if the rule cannot be inserted at this point in the style sheet.
    - :exc:`~xml.dom.NoModificationAllowedErr`:
      Raised if the rule is readonly.

Format::

    variables
      : VARIABLES_SYM S* medium [ COMMA S* medium ]* LBRACE S* variableset* '}' S*
      ;

    variableset
      : LBRACE S* vardeclaration [ ';' S* vardeclaration ]* '}' S*
      ;
https://github.com/hackatbrown/2015.hackatbrown.org/blob/6e6e10b010421228deb562909a1c8bb4272b759f/hack-at-brown-2015/cssutils/css/cssvariablesrule.py#L82-L165
__all__ = ['CSSVariablesRule'] __docformat__ = 'restructuredtext' __version__ = '$Id: cssfontfacerule.py 1818 2009-07-30 21:39:00Z cthedot $' from cssvariablesdeclaration import CSSVariablesDeclaration import cssrule import cssutils import xml.dom class CSSVariablesRule(cssrule.CSSRule): def __init__(self, mediaText=None, variables=None, parentRule=None, parentStyleSheet=None, readonly=False): super(CSSVariablesRule, self).__init__(parentRule=parentRule, parentStyleSheet=parentStyleSheet) self._atkeyword = u'@variables' self._media = cssutils.stylesheets.MediaList(mediaText, readonly=readonly) if variables: self.variables = variables else: self.variables = CSSVariablesDeclaration(parentRule=self) self._readonly = readonly def __repr__(self): return u"cssutils.css.%s(mediaText=%r, variables=%r)" % ( self.__class__.__name__, self._media.mediaText, self.variables.cssText) def __str__(self): return u"<cssutils.css.%s object mediaText=%r variables=%r valid=%r " u"at 0x%x>" % (self.__class__.__name__, self._media.mediaText, self.variables.cssText, self.valid, id(self)) def _getCssText(self): return cssutils.ser.do_CSSVariablesRule(self)
MIT License
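The _setCssText record above leans on cssutils' tokenizer and CSSVariablesDeclaration to pull name/value pairs out of the braces of an @variables rule. As a rough, regex-only illustration of that brace-delimited structure (the function and sample rule below are made up for this sketch and are not cssutils API):

import re

def parse_variables_rule(css_text):
    """Very rough stand-in: pull `name: value` pairs out of an @variables block."""
    match = re.match(r"^\s*@variables\s*\{(?P<body>.*)\}\s*$", css_text, re.S)
    if not match:
        raise ValueError("not an @variables rule: %r" % css_text)
    declarations = {}
    for declaration in match.group("body").split(";"):
        if ":" in declaration:
            name, value = declaration.split(":", 1)
            declarations[name.strip()] = value.strip()
    return declarations

print(parse_variables_rule("@variables { CorporateLogoBGColor: #fe8d12; Spacing: 24px }"))
# {'CorporateLogoBGColor': '#fe8d12', 'Spacing': '24px'}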
pyglet/pyglet
pyglet/math.py
Vec2.clamp
python
def clamp(self, min_val, max_val):
    return Vec2(clamp(self[0], min_val, max_val), clamp(self[1], min_val, max_val))
Restrict the value of the X and Y components of the vector to be
within the given values.

:parameters:
    `min_val` : int or float :
        The minimum value
    `max_val` : int or float :
        The maximum value

:returns: A new vector with clamped X and Y components.
:rtype: Vec2
https://github.com/pyglet/pyglet/blob/b9a63ea179735c8f252ac31d51751bdf8a741c9d/pyglet/math.py#L255-L267
import math as _math import warnings as _warnings from operator import mul as _mul def clamp(num, min_val, max_val): return max(min(num, max_val), min_val) class Vec2(tuple): def __new__(cls, *args): assert len(args) in (0, 2), "0 or 2 values are required for Vec2 types." return super().__new__(Vec2, args or (0, 0)) @staticmethod def from_polar(mag, angle): return Vec2(mag * _math.cos(angle), mag * _math.sin(angle)) @property def x(self): return self[0] @property def y(self): return self[1] @property def heading(self): return _math.atan2(self[1], self[0]) @property def mag(self): return self.__abs__() def __add__(self, other): return Vec2(self[0] + other[0], self[1] + other[1]) def __sub__(self, other): return Vec2(self[0] - other[0], self[1] - other[1]) def __mul__(self, other): return Vec2(self[0] * other[0], self[1] * other[1]) def __truediv__(self, other): return Vec2(self[0] / other[0], self[1] / other[1]) def __abs__(self): return _math.sqrt(self[0] ** 2 + self[1] ** 2) def __neg__(self): return Vec2(-self[0], -self[1]) def __round__(self, ndigits=None): return Vec2(*(round(v, ndigits) for v in self)) def __radd__(self, other): if other == 0: return self else: return self.__add__(other) def from_magnitude(self, magnitude): return self.normalize().scale(magnitude) def from_heading(self, heading): mag = self.__abs__() return Vec2(mag * _math.cos(heading), mag * _math.sin(heading)) def limit(self, max): if self[0] ** 2 + self[1] ** 2 > max * max: return self.from_magnitude(max) return self def lerp(self, other, alpha): return Vec2(self[0] + (alpha * (other[0] - self[0])), self[1] + (alpha * (other[1] - self[1]))) def scale(self, value): return Vec2(self[0] * value, self[1] * value) def rotate(self, angle): mag = self.mag heading = self.heading return Vec2(mag * _math.cos(heading + angle), mag * _math.sin(heading+angle)) def distance(self, other): return _math.sqrt(((other[0] - self[0]) ** 2) + ((other[1] - self[1]) ** 2)) def normalize(self): d = self.__abs__() if d: return Vec2(self[0] / d, self[1] / d) return self
BSD 3-Clause New or Revised License
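A minimal usage sketch for the clamp method above, assuming the pyglet.math version shown in this record is installed (the API may differ in newer pyglet releases):

from pyglet.math import Vec2

v = Vec2(3.5, -2.0)
clamped = v.clamp(0.0, 1.0)
print(clamped)  # each component limited to [0.0, 1.0]: x -> 1.0, y -> 0.0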
cn-uofbasel/picn
PiCN/Layers/AutoconfigLayer/AutoconfigServerLayer.py
AutoconfigServerLayer.__init__
python
def __init__(self, linklayer: BasicLinkLayer, address: str = '127.0.0.1',
             registration_prefixes: List[Tuple[Name, bool]] = list(), log_level: int = 255):
    super().__init__(logger_name='AutoconfigLayer', log_level=log_level)
    self._linklayer: BasicLinkLayer = linklayer
    self.fib: BaseForwardingInformationBase = None
    self.rib: BaseRoutingInformationBase = None
    self._announce_addr: str = address
    self._known_services: List[Tuple[Name, Tuple[str, int], datetime]] = []
    self._service_registration_prefixes: List[Tuple[Name, bool]] = registration_prefixes
    self._service_registration_timeout = timedelta(hours=1)
    self._bc_interfaces: List[int] = list()
    if self._linklayer is not None:
        for i in range(len(self._linklayer.interfaces)):
            interface = self._linklayer.interfaces[i]
            if interface.enable_broadcast():
                self._bc_interfaces.append(i)
:param linklayer:
:param address:
:param log_level:
https://github.com/cn-uofbasel/picn/blob/64ed40242657238e9f1d522d5873173f0b93a30e/PiCN/Layers/AutoconfigLayer/AutoconfigServerLayer.py#L23-L49
from typing import List, Tuple, Optional

import multiprocessing
from datetime import datetime, timedelta

from PiCN.Layers.LinkLayer import BasicLinkLayer
from PiCN.Layers.LinkLayer.Interfaces import AddressInfo, UDP4Interface
from PiCN.Processes import LayerProcess
from PiCN.Packets import Packet, Interest, Content, Nack, NackReason, Name
from PiCN.Layers.ICNLayer.ForwardingInformationBase import ForwardingInformationBaseEntry, BaseForwardingInformationBase
from PiCN.Layers.RoutingLayer.RoutingInformationBase import BaseRoutingInformationBase

_AUTOCONFIG_PREFIX: Name = Name('/autoconfig')
_AUTOCONFIG_FORWARDERS_PREFIX: Name = Name('/autoconfig/forwarders')
_AUTOCONFIG_SERVICE_LIST_PREFIX: Name = Name('/autoconfig/services')
_AUTOCONFIG_SERVICE_REGISTRATION_PREFIX: Name = Name('/autoconfig/service')


class AutoconfigServerLayer(LayerProcess):
BSD 3-Clause New or Revised License
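The constructor above remembers the indices of the link-layer interfaces that allow broadcasting. A self-contained sketch of that filtering step, with a made-up FakeInterface standing in for PiCN's interface objects:

class FakeInterface:
    """Stand-in for a PiCN link-layer interface (name invented for this sketch)."""
    def __init__(self, can_broadcast):
        self._can_broadcast = can_broadcast

    def enable_broadcast(self):
        return self._can_broadcast


interfaces = [FakeInterface(True), FakeInterface(False), FakeInterface(True)]
bc_interfaces = [i for i, iface in enumerate(interfaces) if iface.enable_broadcast()]
print(bc_interfaces)  # [0, 2] -- the indices the layer would keep for broadcast announcements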
sergioteula/python-amazon-paapi
amazon/paapi5_python_sdk/item_info.py
ItemInfo.classifications
python
def classifications(self):
    return self._classifications
Gets the classifications of this ItemInfo.  # noqa: E501

:return: The classifications of this ItemInfo.  # noqa: E501
:rtype: Classifications
https://github.com/sergioteula/python-amazon-paapi/blob/9cb744bef17f5127231367430191df12126e9c24/amazon/paapi5_python_sdk/item_info.py#L145-L152
import pprint import re import six from .by_line_info import ByLineInfo from .classifications import Classifications from .content_info import ContentInfo from .content_rating import ContentRating from .external_ids import ExternalIds from .manufacture_info import ManufactureInfo from .multi_valued_attribute import MultiValuedAttribute from .product_info import ProductInfo from .single_string_valued_attribute import SingleStringValuedAttribute from .technical_info import TechnicalInfo from .trade_in_info import TradeInInfo class ItemInfo(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'by_line_info': 'ByLineInfo', 'classifications': 'Classifications', 'content_info': 'ContentInfo', 'content_rating': 'ContentRating', 'external_ids': 'ExternalIds', 'features': 'MultiValuedAttribute', 'manufacture_info': 'ManufactureInfo', 'product_info': 'ProductInfo', 'technical_info': 'TechnicalInfo', 'title': 'SingleStringValuedAttribute', 'trade_in_info': 'TradeInInfo' } attribute_map = { 'by_line_info': 'ByLineInfo', 'classifications': 'Classifications', 'content_info': 'ContentInfo', 'content_rating': 'ContentRating', 'external_ids': 'ExternalIds', 'features': 'Features', 'manufacture_info': 'ManufactureInfo', 'product_info': 'ProductInfo', 'technical_info': 'TechnicalInfo', 'title': 'Title', 'trade_in_info': 'TradeInInfo' } def __init__(self, by_line_info=None, classifications=None, content_info=None, content_rating=None, external_ids=None, features=None, manufacture_info=None, product_info=None, technical_info=None, title=None, trade_in_info=None): self._by_line_info = None self._classifications = None self._content_info = None self._content_rating = None self._external_ids = None self._features = None self._manufacture_info = None self._product_info = None self._technical_info = None self._title = None self._trade_in_info = None self.discriminator = None if by_line_info is not None: self.by_line_info = by_line_info if classifications is not None: self.classifications = classifications if content_info is not None: self.content_info = content_info if content_rating is not None: self.content_rating = content_rating if external_ids is not None: self.external_ids = external_ids if features is not None: self.features = features if manufacture_info is not None: self.manufacture_info = manufacture_info if product_info is not None: self.product_info = product_info if technical_info is not None: self.technical_info = technical_info if title is not None: self.title = title if trade_in_info is not None: self.trade_in_info = trade_in_info @property def by_line_info(self): return self._by_line_info @by_line_info.setter def by_line_info(self, by_line_info): self._by_line_info = by_line_info @property
MIT License
google-research/tapas
tapas/utils/pretrain_utils.py
split_by_table_id_and_write
python
def split_by_table_id_and_write(
    examples,
    output_dir,
    train_suffix = ".tfrecord",
    test_suffix = ".tfrecord",
    num_splits = 100,
    proto_message=interaction_pb2.Interaction,
):
    train, test = (
        examples | "Partition" >> beam.Partition(partition_fn, 2, num_splits))
    for name, suffix, data in zip(
        ["train", "test"],
        [train_suffix, test_suffix],
        [train, test],
    ):
        output_file = os.path.join(output_dir, name + suffix)
        write_proto_outputs(output_file, name, data, proto_message)
Split interactions into train and test and write them to disc.
https://github.com/google-research/tapas/blob/733aca5273e560ad8c6380b7be984a3a680e97f6/tapas/utils/pretrain_utils.py#L209-L228
import os import random from typing import Iterable, Tuple, Text, Optional, Union import apache_beam as beam from tapas.protos import interaction_pb2 from tapas.utils import number_annotation_utils from tapas.utils import tf_example_utils import tensorflow.compat.v1 as tf from google.protobuf import text_format _NS = "main" _KeyInteraction = Tuple[Text, interaction_pb2.Interaction] _KeyInteractionTable = Tuple[Text, Tuple[interaction_pb2.Interaction, Optional[interaction_pb2.Table]]] _Proto = Union[interaction_pb2.Interaction, interaction_pb2.Table, tf.train.Example] def fingerprint(key): return "%08X" % abs(tf_example_utils.fingerprint(key)) def prepand_fingerprint(key): return "%s_%s" % (fingerprint(key), key) def _has_valid_shape(table): if not table.columns: return False if not table.rows: return False num_columns = len(table.columns) for row in table.rows: if len(row.cells) != num_columns: return False return True def check_table_id_fn( key_interaction): key, interaction = key_interaction if not _has_valid_shape(interaction.table): beam.metrics.Metrics.counter(_NS, "Tables empty or of ragged shape").inc() return if interaction.id and interaction.table.table_id and all( bool(q.id) for q in interaction.questions): yield key_interaction else: new_interaction = interaction_pb2.Interaction() new_interaction.CopyFrom(interaction) for question in new_interaction.questions: if not question.id: question.id = key beam.metrics.Metrics.counter(_NS, "Question Ids added").inc() if not new_interaction.table.table_id: new_interaction.table.table_id = key beam.metrics.Metrics.counter(_NS, "Table Ids added").inc() if not new_interaction.id: new_interaction.id = key beam.metrics.Metrics.counter(_NS, "Interaction Ids added").inc() yield key, new_interaction def check_tale_size_fn(key_interaction, min_num_rows, min_num_columns): beam.metrics.Metrics.counter(_NS, "Inputs").inc() _, interaction = key_interaction num_rows = len(interaction.table.rows) if num_rows < min_num_rows: beam.metrics.Metrics.counter(_NS, "Inputs: Too few rows").inc() return num_cols = len(interaction.table.columns) if num_cols < min_num_columns: beam.metrics.Metrics.counter(_NS, "Inputs: Too few columns").inc() return yield key_interaction def add_numeric_values_fn(element): key, interaction = element new_interaction = interaction_pb2.Interaction() new_interaction.CopyFrom(interaction) number_annotation_utils.add_numeric_values(new_interaction) return key, new_interaction def duplicate_fn(key_interaction, dupe_factor): key, interaction = key_interaction for dupe_index in range(dupe_factor): new_id = "%s_%d" % (key, dupe_index) yield new_id, interaction def _parse_text_proto( text_proto_line, proto_message, ): message = text_format.Parse(text_proto_line, proto_message()) return (_get_input_id(message), message) def _parse_text_interaction(text_proto_line,): output = _parse_text_proto(text_proto_line, interaction_pb2.Interaction) assert isinstance(output[1], interaction_pb2.Interaction) return output def _proto_to_text(message): return text_format.MessageToString(message, as_one_line=True) class ToTensorflowExample(beam.DoFn): def __init__(self, config): self._config = config def start_bundle(self): self._converter = tf_example_utils.ToPretrainingTensorflowExample( self._config) def process( self, element): beam.metrics.Metrics.counter(_NS, "Interactions").inc() key, (interaction, random_table) = element seed = tf_example_utils.fingerprint( "%s_%d_%d" % (key, self._config.random_seed, self._config.max_seq_length)) rng = 
random.Random(seed) example = self._converter.convert(rng, interaction, random_table) if example: beam.metrics.Metrics.counter(_NS, "Examples").inc() yield prepand_fingerprint(key), example _MAX_INT = 2**32 - 1 def to_numpy_seed(obj): return tf_example_utils.fingerprint(repr(obj)) % _MAX_INT def partition_fn( example, partition_count, num_splits, ): assert partition_count == 2 example_id = _get_table_id(example[1]) shard = to_numpy_seed(example_id) % num_splits if shard == 0: return 1 return 0 def write_proto_outputs(output_file, name, data, proto_message): if output_file.endswith((".txtpb.gz", ".txtpb")): _ = ( data | "DropKey_%s" % name >> beam.Values() | "ToTextProto" % name >> beam.Map( _proto_to_text, proto_message=proto_message, ) | "WriteTextExamples_%s" % name >> beam.io.WriteToText(output_file)) return elif output_file.endswith(".tfrecord"): _ = ( data | "DropKey_%s" % name >> beam.Values() | "WriteTFRecordsExamples_%s" % name >> beam.io.WriteToTFRecord( file_path_prefix=output_file, shard_name_template="", coder=beam.coders.ProtoCoder(proto_message))) return raise ValueError(f"Unsupported output format: {output_file}")
Apache License 2.0
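split_by_table_id_and_write above relies on partition_fn, which hashes a table id so that roughly one table in num_splits lands in the test split. A standalone sketch of that idea, using zlib.crc32 as a stand-in for the pipeline's fingerprint hash and no Beam:

import zlib

def assign_split(table_id, num_splits=100):
    """Hash the table id and send ~1/num_splits of the tables to the test split."""
    return "test" if zlib.crc32(table_id.encode("utf-8")) % num_splits == 0 else "train"

counts = {"train": 0, "test": 0}
for i in range(10_000):
    counts[assign_split(f"table_{i}")] += 1
print(counts)  # roughly 99% train, 1% test, and the assignment is deterministic per id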
pokerregion/poker
poker/handhistory.py
_BaseHandHistory._parse_date
python
def _parse_date(self, date_string):
    date = datetime.strptime(date_string, self._DATE_FORMAT)
    self.date = self._TZ.localize(date).astimezone(pytz.UTC)
Parse the date_string and return a datetime object as UTC.
https://github.com/pokerregion/poker/blob/ca132b218a484382bfb4a6a5c372c5ae82bb7667/poker/handhistory.py#L187-L190
import io import itertools from datetime import datetime import attr import pytz from zope.interface import Interface, Attribute from cached_property import cached_property from .card import Rank @attr.s(slots=True) class _Player: name = attr.ib() stack = attr.ib() seat = attr.ib() combo = attr.ib() @attr.s(slots=True) class _PlayerAction: name = attr.ib() action = attr.ib() amount = attr.ib() class IStreet(Interface): actions = Attribute("_StreetAction instances.") cards = Attribute("Cards.") pot = Attribute("Pot size after actions.") class IHandHistory(Interface): header_parsed = Attribute("Shows wheter header is parsed already or not.") parsed = Attribute("Shows wheter the whole hand history is parsed already or not.") date = Attribute("Date of the hand history.") preflop = Attribute("_Street instance for preflop actions.") flop = Attribute("_Street instance for flop actions.") turn = Attribute("_Street instance for turn actions.") river = Attribute("_Street instance for river actions.") show_down = Attribute("_Street instance for showdown.") table_name = Attribute("Name of") max_players = Attribute("Maximum number of players can sit on the table.") players = Attribute("Tuple of player instances.") hero = Attribute("_Player instance with hero data.") button = Attribute("_Player instance of button.") winners = Attribute("Tuple of _Player instances with winners.") game_type = Attribute("GameType enum value (CASH, TOUR or SNG)") sb = Attribute("Small blind size.") bb = Attribute("Big blind size.") buyin = Attribute("Buyin with rake.") rake = Attribute("Rake only.") game = Attribute("Game enum value (HOLDEM, OMAHA? OHILO, RAZZ or STUD)") limit = Attribute("Limit enum value (NL, PL or FL)") ident = Attribute("Unique id of the hand history.") currency = Attribute("Currency of the hand history.") total_pot = Attribute("Total pot Decimal.") tournament_ident = Attribute("Unique tournament id.") tournament_name = Attribute("Name of the tournament.") tournament_level = Attribute("Tournament level.") def parse_header(): def parse(): class _BaseStreet: def __init__(self, flop): self.pot = None self.actions = None self.cards = None self._parse_cards(flop[0]) self._parse_actions(flop[1:]) self._all_combinations = itertools.combinations(self.cards, 2) @cached_property def is_rainbow(self): return all( first.suit != second.suit for first, second in self._all_combinations ) @cached_property def is_monotone(self): return all( first.suit == second.suit for first, second in self._all_combinations ) @cached_property def is_triplet(self): return all( first.rank == second.rank for first, second in self._all_combinations ) @cached_property def has_pair(self): return any( first.rank == second.rank for first, second in self._all_combinations ) @cached_property def has_straightdraw(self): return any(1 <= diff <= 3 for diff in self._get_differences()) @cached_property def has_gutshot(self): return any(1 <= diff <= 4 for diff in self._get_differences()) @cached_property def has_flushdraw(self): return any( first.suit == second.suit for first, second in self._all_combinations ) @cached_property def players(self): if not self.actions: return None player_names = [] for action in self.actions: player_name = action.name if player_name not in player_names: player_names.append(player_name) return tuple(player_names) def _get_differences(self): return ( Rank.difference(first.rank, second.rank) for first, second in self._all_combinations ) class _BaseHandHistory: def __init__(self, hand_text): self.raw = hand_text.strip() 
self.header_parsed = False self.parsed = False @classmethod def from_file(cls, filename): with io.open(filename, "rt", encoding="utf-8-sig") as f: return cls(f.read()) def __str__(self): return f"<{self.__class__.__name__}: #{self.ident}>" @property def board(self): board = [] if self.flop: board.extend(self.flop.cards) if self.turn: board.append(self.turn) if self.river: board.append(self.river) return tuple(board) if board else None
MIT License
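_parse_date above localizes a naive timestamp in the room's timezone and converts it to UTC. A minimal standalone sketch of the same pytz pattern (the date format and timezone below are invented for the example, not taken from the poker package):

from datetime import datetime
import pytz

DATE_FORMAT = "%Y/%m/%d %H:%M:%S"       # hypothetical hand-history format
room_tz = pytz.timezone("US/Eastern")   # hypothetical poker-room timezone

naive = datetime.strptime("2013/10/04 13:53:27", DATE_FORMAT)
utc_date = room_tz.localize(naive).astimezone(pytz.utc)
print(utc_date)  # 2013-10-04 17:53:27+00:00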
prajdabre/yanmtt
transformers/src/transformers/utils/versions.py
require_version_examples
python
def require_version_examples(requirement):
    hint = "Try: pip install -r examples/requirements.txt"
    return require_version(requirement, hint)
require_version wrapper which emits examples-specific hint on failure
https://github.com/prajdabre/yanmtt/blob/4d329c3bcb81ca432d5947bb4673897086ee7f32/transformers/src/transformers/utils/versions.py#L97-L100
import operator
import re
import sys
from typing import Optional

from packaging import version
import pkg_resources


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2})(.+)", requirement)
        if not match:
            raise ValueError(
                f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
            )
        pkg, op, want_ver = match[0]
        if op not in ops:
            raise ValueError(f"need one of {list(ops.keys())}, but got {op}")

    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        if not ops[op](version.parse(got_ver), version.parse(want_ver)):
            raise pkg_resources.VersionConflict(
                f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}."
            )
        return

    try:
        got_ver = pkg_resources.get_distribution(pkg).version
    except pkg_resources.DistributionNotFound:
        raise pkg_resources.DistributionNotFound(requirement, ["this application", hint])

    if want_ver is not None and not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise pkg_resources.VersionConflict(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git master"
    return require_version(requirement, hint)
MIT License
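require_version_examples above simply forwards to require_version with an examples-specific hint. A simplified standalone analogue of that version check, built on importlib.metadata and packaging instead of pkg_resources (the function name below is mine, not the library's):

from importlib.metadata import version as installed_version
from packaging.version import parse

def require_at_least(pkg, minimum, hint=""):
    got = installed_version(pkg)
    if parse(got) < parse(minimum):
        raise RuntimeError(f"{pkg}>={minimum} is required, found {pkg}=={got}. {hint}")

require_at_least("packaging", "16.0", hint="Try: pip install -U packaging")
print("packaging version check passed")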
pythonfreecourse/calendar
app/internal/import_file.py
_get_data_from_txt_file
python
def _get_data_from_txt_file(txt_file_path: str) -> List[Dict[str, Any]]:
    calendar_content: List[Dict[str, Any]] = []
    for event in _get_event_from_txt_file(txt_file_path):
        if not _is_event_text_valid(event):
            return []
        event_data = _get_event_data_from_text(event)
        if not _is_event_dates_valid(
                event_data["start_date"], event_data["end_date"]):
            return []
        _add_event_component_txt(event_data, calendar_content)
    return calendar_content
Returns a list of event data in dictionaries from a *.txt file.

Args:
    txt_file_path: The file path.

Returns:
    A list of event data in dictionaries, or an empty list
    if the data is not valid.
https://github.com/pythonfreecourse/calendar/blob/23a33703a0038d0eae8ce7299a93ad172c8f68e9/app/internal/import_file.py#L204-L227
from collections import defaultdict from datetime import datetime from pathlib import Path import re from typing import ( Any, DefaultDict, Dict, Iterator, List, Optional, Tuple, Union ) from icalendar import cal, Calendar from loguru import logger from sqlalchemy.orm.session import Session from app.config import ( EVENT_CONTENT_LIMIT, EVENT_DURATION_LIMIT, EVENT_HEADER_LIMIT, EVENT_HEADER_NOT_EMPTY, EVENT_VALID_YEARS, LOCATION_LIMIT, MAX_EVENTS_START_DATE, MAX_FILE_SIZE_MB, VALID_FILE_EXTENSION, ) from app.routers.event import create_event DATE_FORMAT = "%m-%d-%Y" DATE_FORMAT2 = "%m-%d-%Y %H:%M" DESC_EVENT = "VEVENT" EVENT_PATTERN = re.compile(r"^(\w{" + str(int(EVENT_HEADER_NOT_EMPTY)) + "," + str(EVENT_HEADER_LIMIT) + r"}),\s(\w{0," + str(EVENT_CONTENT_LIMIT) + r"}),\s(\d{2}-\d{2}-\d{4})," + r"\s(\d{2}-\d{2}-\d{4})(?:,\s([\w\s-]{0," + str(LOCATION_LIMIT) + r"}))?$") EVENT_PATTERN2 = re.compile(r"^(\w{" + str(int(EVENT_HEADER_NOT_EMPTY)) + "," + str(EVENT_HEADER_LIMIT) + r"}),\s(\w{0," + str(EVENT_CONTENT_LIMIT) + r"}),\s(\d{2}-\d{2}-\d{4}\s\d{2}:\d{2})," + r"\s(\d{2}-\d{2}-\d{4}\s\d{2}:\d{2})" + r"(?:,\s([\w\s-]{0," + str(LOCATION_LIMIT) + r"}))?$") def import_events(path: str, user_id: int, session: Session) -> bool: if _is_file_valid_to_import(path): if _is_file_extension_valid(path, ".ics"): event_data = _get_data_from_ics_file(path) else: event_data = _get_data_from_txt_file(path) if event_data and _is_file_valid_to_save_to_database(event_data): _save_events_to_database(event_data, user_id, session) return True return False def _is_file_valid_to_import(path: str) -> bool: return (_is_file_exists(path) and _is_file_extension_valid(path) and _is_file_size_valid(path)) def _is_file_exists(path: str) -> bool: return Path(path).is_file() def _is_file_extension_valid( path: str, extension: Union[str, Tuple[str, ...]] = VALID_FILE_EXTENSION, ) -> bool: return Path(path).suffix.lower() in extension def _is_file_size_valid(path: str, max_size: int = MAX_FILE_SIZE_MB) -> bool: file_size = Path(path).stat().st_size / 1048576 return file_size <= max_size def _get_data_from_ics_file(ics_file_path: str) -> List[Dict[str, Any]]: calendar_content: List[Dict[str, Any]] = [] calendar = _get_calendar_from_ics(ics_file_path) if not calendar: return [] for component in calendar.walk(): if component.name == DESC_EVENT: if not _is_valid_data_event_ics(component): return [] _add_event_component_ics(component, calendar_content) return calendar_content def _get_calendar_from_ics(ics_file_path: str) -> Optional[Calendar]: with open(ics_file_path, "r") as ics: try: return Calendar.from_ical(ics.read()) except (IndexError, ValueError) as e: logger.error(f"open_ics function failed error message: {e}") return None def _is_valid_data_event_ics(component: cal.Event) -> bool: return not (str(component.get('summary')) is None or component.get('dtstart') is None or component.get('dtend') is None or not _is_date_in_range(component.get('dtstart').dt) or not _is_date_in_range(component.get('dtend').dt) ) def _add_event_component_ics( component: cal.Event, calendar_content: List[Dict[str, Any]]) -> None: calendar_content.append({ "Head": str(component.get('summary')), "Content": str(component.get('description')), "S_Date": component.get('dtstart').dt.replace(tzinfo=None), "E_Date": component.get('dtend').dt.replace(tzinfo=None), "Location": str(component.get('location')), })
Apache License 2.0
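_get_data_from_txt_file above validates each text line against a strict regex before turning it into an event dictionary. A trimmed-down sketch of that validate-and-parse step, with simplified limits in place of the module's config constants:

import re
from datetime import datetime

# Simplified pattern: header, single-word content, start date, end date (MM-DD-YYYY).
EVENT_RE = re.compile(r"^(\w{1,50}),\s(\w{0,500}),\s(\d{2}-\d{2}-\d{4}),\s(\d{2}-\d{2}-\d{4})$")
DATE_FORMAT = "%m-%d-%Y"

line = "Meeting, Standup, 05-21-2021, 05-21-2021"
match = EVENT_RE.match(line)
if match:
    head, content, start_s, end_s = match.groups()
    start, end = (datetime.strptime(s, DATE_FORMAT) for s in (start_s, end_s))
    print({"Head": head, "Content": content, "S_Date": start, "E_Date": end})
else:
    print("invalid event line")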
doc-doc/next-qa
networks/VQAModel/HGA.py
HGA.forward
python
def forward(self, vid_feats, qas, qas_lengths):
    if self.qns_encoder.use_bert:
        cand_qas = qas.permute(1, 0, 2, 3)
    else:
        cand_qas = qas.permute(1, 0, 2)
    cand_len = qas_lengths.permute(1, 0)
    v_output, v_hidden = self.vid_encoder(vid_feats)
    v_last_hidden = torch.squeeze(v_hidden)
    out = []
    for idx, qa in enumerate(cand_qas):
        encoder_out = self.vq_encoder(v_output, v_last_hidden, qa, cand_len[idx])
        out.append(encoder_out)
    out = torch.stack(out, 0).transpose(1, 0)
    _, predict_idx = torch.max(out, 1)
    return out, predict_idx
:param vid_feats:
:param qns:
:param qns_lengths:
:param mode:
:return:
https://github.com/doc-doc/next-qa/blob/f54f850a91e64dca4452598154838924548f3b2f/networks/VQAModel/HGA.py#L53-L79
import torch import torch.nn as nn import sys sys.path.insert(0, 'networks') from q_v_transformer import CoAttention from gcn import AdjLearner, GCN from block import fusions class HGA(nn.Module): def __init__(self, vid_encoder, qns_encoder, device): super(HGA, self).__init__() self.vid_encoder = vid_encoder self.qns_encoder = qns_encoder self.device = device hidden_size = vid_encoder.dim_hidden input_dropout_p = vid_encoder.input_dropout_p self.q_input_ln = nn.LayerNorm(hidden_size, elementwise_affine=False) self.v_input_ln = nn.LayerNorm(hidden_size, elementwise_affine=False) self.co_attn = CoAttention( hidden_size, n_layers=vid_encoder.n_layers, dropout_p=input_dropout_p) self.adj_learner = AdjLearner( hidden_size, hidden_size, dropout=input_dropout_p) self.gcn = GCN( hidden_size, hidden_size, hidden_size, num_layers=2, dropout=input_dropout_p) self.gcn_atten_pool = nn.Sequential( nn.Linear(hidden_size, hidden_size // 2), nn.Tanh(), nn.Linear(hidden_size // 2, 1), nn.Softmax(dim=-1)) self.global_fusion = fusions.Block( [hidden_size, hidden_size], hidden_size, dropout_input=input_dropout_p) self.fusion = fusions.Block([hidden_size, hidden_size], 1)
MIT License
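HGA.forward above is mostly tensor bookkeeping: put candidates first, score each candidate against the video encoding, then stack the scores back into (batch, num_candidates) and take an argmax. A toy PyTorch sketch of that bookkeeping, with a plain sum standing in for the real vq_encoder:

import torch

batch, num_cands, dim = 4, 5, 16
cand_feats = torch.randn(batch, num_cands, dim).permute(1, 0, 2)  # candidates first

# Score each candidate independently, then regroup the scores per batch item.
scores = torch.stack([feat.sum(dim=-1) for feat in cand_feats], 0).transpose(1, 0)
_, predict_idx = torch.max(scores, 1)
print(scores.shape, predict_idx.shape)  # torch.Size([4, 5]) torch.Size([4])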
rcos/observatory-retired
observatory/lib/dulwich/client.py
GitClient.send_pack
python
def send_pack(self, path, determine_wants, generate_pack_contents):
    proto, unused_can_read = self._connect('receive-pack', path)
    old_refs, server_capabilities = self.read_refs(proto)
    if 'report-status' not in server_capabilities:
        self._send_capabilities.remove('report-status')
    new_refs = determine_wants(old_refs)
    if not new_refs:
        proto.write_pkt_line(None)
        return {}
    want = []
    have = [x for x in old_refs.values() if not x == ZERO_SHA]
    sent_capabilities = False
    # Send one ref-update line per changed ref; capabilities ride on the first line only.
    for refname in set(new_refs.keys() + old_refs.keys()):
        old_sha1 = old_refs.get(refname, ZERO_SHA)
        new_sha1 = new_refs.get(refname, ZERO_SHA)
        if old_sha1 != new_sha1:
            if sent_capabilities:
                proto.write_pkt_line("%s %s %s" % (old_sha1, new_sha1, refname))
            else:
                proto.write_pkt_line(
                    "%s %s %s\0%s" % (old_sha1, new_sha1, refname,
                                      ' '.join(self._send_capabilities)))
                sent_capabilities = True
        if new_sha1 not in have and new_sha1 != ZERO_SHA:
            want.append(new_sha1)
    proto.write_pkt_line(None)
    if not want:
        return new_refs
    # Stream a pack containing the objects the server is missing.
    objects = generate_pack_contents(have, want)
    entries, sha = write_pack_data(proto.write_file(), objects, len(objects))
    if 'report-status' in self._send_capabilities:
        self._parse_status_report(proto)
    # Any trailing data from the server is unexpected.
    data = proto.read()
    if data:
        raise SendPackError('Unexpected response %r' % data)
    return new_refs
Upload a pack to a remote repository.

:param path: Repository path
:param generate_pack_contents: Function that can return the shas of the
    objects to upload.

:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
    and rejects ref updates
https://github.com/rcos/observatory-retired/blob/cada27eaf96998ca1ba97a4cca30d2b5ce5021ac/observatory/lib/dulwich/client.py#L138-L187
__docformat__ = 'restructuredText' import select import socket import subprocess import urlparse from lib.dulwich.errors import ( SendPackError, UpdateRefsError, ) from lib.dulwich.protocol import ( Protocol, TCP_GIT_PORT, ZERO_SHA, extract_capabilities, ) from lib.dulwich.pack import ( write_pack_data, ) def _fileno_can_read(fileno): return len(select.select([fileno], [], [], 0)[0]) > 0 COMMON_CAPABILITIES = ["ofs-delta"] FETCH_CAPABILITIES = ["multi_ack", "side-band-64k"] + COMMON_CAPABILITIES SEND_CAPABILITIES = ['report-status'] + COMMON_CAPABILITIES class GitClient(object): def __init__(self, thin_packs=True, report_activity=None): self._report_activity = report_activity self._fetch_capabilities = list(FETCH_CAPABILITIES) self._send_capabilities = list(SEND_CAPABILITIES) if thin_packs: self._fetch_capabilities.append("thin-pack") def _connect(self, cmd, path): raise NotImplementedError() def read_refs(self, proto): server_capabilities = None refs = {} for pkt in proto.read_pkt_seq(): (sha, ref) = pkt.rstrip("\n").split(" ", 1) if server_capabilities is None: (ref, server_capabilities) = extract_capabilities(ref) refs[ref] = sha return refs, server_capabilities def _parse_status_report(self, proto): unpack = proto.read_pkt_line().strip() if unpack != 'unpack ok': st = True while st is not None: st = proto.read_pkt_line() raise SendPackError(unpack) statuses = [] errs = False ref_status = proto.read_pkt_line() while ref_status: ref_status = ref_status.strip() statuses.append(ref_status) if not ref_status.startswith('ok '): errs = True ref_status = proto.read_pkt_line() if errs: ref_status = {} ok = set() for status in statuses: if ' ' not in status: continue status, ref = status.split(' ', 1) if status == 'ng': if ' ' in ref: ref, status = ref.split(' ', 1) else: ok.add(ref) ref_status[ref] = status raise UpdateRefsError('%s failed to update' % ', '.join([ref for ref in ref_status if ref not in ok]), ref_status=ref_status)
ISC License
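send_pack above treats determine_wants and generate_pack_contents as caller-supplied callbacks. A sketch of what those callbacks look like (placeholder shas and an empty object list, purely to show the contract; this is not real dulwich usage):

ZERO_SHA = "0" * 40

def determine_wants(old_refs):
    """Return the full ref dict we want the remote to end up with."""
    new_refs = dict(old_refs)
    new_refs["refs/heads/master"] = "f" * 40  # placeholder sha of the commit to push
    return new_refs

def generate_pack_contents(have, want):
    """Return the objects (missing on the remote) to stream in the pack."""
    return []  # a real implementation walks the object store for `want` minus `have`

remote_refs = {"refs/heads/master": ZERO_SHA}
print(determine_wants(remote_refs))
print(generate_pack_contents([], ["f" * 40]))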
linkedin/naarad
src/naarad/utils.py
initialize_metric
python
def initialize_metric(section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label,
                      ts_start, ts_end, rule_strings, important_sub_metrics, anomaly_detection_metrics,
                      other_options):
    metric = None
    metric_type = section.split('-')[0]
    if metric_type in metric_classes:
        if 'SAR' in metric_type:
            metric = metric_classes['SAR'](section, infile_list, hostname, aggr_metrics, output_directory,
                                           resource_path, label, ts_start, ts_end, rule_strings,
                                           important_sub_metrics, anomaly_detection_metrics, **other_options)
        else:
            metric = metric_classes[metric_type](section, infile_list, hostname, aggr_metrics, output_directory,
                                                 resource_path, label, ts_start, ts_end, rule_strings,
                                                 important_sub_metrics, anomaly_detection_metrics, **other_options)
    else:
        metric = Metric(section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label,
                        ts_start, ts_end, rule_strings, important_sub_metrics, anomaly_detection_metrics,
                        **other_options)
    return metric
Initialize appropriate metric based on type of metric.

:param: section: config section name or auto discovered metric type
:param: infile_list: list of input log files for the metric
:param: hostname: hostname associated with the logs origin
:param: output_directory: report location
:param: resource_path: resource path for report
:param: label: label for config section or auto discovered metric type
:param: ts_start: start time for analysis
:param: ts_end: end time for analysis
:param: rule_strings: list of slas
:param: important_sub_metrics: list of important sub metrics
:param: anomaly_detection_metrics: list of metrics to use for anomaly detection.
:param: other_options: kwargs
:return: metric object
https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/utils.py#L934-L964
import argparse import calendar import ConfigParser import datetime import imp import logging import numpy import os import pytz from pytz import timezone import re import sys import time import urllib from naarad.naarad_imports import metric_classes, aggregate_metric_classes from naarad.sla import SLA from naarad.metrics.sar_metric import SARMetric from naarad.metrics.metric import Metric from naarad.graphing.plot_data import PlotData from naarad.run_steps.local_cmd import Local_Cmd import naarad.naarad_constants as CONSTANTS logger = logging.getLogger('naarad.utils') def import_modules(module_dict, is_class_type=True): return_dict = {} for module_name, module_string in module_dict.items(): try: if is_class_type: file_name, class_name = module_string.rsplit('.', 1) mod = __import__(file_name, fromlist=[class_name]) return_dict[module_name] = getattr(mod, class_name) else: return_dict[module_name] = __import__(module_string, fromlist=[module_string]) except ImportError: pass return return_dict def parse_user_defined_metric_classes(config_obj, metric_classes): user_defined_metric_list = config_obj.get('GLOBAL', 'user_defined_metrics').split() for udm_string in user_defined_metric_list: try: metric_name, metric_class_name, metric_file = udm_string.split(':') except ValueError: logger.error('Bad user defined metric specified') continue module_name = os.path.splitext(os.path.basename(metric_file))[0] try: new_module = imp.load_source(module_name, metric_file) new_class = getattr(new_module, metric_class_name) if metric_name in metric_classes.keys(): logger.warn('Overriding pre-defined metric class definition for ', metric_name) metric_classes[metric_name] = new_class except ImportError: logger.error('Something wrong with importing a user defined metric class. Skipping metric: ', metric_name) continue def is_valid_url(url): regex = re.compile(r'^(?:http|ftp)s?://' r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' r'localhost|' r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' r'(?::\d+)?' r'(?:/?|[/?]\S+)$', re.IGNORECASE) if regex.match(url): logger.info("URL given as config") return True else: return False def download_file(url): try: (local_file, headers) = urllib.urlretrieve(url) except: sys.exit("ERROR: Problem downloading config file. Please check the URL (" + url + "). 
Exiting...") return local_file def sanitize_string_section_name(string): string = string.replace('/', '_') string = string.replace('%', '_') return string def is_valid_metric_name(metric_name): reg = re.compile('^[a-zA-Z0-9\.\-\_]+$') if reg.match(metric_name) and not metric_name.startswith('.'): return True else: return False def get_run_time_period(run_steps): init_ts_start = get_standardized_timestamp('now', None) ts_start = init_ts_start ts_end = '0' for run_step in run_steps: if run_step.ts_start and run_step.ts_end: if run_step.ts_start < ts_start: ts_start = run_step.ts_start if run_step.ts_end > ts_end: ts_end = run_step.ts_end if ts_end == '0': ts_end = None if ts_start == init_ts_start: ts_start = None logger.info('get_run_time_period range returned ' + str(ts_start) + ' to ' + str(ts_end)) return ts_start, ts_end def get_rule_strings(config_obj, section): rule_strings = {} kwargs = dict(config_obj.items(section)) for key in kwargs.keys(): if key.endswith('.sla'): rule_strings[key.replace('.sla', '')] = kwargs[key] del kwargs[key] return rule_strings, kwargs def extract_diff_sla_from_config_file(obj, options_file): rule_strings = {} config_obj = ConfigParser.ConfigParser() config_obj.optionxform = str config_obj.read(options_file) for section in config_obj.sections(): rule_strings, kwargs = get_rule_strings(config_obj, section) for (key, val) in rule_strings.iteritems(): set_sla(obj, section, key, val) def parse_basic_metric_options(config_obj, section): infile = {} aggr_hosts = None aggr_metrics = None ts_start = None ts_end = None precision = None hostname = "localhost" rule_strings = {} important_sub_metrics = None anomaly_detection_metrics = None try: if config_obj.has_option(section, 'important_sub_metrics'): important_sub_metrics = config_obj.get(section, 'important_sub_metrics').split() config_obj.remove_option(section, 'important_sub_metrics') if config_obj.has_option(section, 'hostname'): hostname = config_obj.get(section, 'hostname') config_obj.remove_option(section, 'hostname') if config_obj.has_option(section, 'infile'): infile = config_obj.get(section, 'infile').split() config_obj.remove_option(section, 'infile') label = sanitize_string_section_name(section) if config_obj.has_option(section, 'ts_start'): ts_start = get_standardized_timestamp(config_obj.get(section, 'ts_start'), None) config_obj.remove_option(section, 'ts_start') if config_obj.has_option(section, 'ts_end'): ts_end = get_standardized_timestamp(config_obj.get(section, 'ts_end'), None) config_obj.remove_option(section, 'ts_end') if config_obj.has_option(section, 'precision'): precision = config_obj.get(section, 'precision') config_obj.remove_option(section, 'precision') if config_obj.has_option(section, 'aggr_hosts'): aggr_hosts = config_obj.get(section, 'aggr_hosts') config_obj.remove_option(section, 'aggr_hosts') if config_obj.has_option(section, 'aggr_metrics'): aggr_metrics = config_obj.get(section, 'aggr_metrics') config_obj.remove_option(section, 'aggr_metrics') if config_obj.has_option(section, 'anomaly_detection_metrics'): anomaly_detection_metrics = config_obj.get(section, 'anomaly_detection_metrics').split() config_obj.remove_option(section, 'anomaly_detection_metrics') rule_strings, other_options = get_rule_strings(config_obj, section) except ConfigParser.NoOptionError: logger.exception("Exiting.... 
some mandatory options are missing from the config file in section: " + section) sys.exit() return (hostname, infile, aggr_hosts, aggr_metrics, label, ts_start, ts_end, precision, aggr_metrics, other_options, rule_strings, important_sub_metrics, anomaly_detection_metrics) def parse_metric_section(config_obj, section, metric_classes, metrics, aggregate_metric_classes, outdir_default, resource_path): (hostname, infile, aggr_hosts, aggr_metrics, label, ts_start, ts_end, precision, aggr_metrics, other_options, rule_strings, important_sub_metrics, anomaly_detection_metrics) = parse_basic_metric_options(config_obj, section) metric_type = section.split('-')[0] if metric_type in aggregate_metric_classes: new_metric = initialize_aggregate_metric(section, aggr_hosts, aggr_metrics, metrics, outdir_default, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics, anomaly_detection_metrics, other_options) else: new_metric = initialize_metric(section, infile, hostname, aggr_metrics, outdir_default, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics, anomaly_detection_metrics, other_options) if config_obj.has_option(section, 'ignore') and config_obj.getint(section, 'ignore') == 1: new_metric.ignore = True if config_obj.has_option(section, 'calc_metrics'): new_metric.calc_metrics = config_obj.get(section, 'calc_metrics') new_metric.precision = precision return new_metric def parse_global_section(config_obj, section): ts_start = None ts_end = None if config_obj.has_option(section, 'ts_start'): ts_start = get_standardized_timestamp(config_obj.get(section, 'ts_start'), None) config_obj.remove_option(section, 'ts_start') if config_obj.has_option(section, 'ts_end'): ts_end = get_standardized_timestamp(config_obj.get(section, 'ts_end'), None) config_obj.remove_option(section, 'ts_end') return ts_start, ts_end def parse_run_step_section(config_obj, section): kill_after_seconds = None try: run_cmd = config_obj.get(section, 'run_cmd') run_rank = int(config_obj.get(section, 'run_rank')) except ConfigParser.NoOptionError: logger.exception("Exiting.... some mandatory options are missing from the config file in section: " + section) sys.exit() except ValueError: logger.error("Bad run_rank %s specified in section %s, should be integer. 
Exiting.", config_obj.get(section, 'run_rank'), section) sys.exit() if config_obj.has_option(section, 'run_type'): run_type = config_obj.get(section, 'run_type') else: run_type = CONSTANTS.RUN_TYPE_WORKLOAD if config_obj.has_option(section, 'run_order'): run_order = config_obj.get(section, 'run_order') else: run_order = CONSTANTS.PRE_ANALYSIS_RUN if config_obj.has_option(section, 'call_type'): call_type = config_obj.get(section, 'call_type') else: call_type = 'local' if config_obj.has_option(section, 'kill_after_seconds'): try: kill_after_seconds = int(config_obj.get(section, 'kill_after_seconds')) except ValueError: logger.error("Bad kill_after_seconds %s specified in section %s, should be integer.", config_obj.get(section, 'kill_after_seconds'), section) if call_type == 'local': run_step_obj = Local_Cmd(run_type, run_cmd, call_type, run_order, run_rank, kill_after_seconds=kill_after_seconds) else: logger.error('Unsupported RUN_STEP supplied, call_type should be local') run_step_obj = None return run_step_obj def parse_graph_section(config_obj, section, outdir_default, indir_default): graph_timezone = None graphing_library = CONSTANTS.DEFAULT_GRAPHING_LIBRARY crossplots = [] if config_obj.has_option(section, 'graphing_library'): graphing_library = config_obj.get(section, 'graphing_library') if config_obj.has_option(section, 'graphs'): graphs_string = config_obj.get(section, 'graphs') crossplots = graphs_string.split() if config_obj.has_option(section, 'outdir'): outdir_default = config_obj.get(section, 'outdir') if config_obj.has_option(section, 'output_dir'): outdir_default = config_obj.get(section, 'output_dir') if config_obj.has_option(section, 'input_dir'): indir_default = config_obj.get(section, 'input_dir') if config_obj.has_option(section, 'graph_timezone'): graph_timezone = config_obj.get(section, 'graph_timezone') if graph_timezone not in ("UTC", "PST", "PDT"): logger.warn('Unsupported timezone ' + graph_timezone + ' specified in option graph_timezone. 
Will use UTC instead') graph_timezone = "UTC" return graphing_library, crossplots, outdir_default, indir_default, graph_timezone def parse_report_section(config_obj, section): report_kwargs = {} if config_obj.has_option(section, 'stylesheet_includes'): report_kwargs['stylesheet_includes'] = config_obj.get(section, 'stylesheet_includes') if config_obj.has_option(section, 'javascript_includes'): report_kwargs['javascript_includes'] = config_obj.get(section, 'javascript_includes') if config_obj.has_option(section, 'header_template'): report_kwargs['header_template'] = config_obj.get(section, 'header_template') if config_obj.has_option(section, 'footer_template'): report_kwargs['footer_template'] = config_obj.get(section, 'footer_template') if config_obj.has_option(section, 'summary_content_template'): report_kwargs['summary_content_template'] = config_obj.get(section, 'summary_content_template') if config_obj.has_option(section, 'summary_page_template'): report_kwargs['summary_page_template'] = config_obj.get(section, 'summary_page_template') if config_obj.has_option(section, 'metric_page_template'): report_kwargs['metric_page_template'] = config_obj.get(section, 'metric_page_template') if config_obj.has_option(section, 'client_charting_template'): report_kwargs['client_charting_template'] = config_obj.get(section, 'client_charting_template') if config_obj.has_option(section, 'diff_client_charting_template'): report_kwargs['diff_client_charting_template'] = config_obj.get(section, 'diff_client_charting_template') if config_obj.has_option(section, 'diff_page_template'): report_kwargs['diff_page_template'] = config_obj.get(section, 'diff_page_template') return report_kwargs def reconcile_timezones(begin_ts, ts_timezone, graph_timezone): if not graph_timezone: return begin_ts if graph_timezone != ts_timezone: utc = pytz.utc pst = timezone('US/Pacific') if graph_timezone == "UTC": try: dt = pst.localize(datetime.datetime.strptime(begin_ts, "%Y-%m-%d %H:%M:%S")) except ValueError: dt = pst.localize(datetime.datetime.strptime(begin_ts, "%Y-%m-%d %H:%M:%S.%f")) begin_ts = dt.astimezone(utc).strftime("%Y-%m-%d %H:%M:%S.%f") else: try: dt = utc.localize(datetime.datetime.strptime(begin_ts, "%Y-%m-%d %H:%M:%S")) except ValueError: dt = utc.localize(datetime.datetime.strptime(begin_ts, "%Y-%m-%d %H:%M:%S.%f")) begin_ts = dt.astimezone(pst).strftime("%Y-%m-%d %H:%M:%S.%f") return begin_ts def convert_to_unixts(ts_string): try: dt_obj = datetime.datetime.strptime(ts_string, "%Y-%m-%d %H:%M:%S.%f") except ValueError: dt_obj = datetime.datetime.strptime(ts_string, "%Y-%m-%d %H:%M:%S") return float(calendar.timegm(dt_obj.utctimetuple()) * 1000.0 + dt_obj.microsecond / 1000.0) def is_number(string): try: float(string) return True except ValueError: return False def get_all_sar_objects(metrics, indir, hostname, output_directory, label, ts_start, ts_end, options): metrics = [] sar_types = ('device', 'cpuusage', 'cpuhz', 'memory', 'memutil', 'paging') for sar_metric_type in sar_types: infile = os.path.join(indir, 'sar.' 
+ sar_metric_type + '.out') if os.path.exists(infile): obj_type = 'SAR-' + sar_metric_type metric = SARMetric(obj_type, infile, hostname, output_directory, label, ts_start, ts_end, options) metrics.append(metric) return metrics def sanitize_string(string): string = string.replace('/', '-per-') if string.startswith('%'): string = string.replace('%', 'percent-') else: string = string.replace('.%', '.percent-') string = string.replace('%', '-percent-') return string def get_default_csv(output_directory, val): val = sanitize_string(val) return os.path.join(output_directory, val + '.csv') def convert_to_24hr_format(ts): words = ts.split() if len(words) == 1: return ts if words[1] == 'PM': tmp = words[0].split(':') if tmp[0] != '12': hour = int(tmp[0]) + 12 tmp[0] = str(hour) ts = ":".join(tmp) elif words[1] == 'AM': tmp = words[0].split(':') if tmp[0] == '12': tmp[0] = '00' ts = ":".join(tmp) return ts def get_merged_csvname(output_directory, vals): return os.path.join(output_directory, '-'.join(vals) + '.csv') def get_merged_charttitle(vals): return " vs ".join(vals) def get_merged_plot_link_name(vals): return '-'.join(vals) def get_merged_png_name(vals): return '-'.join(vals) def generate_html_report(output_directory, html_string): htmlfilename = os.path.join(output_directory, 'Report.html') with open(htmlfilename, 'w') as htmlf: header = '<html><head><title>naarad analysis report</title>' dygraphs_include = '''<script type='text/javascript' src='http://dygraphs.com/dygraph-combined.js'></script> ''' sorttable_include = '<script type="text/javascript" src="http://www.kryogenix.org/code/browser/sorttable/sorttable.js"></script>' body = '</head><body>' footer = '</body></html>' htmlf.write(header) htmlf.write(sorttable_include) htmlf.write(dygraphs_include) htmlf.write(body) htmlf.write(html_string) htmlf.write(footer) def tscsv_nway_file_merge(outfile, filelist, filler): logger.info('called nway merge with %s', filelist) with open(outfile, 'w') as outf: filehandlers = [None] * len(filelist) currlines = [None] * len(filelist) for i in range(len(filelist)): try: filehandlers[i] = open(filelist[i], 'r') except IOError: logger.error('Cannot open: ' + filelist[i]) return currlines[i] = filehandlers[i].readline().strip() while True: future_time = str(datetime.datetime.utcnow() + datetime.timedelta(days=365)) min_ts = future_time for i in range(len(currlines)): if currlines[i] == "": continue ts = currlines[i].split(',')[0] if ts < min_ts: min_ts = ts if min_ts == future_time: break outwords = [] outwords.append(min_ts) for i in range(len(currlines)): if currlines[i] == "": outwords.append(filler) else: ts = currlines[i].split(',')[0] val = currlines[i].split(',')[1] if ts == min_ts: outwords.append(val) currlines[i] = filehandlers[i].readline().strip() else: outwords.append(filler) outf.write(','.join(outwords) + '\n') def nway_plotting(crossplots, metrics, output_directory, resource_path, graphing_library): listlen = len(crossplots) if listlen == 0: return '' i = 0 correlated_plots = [] while i < listlen: plot = crossplots[i] vals = plot.split(',') i += 1 if 'all' not in vals: plot_data = [] for val in vals: csv_file = get_default_csv(output_directory, val) plot_data.append(PlotData(input_csv=csv_file, csv_column=1, series_name=sanitize_string(val), y_label=sanitize_string(val), precision=None, graph_height=500, graph_width=1200, graph_type='line')) png_name = get_merged_plot_link_name(vals) graphed, div_file = Metric.graphing_modules[graphing_library].graph_data(plot_data, output_directory, 
resource_path, png_name) if graphed: correlated_plots.append(div_file) else: vals.remove('all') for metric in metrics: for csv in metric.csv_files: csv_filename = csv.split('/')[-1] metric_name = '.'.join(csv_filename.split('.')[0:-1]) if metric_name in vals: continue new_val = [] new_val.extend(vals) new_val.append(metric_name) new_val_str = ','.join(new_val) crossplots.append(new_val_str) listlen += 1 return correlated_plots def normalize_float_for_display(data_val): try: data_val = float(data_val) except ValueError: return data_val if data_val > 1: return '%.2f' % round(data_val, 2) else: return '%s' % float('%.2g' % data_val) def calculate_stats(data_list, stats_to_calculate=['mean', 'std'], percentiles_to_calculate=[]): stats_to_numpy_method_map = { 'mean': numpy.mean, 'avg': numpy.mean, 'std': numpy.std, 'standard_deviation': numpy.std, 'median': numpy.median, 'min': numpy.amin, 'max': numpy.amax } calculated_stats = {} calculated_percentiles = {} if len(data_list) == 0: return calculated_stats, calculated_percentiles for stat in stats_to_calculate: if stat in stats_to_numpy_method_map.keys(): calculated_stats[stat] = stats_to_numpy_method_map[stat](data_list) else: logger.error("Unsupported stat : " + str(stat)) for percentile in percentiles_to_calculate: if isinstance(percentile, float) or isinstance(percentile, int): calculated_percentiles[percentile] = numpy.percentile(data_list, percentile) else: logger.error("Unsupported percentile requested (should be int or float): " + str(percentile)) return calculated_stats, calculated_percentiles def is_valid_file(filename): if os.path.exists(filename): if not os.path.getsize(filename): logger.warning('%s : file is empty.', filename) return False else: logger.warning('%s : file does not exist.', filename) return False return True def detect_timestamp_format(timestamp): time_formats = { 'epoch': re.compile(r'^[0-9]{10}$'), 'epoch_ms': re.compile(r'^[0-9]{13}$'), 'epoch_fraction': re.compile(r'^[0-9]{10}\.[0-9]{3,9}$'), '%Y-%m-%d %H:%M:%S': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'), '%Y-%m-%dT%H:%M:%S': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'), '%Y-%m-%d_%H:%M:%S': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]_[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'), '%Y-%m-%d %H:%M:%S.%f': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'), '%Y-%m-%dT%H:%M:%S.%f': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'), '%Y-%m-%d_%H:%M:%S.%f': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]_[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'), '%Y%m%d %H:%M:%S': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'), '%Y%m%dT%H:%M:%S': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'), '%Y%m%d_%H:%M:%S': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9]_[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'), '%Y%m%d %H:%M:%S.%f': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'), '%Y%m%dT%H:%M:%S.%f': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'), '%Y%m%d_%H:%M:%S.%f': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9]_[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'), '%H:%M:%S': re.compile(r'^[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'), '%H:%M:%S.%f': re.compile(r'^[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'), '%Y-%m-%dT%H:%M:%S.%f%z': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+[+-][0-9]{4}$') } for time_format 
in time_formats: if re.match(time_formats[time_format], timestamp): return time_format return 'unknown' def get_standardized_timestamp(timestamp, ts_format): if not timestamp: return None if timestamp == 'now': timestamp = str(datetime.datetime.now()) if not ts_format: ts_format = detect_timestamp_format(timestamp) try: if ts_format == 'unknown': logger.error('Unable to determine timestamp format for : %s', timestamp) return -1 elif ts_format == 'epoch': ts = int(timestamp) * 1000 elif ts_format == 'epoch_ms': ts = timestamp elif ts_format == 'epoch_fraction': ts = int(timestamp[:10]) * 1000 + int(timestamp[11:]) elif ts_format in ('%H:%M:%S', '%H:%M:%S.%f'): date_today = str(datetime.date.today()) dt_obj = datetime.datetime.strptime(date_today + ' ' + timestamp, '%Y-%m-%d ' + ts_format) ts = calendar.timegm(dt_obj.utctimetuple()) * 1000 + dt_obj.microsecond / 1000 else: dt_obj = datetime.datetime.strptime(timestamp, ts_format) ts = calendar.timegm(dt_obj.utctimetuple()) * 1000 + dt_obj.microsecond / 1000 except ValueError: return -1 return str(ts) def set_sla(obj, metric, sub_metric, rules): if not hasattr(obj, 'sla_map'): return False rules_list = rules.split() for rule in rules_list: if '<' in rule: stat, threshold = rule.split('<') sla = SLA(metric, sub_metric, stat, threshold, 'lt') elif '>' in rule: stat, threshold = rule.split('>') sla = SLA(metric, sub_metric, stat, threshold, 'gt') else: if hasattr(obj, 'logger'): obj.logger.error('Unsupported SLA type defined : ' + rule) sla = None obj.sla_map[metric][sub_metric][stat] = sla if hasattr(obj, 'sla_list'): obj.sla_list.append(sla) return True def check_slas(metric): if not hasattr(metric, 'sla_map'): return for metric_label in metric.sla_map.keys(): for sub_metric in metric.sla_map[metric_label].keys(): for stat_name in metric.sla_map[metric_label][sub_metric].keys(): sla = metric.sla_map[metric_label][sub_metric][stat_name] if stat_name[0] == 'p' and hasattr(metric, 'calculated_percentiles'): if sub_metric in metric.calculated_percentiles.keys(): percentile_num = int(stat_name[1:]) if isinstance(percentile_num, float) or isinstance(percentile_num, int): if percentile_num in metric.calculated_percentiles[sub_metric].keys(): if not sla.check_sla_passed(metric.calculated_percentiles[sub_metric][percentile_num]): logger.info("Failed SLA for " + sub_metric) metric.status = CONSTANTS.SLA_FAILED if sub_metric in metric.calculated_stats.keys() and hasattr(metric, 'calculated_stats'): if stat_name in metric.calculated_stats[sub_metric].keys(): if not sla.check_sla_passed(metric.calculated_stats[sub_metric][stat_name]): logger.info("Failed SLA for " + sub_metric) metric.status = CONSTANTS.SLA_FAILED if len(metric.sla_map.keys()) > 0 and hasattr(metric, 'get_sla_csv'): sla_csv_file = metric.get_sla_csv() with open(sla_csv_file, 'w') as FH: for metric_label in metric.sla_map.keys(): for sub_metric in metric.sla_map[metric_label].keys(): for stat, sla in metric.sla_map[metric_label][sub_metric].items(): FH.write('%s\n' % (sla.get_csv_repr())) def parse_and_plot_single_metrics(metric, graph_timezone, outdir_default, indir_default, graphing_library, skip_plots): metric.graph_timezone = graph_timezone if metric.outdir is None: metric.outdir = os.path.normpath(outdir_default) updated_infile_list = [] for infile in metric.infile_list: if not infile.startswith('http://') and not infile.startswith('https://'): updated_infile_list.append(os.path.join(indir_default, infile)) else: updated_infile_list.append(infile) metric.infile_list = 
updated_infile_list if not metric.ignore: if metric.collect(): if metric.parse(): metric.calc() metric.calculate_stats() check_slas(metric) metric.detect_anomaly() if not skip_plots: metric.graph(graphing_library) else: logger.error('Parsing failed for metric: ' + metric.label) else: logger.error('Fetch/Collect failed for metric: ' + metric.label) def init_logging(logger, log_file, log_level): with open(log_file, 'w'): pass numeric_level = getattr(logging, log_level.upper(), None) if log_level else logging.INFO if not isinstance(numeric_level, int): raise ValueError('Invalid log level: %s' % log_level) logger.setLevel(logging.DEBUG) fh = logging.FileHandler(log_file) fh.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(numeric_level) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) ch.setFormatter(formatter) logger.addHandler(fh) logger.addHandler(ch) return CONSTANTS.OK def get_argument_parser(): arg_parser = argparse.ArgumentParser() arg_parser.add_argument('-c', '--config', help="file with specifications for each metric and graphs") arg_parser.add_argument('--start', help="Start time in the format of HH:MM:SS or YYYY-mm-dd_HH:MM:SS") arg_parser.add_argument('--end', help="End time in the format of HH:MM:SS or YYYY-mm-dd_HH:MM:SS") arg_parser.add_argument('-i', '--input_dir', help="input directory used to construct full path name of the metric infile") arg_parser.add_argument('-o', '--output_dir', help="output directory where the plots and Report.html will be generated") arg_parser.add_argument('-V', '--variables', action="append", help="User defined variables (in form key=value) for substitution in the config file. " "Config should have the variable names in format %%(key)s") arg_parser.add_argument('-s', '--show_config', help="Print config associated with the provided template name", action="store_true") arg_parser.add_argument('-l', '--log', help="log level") arg_parser.add_argument('-d', '--diff', nargs=2, help="Specify the location of two naarad reports to diff separated by a space. Can be local or http(s) " "locations. The first report is used as a baseline.", metavar=("report-1", "report-2")) arg_parser.add_argument('-n', '--no_plots', help="Don't generate plot images. Useful when you only want SLA calculations. 
Note that on-demand charts can " "still be generated through client-charting.", action="store_true") arg_parser.add_argument('-e', '--exit_code', help="optional argument to enable exit_code for naarad", action="store_true") return arg_parser def get_variables(args): variables_dict = {} if args.variables: for var in args.variables: words = var.split('=') variables_dict[words[0]] = words[1] return variables_dict def validate_arguments(args): if args.diff: if not args.output_dir: logger.error('No Output location specified') print_usage() sys.exit(0) elif not args.output_dir: print_usage() sys.exit(0) def print_usage(): print ("Usage: " "\n To generate a diff report : naarad -d report1 report2 -o <output_location> -c <optional: config-file> -e <optional: turn on exit code>" "\n To generate an analysis report : naarad -i <input_location> -o <output_location> -c <optional: config_file> -e <optional: turn on exit code> " "-n <optional: disable plotting of images>") def discover_by_name(input_directory, output_directory): metric_list = [] log_files = os.listdir(input_directory) for log_file in log_files: if log_file in CONSTANTS.SUPPORTED_FILENAME_MAPPING.keys(): metric_list.append(initialize_metric(CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file], [log_file], None, [], output_directory, CONSTANTS.RESOURCE_PATH, CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file], None, None, {}, None, None, {})) else: logger.warning('Unable to determine metric type for file: %s', log_file) return metric_list
Apache License 2.0
openstack/networking-bagpipe
networking_bagpipe/agent/bagpipe_bgp_agent.py
HTTPClientBase.__init__
python
def __init__(self, host="127.0.0.1", port=8082, client_name="HTTP client base"): self.host = host self.port = port self.client_name = client_name
Create a new HTTP client :param host: HTTP server IP address :param port: HTTP server port
https://github.com/openstack/networking-bagpipe/blob/8a8279a61fbbf6ae70c8e7b25c89a6a6d71e898d/networking_bagpipe/agent/bagpipe_bgp_agent.py#L88-L97
import socket import httplib2 import json from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_concurrency import lockutils from oslo_serialization import jsonutils from oslo_service import loopingcall from networking_bagpipe._i18n import _ from networking_bagpipe.bagpipe_bgp import constants as bbgp_const from neutron.conf.agent import common as config from neutron_lib import constants as n_const from neutron_lib import exceptions as n_exc LOG = logging.getLogger(__name__) bagpipe_bgp_opts = [ cfg.IntOpt('ping_interval', default=10, help=_("The number of seconds the bagpipe-bgp client will " "wait between polling for restart detection.")), cfg.PortOpt('bagpipe_bgp_port', default=8082, help=_("bagpipe-bgp REST service IP port.")), ] internal_opts = [ cfg.HostAddressOpt('bagpipe_bgp_ip', default='127.0.0.1', help=_("bagpipe-bgp REST service IP address.")), ] cfg.CONF.register_opts(bagpipe_bgp_opts, "BAGPIPE") cfg.CONF.register_opts(internal_opts, "BAGPIPE") config.register_agent_state_opts_helper(cfg.CONF) VPN_TYPES = [bbgp_const.EVPN, bbgp_const.IPVPN] class BaGPipeBGPException(n_exc.NeutronException): message = "An exception occurred when calling bagpipe-bgp \ REST service: %(reason)s" class SetJSONEncoder(jsonutils.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) return json.JSONEncoder.default(self, obj) class HTTPClientBase(object):
Apache License 2.0
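A minimal usage sketch for the HTTPClientBase entry above: the constructor only stores the host, port, and client name, so the attributes are shown being consumed by a hypothetical subclass (PingClient and its base_url method are invented for illustration; they are not part of networking-bagpipe).

class HTTPClientBase(object):
    # Restated from the function field above so the sketch runs standalone.
    def __init__(self, host="127.0.0.1", port=8082, client_name="HTTP client base"):
        self.host = host
        self.port = port
        self.client_name = client_name

class PingClient(HTTPClientBase):
    # Hypothetical subclass: builds the REST base URL from the stored attributes.
    def base_url(self):
        return "http://%s:%s/" % (self.host, self.port)

client = PingClient(port=8082, client_name="bagpipe-bgp REST client")
print(client.base_url())  # http://127.0.0.1:8082/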
khammernik/sigmanet
reconstruction/common/mytorch/models/datalayer.py
DataGDLayer.__init__
python
def __init__(self, lambda_init, learnable=True): super(DataGDLayer, self).__init__() self.lambda_init = lambda_init self.data_weight = torch.nn.Parameter(torch.Tensor(1)) self.data_weight.data = torch.tensor( lambda_init, dtype=self.data_weight.dtype, ) self.data_weight.requires_grad = learnable
Args: lambda_init (float): Init value of data term weight lambda.
https://github.com/khammernik/sigmanet/blob/6eb8dbd1ee350bb9baee60eb254080f7d660bbc5/reconstruction/common/mytorch/models/datalayer.py#L36-L48
import torch import torch.nn as nn import common.mytorch as mytorch from common.mytorch.fft import fft2, ifft2 from common.mytorch.mri import ( adjointSoftSenseOpNoShift, forwardSoftSenseOpNoShift, ) class DataIDLayer(nn.Module): def __init__(self, *args, **kwargs): super(DataIDLayer, self).__init__() def forward(self, x, *args, **kwargs): return x def __repr__(self): return f'DataIDLayer()' class DataGDLayer(nn.Module):
MIT License
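A short sketch, assuming PyTorch is installed, of what the DataGDLayer constructor above produces: lambda_init becomes the initial value of a data_weight parameter whose gradient tracking follows the learnable flag (the 0.5 value is arbitrary).

import torch
import torch.nn as nn

class DataGDLayer(nn.Module):
    # Constructor restated from the entry above so the sketch is self-contained.
    def __init__(self, lambda_init, learnable=True):
        super(DataGDLayer, self).__init__()
        self.lambda_init = lambda_init
        self.data_weight = torch.nn.Parameter(torch.Tensor(1))
        self.data_weight.data = torch.tensor(lambda_init, dtype=self.data_weight.dtype)
        self.data_weight.requires_grad = learnable

layer = DataGDLayer(lambda_init=0.5, learnable=True)
print(float(layer.data_weight))         # 0.5
print(layer.data_weight.requires_grad)  # True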
uzumaxy/pyvalid
pyvalid/validators/__iterable.py
IterableValidator.empty_checker
python
def empty_checker(cls, val, empty_allowed): if not empty_allowed: return len(val) != 0 else: warnings.warn("Iterable is empty, but does not impact the execution.") return True
Checks if the iterable is empty or not. Args: val (collections.abc.Iterable): Iterable whose contents need to be validated. empty_allowed (bool): If this flag is set to ``False``, this method raises an exception and terminates the execution if the iterable is empty. If set to ``True``, it issues a warning and continues with the execution. Returns (bool): True: If the iterable is not empty. False: If the iterable is empty.
https://github.com/uzumaxy/pyvalid/blob/38f1d19b612fc67a2877d6b2e637c971e2ceea77/pyvalid/validators/__iterable.py#L51-L73
import warnings try: from collections.abc import Iterable except ImportError: from collections import Iterable from pyvalid import accepts from pyvalid.validators import AbstractValidator class IterableValidator(AbstractValidator): @classmethod def iterable_type_checker(cls, val, iterable_type): return type(val) == iterable_type @classmethod
BSD 3-Clause New or Revised License
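A self-contained sketch of the emptiness check above, with the pyvalid class machinery stripped away; only the length test and the warning path are reproduced.

import warnings

def empty_checker(val, empty_allowed):
    # Mirrors IterableValidator.empty_checker: when empty iterables are not
    # allowed, validation passes only for non-empty input; otherwise a warning
    # is issued and the check always passes.
    if not empty_allowed:
        return len(val) != 0
    warnings.warn("Iterable is empty, but does not impact the execution.")
    return True

print(empty_checker([1, 2, 3], empty_allowed=False))  # True
print(empty_checker([], empty_allowed=False))         # False
print(empty_checker([], empty_allowed=True))          # True (warning emitted)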
kentaroy47/datasetculling
lib/datasets/ds_utils.py
validate_boxes
python
def validate_boxes(boxes, width=0, height=0): x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] assert (x1 >= 0).all() assert (y1 >= 0).all() assert (x2 >= x1).all() assert (y2 >= y1).all() assert (x2 < width).all() assert (y2 < height).all()
Check that a set of boxes are valid.
https://github.com/kentaroy47/datasetculling/blob/d4597ac2117537e6513a70a45c275325c4f0517b/lib/datasets/ds_utils.py#L31-L42
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np def unique_boxes(boxes, scale=1.0): v = np.array([1, 1e3, 1e6, 1e9]) hashes = np.round(boxes * scale).dot(v) _, index = np.unique(hashes, return_index=True) return np.sort(index) def xywh_to_xyxy(boxes): return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1)) def xyxy_to_xywh(boxes): return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))
MIT License
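A worked example for the validate_boxes entry above, using a small numpy array of [x1, y1, x2, y2] rows that satisfies every assertion for a 640x480 image (the box values are invented).

import numpy as np

def validate_boxes(boxes, width=0, height=0):
    # Same checks as the entry above: non-negative corners, x2/y2 not smaller
    # than x1/y1, and every box strictly inside the image bounds.
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    assert (x1 >= 0).all() and (y1 >= 0).all()
    assert (x2 >= x1).all() and (y2 >= y1).all()
    assert (x2 < width).all() and (y2 < height).all()

boxes = np.array([[10, 20, 110, 220],
                  [0, 0, 50, 60]])
validate_boxes(boxes, width=640, height=480)  # passes silently
print("all boxes valid")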
holoclean/holoclean
dataset/dataset.py
Dataset.get_cell_id
python
def get_cell_id(self, tuple_id, attr_name): vid = tuple_id*self.attr_count + self.attr_to_idx[attr_name] return vid
get_cell_id returns the cell ID: a unique ID for every cell, computed as _tid_ * (# of attributes) + attr_idx
https://github.com/holoclean/holoclean/blob/d4f5929a8e4d92d4f41eb058c04c96cdcb0af767/dataset/dataset.py#L190-L197
from enum import Enum import logging import os import time import pandas as pd from .dbengine import DBengine from .table import Table, Source from utils import dictify_df, NULL_REPR class AuxTables(Enum): c_cells = 1 dk_cells = 2 cell_domain = 3 pos_values = 4 cell_distr = 5 inf_values_idx = 6 inf_values_dom = 7 class CellStatus(Enum): NOT_SET = 0 WEAK_LABEL = 1 SINGLE_VALUE = 2 class Dataset: def __init__(self, name, env): self.id = name self.raw_data = None self.repaired_data = None self.constraints = None self.aux_table = {} for tab in AuxTables: self.aux_table[tab] = None self.engine = DBengine( env['db_user'], env['db_pwd'], env['db_name'], env['db_host'], pool_size=env['threads'], timeout=env['timeout'] ) self.attr_to_idx = {} self.attr_count = 0 self.stats_ready = False self.total_tuples = 0 self.single_attr_stats = {} self.pair_attr_stats = {} def load_data(self, name, fpath, na_values=None, entity_col=None, src_col=None): tic = time.clock() try: exclude_attr_cols = ['_tid_'] if src_col is not None: exclude_attr_cols.append(src_col) self.raw_data = Table(name, Source.FILE, na_values=na_values, exclude_attr_cols=exclude_attr_cols, fpath=fpath) df = self.raw_data.df if entity_col is None: df.insert(0, '_tid_', range(0,len(df))) else: df.rename({entity_col: '_tid_'}, axis='columns', inplace=True) df.fillna(NULL_REPR, inplace=True) logging.info("Loaded %d rows with %d cells", self.raw_data.df.shape[0], self.raw_data.df.shape[0] * self.raw_data.df.shape[1]) self.raw_data.store_to_db(self.engine.engine) status = 'DONE Loading {fname}'.format(fname=os.path.basename(fpath)) for attr in self.raw_data.get_attributes(): self.raw_data.create_db_index(self.engine,[attr]) self.attr_to_idx = {attr: idx for idx, attr in enumerate(self.raw_data.get_attributes())} self.attr_count = len(self.attr_to_idx) except Exception: logging.error('loading data for table %s', name) raise toc = time.clock() load_time = toc - tic return status, load_time def set_constraints(self, constraints): self.constraints = constraints def generate_aux_table(self, aux_table, df, store=False, index_attrs=False): try: self.aux_table[aux_table] = Table(aux_table.name, Source.DF, df=df) if store: self.aux_table[aux_table].store_to_db(self.engine.engine) if index_attrs: self.aux_table[aux_table].create_df_index(index_attrs) if store and index_attrs: self.aux_table[aux_table].create_db_index(self.engine, index_attrs) except Exception: logging.error('generating aux_table %s', aux_table.name) raise def generate_aux_table_sql(self, aux_table, query, index_attrs=False): try: self.aux_table[aux_table] = Table(aux_table.name, Source.SQL, table_query=query, db_engine=self.engine) if index_attrs: self.aux_table[aux_table].create_df_index(index_attrs) self.aux_table[aux_table].create_db_index(self.engine, index_attrs) except Exception: logging.error('generating aux_table %s', aux_table.name) raise def get_raw_data(self): if self.raw_data is None: raise Exception('ERROR No dataset loaded') return self.raw_data.df def get_attributes(self): if self.raw_data is None: raise Exception('ERROR No dataset loaded') return self.raw_data.get_attributes()
Apache License 2.0
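A worked arithmetic example for the cell-ID formula above (vid = _tid_ * attr_count + attr_idx), using a made-up three-attribute schema.

# Hypothetical schema: attribute name -> column index, as in attr_to_idx.
attr_to_idx = {'city': 0, 'state': 1, 'zip': 2}
attr_count = len(attr_to_idx)

def get_cell_id(tuple_id, attr_name):
    # Row index times the number of attributes, plus the attribute's index.
    return tuple_id * attr_count + attr_to_idx[attr_name]

print(get_cell_id(0, 'zip'))     # 0 * 3 + 2 = 2
print(get_cell_id(10, 'state'))  # 10 * 3 + 1 = 31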
myriadrf/pylms7002soapy
pyLMS7002Soapy/LMS7002_mSPI.py
LMS7002_mSPI.P1
python
def P1(self): return self._readReg('P1', 'P1<7:0>')
Get the value of P1<7:0>
https://github.com/myriadrf/pylms7002soapy/blob/4f828eb9282c302dc6b187d91df5e77c8a6f2d61/pyLMS7002Soapy/LMS7002_mSPI.py#L240-L244
from pyLMS7002Soapy.LMS7002_base import LMS7002_base import time class LMS7002_mSPI(LMS7002_base): __slots__ = [] def __init__(self, chip): self.chip = chip self.channel = None self.prefix = "mSPI_" @staticmethod def getOpCode(opCode): if opCode == "SFR": return 0x7E elif opCode == "IRAM_READ": return 0x78 elif opCode == "RESET_PC": return 0x70 elif opCode == "RUN_INSTR": return 0x74 else: raise ValueError("Unknown MCU opcode :" + str(opCode)) @staticmethod def _readHex(hexFileName, isString=False): if not isString: inFile = open(hexFileName, 'r') else: inFile = hexFileName.split('\n') ret = [0] * 16384 maxAddr = 0 for line in inFile: line = line.strip() if line == '': continue if line[0] != ':': raise ValueError("Line does not start with :. Is this an Intel hex file?") lineData = [] for i in range(1, len(line), 2): lineData.append(int("0x" + line[i:i + 2], 16)) nBytes = lineData[0] offset = (lineData[1] << 8) + lineData[2] data = lineData[4:4 + nBytes] ckSum = 0 for i in range(0, len(lineData) - 1): ckSum += lineData[i] ckSum = ~ckSum + 1 ckSum = ckSum % 256 if ckSum != lineData[len(lineData) - 1]: raise ValueError("Checksum error in line : " + line) for i in range(0, len(data)): if offset + i > maxAddr: maxAddr = offset + i ret[offset + i] = data[i] if not isString: inFile.close() if maxAddr < 8192: ret = ret[:8192] return ret def loadHex(self, hexFileName, mode='SRAM', isString=False): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True mcuProgram = self._readHex(hexFileName, isString) self.chip._MCUProgram(mcuProgram, mode) self.chip.SPIImmediate = immMode def reset(self): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True self.MODE = 'RESET' self.DEBUG = 0 self.EXT_INT = 0 self.RXD = 0 self.P0 = 0 self.chip.SPIImmediate = immMode def resetPC(self): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True ret = self._command([self.getOpCode("RESET_PC")], 1) self.chip.SPIImmediate = immMode return ret def runInstr(self): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True data = self._command([self.getOpCode("RUN_INSTR"), 0, 0], 3) self.chip.SPIImmediate = immMode return data[1] * 256 + data[2] def call(self, data): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True self.P0 = 0 if data != 0: self.SPISW_CTRL = 1 else: self.SPISW_CTRL = 0 self.P0 = data self.chip.SPIImmediate = immMode def waitForMCU(self, timeout=1): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True t0 = time.time() while time.time() - t0 < timeout: val = self.P1 if val != 0xFF: break if time.time() - t0 > timeout: raise ValueError("Timeout expired in waitForMCU") self.chip.SPIImmediate = immMode return val def startDebugMode(self): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True self.DEBUG = 1 self.chip.SPIImmediate = immMode def exitDebugMode(self): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True self.DEBUG = 0 self.chip.SPIImmediate = immMode def _waitUntilWritten(self, timeout=1): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True t0 = time.time() while (self.WRITE_REQ == 1) and (time.time() - t0 < timeout): pass self.chip.SPIImmediate = immMode if time.time() - t0 > timeout: raise ValueError("Timeout expired in waitUntilWritten") def _readOneByte(self, timeout=1): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True t0 = time.time() while (self.READ_REQ == 0) and (time.time() - t0 < timeout): pass data = self.DFM self.chip.SPIImmediate = immMode if time.time() - t0 > timeout: raise ValueError("Timeout 
expired in readOneByte") return data def _command(self, writeData, bytesToReceive): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True for data in writeData: self.DTM = data self._waitUntilWritten() recData = [] for i in range(0, bytesToReceive): recData.append(self._readOneByte()) self.chip.SPIImmediate = immMode return recData def _wait(self, n): immMode = self.chip.SPIImmediate self.chip.SPIImmediate = True for i in range(0, n // 64): pass self.chip.SPIImmediate = immMode def changeMCUFrequency(self, value): self._command([self.getOpCode("SFR"), 0x8E, value], 3) def readIRAM(self): data = [0] * 256 opCode = self.getOpCode("IRAM_READ") for i in range(0, 256): res = self._command([opCode, i, 0], 3) data[i] = res[2] self._wait(64) return data @property def P0(self): return self._readReg('P0', 'P0<7:0>') @P0.setter def P0(self, value): if not (0 <= value <= 1023): raise ValueError("Value must be [0..255]") self._writeReg('P0', 'P0<7:0>', value) @property
Apache License 2.0
obi-wan3/ob13-cogs
github/github.py
GitHub._set_rename
python
async def _set_rename(self, ctx: commands.Context, user: discord.Member, old_name: str, new_name: str): async with self.config.member(user).feeds() as feeds: if new_name in feeds: return await ctx.send("The new name is already being used!") if old_name not in feeds: return await ctx.send(NOT_FOUND) feeds[new_name] = feeds.pop(old_name) return await ctx.send("Feed successfully renamed.")
Rename a user's GitHub RSS feed.
https://github.com/obi-wan3/ob13-cogs/blob/716527f8581e0345802ea2626d43324f87edf941/github/github.py#L378-L390
import re import typing import aiohttp import html2text import feedparser from urllib.parse import urlparse from datetime import datetime, timezone import discord from discord.ext import tasks from .converters import ExplicitNone from redbot.core import commands, Config from redbot.core.utils import AsyncIter from redbot.core.utils.chat_formatting import escape, pagify COLOR = 0x7289da TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" TOKEN_REGEX: re.Pattern = re.compile(r"token=(.*)") COMMIT_REGEX: re.Pattern = re.compile(r"https://github\.com/.*?/.*?/commit/(.*?)") USER_REPO_BRANCH_REGEX: re.Pattern = re.compile(r"/(.*?)/(.*?)/?(commits)?/(.*?(?=\.atom))?") LONG_COMMIT_REGEX: re.Pattern = re.compile(r"^<pre.*?>|</pre>$|(?<=\n)\n+") LONG_RELEASE_REGEX: re.Pattern = re.compile(r"^<pre.*?>|</pre>$|(?<=\n)\n+") NO_ROLE = "You do not have the required role!" NOT_FOUND = "I could not find that feed." class GitHub(commands.Cog): def __init__(self, bot): self.bot = bot self.config = Config.get_conf(self, 14000605, force_registration=True) default_global = { "interval": 3 } default_guild = { "channel": None, "role": None, "limit": 5, "color": None, "notify": True, "timestamp": True, "short": True } default_member = { "feeds": {} } self.config.register_global(**default_global) self.config.register_guild(**default_guild) self.config.register_member(**default_member) self._github_rss.start() def cog_unload(self): self._github_rss.cancel() async def initialize(self): global_conf = await self.config.all() if global_conf["interval"] != 3: self._github_rss.change_interval(minutes=global_conf["interval"]) if global_conf.get("migrated"): return for guild_id, guild_data in (await self.config.all_guilds()).items(): if guild_data.get("migrated") or not guild_data.get("feeds"): continue for member_id, member_data in guild_data["feeds"].items(): async with self.config.member_from_ids(guild_id=guild_id, member_id=int(member_id)).feeds() as member_feeds: for feed_name, feed_data in member_data.items(): user, repo, branch, token = await self._parse_url(feed_data["url"]) member_feeds[feed_name] = { "user": user, "repo": repo, "branch": branch, "token": token, "channel": feed_data.get("channel", None), "time": feed_data["time"] } async with self.config.guild_from_id(guild_id).all() as guild_config: guild_config["migrated"] = True async with self.config.all() as global_config: global_config["migrated"] = True @staticmethod def _escape(text: str): return escape(text, formatting=True) @staticmethod async def _repo_url(**user_and_repo): return f"https://github.com/{user_and_repo['user']}/{user_and_repo['repo']}/" @staticmethod async def _invalid_url(ctx: commands.Context): return f"Invalid GitHub URL. Try doing `{ctx.clean_prefix}github whatlinks` to see the accepted formats." 
@staticmethod async def _url_from_config(feed_config: dict): final_url = f"https://github.com/{feed_config['user']}/{feed_config['repo']}" if feed_config['branch']: token = f"?token={feed_config['token']}" if feed_config["token"] else "" if feed_config['branch'] == "releases": return final_url + f"/{feed_config['branch']}.atom{token}" return final_url + f"/commits/{feed_config['branch']}.atom{token}" else: return final_url + f"/commits.atom" @staticmethod async def _fetch(url: str, valid_statuses: list): async with aiohttp.ClientSession() as session: async with session.get(url) as resp: html = await resp.read() if resp.status not in valid_statuses: return False return feedparser.parse(html) @staticmethod async def new_entries(entries, last_time): entries_new = [] for e in entries: e_time = datetime.strptime(e.updated, TIME_FORMAT).replace(tzinfo=timezone.utc).timestamp() if e_time > last_time: entries_new.insert(0, e) else: break return entries_new, datetime.now(tz=timezone.utc) @staticmethod async def _parse_url(url: str): if url[0] == "<" and url[-1] == ">": url = url[1:-1] parsed_url = urlparse(url) if not (user_repo_branch := USER_REPO_BRANCH_REGEX.search(parsed_url.path if (parsed_url.path.endswith("/") or ".atom" in parsed_url.path) else parsed_url.path+"/")): return None, None, None, None user, repo, branch, token = user_repo_branch.group(1), user_repo_branch.group(2), user_repo_branch.group(4), TOKEN_REGEX.fullmatch(parsed_url.query).group(1) if parsed_url.query else None if branch == "commits": branch = None if ( parsed_url.scheme != "https" or parsed_url.netloc != "github.com" or not user or not repo or (token and not branch) or (not user_repo_branch.group(3) and branch and branch != "releases") ): return None, None, None, None return user, repo, branch, token async def _parse_url_input(self, url: str, branch: str): user, repo, parsed_branch, token = await self._parse_url(url) if not any([user, repo, parsed_branch, token]): return None return {"user": user, "repo": repo, "branch": parsed_branch if token else branch, "token": token} async def _get_feed_channel(self, bot: discord.Member, guild_channel: int, feed_channel): channel = None if feed_channel: channel = self.bot.get_channel(feed_channel) elif guild_channel: channel = self.bot.get_channel(guild_channel) if not(channel and channel.permissions_for(bot).send_messages and channel.permissions_for(bot).embed_links): channel = None return channel async def _commit_embeds(self, entries: list, feed_link: str, color: int, timestamp: bool, short: bool): if not entries: return None user, repo, branch, __ = await self._parse_url(feed_link+".atom") if branch == "releases": embed = discord.Embed( title=f"[{user}/{repo}] New release published: {entries[0].title}", color=color if color is not None else COLOR, url=entries[0].link ) if not short: embed.description = html2text.html2text(entries[0].content[0].value) else: num = min(len(entries), 10) desc = "" for e in entries[:num]: if short: desc += f"[`{COMMIT_REGEX.fullmatch(e.link).group(1)[:7]}`]({e.link}) {self._escape(e.title)} – {self._escape(e.author)}\n" else: desc += f"[`{COMMIT_REGEX.fullmatch(e.link).group(1)[:7]}`]({e.link}) – {self._escape(e.author)}\n{LONG_COMMIT_REGEX.sub('', e.content[0].value)}\n\n" embed = discord.Embed( title=f"[{repo}:{branch}] {num} new commit{'s' if num > 1 else ''}", color=color if color is not None else COLOR, description=desc, url=feed_link if num > 1 else entries[0].link ) if timestamp: embed.timestamp = datetime.strptime(entries[0].updated, 
TIME_FORMAT).replace(tzinfo=timezone.utc) embed.set_author( name=entries[0].author, url=f"https://github.com/{entries[0].author}", icon_url=entries[0].media_thumbnail[0]["url"] ) return embed @commands.is_owner() @commands.command(name="ghinterval", hidden=True) async def _interval(self, ctx: commands.Context, interval_in_minutes: int): await self.config.interval.set(interval_in_minutes) self._github_rss.change_interval(minutes=interval_in_minutes) return await ctx.send(f"I will now check for commit updates every {interval_in_minutes} minutes (change takes effect next loop).") @commands.guild_only() @commands.bot_has_permissions(embed_links=True) @commands.admin_or_permissions(administrator=True) @commands.group(name="githubset", aliases=["ghset"]) async def _github_set(self, ctx: commands.Context): @_github_set.command(name="short") async def _set_short(self, ctx: commands.Context, short: bool): await self.config.guild(ctx.guild).short.set(short) return await ctx.send(f"The GitHub RSS feed message content length has been set to `{'short' if short else 'full'}`.") @_github_set.command(name="color") async def _set_color(self, ctx: commands.Context, hex_color: typing.Union[discord.Color, ExplicitNone]): await self.config.guild(ctx.guild).color.set(hex_color.value if hex_color is not None else None) return await ctx.send(f"The GitHub RSS feed embed color has been set to {hex_color if hex_color else 'the default'}.") @_github_set.command(name="notify") async def _set_notify(self, ctx: commands.Context, true_or_false: bool): await self.config.guild(ctx.guild).notify.set(true_or_false) return await ctx.send(f"Repo addition/removal notifications will {'now' if true_or_false else 'no longer'} be sent.") @_github_set.command(name="channel") async def _set_channel(self, ctx: commands.Context, channel: discord.TextChannel): perms = channel.permissions_for(ctx.guild.me) if not (perms.send_messages and perms.embed_links): return await ctx.send(f"I do not have the necessary permissions (send messages & embed links) in {channel.mention}!") await self.config.guild(ctx.guild).channel.set(channel.id) return await ctx.send(f"The GitHub RSS feed channel has been set to {channel.mention}.") @_github_set.command(name="role") async def _set_role(self, ctx: commands.Context, role: discord.Role = None): if not role: await self.config.guild(ctx.guild).role.set(None) return await ctx.send(f"The GitHub RSS feed role requirement has been removed.") else: await self.config.guild(ctx.guild).role.set(role.id) return await ctx.send(f"The GitHub RSS feed role has been set to {role.mention}.") @_github_set.command(name="limit") async def _set_limit(self, ctx: commands.Context, num: int = 5): if num < 1: return await ctx.send("Please enter a positive integer!") await self.config.guild(ctx.guild).limit.set(num) return await ctx.send(f"The GitHub RSS feed limit per user has been set to {num}.") @_github_set.command(name="timestamp") async def _set_timestamp(self, ctx: commands.Context, true_or_false: bool): await self.config.guild(ctx.guild).timestamp.set(true_or_false) return await ctx.send(f"GitHub feed embeds will {'now' if true_or_false else 'no longer'} have timestamps.") @_github_set.command(name="force") async def _force(self, ctx: commands.Context, user: discord.Member, name: str): async with self.config.member(user).feeds() as feeds: if not (feed_config := feeds.get(name)): return await ctx.send(NOT_FOUND) url = await self._url_from_config(feed_config) if not (parsed := await self._fetch(url, [200])): return await 
ctx.send(await self._invalid_url(ctx)) if feed_config["channel"]: channel = ctx.guild.get_channel(feed_config["channel"]) else: channel = ctx.guild.get_channel(await self.config.guild(ctx.guild).channel()) guild_config = await self.config.guild(ctx.guild).all() if channel and channel.permissions_for(ctx.guild.me).embed_links: return await channel.send(embed=await self._commit_embeds( entries=[parsed.entries[0]], feed_link=parsed.feed.link, color=guild_config["color"], timestamp=guild_config["timestamp"], short=guild_config["short"] )) else: return await ctx.send("Either the set channel has been removed or I do not have permissions to send embeds in the channel.") @_github_set.command(name="forceall") async def _force_all(self, ctx: commands.context): async with ctx.typing(): await self._github_rss.coro(self, guild_to_check=ctx.guild.id) return await ctx.tick() @_github_set.command(name="rename")
MIT License
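The rename above reduces to a guarded key move inside the per-member feeds mapping; a minimal sketch with a plain dict standing in for the Red-DiscordBot Config storage (the feed names and contents are invented).

feeds = {"my-repo": {"user": "obi-wan3", "repo": "ob13-cogs", "branch": None}}

def rename_feed(feeds, old_name, new_name):
    # Same guards as _set_rename: reject name clashes and unknown feeds,
    # otherwise move the configuration under the new key.
    if new_name in feeds:
        return "The new name is already being used!"
    if old_name not in feeds:
        return "I could not find that feed."
    feeds[new_name] = feeds.pop(old_name)
    return "Feed successfully renamed."

print(rename_feed(feeds, "my-repo", "cogs-feed"))  # Feed successfully renamed.
print(sorted(feeds))                               # ['cogs-feed']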
seenaburns/tungsten
tungsten/core.py
Pod.format
python
def format(self): formats = {} for subpod in self.root.findall('subpod'): for elem in list(subpod): if elem.tag == 'state': continue content = elem.text if elem.tag == 'img': content = {'url': elem.get('src'), 'alt': elem.get('alt'), 'title': elem.get('title'), 'width': int(elem.get('width', 0)), 'height': int(elem.get('height', 0))} if elem.tag not in formats: formats[elem.tag] = [content] else: formats[elem.tag].append(content) return formats
Dictionary of available formats, each mapping to a list of the corresponding values. Example: pod.format['plaintext'] will return a list of the plaintext content of every subpod in the pod.
https://github.com/seenaburns/tungsten/blob/9e865c77a11c512464f226a6b025bc43b798a8be/tungsten/core.py#L108-L144
import requests from xml.etree.ElementTree import fromstring, ElementTree class Tungsten(object): def __init__(self, appid): self.appid = appid def query(self, input = '', params = {}): payload = {'input': input, 'appid': self.appid} for key, value in params.items(): if isinstance(value, (list, tuple)): payload[key] = ','.join(value) else: payload[key] = value try: r = requests.get("http://api.wolframalpha.com/v2/query", params=payload) if r.status_code != 200: raise Exception('Invalid response status code: %s' % (r.status_code)) if r.encoding != 'utf-8': raise Exception('Invalid encoding: %s' % (r.encoding)) except Exception, e: return Result(error = e) return Result(xml = r.text) class Result(object): def __init__(self, xml = '', error = None): self.xml_tree = None if xml: self.xml_tree = ElementTree(fromstring(xml.encode('utf-8'))) self.error_msg = error @property def success(self): if not self.error_msg: return self.xml_tree.getroot().get('success') == 'true' else: return False @property def error(self): if self.error_msg: return self.error_msg error = self.xml_tree.find('error') if error is not None: return error.find('msg').text return None @property def pods(self): if not self.xml_tree: return [] return [Pod(elem) for elem in self.xml_tree.findall('pod')] class Pod(object): def __init__(self, pod_root): self.root = pod_root self.xml_tree = ElementTree(pod_root) @property def title(self): return self.root.get('title') @property def id(self): return self.root.get('id') @property def scanner(self): return self.root.get('scanner') @property
BSD 3-Clause New or Revised License
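A self-contained sketch of the subpod traversal performed by Pod.format above, run against a tiny hand-written XML fragment rather than a live Wolfram|Alpha response (the element values are invented).

from xml.etree.ElementTree import fromstring

xml = """<pod title="Result">
  <subpod title="">
    <plaintext>42</plaintext>
    <img src="http://example.invalid/result.gif" alt="42" title="" width="30" height="20"/>
  </subpod>
</pod>"""

formats = {}
for subpod in fromstring(xml).findall('subpod'):
    for elem in list(subpod):
        if elem.tag == 'state':
            continue
        content = elem.text
        if elem.tag == 'img':
            # Image content is collected as a dict of its attributes.
            content = {'url': elem.get('src'), 'alt': elem.get('alt'),
                       'title': elem.get('title'),
                       'width': int(elem.get('width', 0)),
                       'height': int(elem.get('height', 0))}
        formats.setdefault(elem.tag, []).append(content)

print(formats['plaintext'])        # ['42']
print(formats['img'][0]['width'])  # 30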
lobocv/crashreporter
crashreporter/crashreporter.py
CrashReporter.start_watcher
python
def start_watcher(self): if self._watcher and self._watcher.is_alive: self._watcher_running = True else: self.logger.info('CrashReporter: Starting watcher.') self._watcher = Thread(target=self._watcher_thread, name='offline_reporter') self._watcher.setDaemon(True) self._watcher_running = True self._watcher.start()
Start the watcher that periodically checks for offline reports and attempts to upload them.
https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L155-L166
__author__ = 'calvin' import ConfigParser import datetime import glob import json import logging import os import re import shutil import smtplib import sys import time from email import encoders from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from threading import Thread import jinja2 from api import upload_report, upload_many_reports, HQ_DEFAULT_TIMEOUT, SMTP_DEFAULT_TIMEOUT from process import CrashReportingProcess from tools import analyze_traceback, repr as safe_repr class CrashReporter(object): _report_name = "crash_report_%d" html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html') active = False application_name = None application_version = None user_identifier = None offline_report_limit = 10 recursion_depth_limit = 10 send_at_most = 3 max_string_length = 1000 obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')") def __init__(self, report_dir=None, config='', logger=None, activate=True, watcher=True, check_interval=5*60): self.logger = logger if logger else logging.getLogger('CrashReporter') self.report_dir = report_dir self.check_interval = check_interval self.watcher_enabled = watcher self._watcher = None self._watcher_running = False self.etype = None self.evalue = None self.tb = None self._recursion_error = False self.analyzed_traceback = None self.payload = None self._excepthook = None self.inspection_level = 1 self._smtp = None self._hq = None if os.path.isfile(config): self.load_configuration(config) if activate: self.enable() def setup_smtp(self, host, port, user, passwd, recipients, **kwargs): self._smtp = kwargs self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients}) try: self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT)) except Exception as e: logging.error(e) self._smtp['timeout'] = None self._smtp['from'] = kwargs.get('from', user) def setup_hq(self, server, **kwargs): self._hq = kwargs try: self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT)) except Exception as e: logging.error(e) self._hq['timeout'] = None self._hq.update({'server': server}) def enable(self): if not CrashReporter.active: CrashReporter.active = True self._excepthook = sys.excepthook sys.excepthook = self.exception_handler self.logger.info('CrashReporter: Enabled') if self.report_dir: if os.path.exists(self.report_dir): if self.get_offline_reports(): self.submit_offline_reports() remaining_reports = len(self.get_offline_reports()) if remaining_reports and self.watcher_enabled: self.start_watcher() else: os.makedirs(self.report_dir) def disable(self): if CrashReporter.active: CrashReporter.active = False sys.excepthook = self._excepthook self.stop_watcher() self.logger.info('CrashReporter: Disabled')
MIT License
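start_watcher above follows a common pattern: reuse an already-running thread, otherwise spawn a named daemon thread. A stripped-down sketch of just the spawning side, with a trivial polling loop standing in for _watcher_thread.

import threading
import time

def watcher_loop(stop_event, interval=0.1):
    # Stand-in for CrashReporter._watcher_thread: poll until told to stop.
    while not stop_event.is_set():
        time.sleep(interval)

stop_event = threading.Event()
watcher = threading.Thread(target=watcher_loop, args=(stop_event,),
                           name='offline_reporter')
watcher.daemon = True      # equivalent to setDaemon(True) in the entry above
watcher.start()
print(watcher.is_alive())  # True
stop_event.set()
watcher.join(timeout=1)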
forseti-security/forseti-security
google/cloud/forseti/common/gcp_api/_base_repository.py
GCPRepository._request_supports_pagination
python
def _request_supports_pagination(self, verb): return getattr(self._component, verb + '_next', None)
Determines if the API action supports pagination. Args: verb (str): Request verb (ex. insert, update, delete). Returns: bool: True when API supports pagination, False otherwise.
https://github.com/forseti-security/forseti-security/blob/de5d0f4d047c293a2a72545a76c3783980865551/google/cloud/forseti/common/gcp_api/_base_repository.py#L429-L438
from builtins import str from builtins import object import json import logging import os import threading from urllib.parse import urljoin from future import standard_library import google_auth_httplib2 import pkg_resources import uritemplate from googleapiclient import discovery from ratelimiter import RateLimiter from retrying import retry import google.auth from google.auth.credentials import with_scopes_if_required from google.cloud.forseti.common.gcp_api import _supported_apis from google.cloud.forseti.common.gcp_api import errors as api_errors from google.cloud.forseti.common.util import http_helpers from google.cloud.forseti.common.util import logger from google.cloud.forseti.common.util import replay from google.cloud.forseti.common.util import retryable_exceptions import google.oauth2.credentials standard_library.install_aliases() CLOUD_SCOPES = frozenset(['https://www.googleapis.com/auth/cloud-platform']) LOCAL_THREAD = threading.local() LOGGER = logger.get_logger(__name__) NUM_HTTP_RETRIES = 5 SUPPORT_DISCOVERY_CACHE = ( pkg_resources.get_distribution( 'google-api-python-client').version >= '1.4.2') REQUEST_RECORDER = dict() REQUEST_REPLAYER = dict() DISCOVERY_DOCS_BASE_DIR = os.path.join(os.path.abspath( os.path.dirname(__file__)), 'discovery_documents') @retry(retry_on_exception=retryable_exceptions.is_retryable_exception, wait_exponential_multiplier=1000, wait_exponential_max=10000, stop_max_attempt_number=5) def _create_service_api(credentials, service_name, version, is_private_api, developer_key=None, cache_discovery=False, cache=None, use_versioned_discovery_doc=False): try: if LOGGER.getEffectiveLevel() > logging.DEBUG: logging.getLogger(discovery.__name__).setLevel(logging.WARNING) except Exception as e: LOGGER.debug('Logging cannot be set: %s', e) if is_private_api: if use_versioned_discovery_doc: service_json = '{}_{}.json'.format(service_name, version) service_path = os.path.join(DISCOVERY_DOCS_BASE_DIR, service_json) return _build_service_from_document( credentials, service_path) discovery_kwargs = { 'serviceName': service_name, 'version': version, 'developerKey': developer_key, 'credentials': credentials} if SUPPORT_DISCOVERY_CACHE: discovery_kwargs['cache_discovery'] = cache_discovery discovery_kwargs['cache'] = cache return discovery.build(**discovery_kwargs) def _build_service_from_document(credentials, document_path): with open(document_path, 'r') as f: discovery_data = json.load(f) return discovery.build_from_document( service=discovery_data, credentials=credentials ) class BaseRepositoryClient(object): def __init__(self, api_name, versions=None, credentials=None, quota_max_calls=None, quota_period=None, use_rate_limiter=False, read_only=False, use_versioned_discovery_doc=False, cache_discovery=False, cache=None, **kwargs): self._use_cached_http = False if not credentials: self._use_cached_http = True credentials, _ = google.auth.default() self._credentials = with_scopes_if_required(credentials, list(CLOUD_SCOPES)) self._repository_lock = threading.RLock() if use_rate_limiter: self._rate_limiter = RateLimiter(max_calls=quota_max_calls, period=quota_period) else: self._rate_limiter = None self._read_only = read_only self.name = api_name supported_api = _supported_apis.SUPPORTED_APIS.get(api_name) if not supported_api: LOGGER.warning('API "%s" is not formally supported in Forseti, ' 'proceed at your own risk.', api_name) if not versions and supported_api: versions = [supported_api.get('default_version')] self.versions = versions if supported_api: for version in 
versions: if version not in supported_api.get('supported_versions', []): LOGGER.warning('API "%s" version %s is not formally ' 'supported in Forseti, proceed at your ' 'own risk.', api_name, version) self.is_private_api = None if supported_api: self.is_private_api = ( _supported_apis.SUPPORTED_APIS.get(api_name) .get('is_private_api')) self.gcp_services = {} for version in versions: self.gcp_services[version] = _create_service_api( self._credentials, self.name, version, self.is_private_api, kwargs.get('developer_key'), cache_discovery, cache, use_versioned_discovery_doc) def __repr__(self): return 'API: name=%s, versions=%s' % (self.name, self.versions) def _init_repository(self, repository_class, version=None): if not version: version = ( _supported_apis.SUPPORTED_APIS.get(self.name, {}) .get('default_version')) if not version or version not in self.gcp_services: version = sorted(self.gcp_services.keys())[0] with self._repository_lock: return repository_class(gcp_service=self.gcp_services[version], credentials=self._credentials, rate_limiter=self._rate_limiter, use_cached_http=self._use_cached_http, read_only=self._read_only) class GCPRepository(object): def __init__(self, gcp_service, credentials, component, num_retries=NUM_HTTP_RETRIES, key_field='project', entity_field=None, list_key_field=None, get_key_field=None, max_results_field='maxResults', search_query_field='query', resource_path_template=None, rate_limiter=None, use_cached_http=True, read_only=False): self.gcp_service = gcp_service self.read_only = read_only self._credentials = credentials components = component.split('.') self._component = getattr( self.gcp_service, components.pop(0))() for nested_component in components: self._component = getattr( self._component, nested_component)() self._entity_field = entity_field self._num_retries = num_retries if list_key_field: self._list_key_field = list_key_field else: self._list_key_field = key_field if get_key_field: self._get_key_field = get_key_field else: self._get_key_field = key_field self._max_results_field = max_results_field self._search_query_field = search_query_field self._resource_path_template = resource_path_template self._rate_limiter = rate_limiter self._use_cached_http = use_cached_http self._local = LOCAL_THREAD @property def http(self): if self._use_cached_http and hasattr(self._local, 'http'): return self._local.http authorized_http = google_auth_httplib2.AuthorizedHttp( self._credentials, http=http_helpers.build_http()) if self._use_cached_http: self._local.http = authorized_http return authorized_http def _build_request(self, verb, verb_arguments): method = getattr(self._component, verb) method_args = {str(k): v for k, v in verb_arguments.items()} return method(**method_args) def _build_next_request(self, verb, prior_request, prior_response): method = getattr(self._component, verb + '_next') return method(prior_request, prior_response) def _build_resource_link(self, **kwargs): expanded_url = uritemplate.expand(self._resource_path_template, kwargs) return urljoin(self.gcp_service._baseUrl, expanded_url)
Apache License 2.0
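The pagination check above is just a getattr probe for a '<verb>_next' method on the discovery component; a toy component (FakeComponent is invented, no Google API is called) makes the behaviour visible.

class FakeComponent(object):
    # Mimics a googleapiclient resource: 'list' is pageable, 'get' is not.
    def list(self, **kwargs):
        pass

    def list_next(self, prior_request, prior_response):
        pass

    def get(self, **kwargs):
        pass

def request_supports_pagination(component, verb):
    # Same probe as the entry above: truthy only when '<verb>_next' exists.
    return getattr(component, verb + '_next', None)

component = FakeComponent()
print(bool(request_supports_pagination(component, 'list')))  # True
print(bool(request_supports_pagination(component, 'get')))   # False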
python-distro/distro
distro.py
LinuxDistribution.distro_release_attr
python
def distro_release_attr(self, attribute: str) -> str: return self._distro_release_info.get(attribute, "")
Return a single named information item from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_attr`.
https://github.com/python-distro/distro/blob/28f624c116024c21e0d6aef9a8c7985b8ea80e6a/distro.py#L998-L1005
import argparse import json import logging import os import re import shlex import subprocess import sys import warnings from typing import ( Any, Callable, Dict, Iterable, Optional, Sequence, TextIO, Tuple, Type, ) try: from typing import TypedDict except ImportError: TypedDict = dict __version__ = "1.6.0" class VersionDict(TypedDict): major: str minor: str build_number: str class InfoDict(TypedDict): id: str version: str version_parts: VersionDict like: str codename: str _UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") _UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib") _OS_RELEASE_BASENAME = "os-release" NORMALIZED_OS_ID = { "ol": "oracle", } NORMALIZED_LSB_ID = { "enterpriseenterpriseas": "oracle", "enterpriseenterpriseserver": "oracle", "redhatenterpriseworkstation": "rhel", "redhatenterpriseserver": "rhel", "redhatenterprisecomputenode": "rhel", } NORMALIZED_DISTRO_ID = { "redhat": "rhel", } _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)" ) _DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") _DISTRO_RELEASE_IGNORE_BASENAMES = ( "debian_version", "lsb-release", "oem-release", _OS_RELEASE_BASENAME, "system-release", "plesk-release", "iredmail-release", ) def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]: warnings.warn( "distro.linux_distribution() is deprecated. It should only be used as a " "compatibility shim with Python's platform.linux_distribution(). Please use " "distro.id(), distro.version() and distro.name() instead.", DeprecationWarning, stacklevel=2, ) return _distro.linux_distribution(full_distribution_name) def id() -> str: return _distro.id() def name(pretty: bool = False) -> str: return _distro.name(pretty) def version(pretty: bool = False, best: bool = False) -> str: return _distro.version(pretty, best) def version_parts(best: bool = False) -> Tuple[str, str, str]: return _distro.version_parts(best) def major_version(best: bool = False) -> str: return _distro.major_version(best) def minor_version(best: bool = False) -> str: return _distro.minor_version(best) def build_number(best: bool = False) -> str: return _distro.build_number(best) def like() -> str: return _distro.like() def codename() -> str: return _distro.codename() def info(pretty: bool = False, best: bool = False) -> InfoDict: return _distro.info(pretty, best) def os_release_info() -> Dict[str, str]: return _distro.os_release_info() def lsb_release_info() -> Dict[str, str]: return _distro.lsb_release_info() def distro_release_info() -> Dict[str, str]: return _distro.distro_release_info() def uname_info() -> Dict[str, str]: return _distro.uname_info() def os_release_attr(attribute: str) -> str: return _distro.os_release_attr(attribute) def lsb_release_attr(attribute: str) -> str: return _distro.lsb_release_attr(attribute) def distro_release_attr(attribute: str) -> str: return _distro.distro_release_attr(attribute) def uname_attr(attribute: str) -> str: return _distro.uname_attr(attribute) try: from functools import cached_property except ImportError: class cached_property: def __init__(self, f: Callable[[Any], Any]) -> None: self._fname = f.__name__ self._f = f def __get__(self, obj: Any, owner: Type[Any]) -> Any: assert obj is not None, f"call {self._fname} on an instance" ret = obj.__dict__[self._fname] = self._f(obj) return ret class LinuxDistribution: def __init__( self, include_lsb: bool = True, os_release_file: str = "", distro_release_file: str = "", 
include_uname: bool = True, root_dir: Optional[str] = None, ) -> None: self.root_dir = root_dir self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR self.usr_lib_dir = ( os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR ) if os_release_file: self.os_release_file = os_release_file else: etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME) usr_lib_os_release_file = os.path.join( self.usr_lib_dir, _OS_RELEASE_BASENAME ) if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile( usr_lib_os_release_file ): self.os_release_file = etc_dir_os_release_file else: self.os_release_file = usr_lib_os_release_file self.distro_release_file = distro_release_file or "" self.include_lsb = include_lsb self.include_uname = include_uname def __repr__(self) -> str: return ( "LinuxDistribution(" "os_release_file={self.os_release_file!r}, " "distro_release_file={self.distro_release_file!r}, " "include_lsb={self.include_lsb!r}, " "include_uname={self.include_uname!r}, " "_os_release_info={self._os_release_info!r}, " "_lsb_release_info={self._lsb_release_info!r}, " "_distro_release_info={self._distro_release_info!r}, " "_uname_info={self._uname_info!r})".format(self=self) ) def linux_distribution( self, full_distribution_name: bool = True ) -> Tuple[str, str, str]: return ( self.name() if full_distribution_name else self.id(), self.version(), self.codename(), ) def id(self) -> str: def normalize(distro_id: str, table: Dict[str, str]) -> str: distro_id = distro_id.lower().replace(" ", "_") return table.get(distro_id, distro_id) distro_id = self.os_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_OS_ID) distro_id = self.lsb_release_attr("distributor_id") if distro_id: return normalize(distro_id, NORMALIZED_LSB_ID) distro_id = self.distro_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) distro_id = self.uname_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) return "" def name(self, pretty: bool = False) -> str: name = ( self.os_release_attr("name") or self.lsb_release_attr("distributor_id") or self.distro_release_attr("name") or self.uname_attr("name") ) if pretty: name = self.os_release_attr("pretty_name") or self.lsb_release_attr( "description" ) if not name: name = self.distro_release_attr("name") or self.uname_attr("name") version = self.version(pretty=True) if version: name = f"{name} {version}" return name or "" def version(self, pretty: bool = False, best: bool = False) -> str: versions = [ self.os_release_attr("version_id"), self.lsb_release_attr("release"), self.distro_release_attr("version_id"), self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( "version_id", "" ), self._parse_distro_release_content( self.lsb_release_attr("description") ).get("version_id", ""), self.uname_attr("release"), ] version = "" if best: for v in versions: if v.count(".") > version.count(".") or version == "": version = v else: for v in versions: if v != "": version = v break if pretty and version and self.codename(): version = f"{version} ({self.codename()})" return version def version_parts(self, best: bool = False) -> Tuple[str, str, str]: version_str = self.version(best=best) if version_str: version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?") matches = version_regex.match(version_str) if matches: major, minor, build_number = matches.groups() return major, minor or "", build_number or "" return "", "", "" def major_version(self, best: bool = False) -> str: 
return self.version_parts(best)[0] def minor_version(self, best: bool = False) -> str: return self.version_parts(best)[1] def build_number(self, best: bool = False) -> str: return self.version_parts(best)[2] def like(self) -> str: return self.os_release_attr("id_like") or "" def codename(self) -> str: try: return self._os_release_info["codename"] except KeyError: return ( self.lsb_release_attr("codename") or self.distro_release_attr("codename") or "" ) def info(self, pretty: bool = False, best: bool = False) -> InfoDict: return dict( id=self.id(), version=self.version(pretty, best), version_parts=dict( major=self.major_version(best), minor=self.minor_version(best), build_number=self.build_number(best), ), like=self.like(), codename=self.codename(), ) def os_release_info(self) -> Dict[str, str]: return self._os_release_info def lsb_release_info(self) -> Dict[str, str]: return self._lsb_release_info def distro_release_info(self) -> Dict[str, str]: return self._distro_release_info def uname_info(self) -> Dict[str, str]: return self._uname_info def os_release_attr(self, attribute: str) -> str: return self._os_release_info.get(attribute, "") def lsb_release_attr(self, attribute: str) -> str: return self._lsb_release_info.get(attribute, "")
Apache License 2.0
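distro_release_attr above is a plain dict lookup with an empty-string default; the same semantics are shown here on a hand-made info dict (the Fedora values are invented, real contents come from the parsed /etc/*-release file).

# Stand-in for self._distro_release_info as parsed from a distro release file.
_distro_release_info = {"id": "fedora", "name": "Fedora", "version_id": "38"}

def distro_release_attr(attribute):
    # Missing keys fall back to "" rather than raising KeyError.
    return _distro_release_info.get(attribute, "")

print(distro_release_attr("name"))            # Fedora
print(repr(distro_release_attr("codename")))  # ''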
kuri65536/python-for-android
python-build/python-libs/gdata/build/lib/gdata/youtube/service.py
YouTubeService.AddVideoResponse
python
def AddVideoResponse(self, video_id_to_respond_to, video_response): post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to, 'responses') return self.Post(video_response, uri=post_uri)
Add a video response. Needs authentication. Args: video_id_to_respond_to: A string representing the ID of the video to be responded to. video_response: YouTubeVideoEntry to be posted as a response. Returns: True if video response was posted successfully.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-build/python-libs/gdata/build/lib/gdata/youtube/service.py#L810-L825
__author__ = ('api.stephaniel@gmail.com (Stephanie Liu), ' 'api.jhartmann@gmail.com (Jochen Hartmann)') try: from xml.etree import cElementTree as ElementTree except ImportError: try: import cElementTree as ElementTree except ImportError: try: from xml.etree import ElementTree except ImportError: from elementtree import ElementTree import os import atom import gdata import gdata.service import gdata.youtube YOUTUBE_SERVER = 'gdata.youtube.com' YOUTUBE_SERVICE = 'youtube' YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin' YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime', 'flv') YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month', 'all_time') YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating', 'relevance') YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude') YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6') YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured', 'top_rated', 'most_viewed','watch_on_mobile') YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users' YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken' YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos' YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users' YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists' YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds' YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated') YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'most_viewed') YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'recently_featured') YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'watch_on_mobile') YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_favorites') YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'most_recent') YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'most_discussed') YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'most_linked') YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'most_responded') YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas' YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, 'complaint-reasons.cat') YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, 'subscriptiontypes.cat') YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS', 'RIGHTS', 'SPAM') YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected') YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family') UNKOWN_ERROR = 1000 YOUTUBE_BAD_REQUEST = 400 YOUTUBE_CONFLICT = 409 YOUTUBE_INTERNAL_SERVER_ERROR = 500 YOUTUBE_INVALID_ARGUMENT = 601 YOUTUBE_INVALID_CONTENT_TYPE = 602 YOUTUBE_NOT_A_VIDEO = 603 YOUTUBE_INVALID_KIND = 604 class Error(Exception): pass class RequestError(Error): pass class YouTubeError(Error): pass class YouTubeService(gdata.service.GDataService): def __init__(self, email=None, password=None, source=None, server=YOUTUBE_SERVER, additional_headers=None, client_id=None, developer_key=None, **kwargs): if developer_key and not client_id: raise YouTubeError('You must also specify the clientId') gdata.service.GDataService.__init__( self, email=email, password=password, service=YOUTUBE_SERVICE, source=source, server=server, additional_headers=additional_headers, 
**kwargs) if client_id is not None and developer_key is not None: self.additional_headers['X-Gdata-Client'] = client_id self.additional_headers['X-GData-Key'] = 'key=%s' % developer_key self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL def GetYouTubeVideoFeed(self, uri): return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString) def GetYouTubeVideoEntry(self, uri=None, video_id=None): if uri is None and video_id is None: raise YouTubeError('You must provide at least a uri or a video_id ' 'to the GetYouTubeVideoEntry() method') elif video_id and not uri: uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id) return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString) def GetYouTubeContactFeed(self, uri=None, username='default'): if uri is None: uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts') return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString) def GetYouTubeContactEntry(self, uri): return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString) def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None): if uri is None and video_id is None: raise YouTubeError('You must provide at least a uri or a video_id ' 'to the GetYouTubeVideoCommentFeed() method') elif video_id and not uri: uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments') return self.Get( uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString) def GetYouTubeVideoCommentEntry(self, uri): return self.Get( uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString) def GetYouTubeUserFeed(self, uri=None, username=None): if uri is None and username is None: raise YouTubeError('You must provide at least a uri or a username ' 'to the GetYouTubeUserFeed() method') elif username and not uri: uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads') return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString) def GetYouTubeUserEntry(self, uri=None, username=None): if uri is None and username is None: raise YouTubeError('You must provide at least a uri or a username ' 'to the GetYouTubeUserEntry() method') elif username and not uri: uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username) return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString) def GetYouTubePlaylistFeed(self, uri=None, username='default'): if uri is None: uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists') return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString) def GetYouTubePlaylistEntry(self, uri): return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString) def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None): if uri is None and playlist_id is None: raise YouTubeError('You must provide at least a uri or a playlist_id ' 'to the GetYouTubePlaylistVideoFeed() method') elif playlist_id and not uri: uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id) return self.Get( uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString) def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None): if uri is None and video_id is None: raise YouTubeError('You must provide at least a uri or a video_id ' 'to the GetYouTubeVideoResponseFeed() method') elif video_id and not uri: uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses') return self.Get( uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString) def GetYouTubeVideoResponseEntry(self, uri): return self.Get( uri, converter=gdata.youtube.YouTubeVideoResponseEntryFromString) def GetYouTubeSubscriptionFeed(self, 
uri=None, username='default'): if uri is None: uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'subscriptions') return self.Get( uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString) def GetYouTubeSubscriptionEntry(self, uri): return self.Get( uri, converter=gdata.youtube.YouTubeSubscriptionEntryFromString) def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None): if uri is None and video_id is None: raise YouTubeError('You must provide at least a uri or a video_id ' 'to the GetYouTubeRelatedVideoFeed() method') elif video_id and not uri: uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related') return self.Get( uri, converter=gdata.youtube.YouTubeVideoFeedFromString) def GetTopRatedVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_RATED_URI) def GetMostViewedVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_VIEWED_URI) def GetRecentlyFeaturedVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_RECENTLY_FEATURED_URI) def GetWatchOnMobileVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI) def GetTopFavoritesVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_FAVORITES_URI) def GetMostRecentVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RECENT_URI) def GetMostDiscussedVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_DISCUSSED_URI) def GetMostLinkedVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_LINKED_URI) def GetMostRespondedVideoFeed(self): return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RESPONDED_URI) def GetUserFavoritesFeed(self, username='default'): favorites_feed_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites') return self.GetYouTubeVideoFeed(favorites_feed_uri) def InsertVideoEntry(self, video_entry, filename_or_handle, youtube_username='default', content_type='video/quicktime'): try: assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry)) except AssertionError: raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body':'`video_entry` must be a gdata.youtube.VideoEntry instance', 'reason':'Found %s, not VideoEntry' % type(video_entry) }) majtype, mintype = content_type.split('/') try: assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES) except (ValueError, AssertionError): raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE, 'body':'This is not a valid content type: %s' % content_type, 'reason':'Accepted content types: %s' % ['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]}) if (isinstance(filename_or_handle, (str, unicode)) and os.path.exists(filename_or_handle)): mediasource = gdata.MediaSource() mediasource.setFile(filename_or_handle, content_type) elif hasattr(filename_or_handle, 'read'): import StringIO if hasattr(filename_or_handle, 'seek'): filename_or_handle.seek(0) file_handle = StringIO.StringIO(filename_or_handle.read()) name = 'video' if hasattr(filename_or_handle, 'name'): name = filename_or_handle.name mediasource = gdata.MediaSource(file_handle, content_type, content_length=file_handle.len, file_name=name) else: raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body': '`filename_or_handle` must be a path name or a file-like object', 'reason': ('Found %s, not path name or object ' 'with a .read() method' % type(filename_or_handle))}) upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username, 'uploads') self.additional_headers['Slug'] = mediasource.file_name try: try: return self.Post(video_entry, uri=upload_uri, 
media_source=mediasource, converter=gdata.youtube.YouTubeVideoEntryFromString) except gdata.service.RequestError, e: raise YouTubeError(e.args[0]) finally: del(self.additional_headers['Slug']) def CheckUploadStatus(self, video_entry=None, video_id=None): if video_entry is None and video_id is None: raise YouTubeError('You must provide at least a uri or a video_id ' 'to the CheckUploadStatus() method') elif video_id and not video_entry: video_entry = self.GetYouTubeVideoEntry(video_id=video_id) control = video_entry.control if control is not None: draft = control.draft if draft is not None: if draft.text == 'yes': yt_state = control.extension_elements[0] if yt_state is not None: state_value = yt_state.attributes['name'] message = '' if yt_state.text is not None: message = yt_state.text return (state_value, message) def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI): try: response = self.Post(video_entry, uri) except gdata.service.RequestError, e: raise YouTubeError(e.args[0]) tree = ElementTree.fromstring(response) for child in tree: if child.tag == 'url': post_url = child.text elif child.tag == 'token': youtube_token = child.text return (post_url, youtube_token) def UpdateVideoEntry(self, video_entry): for link in video_entry.link: if link.rel == 'edit': edit_uri = link.href return self.Put(video_entry, uri=edit_uri, converter=gdata.youtube.YouTubeVideoEntryFromString) def DeleteVideoEntry(self, video_entry): for link in video_entry.link: if link.rel == 'edit': edit_uri = link.href return self.Delete(edit_uri) def AddRating(self, rating_value, video_entry): if rating_value < 1 or rating_value > 5: raise YouTubeError('rating_value must be between 1 and 5 in AddRating()') entry = gdata.GDataEntry() rating = gdata.youtube.Rating(min='1', max='5') rating.extension_attributes['name'] = 'value' rating.extension_attributes['value'] = str(rating_value) entry.extension_elements.append(rating) for link in video_entry.link: if link.rel == YOUTUBE_RATING_LINK_REL: rating_uri = link.href return self.Post(entry, uri=rating_uri) def AddComment(self, comment_text, video_entry): content = atom.Content(text=comment_text) comment_entry = gdata.youtube.YouTubeVideoCommentEntry(content=content) comment_post_uri = video_entry.comments.feed_link[0].href return self.Post(comment_entry, uri=comment_post_uri)
Apache License 2.0
alliefitter/boto3_type_annotations
boto3_type_annotations_with_docs/boto3_type_annotations/config/client.py
Client.delete_configuration_aggregator
python
def delete_configuration_aggregator(self, ConfigurationAggregatorName: str):
    pass
Deletes the specified configuration aggregator and the aggregated data associated with the aggregator.

See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteConfigurationAggregator>`_

**Request Syntax**
::

  response = client.delete_configuration_aggregator(
      ConfigurationAggregatorName='string'
  )

:type ConfigurationAggregatorName: string
:param ConfigurationAggregatorName: **[REQUIRED]**
  The name of the configuration aggregator.

:returns: None
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/config/client.py#L305-L320
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import Union
from typing import List


class Client(BaseClient):
    def batch_get_aggregate_resource_config(self, ConfigurationAggregatorName: str, ResourceIdentifiers: List) -> Dict:
        pass

    def batch_get_resource_config(self, resourceKeys: List) -> Dict:
        pass

    def can_paginate(self, operation_name: str = None):
        pass

    def delete_aggregation_authorization(self, AuthorizedAccountId: str, AuthorizedAwsRegion: str):
        pass

    def delete_config_rule(self, ConfigRuleName: str):
        pass
MIT License
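A minimal usage sketch for the call documented in this record, assuming boto3 is installed and AWS credentials are configured; the aggregator name is a placeholder and follows the Request Syntax shown above:

import boto3

# Placeholder aggregator name; fails if no such aggregator exists in the account.
client = boto3.client('config')
client.delete_configuration_aggregator(ConfigurationAggregatorName='my-aggregator')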
s2e/pykvm
pykvm/kvm.py
VCPU.run
python
def run(self):
    logger.info('Running KVM')

    while True:
        try:
            fcntl.ioctl(self._vcpu_fd, KVM_RUN)
        except IOError as e:
            if e.errno != errno.EINTR:
                raise

        reason = KVMExitReason(self._run_obj.exit_reason)
        if reason == KVMExitReason.KVM_EXIT_INTERNAL_ERROR:
            raise RuntimeError(KVMInternalError(self._run_obj.exit_reasons.internal.suberror))
        elif reason == KVMExitReason.KVM_EXIT_IO:
            logger.info('%s %s', reason, self._run_obj.exit_reasons.io)
            break
        elif reason == KVMExitReason.KVM_EXIT_MMIO:
            logger.info('%s %s', reason, self._run_obj.exit_reasons.mmio)
            break
        elif reason == KVMExitReason.KVM_EXIT_HLT:
            logger.info('CPU halted, exiting (%s)', reason)
            break
        elif reason == KVMExitReason.KVM_EXIT_SHUTDOWN:
            logger.info('Shutting down')
            break
        elif reason == KVMExitReason.KVM_EXIT_INTR:
            pass
        elif reason == KVMExitReason.KVM_EXIT_FLUSH_DISK:
            pass
        elif reason == KVMExitReason.KVM_EXIT_SAVE_DEV_STATE:
            pass
        elif reason == KVMExitReason.KVM_EXIT_RESTORE_DEV_STATE:
            pass
        elif reason == KVMExitReason.KVM_EXIT_CLONE_PROCESS:
            raise RuntimeError('Multi-core mode not supported')
        else:
            raise RuntimeError('Unhandled exit code %s' % reason)
Runs the virtual machine until an exit condition occurs. One way to terminate execution is for the guest to execute the HLT instruction. We don't support I/O, MMIO, and some other cases, so this function will terminate when it encounters them.
https://github.com/s2e/pykvm/blob/c7c71c8c3c9cc94423ca55459123ed8e01cb2b27/pykvm/kvm.py#L221-L277
import array import errno import fcntl import logging import mmap import os from argparse import ArgumentParser import ctypes from ctypes import c_int, c_size_t, c_void_p from ctypes.util import find_library as ctypes_find_library from hexdump import hexdump from pykvm.kvm_types import * logger = logging.getLogger(__name__) libc = ctypes.cdll.LoadLibrary(ctypes_find_library('c')) libc.mmap.argtypes = [c_void_p, c_size_t, c_int, c_int, c_size_t] libc.mmap.restype = c_void_p MAP_FAILED = 0xffffffffffffffff class RAM(object): def __init__(self, size, vm): if size % 0x1000: raise RuntimeError('Ram size must be a multiple of 4KB') self._size = size self._vm = vm logger.debug('Allocating %d bytes for RAM', size) self._pointer = libc.mmap(-1, self._size, mmap.PROT_READ | mmap.PROT_WRITE, mmap.MAP_ANON | mmap.MAP_PRIVATE, -1, 0) if self._pointer == MAP_FAILED or not self._pointer: raise RuntimeError('Could not allocate buffer of size %#x' % size) logger.debug('RAM is at %#lx', self._pointer) self.obj = (ctypes.c_ubyte * size).from_address(self._pointer) def get_kvm_region(self, slot): ram = KVMUserSpaceMemoryRegion() ram.slot = slot ram.flags = 0 ram.guest_phys_addr = 0 ram.memory_size = self._size ram.userspace_addr = self._pointer return ram def write(self, addr, data): if addr + len(data) > self._size: raise RuntimeError('Buffer overflow') if self._vm.has_mem_rw: b = (ctypes.c_ubyte * len(data)).from_buffer_copy(data) m = KVMMemRW() m.source = ctypes.addressof(b) m.dest = self._pointer + addr m.is_write = 1 m.length = len(data) logger.debug('Writing to %#lx from %#lx, size=%#lx', m.dest, m.source, m.length) fcntl.ioctl(self._vm.fd, KVM_MEM_RW, m) else: for i, c in enumerate(data): self.obj[addr + i] = ord(c) def read(self, addr, size): if addr + size > self._size: raise RuntimeError('Buffer overflow') if self._vm.has_mem_rw: ret = (ctypes.c_ubyte * size)() m = KVMMemRW() m.source = self._pointer + addr m.dest = ctypes.addressof(ret) m.is_write = 0 m.length = size fcntl.ioctl(self._vm.fd, KVM_MEM_RW, m) else: ret = self.obj[addr:addr+size] return array.array('B', ret).tostring() def _get_32bit_code_segment(): s = KVMSegment() s.base = 0 s.limit = 0xffffffff s.selector = 0 s.type = 0xc s.present = 1 s.dpl = 0 s.db = 1 s.s = 1 s.l = 0 s.g = 1 return s def _get_32bit_data_segment(): s = KVMSegment() s.base = 0 s.limit = 0xffffffff s.selector = 0 s.type = 0x2 s.present = 1 s.dpl = 0 s.db = 1 s.s = 1 s.l = 0 s.g = 1 return s class VCPU(object): def __init__(self, kvm_fd, vm_fd): self._vm_fd = vm_fd self._vcpu_fd = fcntl.ioctl(vm_fd, KVM_CREATE_VCPU) logger.debug('Created VCPU fd=%d', self._vcpu_fd) self._vcpu_size = fcntl.ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE) logger.debug('VCPU requires %d bytes for kvm_run structure', self._vcpu_size) self._pointer = mmap.mmap(self._vcpu_fd, self._vcpu_size) self._run_obj = KVMRun.from_buffer(self._pointer) def init_state(self, rip=0, rsp=0, bits=32): sregs = KVMSRegs() fcntl.ioctl(self._vcpu_fd, KVM_GET_SREGS, sregs) if bits == 16: sregs.cs.base = 0 sregs.cs.selector = 0 elif bits == 32: sregs.cs = _get_32bit_code_segment() sregs.ds = _get_32bit_data_segment() sregs.es = sregs.ds sregs.ss = sregs.ds sregs.fs = sregs.ds sregs.gs = sregs.ds sregs.cr0 = 0x1 else: raise ValueError('Unsupported number of bits %d' % bits) fcntl.ioctl(self._vcpu_fd, KVM_SET_SREGS, sregs) regs = KVMRegs() fcntl.ioctl(self._vcpu_fd, KVM_GET_REGS, regs) regs.rip = rip regs.rsp = rsp regs.rflags = 2 fcntl.ioctl(self._vcpu_fd, KVM_SET_REGS, regs) def dump_regs(self): regs = KVMRegs() 
fcntl.ioctl(self._vcpu_fd, KVM_GET_REGS, regs) logger.info('rax=%#lx rbx=%#lx rcx=%#lx rdx=%#lx', regs.rax, regs.rbx, regs.rcx, regs.rdx) logger.info('rsi=%#lx rdi=%#lx rbp=%#lx rsp=%#lx', regs.rsi, regs.rdi, regs.rbp, regs.rsp) logger.info('rip=%#lx', regs.rip)
MIT License
googleapis/python-iot
samples/api-client/accesstoken_example/accesstoken.py
parse_command_line_args
python
def parse_command_line_args():
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--algorithm",
        default="RS256",
        choices=("RS256", "ES256"),
        help="Encryption algorithm used to generate the device JWT.",
    )
    parser.add_argument("--private_key_file", help="Path to private key file.")
    parser.add_argument(
        "--cloud_region", default="us-central1", help="GCP cloud region."
    )
    parser.add_argument("--device_id", default=None, help="Device ID.")
    parser.add_argument(
        "--scope",
        default=None,
        help="Scope for OAuth 2.0 access token. Space delimited strings. See the full list of scopes at: https://developers.google.com/identity/protocols/oauth2/scopes",
    )
    parser.add_argument(
        "--project_id",
        default=os.environ.get("GOOGLE_CLOUD_PROJECT"),
        help="GCP cloud project name.",
    )
    parser.add_argument(
        "--registry_id",
        default=None,
        help="Registry ID.",
    )
    parser.add_argument(
        "--topic_id",
        default=None,
        help="Cloud Pub/Sub topic ID.",
    )
    parser.add_argument(
        "--bucket_name",
        default=None,
        help="Cloud Storage bucket name.",
    )
    parser.add_argument(
        "--data_path",
        default=None,
        help="Path to file to be uploaded.",
    )
    parser.add_argument(
        "--service_account_email",
        default=None,
        help="Service account email to exchange device access token to service account token.",
    )
    parser.add_argument(
        "--device_access_token",
        default=None,
        help="Device access token to exchange for service account access token.",
    )
    parser.add_argument(
        "--command_to_be_sent_to_device",
        default=None,
        help="Command to be sent to the IoT device.",
    )

    command = parser.add_subparsers(dest="command")
    command.add_parser("generate-access-token", help=generate_access_token.__doc__)
    command.add_parser("publish-pubsub-message", help=publish_pubsub_message.__doc__)
    command.add_parser(
        "send-command-to-iot-device", help=send_iot_command_to_device.__doc__
    )
    command.add_parser(
        "download-cloud-storage-file", help=download_cloud_storage_file.__doc__
    )
    command.add_parser(
        "exchange-device-token-for-service-account-token",
        help=exchange_device_access_token_for_service_account_access_token.__doc__,
    )

    return parser.parse_args()
Parse command line arguments.
https://github.com/googleapis/python-iot/blob/87df16600d419be91ae91dc9600e9d31c3b267f0/samples/api-client/accesstoken_example/accesstoken.py#L360-L431
import argparse import base64 from datetime import datetime, timedelta import io import json import os import time import jwt import requests as req def create_jwt(project_id, algorithm, private_key_file): jwt_payload = '{{"iat":{},"exp":{},"aud":"{}"}}'.format( time.time(), time.mktime((datetime.now() + timedelta(hours=6)).timetuple()), project_id, ) private_key_bytes = "" with io.open(private_key_file) as f: private_key_bytes = f.read() encoded_jwt = jwt.encode( json.loads(jwt_payload), private_key_bytes, algorithm=algorithm ) return encoded_jwt.decode() if isinstance(encoded_jwt, bytes) else encoded_jwt def generate_access_token( cloud_region, project_id, registry_id, device_id, scope, algorithm, private_key_file ): jwt = create_jwt(project_id, algorithm, private_key_file) resource_path = "projects/{}/locations/{}/registries/{}/devices/{}".format( project_id, cloud_region, registry_id, device_id ) request_url = "https://cloudiottoken.googleapis.com/v1beta1/{}:generateAccessToken".format( resource_path ) headers = {"authorization": "Bearer {}".format(jwt)} request_payload = {"scope": scope, "device": resource_path} resp = req.post(url=request_url, data=request_payload, headers=headers) assert resp.ok, resp.raise_for_status() access_token = resp.json()["access_token"] print("Device access token: {}".format(access_token)) return access_token def publish_pubsub_message( cloud_region, project_id, registry_id, device_id, algorithm, rsa_private_key_path, topic_id, ): scope = "https://www.googleapis.com/auth/pubsub" access_token = generate_access_token( cloud_region, project_id, registry_id, device_id, scope, algorithm, rsa_private_key_path, ) request_path = "https://pubsub.googleapis.com/v1/projects/{}/topics/{}".format( project_id, topic_id ) headers = { "Authorization": "Bearer {}".format(access_token), "content-type": "application/json", "cache-control": "no-cache", } resp = req.put(url=request_path, data={}, headers=headers) assert resp.ok, resp.raise_for_status() print("Successfully created Pub/Sub topic: {}.".format(topic_id)) publish_payload = { "messages": [ {"data": str(base64.b64encode(bytes("MESSAGE_DATA", "utf-8")), "utf-8")} ] } publish_request_path = "https://pubsub.googleapis.com/v1/projects/{}/topics/{}:publish".format( project_id, topic_id ) publish_resp = req.post( url=publish_request_path, data=json.dumps(publish_payload), headers=headers ) assert publish_resp.ok, publish_resp.raise_for_status() print( "Pub/Sub message has been successfully published to {}: {}".format( topic_id, publish_resp.json() ) ) pubsub_delete_request_path = "https://pubsub.googleapis.com/v1/projects/{}/topics/{}".format( project_id, topic_id ) delete_resp = req.delete(url=pubsub_delete_request_path, headers=headers) assert delete_resp.ok, delete_resp.raise_for_status() print("Successfully deleted Pub/Sub topic: {}".format(topic_id)) def download_cloud_storage_file( cloud_region, project_id, registry_id, device_id, algorithm, rsa_private_key_path, bucket_name, data_path, ): scope = "https://www.googleapis.com/auth/devstorage.full_control" access_token = generate_access_token( cloud_region, project_id, registry_id, device_id, scope, algorithm, rsa_private_key_path, ) create_payload = { "name": bucket_name, "location": cloud_region, "storageClass": "STANDARD", "iamConfiguration": {"uniformBucketLevelAccess": {"enabled": True}}, } create_request_path = "https://storage.googleapis.com/storage/v1/b?project={}".format( project_id ) headers = { "authorization": "Bearer {}".format(access_token), 
"content-type": "application/json", "cache-control": "no-cache", } create_resp = req.post( url=create_request_path, data=bytes(json.dumps(create_payload), "utf-8"), headers=headers, ) assert create_resp.ok, create_resp.raise_for_status() print("Successfully created Storage bucket: {}".format(bucket_name)) data_name = "testFile.ext" binary_data = open(data_path, "rb").read() upload_request_path = "https://storage.googleapis.com/upload/storage/v1/b/{}/o?uploadType=media&name={}".format( bucket_name, data_name ) upload_resp = req.post(url=upload_request_path, data=binary_data, headers=headers) assert upload_resp.ok, upload_resp.raise_for_status() print( "Successfully uploaded {} as {} to bucket {}.".format( data_path, data_name, bucket_name ) ) download_request_path = "https://storage.googleapis.com/storage/v1/b/{}/o/{}?alt=media".format( bucket_name, data_name ) download_resp = req.get(url=download_request_path, headers=headers) assert download_resp.ok, download_resp.raise_for_status() print("Successfully downloaded {} from bucket {}.".format(data_name, bucket_name)) delete_request_path = "https://storage.googleapis.com/storage/v1/b/{}/o/{}".format( bucket_name, data_name ) delete_data_resp = req.delete(url=delete_request_path, headers=headers) assert delete_data_resp.ok, delete_data_resp.raise_for_status() print("Successfully deleted {} from bucket {}.".format(data_name, bucket_name)) gcs_delete_request_path = "https://storage.googleapis.com/storage/v1/b/{}".format( bucket_name ) delete_resp = req.delete(url=gcs_delete_request_path, headers=headers) assert delete_resp.ok, delete_resp.raise_for_status() print("Successfully deleted bucket: {}".format(bucket_name)) def exchange_device_access_token_for_service_account_access_token( device_access_token, service_account_email ): scope = "https://www.googleapis.com/auth/cloud-platform" headers = { "Authorization": "Bearer {}".format(device_access_token), "content-type": "application/json", "cache-control": "no-cache", } exchange_payload = {"scope": [scope]} exchange_url = "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/{}:generateAccessToken".format( service_account_email ) exchange_resp = req.post( url=exchange_url, data=json.dumps(exchange_payload), headers=headers ) assert exchange_resp.ok, exchange_resp.raise_for_status() service_account_token = exchange_resp.json()["accessToken"] print("Service account access token: {}".format(service_account_token)) return service_account_token def send_iot_command_to_device( cloud_region, project_id, registry_id, device_id, algorithm, rsa_private_key_path, service_account_email, command_to_be_sent_to_device, ): scope = "https://www.googleapis.com/auth/cloud-platform" access_token = generate_access_token( cloud_region, project_id, registry_id, device_id, scope, algorithm, rsa_private_key_path, ) service_account_token = exchange_device_access_token_for_service_account_access_token( access_token, service_account_email ) command_payload = json.dumps( { "binaryData": base64.urlsafe_b64encode( command_to_be_sent_to_device.encode("utf-8") ).decode("utf-8") } ) command_url = "https://cloudiot.googleapis.com/v1/projects/{}/locations/{}/registries/{}/devices/{}:sendCommandToDevice".format( project_id, cloud_region, registry_id, device_id ) command_resp = req.post( url=command_url, data=command_payload, headers={ "authorization": "Bearer {}".format(service_account_token), "content-type": "application/json", "cache-control": "no-cache", }, ) assert command_resp.ok, command_resp.raise_for_status() 
print( "Successfully sent command {} to device.".format(command_to_be_sent_to_device) )
Apache License 2.0
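A hedged sketch of how the parsed arguments might be dispatched inside this sample module; the main dispatch is not shown in the record, so this only mirrors the subcommand names and the generate_access_token signature that are shown, with all flag values supplied on the command line:

# Hypothetical dispatch, living in the same module as the functions above.
args = parse_command_line_args()
if args.command == 'generate-access-token':
    generate_access_token(
        args.cloud_region, args.project_id, args.registry_id,
        args.device_id, args.scope, args.algorithm, args.private_key_file)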
maxjiang93/space_time_pde
src/unet3d.py
UNet3d.__init__
python
def __init__(self, in_features=4, out_features=32, igres=(4, 32, 32), ogres=None, nf=16, mf=512):
    super(UNet3d, self).__init__()
    self.igres = igres
    self.nf = nf
    self.mf = mf
    self.in_features = in_features
    self.out_features = out_features

    if ogres is None:
        self.ogres = self.igres
    else:
        self.ogres = ogres

    mul = np.array(self.ogres) / np.array(self.igres)
    fac = np.log2(mul)
    if not np.allclose(fac % 1, np.zeros_like(fac)):
        raise ValueError("ogres must be 2^k times greater than igres where k >= 0. "
                         "Instead igres: {}, ogres: {}".format(igres, ogres))
    if not np.all(fac >= 0):
        raise ValueError("ogres must be greater or equal to igres. "
                         "Instead igres: {}, ogres: {}".format(igres, ogres))
    self.exp_fac = fac.astype(np.int32)
    if not np.allclose(self.exp_fac, np.zeros_like(self.exp_fac)):
        self.expand = True
    else:
        self.expand = False

    if isinstance(self.igres, int):
        self.igres = tuple([self.igres] * 3)
    if isinstance(self.ogres, int):
        self.ogres = tuple([self.ogres] * 3)
    self._check_grid_res()

    self.li = math.log(np.max(np.array(self.igres)), 2)
    self.lo = math.log(np.max(np.array(self.ogres)), 2)
    assert self.li % 1 == 0
    assert self.lo % 1 == 0
    self.li = int(self.li)
    self.lo = int(self.lo)

    self._create_layers()
initialize 3D UNet.

Args:
    in_features: int, number of input features.
    out_features: int, number of output features.
    igres: tuple, input grid resolution in each dimension.
        each dimension must be integer powers of 2.
    ogres: tuple, output grid resolution in each dimension.
        each dimension must be integer powers of 2.
        #NOTE for now must be same as igres or must be 2^k multipliers of igres.
    nf: int, number of base feature layers.
    mf: int, a cap for max number of feature layers throughout the network.
https://github.com/maxjiang93/space_time_pde/blob/5e355b0434baf1757d071ce993b84073c8426223/src/unet3d.py#L63-L120
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class ResBlock3D(nn.Module):
    def __init__(self, in_channels, neck_channels, out_channels, final_relu=True):
        super(ResBlock3D, self).__init__()
        self.in_channels = in_channels
        self.neck_channels = neck_channels
        self.out_channels = out_channels
        self.conv1 = nn.Conv3d(in_channels, neck_channels, kernel_size=1, stride=1)
        self.conv2 = nn.Conv3d(neck_channels, neck_channels, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv3d(neck_channels, out_channels, kernel_size=1, stride=1)
        self.bn1 = nn.BatchNorm3d(num_features=neck_channels)
        self.bn2 = nn.BatchNorm3d(num_features=neck_channels)
        self.bn3 = nn.BatchNorm3d(num_features=out_channels)
        self.shortcut = nn.Conv3d(in_channels, out_channels, kernel_size=1, stride=1)
        self.final_relu = final_relu

    def forward(self, x):
        identity = x
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x += self.shortcut(identity)
        if self.final_relu:
            x = F.relu(x)
        return x


class UNet3d(nn.Module):
MIT License
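A small sketch of constructing the network with the arguments described in the docstring; the import path is assumed from the file location src/unet3d.py, and the constraint comments restate the checks performed in __init__ above:

from unet3d import UNet3d   # assumed import path, based on src/unet3d.py

# igres must be powers of 2; ogres defaults to igres when omitted.
net = UNet3d(in_features=4, out_features=32, igres=(4, 32, 32), nf=16)
print(sum(p.numel() for p in net.parameters()))

# An ogres smaller than igres fails the resolution check in __init__:
# UNet3d(igres=(4, 32, 32), ogres=(2, 16, 16))  -> ValueError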
aws-samples/aws-device-farm-appium-python-tests-for-android-sample-app
tests/pages/alerts_page.py
AlertsPage.alert_text_is_displayed
python
def alert_text_is_displayed(self):
    alert_text = self.driver.find_element_by_name(self.ALERT_MESSAGE_NAME)
    return alert_text.is_displayed()
Returns visibility of alert's message as a boolean.
https://github.com/aws-samples/aws-device-farm-appium-python-tests-for-android-sample-app/blob/86182ec2fae531f7376fc4b7261529700d67eb0f/tests/pages/alerts_page.py#L28-L31
from tests.pages.base_pages.base_page import BasePage


class AlertsPage(BasePage):
    ALERT_BUTTON_NAME = "ALERT"
    ALERT_MESSAGE_NAME = "This is the alert message"
    OK_BUTTON_NAME = "OK"

    def click_alert_button(self):
        alert_button = self.driver.find_element_by_name(self.ALERT_BUTTON_NAME)
        alert_button.click()
Apache License 2.0
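A hedged sketch of how this page object might be used in a test; it assumes BasePage simply stores the Appium driver handed to it (the base class is not shown in this record), and 'driver' stands for an already-created Appium webdriver session:

# 'driver' is a placeholder for an existing Appium webdriver session.
alerts_page = AlertsPage(driver)
alerts_page.click_alert_button()
assert alerts_page.alert_text_is_displayed()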
googlecloudplatform/data-pipeline
app/src/pipelines/stages/hadoopshutdown.py
HadoopShutdown.run
python
def run(self, config):
    ShutdownHadoopCluster(config)
Deletes Google Compute Engine instances of the Hadoop cluster.
https://github.com/googlecloudplatform/data-pipeline/blob/0bdd1664bc9ff5e36928c4609ef6127ef1e1fb3f/app/src/pipelines/stages/hadoopshutdown.py#L90-L92
import logging from src import auth from src.hadoop import datastore from src.hadoop import hadoop_cluster from src.pipelines import pipeline SCOPE = ['https://www.googleapis.com/auth/compute'] class HadoopShutdownError(Exception): def ShutdownHadoopCluster(config): logging.debug('Shutdown Hadoop cluster: %s', str(config)) try: project = config['project'] prefix = config['prefix'] except KeyError as e: raise HadoopShutdownError( 'Hadoop Shutdown: Missing required parameter: %s' % str(e)) clusters = datastore.ClusterInfo.query( datastore.ClusterInfo.project == project, datastore.ClusterInfo.prefix == prefix).fetch() if not clusters: raise HadoopShutdownError( 'Hadoop Shutdown: No cluster found in project "%s" with prefix "%s"', project, prefix) for cluster_info in clusters: logging.info('Shutdown Hadoop cluster: %s', str(cluster_info.key.id())) cluster = hadoop_cluster.HadoopCluster( auth.Service.HttpFromServiceAccount(SCOPE), cluster_id=cluster_info.key.id()) cluster.TeardownCluster() class HadoopShutdown(pipeline.Pipeline): @staticmethod def GetHelp(): return """Shutdown the hadoop cluster. The stage config should look like this: ```python { "project": "hadoop project name", "prefix": "" } ``` """
Apache License 2.0
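A minimal sketch of driving the module-level helper directly, using the config shape given in the GetHelp docstring; the import path is assumed from the file location, the project and prefix values are placeholders, and the call only succeeds inside the pipeline's GCP/App Engine environment where the service-account auth and datastore it uses are available:

from src.pipelines.stages.hadoopshutdown import ShutdownHadoopCluster  # assumed import path

# Raises HadoopShutdownError if 'project'/'prefix' are missing or no cluster matches.
ShutdownHadoopCluster({
    "project": "my-hadoop-project",   # placeholder
    "prefix": "hdp",                  # placeholder
})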
idea-fasoc/fasoc
generators/memory-gen/bin/MemGen.py
MemGen.toolenv
python
def toolenv(self):
    self.hspicepath = os.popen("which hspice").read().replace('\n', '')
    self.spectrepath = os.popen("which spectre").read().replace('\n', '')
    self.liberatepath = os.popen("which liberate").read().replace('\n', '')
    self.virtuosopath = os.popen("which virtuoso").read().replace('\n', '')

    if os.path.exists(self.hspicepath):
        vli = os.popen("hspice -v").read().split()
        self.hversion = vli[vli.index("Version") + 1]
        log.info(" Using %s hspice version for the library characterization" % self.hversion)
    elif os.path.exists(self.spectrepath):
        log.info(" Using spectre for the library characterization")
    else:
        log.warning("No simulators are set. Lib generation for SRAM might fail...")

    if os.path.exists(self.virtuosopath):
        log.info(" Using Ocean for the design space exploration")
    else:
        log.error("OCEAN is not set. Can not generate the SRAM. Exiting now...")
        sys.exit(1)

    if os.path.exists(self.spectrepath):
        log.info(" Using spectre for the design space exploration")
    else:
        log.error("Spectre simulator is not set. Can not generate the SRAM. Exiting now...")
        sys.exit(1)

    if os.path.exists(self.liberatepath):
        log.info(" Using Liberate for Lib characterization")
    else:
        log.warning("Liberate is not set. Can not generate Lib files for SRAM ")
Checks the environment of the required tools for running the sims.
https://github.com/idea-fasoc/fasoc/blob/bb978c68e92b2f4cf8eb52d98a347b7643be83b3/generators/memory-gen/bin/MemGen.py#L590-L633
import os import sys import re import datetime import time import smtplib import logging import getpass from optparse import OptionParser import math import shutil import tempfile import subprocess as sp import json global log log=logging.getLogger(__name__) class PDKError(Exception): pass class MemGen(): def __init__(self): if p_options['Toolenvt']: pass else: try: os.mkdir('runfiles') except OSError: if os.path.exists('runfiles'): log.debug("Directory already exists") else: log.error("Unable to create the dir, check the permissions of the run dir %s"%rundir) sys.exit(1) self.runfilesdir = os.path.join(rundir,'runfiles') self.digitalflowdir = os.path.join(rundir, 'apr') self.digitalflowsrcdir = os.path.join(self.digitalflowdir, 'src') if p_options['Mode'] == 'verilog': self.ConfigParser() self.SRAMConfig() self.VerilogGen() self.Filemanage() elif p_options['Mode'] == 'macro': binDir = os.path.dirname(os.path.abspath(__file__)) genDir = os.path.dirname(binDir) genName = os.path.split(genDir)[1] genParentdir = os.path.split(genDir)[0] genParentdirName = os.path.split(genParentdir)[1] Fasocdir = os.path.split(genParentdir)[0] self.Fasocdir = Fasocdir self.digitalflowdir = os.path.join(Fasocdir, 'private', genParentdirName, genName, 'apr') self.digitalflowsrcdir = os.path.join(self.digitalflowdir, 'src') self.ConfigParser() self.SRAMConfig() self.VerilogGen() log.info("Successfully generated the SRAM verilog files") if os.path.exists(os.path.join(Fasocdir, 'private')): self.DataPrep() self.Synthesis() self.PNR() self.Filemanage() log.info("Successfully generated the syntehsizable SRAM. Refer the outputs dir for all the generated outputs") else: log.error("Unable to find the directory %s with MemGen synthesis and apr scripts. Unable to run the synthesis and PNR of the memory"%os.path.join(Fasocdir, 'private')) sys.exit(1) def ConfigParser(self): log.info(" Loading the memory specs config file %s"%p_options['Config']) try: with open(p_options['Config']) as file: self.config = json.load(file) except ValueError as e: log.error('Error occurred opening or loading json file.') log.error('Exception: %s' % str(e)) sys.exit(1) self.sram_name = self.config["module_name"] self.nowords = self.config["specifications"]["nowords"] self.word_size = self.config["specifications"]["word_size"] def SRAMConfig(self): log.info("Determining the SRAM Configuration") nbfra, nbwhole = math.modf((self.nowords*self.word_size)/16384); if nbfra: log.error("The memory for the specified words %d can not be generated"%self.nowords) log.error("Due to current limitations of the tool, the memory not in multiples of 2 Kilo Bytes can not be generated") log.error("Please specify the size in multiples of 2KB and re-run") sys.exit(1) elif not nbfra: bsbfra, bsbwhole = math.modf(math.log2(nbwhole)) if bsbfra: log.error("The number of banks should be integer powers of 2 Ex: 2^n, where n=natural number (1,2,3,4). 
Please specify the correct size and re-run the MemGen") sys.exit(1) else: self.no_banks = int(nbwhole) self.banksel_bits = int(bsbwhole) self.bank_address_size = 9 self.address_size = (self.banksel_bits+self.bank_address_size) self.decoder_bits = self.banksel_bits self.muxsel_bits = self.banksel_bits log.info("Below is the SRAM configuration:\n SRAM Capacity : %d KB \n No of Banks : %d \n Address bits : %d \n Bank Address Size : %d \n"%(self.no_banks*2, self.no_banks, self.address_size, self.bank_address_size)) def VerilogGen(self): log.info("Generating the verilog modules") self.Verilogrundir = os.path.join(self.runfilesdir, 'VerilogGen') try: os.mkdir(self.Verilogrundir) os.chdir(self.Verilogrundir) except OSError: if os.path.exists(self.Verilogrundir): log.info("Directory %s already exists"%self.Verilogrundir) log.info(" Cleaning up the existing SRAM verilog files") os.chdir(self.Verilogrundir) evs=li=os.popen("ls *.v*").read().split() for l in evs: log.info("Removing %s"%l) os.remove(l) else: log.error("Unable to create the dir %s, Can not run the VerilogGen. Exitting MemGen.."%self.Verilogrundir) sys.exit(1) self.decoder_name = "decoder_%dto%d"%(self.decoder_bits, self.no_banks) self.mux_name = "mux_%dto1"%self.no_banks self.VerilogGen_decoder() self.VerilogGen_mux() self.VerilogGen_SRAM() log.info("Successfully completed the generation of all the required verilog modules.") os.chdir(rundir) def VerilogGen_decoder(self): self.decoder=os.path.join(self.Verilogrundir,self.decoder_name+'.v') log.info("Generating the verilog file %s"%self.decoder_name) dfh=open(self.decoder, 'w') dfh.write("module %s (DATA_REQ, ADDR, DATA_REQIN);\n"%self.decoder_name) dfh.write(" parameter no_banks = %d;\n"%self.no_banks) dfh.write(" parameter banksel_bits = %d;\n"%self.banksel_bits) dfh.write(" output [no_banks-1:0] DATA_REQ;\n") dfh.write(" input [banksel_bits-1:0] ADDR;\n") dfh.write(" input DATA_REQIN;\n") dfh.write(" reg [no_banks-1:0] DATA_REQ;\n") dfh.write(" always @(DATA_REQIN or ADDR) begin\n") dfh.write(" if (DATA_REQIN == 1'b1)\n") dfh.write(" case (ADDR)\n") addr = '0'*self.banksel_bits for i in range (1, self.no_banks+1): decoded_addr='0'*self.no_banks if i!=1: addr=self.add('1',addr) decoded_addr=decoded_addr[0:self.no_banks-i]+'1'+decoded_addr[self.no_banks-i:self.no_banks-1] dfh.write(" %d'b%s: DATA_REQ = %d'b%s;\n"%(self.banksel_bits, addr, self.no_banks, decoded_addr)) dfh.write(" endcase\n") dfh.write(" else if (DATA_REQIN == 0)\n") data_req_val = '0'*self.no_banks dfh.write(" DATA_REQ = %d'b%s;\n"%(self.no_banks, data_req_val)) dfh.write(" end\n") dfh.write("endmodule\n") dfh.close() log.info("Successfully generated the verilog file %s"%self.decoder_name) def VerilogGen_mux(self): self.mux=os.path.join(self.Verilogrundir,self.mux_name+'.v') log.info("Generating the verilog file %s"%self.mux_name) mfh=open(self.mux, 'w') mfh.write("module %s (DATA_OUT, DATA_IN, DATA_SEL);\n"%self.mux_name) mfh.write(" parameter word_size = %d;\n"%self.word_size) mfh.write(" parameter no_banks = %d;\n"%self.no_banks) mfh.write(" parameter banksel_bits = %d;\n"%self.banksel_bits) mfh.write(" output [word_size-1:0] DATA_OUT;\n") mfh.write(" input [word_size-1:0] DATA_IN [no_banks-1:0];\n") mfh.write(" input [banksel_bits-1:0] DATA_SEL;\n") mfh.write(" reg [word_size-1:0] DATA_OUT;\n") DATA_IN_str ='' for ba in range(0, self.no_banks): DATA_IN_str=DATA_IN_str+"DATA_IN[%d]"%ba DATA_IN_str=DATA_IN_str+' or ' mfh.write(" always @(%s DATA_SEL) begin\n"%DATA_IN_str) mfh.write(" case ( DATA_SEL )\n") addr = 
'0'*self.banksel_bits for k in range(0, self.no_banks): if k!=0: addr=self.add('1',addr) mfh.write(" %d'b%s: DATA_OUT = DATA_IN[%d];\n"%(self.banksel_bits, addr, k)) default_data_out = 'x'*self.word_size mfh.write(" default: DATA_OUT = %d'b%s;\n"%(self.word_size, default_data_out)) mfh.write(" endcase\n") mfh.write(" end\n") mfh.write("endmodule\n") mfh.close() log.info("Successfully generated the verilog file %s"%self.mux_name) def VerilogGen_SRAM(self): self.sram=os.path.join(self.Verilogrundir,self.sram_name+'.v') log.info("Generating the verilog file %s"%self.sram_name) sfh=open(self.sram, 'w') sfh.write("`timescale 1ns / 1ns\n") sfh.write('`include "%s.v"\n'%self.decoder_name) sfh.write('`include "%s.v"\n\n\n'%self.mux_name) if p_options['PDK'] =='gf12lp': sfh.write("module %s (DOUT, ADDR, CLK, CEN, DIN, WE);\n"%self.sram_name) else: sfh.write("module %s (DOUT, ADDR, CLK, DBE, CEN, DIN, WE);\n"%self.sram_name) sfh.write(" parameter address_size = %d;\n"%self.address_size) sfh.write(" parameter no_banks = %d;\n"%self.no_banks) sfh.write(" parameter bank_address_size = %d;\n"%self.bank_address_size) sfh.write(" parameter word_size = %d;\n"%self.word_size) sfh.write(" input CLK, CEN, WE;\n") if p_options['PDK'] !='gf12lp': sfh.write(" input [3:0] DBE;\n") sfh.write(" input [address_size-1:0] ADDR;\n") sfh.write(" input [word_size-1:0] DIN;\n") sfh.write(" output [word_size-1:0] DOUT;\n") sfh.write(" wire [no_banks-1:0] DATA_REQ;\n") sfh.write(" wire [word_size-1:0] DATA_SRAM_BANK_OUT [no_banks-1:0];\n") sfh.write(" %s DI (.DATA_REQ(DATA_REQ), .ADDR(ADDR[address_size-1:bank_address_size]), .DATA_REQIN(CEN));\n"%(self.decoder_name)) for j in range(0, self.no_banks): if p_options['PDK'] =='gf12lp': sfh.write(" SRAM_2KB_GF12 SR%d ( .DOUT(DATA_SRAM_BANK_OUT[%d]), .ADDR_IN(ADDR[bank_address_size-1:0]), .CLK_IN(CLK), .CE_IN(DATA_REQ[%d]), .DIN(DIN), .WE_IN(WE));\n"%(j, j, j)) else: sfh.write(" SRAM_2KB SR%d ( .DOUT(DATA_SRAM_BANK_OUT[%d]), .ADDR_IN(ADDR[bank_address_size-1:0]), .CLK_IN(CLK), .DATA_BE_IN(DBE), .DATA_REQ_IN(DATA_REQ[%d]), .DIN(DIN), .WE_IN(WE));\n"%(j, j, j)) sfh.write(" %s MI (.DATA_OUT(DOUT), .DATA_IN(DATA_SRAM_BANK_OUT), .DATA_SEL(ADDR[address_size-1:bank_address_size]));\n"%(self.mux_name)) sfh.write("endmodule\n") sfh.close() log.info("Successfully generated the verilog file %s"%self.sram_name) def DataPrep(self): log.info("Preparing the Data for running the synthesis and PNR") if not os.path.isdir(self.digitalflowsrcdir): log.info("The dir %s does not exists"%self.digitalflowsrcdir) try: log.info("Creating the directory %s"%self.digitalflowsrcdir) os.mkdir(self.digitalflowsrcdir) except OSError: log.error("Unable to create the %s directory"%self.digitalflowsrcdir) log.error("Can not proceed to synthesis and PNR. Exiting.....") sys.exit(1) try: shutil.copy(self.sram, self.digitalflowsrcdir) except IOError as e: log.error("Unable to copy the file to %s under the digital flow source directory %s"%(self.sram, self.digitalflowsrcdir)) if not os.path.exists(self.sram): log.error("%s verilog is not created. Check the run log for the errors"%self.sram) log.error("SRAM generation failed. Check the log file for more information.") sys.exit(1) try: shutil.copy(self.decoder, self.digitalflowsrcdir) except IOError as e: log.error("Unable to copy the file to %s under the digital flow source directory %s"%(self.decoder, self.digitalflowsrcdir)) if not os.path.exists(self.decoder): log.error("%s verilog is not created. 
Check the run log for the errors"%self.decoder) log.error("Decoder generation failed. Check the log file for more information.") sys.exit(1) try: shutil.copy(self.mux, self.digitalflowsrcdir) except IOError as e: log.error("Unable to copy the file to %s under the digital flow source directory %s"%(self.mux, self.digitalflowsrcdir)) if not os.path.exists(self.mux): log.error("%s verilog is not created. Check the run log for the errors"%self.mux) log.error("Multiplexer generation failed. Check the log file for more information.") sys.exit(1) log.info("Updating the include.mk file") with open(self.digitalflowdir + '/include.mk', 'r') as file: filedata = file.read() filedata = re.sub(r'export DESIGN_NAME :=.*', r'export DESIGN_NAME := ' + self.sram_name, filedata) filedata = re.sub(r'export PLATFORM *:=.*', r'export PLATFORM := ' + p_options["PDK"], filedata) with open(self.digitalflowdir + '/include.mk', 'w') as file: file.write(filedata) self.scriptsdir=self.digitalflowdir + '/scripts/' if not os.path.exists(self.scriptsdir): log.error("%s does not exist. The PDK specific are not found. Make sure the apr scripts exists"%self.scriptsdir) sys.exit(1) else: self.dcscriptsdir=self.scriptsdir+'dc' self.invscriptsdir=self.scriptsdir+'innovus' if not os.path.isdir(self.dcscriptsdir): os.makedirs(self.dcscriptsdir) if not os.path.isdir(self.invscriptsdir): os.makedirs(self.invscriptsdir) try: pdkdcdir=self.scriptsdir+'%s/dc'%p_options["PDK"] files = os.listdir(pdkdcdir) for f in files: file_name = os.path.join(pdkdcdir, f) if (os.path.isfile(file_name)): shutil.copy(file_name, self.dcscriptsdir) except IOError as e: log.error("Unable to copy the pdk specific dc compiler scripts under the directory %s"%(pdkdcdir)) if not os.path.exists(pdkdcdir): log.error("%s does not exist. Make sure the apr scripts are present in the path"%pdkdcdir) sys.exit(1) try: pdkinvdir=self.scriptsdir+'%s/innovus'%p_options["PDK"] files = os.listdir(pdkinvdir) for f in files: file_name = os.path.join(pdkinvdir, f) if (os.path.isfile(file_name)): shutil.copy(file_name, self.invscriptsdir) except IOError as e: log.error("Unable to copy the pdk specific innovus scripts under the directory %s"%(pdkinvdir)) if not os.path.exists(pdkinvdir): log.error("%s does not exist. 
Make sure the apr scripts are present in the path"%pdkinvdir) sys.exit(1) with open(self.digitalflowdir + '/scripts/dc/dc.filelist.tcl', 'r') as file: filedata = file.read() sourcefile = "./src/"+self.sram_name+'.v' filedata = re.sub(r'set SVERILOG_SOURCE_FILES ".*"', r'set SVERILOG_SOURCE_FILES "' + sourcefile + '\"', filedata) with open(self.digitalflowdir + '/scripts/dc/dc.filelist.tcl', 'w') as file: file.write(filedata) log.info('Loading platform_config file...') try: with open(self.Fasocdir+'/config/platform_config.json') as file: platformConfig = json.load(file) except ValueError as e: log.error('Error occurred opening or loading json file.') log.error('Exception: %s' % str(e)) sys.exit(1) digitalflowexprdir=os.path.join(self.digitalflowdir,'blocks/SRAM_2KB/export') SRAM_2KB_dir=os.path.join(platformConfig["platforms"][p_options["PDK"]]["aux_lib"],'SRAM_2KB/latest') if not os.path.isdir(digitalflowexprdir): os.makedirs(digitalflowexprdir) log.info("Copying the SRAM_2KB macro related files to %s dir "%digitalflowexprdir) try: files = os.listdir(SRAM_2KB_dir) for f in files: file_name = os.path.join(SRAM_2KB_dir, f) if (os.path.isfile(file_name)): shutil.copy(file_name, digitalflowexprdir) except IOError as e: log.error("Unable to copy the SRAM_2KB library under the directory %s"%(digitalflowexprdir)) if not os.path.exists(SRAM_2KB_dir): log.error("%s does not exist. Provide the correct path in the config json file"%SRAM_2KB_dir) sys.exit(1) def Synthesis(self): log.info("Running the synthesis for the design %s"%self.sram_name) os.chdir(self.digitalflowdir) p1=sp.Popen("make synth", shell=True) p1.wait() with open(self.digitalflowdir + '/reports/dc/' + self.sram_name + '.mapped.area.rpt', 'r')as file: filedata = file.read() m = re.search('Total cell area: *([0-9.]*)', filedata) if m: self.coreCellArea = float(m.group(1)) log.info("Completed the synthesis for the design %s"%self.sram_name) else: log.error('Synthesis Failed') sys.exit(1) os.chdir(rundir) def PNR(self): log.info("Running the APR for the design %s"%self.sram_name) os.chdir(self.digitalflowdir) p2 = sp.Popen("make design", shell=True) p2.wait() p3 = sp.Popen(['make','lvs'], shell=True) p3.wait() p4 = sp.Popen(['make','drc'], shell=True) p4.wait() p5 = sp.Popen("make export", shell=True) p5.wait() with open(self.digitalflowdir + '/reports/innovus/' + self.sram_name + '.main.htm.ascii', 'r') as file: filedata = file.read() m = re.search('Total area of Chip: ([0-9.]*)', filedata) if m: self.designArea = float(m.group(1)) log.info("Completed the APR for the design %s"%self.sram_name) else: log.error('APR Failed') sys.exit(1) os.chdir(rundir) def Filemanage(self): fileName= self.digitalflowdir +'/export/'+self.sram_name if p_options['Mode'] == 'verilog': try: shutil.copy(self.decoder, p_options['Opdir']) shutil.copy(self.mux, p_options['Opdir']) shutil.copy(self.sram, p_options['Opdir']) except IOError as e: log.error("Unable to copy the generated verilog files to the outputs dir, %s"%(p_options['Opdir'])) if not os.path.exists(self.decoder): log.error("SRAM decoder verilog is not created.") log.error("SRAM Macro Generation Failed. Check the log file for more information.") sys.exit(1) if not os.path.exists(self.mux): log.error("SRAM Multiplexer verilog is not created.") log.error("SRAM Macro Generation Failed. Check the log file for more information.") sys.exit(1) if not os.path.exists(self.sram): log.error("SRAM macro verilog is not created.") log.error("SRAM Macro Generation Failed. 
Check the log file for more information.") sys.exit(1) self.config['results'] = {'platform': p_options['PDK']} mem_x = 310.05 ; mem_y = 198.06 ; BLOCK_OFFSET = 10 ; nofbanks = self.no_banks ; cols = nofbanks/2 ; rows=2 ; MACROS_WIDTH = cols*(BLOCK_OFFSET+mem_x+BLOCK_OFFSET) ; MACROS_HEIGHT = rows*(BLOCK_OFFSET+mem_y+BLOCK_OFFSET) ; CORE_WIDTH = MACROS_WIDTH CORE_HEIGHT = MACROS_HEIGHT+50 ; mem_area = CORE_WIDTH*CORE_HEIGHT mem_ar = CORE_WIDTH/CORE_HEIGHT mem_power = nofbanks*100e-6*1.2 ; self.config['results'].update({'area': mem_area}) self.config['results'].update({'AspectRatio': mem_ar}) self.config['results'].update({'Power': mem_power}) self.config['results'].update({'word_size': self.word_size }) self.config['results'].update({'nowords': self.nowords}) with open(p_options['Opdir'] + '/' + self.sram_name + '.json', 'w') as resultSpecfile: json.dump(self.config, resultSpecfile, indent=True) elif p_options['Mode'] == 'macro': fileName= self.digitalflowdir +'/export/'+self.sram_name try: vf = fileName+'.lvs.v' shutil.copy(vf, p_options['Opdir']) except IOError as e: log.error("Unable to copy the file %s to the outputs dir, %s"%(vf, p_options['Opdir'])) if not os.path.exists(vf): log.error("%s verilog is not created. Check if the APR is successful and the verilog file is generated. Check the run log"%vf) log.error("SRAM Macro Generation Failed. Check the log file for more information.") sys.exit(1) try: gds = fileName+'.gds.gz' shutil.copy(gds, p_options['Opdir']) except IOError as e: log.error("Unable to copy the file %s to the outputs dir, %s"%(gds, p_options['Opdir'])) if not os.path.exists(gds): log.error("%s GDS is not created. Check if the APR is successful and the GDS is generated. Check the run log"%gds) log.error("SRAM Macro Generation Failed. Check the log file for more information.") sys.exit(1) try: lef = fileName+'.lef' shutil.copy(lef, p_options['Opdir']) except IOError as e: log.error("Unable to copy the file %s to the outputs dir, %s"%(lef, p_options['Opdir'])) if not os.path.exists(lef): log.error("%s LEF is not created. Check if the APR is successful and the GDS is generated. Check the run log"%lef) log.error("SRAM Macro Generation Failed. Check the log file for more information.") sys.exit(1) try: lib = fileName+'_typ.lib' shutil.copy(lib, p_options['Opdir']) except IOError as e: log.error("Unable to copy the file %s to the outputs dir, %s"%(lib, p_options['Opdir'])) if not os.path.exists(lib): log.error("%s Lib is not created. Check if the APR is successful and the GDS is generated. Check the run log"%lib) log.error("SRAM Macro Generation Failed. Check the log file for more information.") sys.exit(1) try: db = fileName+'_typ.db' shutil.copy(db, p_options['Opdir']) except IOError as e: log.error("Unable to copy the file %s to the outputs dir, %s"%(db, p_options['Opdir'])) if not os.path.exists(db): log.error("%s DB is not created. Check if the APR is successful and the GDS is generated. Check the run log"%db) log.error("SRAM Macro Generation Failed. Check the log file for more information.") sys.exit(1) ''' #CDL #Commented out for now as CADRE is not enabled for calibre DRC and LVS. CDL will generated as part of LVS process. try: cdl = fileName+'.spi' shutil.copy(cdl, p_options['Opdir']) except IOError as e: log.error("Unable to copy the file %s to the outputs dir, %s"%(cdl, p_options['Opdir'])) if not os.path.exists(cdl): log.error("%s Spice netlist is not created. Check if the APR is successful and the GDS is generated. 
Check the run log"%cdl) log.error("SRAM Macro Generation Failed. Check the log file for more information.") sys.exit(1) ''' self.config['results'] = {'platform': p_options['PDK']} mem_x = 310.05 ; mem_y = 198.06 ; BLOCK_OFFSET = 10 ; nofbanks = self.no_banks ; cols = nofbanks/2 ; rows=2 ; MACROS_WIDTH = cols*(BLOCK_OFFSET+mem_x+BLOCK_OFFSET) ; MACROS_HEIGHT = rows*(BLOCK_OFFSET+mem_y+BLOCK_OFFSET) ; CORE_WIDTH = MACROS_WIDTH CORE_HEIGHT = MACROS_HEIGHT+50 ; mem_area = CORE_WIDTH*CORE_HEIGHT mem_ar = CORE_WIDTH/CORE_HEIGHT mem_power = nofbanks*100e-6*1.2 ; self.config['results'].update({'area': mem_area}) self.config['results'].update({'AspectRatio': mem_ar}) self.config['results'].update({'Power': mem_power}) self.config['results'].update({'AspectRatio': mem_ar}) self.config['results'].update({'Power': mem_power}) self.config['results'].update({'word_size': self.word_size }) self.config['results'].update({'nowords': self.nowords}) with open(p_options['Opdir'] + '/' + self.sram_name + '.json', 'w') as resultSpecfile: json.dump(self.config, resultSpecfile, indent=True)
MIT License
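A sketch of a memory-spec config file matching the keys read in ConfigParser above (module_name, specifications.nowords, specifications.word_size); the name and sizes are placeholders chosen so the 2 KB bank check in SRAMConfig passes (16384 words x 32 bits = 32 banks, a power of two, i.e. a 64 KB SRAM):

import json

spec = {
    "module_name": "sram_64kb",   # placeholder module name
    "specifications": {"nowords": 16384, "word_size": 32},
}
with open("mem_spec.json", "w") as fh:
    json.dump(spec, fh, indent=2)
# This file would then be passed to MemGen via its config option
# (the OptionParser setup that fills p_options is not shown in this excerpt).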
cortex-lab/phy
phy/cluster/supervisor.py
TaskLogger._eval
python
def _eval(self, task):
    sender, name, args, kwargs = task
    logger.log(5, "Calling %s.%s(%s)", sender.__class__.__name__, name, args, kwargs)
    f = getattr(sender, name)
    callback = partial(self._callback, task)
    argspec = inspect.getfullargspec(f)
    argspec = argspec.args + argspec.kwonlyargs
    if 'callback' in argspec:
        f(*args, **kwargs, callback=callback)
    else:
        def _cluster_callback(tsender, up):
            self._callback(task, up)
        connect(_cluster_callback, event='cluster', sender=self.supervisor)
        f(*args, **kwargs)
        unconnect(_cluster_callback)
Evaluate a task and call a callback function.
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/phy/cluster/supervisor.py#L101-L117
from functools import partial import inspect import logging import numpy as np from ._history import GlobalHistory from ._utils import create_cluster_meta from .clustering import Clustering from phylib.utils import Bunch, emit, connect, unconnect from phy.gui.actions import Actions from phy.gui.qt import _block, set_busy, _wait from phy.gui.widgets import Table, HTMLWidget, _uniq, Barrier logger = logging.getLogger(__name__) def _process_ups(ups): if len(ups) == 0: return elif len(ups) == 1: return ups[0] elif len(ups) == 2: up = ups[0] up.update(ups[1]) return up else: raise NotImplementedError() def _ensure_all_ints(l): if (l is None or l == []): return for i in range(len(l)): l[i] = int(l[i]) class TaskLogger(object): auto_select_after_action = False def __init__(self, cluster_view=None, similarity_view=None, supervisor=None): self.cluster_view = cluster_view self.similarity_view = similarity_view self.supervisor = supervisor self._processing = False self._history = [] self._queue = [] def enqueue(self, sender, name, *args, output=None, **kwargs): logger.log( 5, "Enqueue %s %s %s %s (%s)", sender.__class__.__name__, name, args, kwargs, output) self._queue.append((sender, name, args, kwargs)) def dequeue(self): return self._queue.pop(0) if self._queue else None def _callback(self, task, output): self._log(task, output) self.enqueue_after(task, output) self.process()
BSD 3-Clause New or Revised License
raamana/pyradigm
pyradigm/pyradigm.py
MLDataset.__iter__
python
def __iter__(self):
    for subject, data in self.data.items():
        yield subject, data
Iterator over samples
https://github.com/raamana/pyradigm/blob/7c40b228fcd3b447c738852ef9c870a8d679da98/pyradigm/pyradigm.py#L991-L995
__all__ = ['MLDataset', 'cli_run', 'check_compatibility'] import argparse import copy import logging import numpy as np import os import pickle import random import sys import traceback from warnings import warn, catch_warnings, filterwarnings, simplefilter from collections.abc import Sequence from collections import Counter, OrderedDict from itertools import islice from os.path import basename, dirname, exists as pexists, isfile, join as pjoin, realpath class MLDataset(object): def __init__(self, filepath=None, in_dataset=None, arff_path=None, data=None, labels=None, classes=None, description='', feature_names=None, encode_nonnumeric=False): if filepath is not None: if isfile(realpath(filepath)): self.__load(filepath) else: raise IOError('Specified file could not be read.') elif arff_path is not None: arff_path = realpath(arff_path) if isfile(arff_path): self.__load_arff(arff_path, encode_nonnumeric) else: raise IOError('Given ARFF can not be found!') elif in_dataset is not None: if not isinstance(in_dataset, MLDataset): raise ValueError('Invalid class input: MLDataset expected!') if in_dataset.num_samples <= 0: raise ValueError('Dataset to copy is empty.') self.__copy(in_dataset) elif data is None and labels is None and classes is None: self.__data = OrderedDict() self.__labels = OrderedDict() self.__classes = OrderedDict() self.__num_features = 0 self.__dtype = None self.__description = '' self.__feature_names = None elif data is not None and labels is not None and classes is not None: self.__validate(data, labels, classes) self.__data = OrderedDict(data) self.__labels = OrderedDict(labels) self.__classes = OrderedDict(classes) self.__description = description sample_ids = list(data) features0 = data[sample_ids[0]] self.__num_features = features0.size if isinstance(features0, np.ndarray) else len( features0) self.__dtype = type(data[sample_ids[0]]) if feature_names is None: self.__feature_names = self.__str_names(self.num_features) else: self.__feature_names = feature_names else: raise ValueError('Incorrect way to construct the dataset.') @property def data(self): return self.__data def data_and_labels(self): sample_ids = np.array(self.keys) label_dict = self.labels matrix = np.full([self.num_samples, self.num_features], np.nan) labels = np.full([self.num_samples, 1], np.nan) for ix, sample in enumerate(sample_ids): matrix[ix, :] = self.__data[sample] labels[ix] = label_dict[sample] return matrix, np.ravel(labels), sample_ids @data.setter def data(self, values, feature_names=None): if isinstance(values, dict): if self.__labels is not None and len(self.__labels) != len(values): raise ValueError( 'number of samples do not match the previously assigned labels') elif len(values) < 1: raise ValueError('There must be at least 1 sample in the dataset!') else: self.__data = values self.__num_features = len(values[self.keys[0]]) if feature_names is None: self.__feature_names = self.__str_names(self.num_features) else: self.feature_names = feature_names else: raise ValueError('data input must be a dictionary!') @property def labels(self): return self.__labels @labels.setter def labels(self, values): if isinstance(values, dict): if self.__data is not None and len(self.__data) != len(values): raise ValueError( 'number of samples do not match the previously assigned data') elif set(self.keys) != set(list(values)): raise ValueError('sample ids do not match the previously assigned ids.') else: self.__labels = values else: raise ValueError('labels input must be a dictionary!') @property def 
classes(self): return self.__classes @classes.setter def classes(self, values): if isinstance(values, dict): if self.__data is not None and len(self.__data) != len(values): raise ValueError( 'number of samples do not match the previously assigned data') elif set(self.keys) != set(list(values)): raise ValueError('sample ids do not match the previously assigned ids.') else: self.__classes = values else: raise ValueError('classes input must be a dictionary!') @property def feature_names(self): return self.__feature_names @feature_names.setter def feature_names(self, names): if len(names) != self.num_features: raise ValueError("Number of names do not match the number of features!") if not isinstance(names, (Sequence, np.ndarray, np.generic)): raise ValueError("Input is not a sequence. " "Ensure names are in the same order " "and length as features.") self.__feature_names = np.array(names) @property def class_sizes(self): return Counter(self.classes.values()) @staticmethod def __take(nitems, iterable): return dict(islice(iterable, int(nitems))) @staticmethod def __str_names(num): return np.array(['f{}'.format(x) for x in range(num)]) def glance(self, nitems=5): nitems = max([1, min([nitems, self.num_samples - 1])]) return self.__take(nitems, iter(self.__data.items())) def summarize_classes(self): class_sizes = np.zeros(len(self.class_set)) for idx, cls in enumerate(self.class_set): class_sizes[idx] = self.class_sizes[cls] return self.class_set, self.label_set, class_sizes @classmethod def check_features(self, features): if not isinstance(features, np.ndarray): features = np.asarray(features) if features.size <= 0: raise ValueError('provided features are empty.') if features.ndim > 1: features = np.ravel(features) return features def add_sample(self, sample_id, features, label, class_id=None, overwrite=False, feature_names=None): if sample_id in self.__data and not overwrite: raise ValueError('{} already exists in this dataset!'.format(sample_id)) if class_id is None: class_id = str(label) features = self.check_features(features) if self.num_samples <= 0: self.__data[sample_id] = features self.__labels[sample_id] = label self.__classes[sample_id] = class_id self.__dtype = type(features) self.__num_features = features.size if isinstance(features, np.ndarray) else len( features) if feature_names is None: self.__feature_names = self.__str_names(self.num_features) else: if self.__num_features != features.size: raise ValueError('dimensionality of this sample ({}) ' 'does not match existing samples ({})' ''.format(features.size, self.__num_features)) if not isinstance(features, self.__dtype): raise TypeError("Mismatched dtype. Provide {}".format(self.__dtype)) self.__data[sample_id] = features self.__labels[sample_id] = label self.__classes[sample_id] = class_id if feature_names is not None: if self.__feature_names is None: self.__feature_names = np.array(feature_names) else: if not np.array_equal(self.feature_names, np.array(feature_names)): raise ValueError( "supplied feature names do not match the existing names!") def del_sample(self, sample_id): if sample_id not in self.__data: warn('Sample to delete not found in the dataset - nothing to do.') else: self.__data.pop(sample_id) self.__classes.pop(sample_id) self.__labels.pop(sample_id) print('{} removed.'.format(sample_id)) def get_feature_subset(self, subset_idx): subset_idx = np.asarray(subset_idx) if not (max(subset_idx) < self.__num_features) and (min(subset_idx) >= 0): raise UnboundLocalError('indices out of range for the dataset. 
' 'Max index: {} Min index : 0'.format( self.__num_features)) sub_data = {sample: features[subset_idx] for sample, features in self.__data.items()} new_descr = 'Subset features derived from: \n ' + self.__description subdataset = MLDataset(data=sub_data, labels=self.__labels, classes=self.__classes, description=new_descr, feature_names=self.__feature_names[subset_idx]) return subdataset @staticmethod def keys_with_value(dictionary, value): subset = [key for key in dictionary if dictionary[key] == value] return subset def get_class(self, class_id): if class_id in [None, '']: raise ValueError("class id can not be empty or None.") if isinstance(class_id, str): class_ids = [class_id, ] else: class_ids = class_id non_existent = set(self.class_set).intersection(set(class_ids)) if len(non_existent) < 1: raise ValueError( 'These classes {} do not exist in this dataset.'.format(non_existent)) subsets = list() for class_id in class_ids: subsets_this_class = self.keys_with_value(self.__classes, class_id) subsets.extend(subsets_this_class) return self.get_subset(subsets) def transform(self, func, func_description=None): if not callable(func): raise TypeError('Given function {} is not a callable'.format(func)) xfm_ds = MLDataset() for sample, data in self.__data.items(): try: xfm_data = func(data) except: print('Unable to transform features for {}. Quitting.'.format(sample)) raise xfm_ds.add_sample(sample, xfm_data, label=self.__labels[sample], class_id=self.__classes[sample]) xfm_ds.description = "{}\n{}".format(func_description, self.__description) return xfm_ds def train_test_split_ids(self, train_perc=None, count_per_class=None): _ignore1, _ignore2, class_sizes = self.summarize_classes() smallest_class_size = np.min(class_sizes) if count_per_class is None and (0.0 < train_perc < 1.0): if train_perc < 1.0 / smallest_class_size: raise ValueError('Training percentage selected too low ' 'to return even one sample from the smallest class!') train_set = self.random_subset_ids(perc_per_class=train_perc) elif train_perc is None and count_per_class > 0: if count_per_class >= smallest_class_size: raise ValueError( 'Selections would exclude the smallest class from test set. ' 'Reduce sample count per class for the training set!') train_set = self.random_subset_ids_by_count(count_per_class=count_per_class) else: raise ValueError('Invalid or out of range selection: ' 'only one of count or percentage can be used to select subset.') test_set = list(set(self.keys) - set(train_set)) if len(train_set) < 1 or len(test_set) < 1: raise ValueError( 'Selection resulted in empty training or test set - check your selections or dataset!') return train_set, test_set def random_subset_ids_by_count(self, count_per_class=1): class_sizes = self.class_sizes subsets = list() if count_per_class < 1: warn('Atleast one sample must be selected from each class') return list() elif count_per_class >= self.num_samples: warn('All samples requested - returning a copy!') return self.keys for class_id, class_size in class_sizes.items(): this_class = self.keys_with_value(self.classes, class_id) random.shuffle(this_class) subset_size_this_class = max(0, min(class_size, count_per_class)) if subset_size_this_class < 1 or this_class is None: warn('No subjects from class {} were selected.'.format(class_id)) else: subsets_this_class = this_class[0:count_per_class] subsets.extend(subsets_this_class) if len(subsets) > 0: return subsets else: warn('Zero samples were selected. 
Returning an empty list!') return list() def random_subset_ids(self, perc_per_class=0.5): class_sizes = self.class_sizes subsets = list() if perc_per_class <= 0.0: warn('Zero percentage requested - returning an empty dataset!') return list() elif perc_per_class >= 1.0: warn('Full or a larger dataset requested - returning a copy!') return self.keys for class_id, class_size in class_sizes.items(): this_class = self.keys_with_value(self.classes, class_id) random.shuffle(this_class) subset_size_this_class = np.int64(np.floor(class_size * perc_per_class)) subset_size_this_class = max(1, min(class_size, subset_size_this_class)) if subset_size_this_class < 1 or len(this_class) < 1 or this_class is None: raise ValueError( 'No subjects from class {} were selected.'.format(class_id)) else: subsets_this_class = this_class[0:subset_size_this_class] subsets.extend(subsets_this_class) if len(subsets) > 0: return subsets else: warn('Zero samples were selected. Returning an empty list!') return list() def random_subset(self, perc_in_class=0.5): subsets = self.random_subset_ids(perc_in_class) if len(subsets) > 0: return self.get_subset(subsets) else: warn('Zero samples were selected. Returning an empty dataset!') return MLDataset() def sample_ids_in_class(self, class_id): subset_ids = self.keys_with_value(self.classes, class_id) return subset_ids def get_subset(self, subset_ids): num_existing_keys = sum([1 for key in subset_ids if key in self.__data]) if subset_ids is not None and num_existing_keys > 0: data = self.__get_subset_from_dict(self.__data, subset_ids) labels = self.__get_subset_from_dict(self.__labels, subset_ids) if self.__classes is not None: classes = self.__get_subset_from_dict(self.__classes, subset_ids) else: classes = None subdataset = MLDataset(data=data, labels=labels, classes=classes) subdataset.description += '\n Subset derived from: ' + self.description subdataset.feature_names = self.__feature_names subdataset.__dtype = self.dtype return subdataset else: warn('subset of IDs requested do not exist in the dataset!') return MLDataset() def get_data_matrix_in_order(self, subset_ids): if len(subset_ids) < 1: warn('subset must have atleast one ID - returning empty matrix!') return np.empty((0, 0)) if isinstance(subset_ids, set): raise TypeError('Input set is not ordered, hence can not guarantee order! ' 'Must provide a list or tuple.') if isinstance(subset_ids, str): subset_ids = [subset_ids, ] num_existing_keys = sum([1 for key in subset_ids if key in self.__data]) if num_existing_keys < len(subset_ids): raise ValueError('One or more IDs from subset do not exist in the dataset!') matrix = np.full((num_existing_keys, self.num_features), np.nan) for idx, sid in enumerate(subset_ids): matrix[idx, :] = self.__data[sid] return matrix def __contains__(self, item): if item in self.keys: return True else: return False def get(self, item, not_found_value=None): if item in self.keys: return self.__data[item] else: return not_found_value def __getitem__(self, item): if item in self.keys: return self.__data[item] else: raise KeyError('{} not found in dataset.'.format(item)) def __setitem__(self, item, features): if item in self.__data: features = self.check_features(features) if self.__num_features != features.size: raise ValueError('dimensionality of supplied features ({}) ' 'does not match existing samples ({})' ''.format(features.size, self.__num_features)) self.__data[item] = features else: raise KeyError('{} not found in dataset.' ' Can not replace features of a non-existing sample.' 
' Add it first via .add_sample()'.format(item))
MIT License
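The MLDataset class excerpted above is built around an add_sample / get_subset style API. A minimal usage sketch follows; the import path, sample ids, feature vectors and class names are all assumptions made for illustration, not values taken from this entry.

import numpy as np
# Hypothetical import path - adjust to wherever MLDataset is actually defined
# (e.g. "from pyradigm import MLDataset" if this is the pyradigm package).

ds = MLDataset()                               # start from an empty dataset
rng = np.random.default_rng(0)
for i in range(10):
    # made-up ids, 5-dimensional features and two classes
    ds.add_sample('subj{}'.format(i), rng.standard_normal(5),
                  label=i % 2, class_id='case' if i % 2 else 'ctrl')

train_ids, test_ids = ds.train_test_split_ids(train_perc=0.6)
train_ds = ds.get_subset(train_ids)            # a new MLDataset with only those ids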
openstack/manila
manila/share/drivers/generic.py
GenericShareDriver.get_network_allocations_number
python
def get_network_allocations_number(self): return 0
Get number of network interfaces to be created.
https://github.com/openstack/manila/blob/34d209484366cd921e052d37c5f9daef5e97af20/manila/share/drivers/generic.py#L888-L893
import os import time from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import units import six from manila.common import constants as const from manila import compute from manila import context from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers import service_instance from manila import utils from manila import volume LOG = log.getLogger(__name__) share_opts = [ cfg.StrOpt('smb_template_config_path', default='$state_path/smb.conf', help="Path to smb config."), cfg.StrOpt('volume_name_template', default='manila-share-%s', help="Volume name template."), cfg.StrOpt('volume_snapshot_name_template', default='manila-snapshot-%s', help="Volume snapshot name template."), cfg.StrOpt('share_mount_path', default='/shares', help="Parent path in service instance where shares " "will be mounted."), cfg.IntOpt('max_time_to_create_volume', default=180, help="Maximum time to wait for creating cinder volume."), cfg.IntOpt('max_time_to_extend_volume', default=180, help="Maximum time to wait for extending cinder volume."), cfg.IntOpt('max_time_to_attach', default=120, help="Maximum time to wait for attaching cinder volume."), cfg.StrOpt('service_instance_smb_config_path', default='$share_mount_path/smb.conf', help="Path to SMB config in service instance."), cfg.ListOpt('share_helpers', default=[ 'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess', 'NFS=manila.share.drivers.helpers.NFSHelper', ], help='Specify list of share export helpers.'), cfg.StrOpt('share_volume_fstype', default='ext4', choices=['ext4', 'ext3'], help='Filesystem type of the share volume.'), cfg.StrOpt('cinder_volume_type', help='Name or id of cinder volume type which will be used ' 'for all volumes created by driver.'), ] CONF = cfg.CONF CONF.register_opts(share_opts) BLOCK_DEVICE_SIZE_INDEX = 1 USED_SPACE_INDEX = 2 def ensure_server(f): def wrap(self, context, *args, **kwargs): server = kwargs.get('share_server') if not self.driver_handles_share_servers: if not server: server = self.service_instance_manager.get_common_server() kwargs['share_server'] = server else: raise exception.ManilaException( _("Share server handling is not available. " "But 'share_server' was provided. '%s'. " "Share network should not be used.") % server.get('id')) elif not server: raise exception.ManilaException( _("Share server handling is enabled. But 'share_server' " "is not provided. 
Make sure you used 'share_network'.")) if not server.get('backend_details'): raise exception.ManilaException( _("Share server '%s' does not have backend details.") % server['id']) if not self.service_instance_manager.ensure_service_instance( context, server['backend_details']): raise exception.ServiceInstanceUnavailable() return f(self, context, *args, **kwargs) return wrap class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver): def __init__(self, *args, **kwargs): super(GenericShareDriver, self).__init__( [False, True], *args, **kwargs) self.admin_context = context.get_admin_context() self.configuration.append_config_values(share_opts) self._helpers = {} self.backend_name = self.configuration.safe_get( 'share_backend_name') or "Cinder_Volumes" self.ssh_connections = {} self._setup_service_instance_manager() self.private_storage = kwargs.get('private_storage') def _setup_service_instance_manager(self): self.service_instance_manager = ( service_instance.ServiceInstanceManager( driver_config=self.configuration)) def _ssh_exec(self, server, command, check_exit_code=True): LOG.debug("_ssh_exec - server: %s, command: %s, check_exit_code: %s", server, command, check_exit_code) connection = self.ssh_connections.get(server['instance_id']) ssh_conn_timeout = self.configuration.ssh_conn_timeout if not connection: ssh_pool = utils.SSHPool(server['ip'], 22, ssh_conn_timeout, server['username'], server.get('password'), server.get('pk_path'), max_size=1) ssh = ssh_pool.create() self.ssh_connections[server['instance_id']] = (ssh_pool, ssh) else: ssh_pool, ssh = connection if not ssh.get_transport().is_active(): ssh_pool.remove(ssh) ssh = ssh_pool.create() self.ssh_connections[server['instance_id']] = (ssh_pool, ssh) wrap = lambda token: "\"" + token + "\"" command = [wrap(tkn) if tkn.count(' ') else tkn for tkn in command] return processutils.ssh_execute(ssh, ' '.join(command), check_exit_code=check_exit_code) def check_for_setup_error(self): def do_setup(self, context): super(GenericShareDriver, self).do_setup(context) self.compute_api = compute.API() self.volume_api = volume.API() self._setup_helpers() common_sv_available = False share_server = None sv_fetch_retry_interval = 5 while not (common_sv_available or self.driver_handles_share_servers): try: share_server = ( self.service_instance_manager.get_common_server()) common_sv_available = self._is_share_server_active( context, share_server) except Exception as ex: LOG.error(ex) if not common_sv_available: time.sleep(sv_fetch_retry_interval) LOG.warning("Waiting for the common service VM to become " "available. " "Driver is currently uninitialized. " "Share server: %(share_server)s " "Retry interval: %(retry_interval)s", dict(share_server=share_server, retry_interval=sv_fetch_retry_interval)) def _setup_helpers(self): helpers = self.configuration.share_helpers if helpers: for helper_str in helpers: share_proto, __, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper( self._execute, self._ssh_exec, self.configuration) else: raise exception.ManilaException( "No protocol helpers selected for Generic Driver. 
" "Please specify using config option 'share_helpers'.") @ensure_server def create_share(self, context, share, share_server=None): return self._create_share( context, share, snapshot=None, share_server=share_server, ) def _create_share(self, context, share, snapshot, share_server=None): helper = self._get_helper(share) server_details = share_server['backend_details'] volume = self._allocate_container( self.admin_context, share, snapshot=snapshot) volume = self._attach_volume( self.admin_context, share, server_details['instance_id'], volume) if not snapshot: self._format_device(server_details, volume) self._mount_device(share, server_details, volume) export_locations = helper.create_exports( server_details, share['name']) return export_locations @utils.retry(retry_param=exception.ProcessExecutionError, backoff_rate=1) def _is_device_file_available(self, server_details, volume): command = ['sudo', 'test', '-b', volume['mountpoint']] self._ssh_exec(server_details, command) def _format_device(self, server_details, volume): self._is_device_file_available(server_details, volume) command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype, volume['mountpoint']] self._ssh_exec(server_details, command) def _is_device_mounted(self, mount_path, server_details, volume=None): log_data = { 'mount_path': mount_path, 'server_id': server_details['instance_id'], } if volume and volume.get('mountpoint', ''): log_data['volume_id'] = volume['id'] log_data['dev_mount_path'] = volume['mountpoint'] msg = ("Checking whether volume '%(volume_id)s' with mountpoint " "'%(dev_mount_path)s' is mounted on mount path '%(mount_p" "ath)s' on server '%(server_id)s' or not." % log_data) else: msg = ("Checking whether mount path '%(mount_path)s' exists on " "server '%(server_id)s' or not." 
% log_data) LOG.debug(msg) mounts_list_cmd = ['sudo', 'mount'] output, __ = self._ssh_exec(server_details, mounts_list_cmd) mounts = output.split('\n') for mount in mounts: mount_elements = mount.split(' ') if (len(mount_elements) > 2 and mount_path == mount_elements[2]): if volume: if (volume.get('mountpoint', '') == mount_elements[0]): return True else: return True return False def _add_mount_permanently(self, share_id, server_details): try: self._ssh_exec( server_details, ['grep', share_id, const.MOUNT_FILE_TEMP, '|', 'sudo', 'tee', '-a', const.MOUNT_FILE], ) except exception.ProcessExecutionError as e: LOG.error("Failed to add 'Share-%(share_id)s' mount " "permanently on server '%(instance_id)s'.", {"share_id": share_id, "instance_id": server_details['instance_id']}) raise exception.ShareBackendException(msg=six.text_type(e)) try: self._ssh_exec(server_details, ['sudo', 'mount', '-a']) except exception.ProcessExecutionError: LOG.error("Failed to mount all shares on server '%s'.", server_details['instance_id']) def _remove_mount_permanently(self, share_id, server_details): try: self._ssh_exec( server_details, ['sudo', 'sed', '-i', '\'/%s/d\'' % share_id, const.MOUNT_FILE], ) except exception.ProcessExecutionError as e: LOG.error("Failed to remove 'Share-%(share_id)s' mount " "permanently on server '%(instance_id)s'.", {"share_id": share_id, "instance_id": server_details['instance_id']}) raise exception.ShareBackendException(msg=six.text_type(e)) def _mount_device(self, share, server_details, volume): @utils.synchronized('generic_driver_mounts_' '%s' % server_details['instance_id']) def _mount_device_with_lock(): mount_path = self._get_mount_path(share) device_path = volume['mountpoint'] log_data = { 'dev': device_path, 'path': mount_path, 'server': server_details['instance_id'], } try: if not self._is_device_mounted(mount_path, server_details, volume): LOG.debug("Mounting '%(dev)s' to path '%(path)s' on " "server '%(server)s'.", log_data) mount_cmd = ( 'sudo', 'mkdir', '-p', mount_path, '&&', 'sudo', 'mount', device_path, mount_path, '&&', 'sudo', 'chmod', '777', mount_path, '&&', 'sudo', 'umount', mount_path, '&&', 'sudo', 'e2fsck', '-y', '-f', device_path, '&&', 'sudo', 'tune2fs', '-U', 'random', device_path, '&&', 'sudo', 'mount', device_path, mount_path, ) self._ssh_exec(server_details, mount_cmd) self._add_mount_permanently(share.id, server_details) else: LOG.warning("Mount point '%(path)s' already exists on " "server '%(server)s'.", log_data) except exception.ProcessExecutionError as e: raise exception.ShareBackendException(msg=six.text_type(e)) return _mount_device_with_lock() @utils.retry(retry_param=exception.ProcessExecutionError) def _unmount_device(self, share, server_details): @utils.synchronized('generic_driver_mounts_' '%s' % server_details['instance_id']) def _unmount_device_with_lock(): mount_path = self._get_mount_path(share) log_data = { 'path': mount_path, 'server': server_details['instance_id'], } if self._is_device_mounted(mount_path, server_details): LOG.debug("Unmounting path '%(path)s' on server " "'%(server)s'.", log_data) unmount_cmd = ['sudo', 'umount', mount_path, '&&', 'sudo', 'rmdir', mount_path] self._ssh_exec(server_details, unmount_cmd) self._remove_mount_permanently(share.id, server_details) else: LOG.warning("Mount point '%(path)s' does not exist on " "server '%(server)s'.", log_data) return _unmount_device_with_lock() def _get_mount_path(self, share): return os.path.join(self.configuration.share_mount_path, share['name']) def _attach_volume(self, 
context, share, instance_id, volume): @utils.synchronized( "generic_driver_attach_detach_%s" % instance_id, external=True) def do_attach(volume): if volume['status'] == 'in-use': attached_volumes = [vol.id for vol in self.compute_api.instance_volumes_list( self.admin_context, instance_id)] if volume['id'] in attached_volumes: return volume else: raise exception.ManilaException( _('Volume %s is already attached to another instance') % volume['id']) @utils.retry(retries=3, interval=2, backoff_rate=1) def attach_volume(): self.compute_api.instance_volume_attach( self.admin_context, instance_id, volume['id']) attach_volume() t = time.time() while time.time() - t < self.configuration.max_time_to_attach: volume = self.volume_api.get(context, volume['id']) if volume['status'] == 'in-use': return volume elif volume['status'] not in ('attaching', 'reserved'): raise exception.ManilaException( _('Failed to attach volume %s') % volume['id']) time.sleep(1) else: err_msg = { 'volume_id': volume['id'], 'max_time': self.configuration.max_time_to_attach } raise exception.ManilaException( _('Volume %(volume_id)s has not been attached in ' '%(max_time)ss. Giving up.') % err_msg) return do_attach(volume) def _get_volume_name(self, share_id): return self.configuration.volume_name_template % share_id def _get_volume(self, context, share_id): volume_id = self.private_storage.get(share_id, 'volume_id') if volume_id is not None: return self.volume_api.get(context, volume_id) else: return self._get_volume_legacy(context, share_id) def _get_volume_legacy(self, context, share_id): volume_name = self._get_volume_name(share_id) search_opts = {'name': volume_name} if context.is_admin: search_opts['all_tenants'] = True volumes_list = self.volume_api.get_all(context, search_opts) if len(volumes_list) == 1: return volumes_list[0] elif len(volumes_list) > 1: LOG.error( "Expected only one volume in volume list with name " "'%(name)s', but got more than one in a result - " "'%(result)s'.", { 'name': volume_name, 'result': volumes_list}) raise exception.ManilaException( _("Error. Ambiguous volumes for name '%s'") % volume_name) return None def _get_volume_snapshot(self, context, snapshot_id): volume_snapshot_id = self.private_storage.get( snapshot_id, 'volume_snapshot_id') if volume_snapshot_id is not None: return self.volume_api.get_snapshot(context, volume_snapshot_id) else: return self._get_volume_snapshot_legacy(context, snapshot_id) def _get_volume_snapshot_legacy(self, context, snapshot_id): volume_snapshot_name = ( self.configuration.volume_snapshot_name_template % snapshot_id) volume_snapshot_list = self.volume_api.get_all_snapshots( context, {'name': volume_snapshot_name}) volume_snapshot = None if len(volume_snapshot_list) == 1: volume_snapshot = volume_snapshot_list[0] elif len(volume_snapshot_list) > 1: LOG.error( "Expected only one volume snapshot in list with name " "'%(name)s', but got more than one in a result - " "'%(result)s'.", { 'name': volume_snapshot_name, 'result': volume_snapshot_list}) raise exception.ManilaException( _('Error. 
Ambiguous volume snaphots')) return volume_snapshot def _detach_volume(self, context, share, server_details): instance_id = server_details['instance_id'] @utils.synchronized( "generic_driver_attach_detach_%s" % instance_id, external=True) def do_detach(): attached_volumes = [vol.id for vol in self.compute_api.instance_volumes_list( self.admin_context, instance_id)] try: volume = self._get_volume(context, share['id']) except exception.VolumeNotFound: LOG.warning("Volume not found for share %s. " "Possibly already deleted.", share['id']) volume = None if volume and volume['id'] in attached_volumes: self.compute_api.instance_volume_detach( self.admin_context, instance_id, volume['id'] ) t = time.time() while time.time() - t < self.configuration.max_time_to_attach: volume = self.volume_api.get(context, volume['id']) if volume['status'] in (const.STATUS_AVAILABLE, const.STATUS_ERROR): break time.sleep(1) else: err_msg = { 'volume_id': volume['id'], 'max_time': self.configuration.max_time_to_attach } raise exception.ManilaException( _('Volume %(volume_id)s has not been detached in ' '%(max_time)ss. Giving up.') % err_msg) do_detach() def _allocate_container(self, context, share, snapshot=None): volume_snapshot = None if snapshot: volume_snapshot = self._get_volume_snapshot(context, snapshot['id']) volume = self.volume_api.create( context, share['size'], self.configuration.volume_name_template % share['id'], '', snapshot=volume_snapshot, volume_type=self.configuration.cinder_volume_type, availability_zone=share['availability_zone']) self.private_storage.update( share['id'], {'volume_id': volume['id']}) msg_error = _('Failed to create volume') msg_timeout = ( _('Volume has not been created in %ss. Giving up') % self.configuration.max_time_to_create_volume ) return self._wait_for_available_volume( volume, self.configuration.max_time_to_create_volume, msg_error=msg_error, msg_timeout=msg_timeout ) def _wait_for_available_volume(self, volume, timeout, msg_error, msg_timeout, expected_size=None): t = time.time() while time.time() - t < timeout: if volume['status'] == const.STATUS_AVAILABLE: if expected_size and volume['size'] != expected_size: LOG.debug("The volume %(vol_id)s is available but the " "volume size does not match the expected size. " "A volume resize operation may be pending. " "Expected size: %(expected_size)s, " "Actual size: %(volume_size)s.", dict(vol_id=volume['id'], expected_size=expected_size, volume_size=volume['size'])) else: break elif 'error' in volume['status'].lower(): raise exception.ManilaException(msg_error) time.sleep(1) volume = self.volume_api.get(self.admin_context, volume['id']) else: raise exception.ManilaException(msg_timeout) return volume def _deallocate_container(self, context, share): try: volume = self._get_volume(context, share['id']) except exception.VolumeNotFound: LOG.info("Volume not found. Already deleted?") volume = None if volume: if volume['status'] == 'in-use': raise exception.ManilaException( _('Volume is still in use and ' 'cannot be deleted now.')) self.volume_api.delete(context, volume['id']) t = time.time() while (time.time() - t < self.configuration.max_time_to_create_volume): try: volume = self.volume_api.get(context, volume['id']) except exception.VolumeNotFound: LOG.debug('Volume was deleted successfully') break time.sleep(1) else: raise exception.ManilaException( _('Volume have not been ' 'deleted in %ss. 
Giving up') % self.configuration.max_time_to_create_volume) def _update_share_stats(self): data = dict( share_backend_name=self.backend_name, storage_protocol='NFS_CIFS', reserved_percentage=self.configuration.reserved_share_percentage, reserved_snapshot_percentage=( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), ) super(GenericShareDriver, self)._update_share_stats(data) @ensure_server def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): return self._create_share( context, share, snapshot=snapshot, share_server=share_server, ) @ensure_server def extend_share(self, share, new_size, share_server=None): server_details = share_server['backend_details'] helper = self._get_helper(share) helper.disable_access_for_maintenance(server_details, share['name']) self._unmount_device(share, server_details) volume = self._get_volume(self.admin_context, share['id']) if int(new_size) > volume['size']: self._detach_volume(self.admin_context, share, server_details) volume = self._extend_volume(self.admin_context, volume, new_size) volume = self._attach_volume( self.admin_context, share, server_details['instance_id'], volume) self._resize_filesystem(server_details, volume, new_size=new_size) self._mount_device(share, server_details, volume) helper.restore_access_after_maintenance(server_details, share['name']) def _extend_volume(self, context, volume, new_size): self.volume_api.extend(context, volume['id'], new_size) msg_error = _('Failed to extend volume %s') % volume['id'] msg_timeout = ( _('Volume has not been extended in %ss. Giving up') % self.configuration.max_time_to_extend_volume ) return self._wait_for_available_volume( volume, self.configuration.max_time_to_extend_volume, msg_error=msg_error, msg_timeout=msg_timeout, expected_size=new_size ) @ensure_server def shrink_share(self, share, new_size, share_server=None): server_details = share_server['backend_details'] helper = self._get_helper(share) export_location = share['export_locations'][0]['path'] mount_path = helper.get_share_path_by_export_location( server_details, export_location) consumed_space = self._get_consumed_space(mount_path, server_details) LOG.debug("Consumed space on share: %s", consumed_space) if consumed_space >= new_size: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) volume = self._get_volume(self.admin_context, share['id']) helper.disable_access_for_maintenance(server_details, share['name']) self._unmount_device(share, server_details) try: self._resize_filesystem(server_details, volume, new_size=new_size) except exception.Invalid: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) except Exception as e: msg = _("Cannot shrink share: %s") % six.text_type(e) raise exception.Invalid(msg) finally: self._mount_device(share, server_details, volume) helper.restore_access_after_maintenance(server_details, share['name']) def _resize_filesystem(self, server_details, volume, new_size=None): check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']] self._ssh_exec(server_details, check_command) command = ['sudo', 'resize2fs', volume['mountpoint']] if new_size: command.append("%sG" % six.text_type(new_size)) try: self._ssh_exec(server_details, command) except processutils.ProcessExecutionError as e: if e.stderr.find('New size smaller than minimum') != -1: msg = (_("Invalid 'new_size' provided: %s") % six.text_type(new_size)) raise exception.Invalid(msg) else: msg = _("Cannot resize 
file-system: %s") % six.text_type(e) raise exception.ManilaException(msg) def _is_share_server_active(self, context, share_server): has_active_share_server = ( share_server and share_server.get('backend_details') and self.service_instance_manager.ensure_service_instance( context, share_server['backend_details'])) return has_active_share_server def delete_share(self, context, share, share_server=None): helper = self._get_helper(share) if not self.driver_handles_share_servers: share_server = self.service_instance_manager.get_common_server() if self._is_share_server_active(context, share_server): helper.remove_exports( share_server['backend_details'], share['name']) self._unmount_device(share, share_server['backend_details']) self._detach_volume(self.admin_context, share, share_server['backend_details']) self._deallocate_container(self.admin_context, share) self.private_storage.delete(share['id']) def create_snapshot(self, context, snapshot, share_server=None): model_update = {} volume = self._get_volume( self.admin_context, snapshot['share_instance_id']) volume_snapshot_name = (self.configuration. volume_snapshot_name_template % snapshot['id']) volume_snapshot = self.volume_api.create_snapshot_force( self.admin_context, volume['id'], volume_snapshot_name, '') t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: if volume_snapshot['status'] == const.STATUS_AVAILABLE: break if volume_snapshot['status'] == const.STATUS_ERROR: raise exception.ManilaException(_('Failed to create volume ' 'snapshot')) time.sleep(1) volume_snapshot = self.volume_api.get_snapshot( self.admin_context, volume_snapshot['id']) self.private_storage.update( snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']}) model_update['provider_location'] = volume_snapshot['id'] else: raise exception.ManilaException( _('Volume snapshot have not been ' 'created in %ss. Giving up') % self.configuration.max_time_to_create_volume) return model_update def delete_snapshot(self, context, snapshot, share_server=None): volume_snapshot = self._get_volume_snapshot(self.admin_context, snapshot['id']) if volume_snapshot is None: return self.volume_api.delete_snapshot(self.admin_context, volume_snapshot['id']) t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: try: snapshot = self.volume_api.get_snapshot(self.admin_context, volume_snapshot['id']) except exception.VolumeSnapshotNotFound: LOG.debug('Volume snapshot was deleted successfully') self.private_storage.delete(snapshot['id']) break time.sleep(1) else: raise exception.ManilaException( _('Volume snapshot have not been ' 'deleted in %ss. 
Giving up') % self.configuration.max_time_to_create_volume) @ensure_server def ensure_share(self, context, share, share_server=None): helper = self._get_helper(share) volume = self._get_volume(context, share['id']) if volume: volume = self._attach_volume( context, share, share_server['backend_details']['instance_id'], volume) self._mount_device(share, share_server['backend_details'], volume) helper.create_exports( share_server['backend_details'], share['name'], recreate=True) @ensure_server def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): self._get_helper(share).update_access(share_server['backend_details'], share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules) def _get_helper(self, share): helper = self._helpers.get(share['share_proto']) if helper: return helper else: raise exception.InvalidShare( reason="Wrong, unsupported or disabled protocol")
Apache License 2.0
pyxll/remote-notebook
pyxll_notebook/client/kernel.py
Kernel.run_notebook
python
async def run_notebook(self, path): url = self.__url + "/api/contents/" + path async with aiohttp.ClientSession(cookie_jar=self.__authenticator.cookie_jar) as session: async with session.get(url, headers=self.__authenticator.headers) as response: try: await response.read() response.raise_for_status() file = await response.json() except Exception: self.__authenticator.reset() raise await self.execute(f"__pyxll_notebook_session__ = '{self.__session_id}'") await self.execute(f"__pyxll_pickle_protocol__ = {pickle.HIGHEST_PROTOCOL}") cells = file["content"]["cells"] code = [c["source"] for c in cells if len(c["source"]) > 0 and c["cell_type"] == "code"] for c in code: await self.execute(c)
Run all cells in a notebook
https://github.com/pyxll/remote-notebook/blob/321c3fd6978a7e6a7c8500cb7d6d1727d21ecbee/pyxll_notebook/client/kernel.py#L100-L121
from .handler import Handler from .events import MessageReplyEvent from ..errors import * from typing import * import datetime as dt import urllib.parse import websockets import logging import aiohttp import asyncio import pickle import json import uuid import os import re _log = logging.getLogger(__name__) class Kernel: default_handler_cls = Handler message_protocol_version = "5.0" def __init__(self, url, authenticator, handler=None): if handler is None: handler = self.default_handler_cls(self) self.__url = url self.__handler = handler self.__kernel = None self.__ws = None self.__session_id = uuid.uuid1().hex self.__username = os.getlogin() self.__kernel_url = None self.__ws_url = None self.__authenticator = authenticator self.__message_events: Dict[str, MessageReplyEvent] = {} async def start(self): url = self.__url ws_url = None if not self.__authenticator.authenticated: await self.__authenticator.authenticate() kernels_url = url + "/api/kernels" async with aiohttp.ClientSession(cookie_jar=self.__authenticator.cookie_jar) as session: async with session.post(kernels_url, headers=self.__authenticator.headers) as response: try: await response.read() response.raise_for_status() except Exception: self.__authenticator.reset() raise if not re.match(r"^application/(?:[\w.+-]+?\+)?json", response.content_type, re.IGNORECASE): raise KernelStartError("Response ito kernel start request is not JSON data. " "Check the notebook server is running.") kernel = await response.json() if not "id" in kernel: raise KernelStartError(kernel.get("message")) kernel_id = kernel["id"] _log.debug(f"Started new kernel {kernel_id}.") self.__kernel = kernel self.__id = kernel_id self.__kernel_url = kernels_url + "/" + self.__kernel["id"] if ws_url is None: u = urllib.parse.urlparse(url) scheme = "wss" if u.scheme == "https" else "ws" port = f":{u.port}" if u.port else "" ws_url = f"{scheme}://{u.hostname}{port}{u.path}" ws_headers = dict(self.__authenticator.headers) cookies = self.__authenticator.cookie_jar.filter_cookies(kernels_url) cookies = [f"{k}={c.value};" for k, c in cookies.items()] ws_headers["Cookie"] = " ".join(cookies) self.__ws_url = f"{ws_url}/api/kernels/{kernel_id}/channels?session_id={self.__session_id}" self.__ws = await websockets.connect(self.__ws_url, max_size=None, extra_headers=ws_headers) loop = asyncio.get_event_loop() loop.create_task(self.__poll_ws())
MIT License
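Kernel.run_notebook is a coroutine, so it has to be driven from an event loop after the kernel has been started. A hedged sketch of how it might be called is below; the notebook-server URL, the notebook path and the authenticator object are assumptions, not values taken from this repository.

import asyncio
from pyxll_notebook.client.kernel import Kernel

async def run(authenticator):
    kernel = Kernel("http://localhost:8888", authenticator)  # assumed local server
    await kernel.start()                                     # create the kernel, open the websocket
    await kernel.run_notebook("examples/demo.ipynb")         # execute every code cell

# asyncio.run(run(my_authenticator))
# my_authenticator: any authenticator implementation from this package (assumption)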
trustyjaid/trusty-cogs
hockey/gamedaychannels.py
GameDayChannels.gdc_delete
python
async def gdc_delete(self, ctx: commands.Context) -> None: await self.delete_gdc(ctx.guild) await ctx.send(_("Game day channels deleted."))
Delete all current game day channels for the server
https://github.com/trustyjaid/trusty-cogs/blob/41978fa07fc2964f0dec8c0bbfc5d112802df321/hockey/gamedaychannels.py#L101-L106
import logging from datetime import datetime from typing import Optional import discord from redbot.core import commands from redbot.core.i18n import Translator from redbot.core.utils.chat_formatting import humanize_list from redbot.core.utils.menus import start_adding_reactions from .abc import MixinMeta from .constants import TEAMS from .game import Game from .helper import HockeyStates, HockeyTeams, utc_to_local log = logging.getLogger("red.trusty-cogs.Hockey") _ = Translator("Hockey", __file__) class GameDayChannels(MixinMeta): @commands.group() @commands.mod_or_permissions(manage_channels=True) @commands.guild_only() async def gdc(self, ctx: commands.Context) -> None: @gdc.command(name="settings") async def gdc_settings(self, ctx: commands.Context) -> None: async with ctx.typing(): guild = ctx.message.guild create_channels = await self.config.guild(guild).create_channels() if create_channels is None: return team = await self.config.guild(guild).gdc_team() if team is None: team = "None" channels = await self.config.guild(guild).gdc() category = guild.get_channel(await self.config.guild(guild).category()) delete_gdc = await self.config.guild(guild).delete_gdc() game_states = await self.config.guild(guild).gdc_state_updates() if category is not None: category = category.name if channels is not None: created_channels = "" for channel in channels: chn = guild.get_channel(channel) if chn is not None: created_channels += chn.mention else: created_channels += "<#{}>\n".format(channel) if len(channels) == 0: created_channels = "None" else: created_channels = "None" if not ctx.channel.permissions_for(guild.me).embed_links: msg = _( "```GDC settings for {guild}\nCreate Game Day Channels: {create_channels}" "\nDelete Game Day Channels: {delete_gdc}\nTeam: {team}\n" "Current Channels: {created_channels}\nDefault Game State: {game_states}\n```" ).format( guild=guild.name, create_channels=create_channels, delete_gdc=delete_gdc, team=team, created_channels=created_channels, game_states=humanize_list(game_states), ) await ctx.send(msg) if ctx.channel.permissions_for(guild.me).embed_links: em = discord.Embed(title=_("GDC settings for ") + guild.name) em.colour = await ctx.embed_colour() em.add_field(name=_("Create Game Day Channels"), value=str(create_channels)) em.add_field(name=_("Delete Game Day Channels"), value=str(delete_gdc)) em.add_field(name=_("Team"), value=str(team)) em.add_field(name=_("Current Channels"), value=created_channels[:1024]) if not game_states: game_states = ["None"] em.add_field(name=_("Default Game States"), value=humanize_list(game_states)) await ctx.send(embed=em) @gdc.command(name="delete")
MIT License
wright-group/wrighttools
WrightTools/_open.py
open
python
def open(filepath, edit_local=False): filepath = os.fspath(filepath) ds = np.DataSource(None) if edit_local is False: tf = tempfile.mkstemp(prefix="", suffix=".wt5") with _open(tf[1], "w+b") as tff: with ds.open(str(filepath), "rb") as f: tff.write(f.read()) filepath = tf[1] f = h5py.File(filepath, "r") class_name = f["/"].attrs["class"] name = f["/"].attrs["name"] f.close() if class_name == "Data": obj = wt_data.Data(filepath=str(filepath), name=name, edit_local=True) elif class_name == "Collection": obj = wt_collection.Collection(filepath=str(filepath), name=name, edit_local=True) else: obj = wt_group.Group(filepath=str(filepath), name=name, edit_local=True) if edit_local is False: setattr(obj, "_tmpfile", tf) weakref.finalize(obj, obj.close) return obj
Open any wt5 file, returning the top-level object (data or collection). Parameters ---------- filepath : path-like Path to file. Can be either a local or remote file (http/ftp). Can be compressed with gz/bz2, decompression based on file name. edit_local : boolean (optional) If True, the file itself will be opened for editing. Otherwise, a copy will be created. Default is False. Returns ------- WrightTools Collection or Data Root-level object in file.
https://github.com/wright-group/wrighttools/blob/7531965dec9a8f52557fbd3c60e12dcd3b6e000b/WrightTools/_open.py#L30-L70
import os import tempfile import weakref import h5py import numpy as np from . import collection as wt_collection from . import data as wt_data from . import _group as wt_group __all__ = ["open"] _open = open
MIT License
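Assuming the function is exposed at package level as wt.open (as the module's __all__ above suggests), typical use is a single call on a wt5 path or URL. The file name below is a placeholder.

import WrightTools as wt

# 'measurement.wt5' is a hypothetical file; per the docstring above, remote
# http/ftp paths and gz/bz2-compressed files are also accepted.
obj = wt.open('measurement.wt5')   # returns a Data, Collection or Group object
print(obj)
obj.close()                        # with edit_local=False this was a temporary copy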
vemel/mypy_boto3_builder
mypy_boto3_builder/import_helpers/import_record.py
ImportRecord.is_standalone
python
def is_standalone(self) -> bool: if not self.name or self.fallback: return True return False
Whether the import record should not be grouped.
https://github.com/vemel/mypy_boto3_builder/blob/07c9a4273404ea0bb8aa6c14b9ed8b3af1f1a3dd/mypy_boto3_builder/import_helpers/import_record.py#L163-L170
from __future__ import annotations from mypy_boto3_builder.constants import MODULE_NAME, TYPE_DEFS_NAME from mypy_boto3_builder.import_helpers.import_string import ImportString class ImportRecord: builtins_import_string = ImportString("builtins") third_party_import_strings = ( ImportString("boto3"), ImportString("botocore"), ) def __init__( self, source: ImportString, name: str = "", alias: str = "", min_version: tuple[int, ...] = (3, 8), fallback: ImportRecord | None = None, ) -> None: self.source = source self.name = name self.alias = alias self.min_version = min_version self.fallback = fallback def __bool__(self) -> bool: return bool(self.source) @classmethod def empty(cls) -> ImportRecord: return cls(ImportString.empty()) def render(self) -> str: if self.name and self.alias: return f"from {self.source} import {self.name} as {self.alias}" if self.name: return f"from {self.source} import {self.name}" if self.alias: return f"import {self.source} as {self.alias}" if self.source: return f"import {self.source}" return "" def __str__(self) -> str: return self.render() def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other: object) -> bool: if not isinstance(other, ImportRecord): raise ValueError(f"Cannot compare ImportString with {other}") return str(self) == str(other) def __ne__(self, other: object) -> bool: if not isinstance(other, ImportRecord): raise ValueError(f"Cannot compare ImportString with {other}") return not self == other def __gt__(self, other: ImportRecord) -> bool: if self.fallback is not None and other.fallback is None: return True if other.fallback is not None and self.fallback is None: return False if self.source == other.source: return self.name > other.name if self.is_local() and not other.is_local(): return True if other.is_local() and not self.is_local(): return False if self.is_third_party() and not other.is_third_party(): return True if other.is_third_party() and not self.is_third_party(): return False return self.source > other.source def __lt__(self, other: "ImportRecord") -> bool: return not self > other def get_local_name(self) -> str: return self.alias or self.name or self.source.render() def is_builtins(self) -> bool: return self.source.startswith(self.builtins_import_string) def is_type_defs(self) -> bool: return self.source.parts[-1] == TYPE_DEFS_NAME def is_third_party(self) -> bool: for third_party_import_string in self.third_party_import_strings: if self.source.startswith(third_party_import_string): return True return False def is_local(self) -> bool: if not self.source: return False if self.source.master_name.startswith(MODULE_NAME): return True if self.is_type_defs(): return True return False def get_external(self, module_name: str) -> "ImportRecord": return self
MIT License
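A small illustration of the rule encoded by is_standalone — records without a name, or with a fallback, cannot be grouped with other imports from the same source. The import strings below are arbitrary examples; the module paths follow the file layout shown above.

from mypy_boto3_builder.import_helpers.import_string import ImportString
from mypy_boto3_builder.import_helpers.import_record import ImportRecord

plain = ImportRecord(ImportString("boto3"))                 # renders "import boto3"
named = ImportRecord(ImportString("boto3"), name="client")  # renders "from boto3 import client"

print(plain.is_standalone())   # True  - no name, so it cannot be grouped
print(named.is_standalone())   # False - groupable with other "from boto3 import ..." records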
fhpythonutils/cli2gui
cli2gui/application/widgets.py
Widgets.dropdown
python
def dropdown(self, key: str, argItems: list[str]) -> Element: return self.pySimpleGui.Drop( tuple(argItems), size=self.sizes["input_size"], pad=self.sizes["padding"], key=key, )
Return a dropdown.
https://github.com/fhpythonutils/cli2gui/blob/73c3755a66c5cb9bf25ced2125c08e1205a52b12/cli2gui/application/widgets.py#L101-L108
from __future__ import annotations import io from typing import Any from PIL import Image, ImageTk from PySimpleGUI import Element class Widgets: def __init__(self, sizes: dict[str, Any], pySimpleGui: Any): self.sizes = sizes self.pySimpleGui = pySimpleGui """Utility functions that manipulate images and text. """ def getImgData(self, imagePath: str, first: bool = False) -> bytes: img = Image.open(imagePath) img.thumbnail((self.sizes["title_size"] * 3, self.sizes["title_size"] * 3)) if first: bio = io.BytesIO() img.save(bio, format="PNG") del img return bio.getvalue() return ImageTk.PhotoImage(img) def stringTitlecase(self, string: str, splitStr: str = "ALL"): _ = self titleCase = "" if len(string) > 0: if splitStr == "ALL": titleCase = " ".join( (part[0].upper() + part[1:]) for part in string.replace("-", "_").split("_") ) else: titleCase = " ".join( (part[0].upper() + part[1:]) for part in string.split(splitStr) ) return titleCase def stringSentencecase(self, string: str) -> str: _ = self if string: return string[0].upper() + string[1:] return "" """Individual widgets """ def inputText(self, key: str) -> Element: return self.pySimpleGui.InputText( size=self.sizes["input_size"], pad=self.sizes["padding"], key=key, font=("sans", self.sizes["text_size"]), ) def check(self, key: str) -> Element: return self.pySimpleGui.Check( "", size=self.sizes["input_size"], pad=self.sizes["padding"], key=key ) def button(self, text: str) -> Element: return self.pySimpleGui.Button( text, size=self.sizes["button"], pad=self.sizes["padding"], font=("sans", self.sizes["text_size"]), ) def label(self, text: str, font: int = 11) -> Element: return self.pySimpleGui.Text( text, size=( int(self.sizes["label_size"][0] * 11 / font), self.sizes["label_size"][1], ), pad=self.sizes["padding"], font=("sans", font), )
MIT License
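dropdown only reads the input_size and padding entries of the sizes dict, so a very small sketch is enough to show it in use; the sizing values, key and items below are made up.

import PySimpleGUI as sg
from cli2gui.application.widgets import Widgets

sizes = {"input_size": (20, 1), "padding": (5, 5)}   # hypothetical sizing values
widgets = Widgets(sizes, sg)
element = widgets.dropdown("--log-level", ["debug", "info", "warning"])
# element is a PySimpleGUI Drop/Combo element ready to be placed in a window layout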
joshuaskelly/quake-cli-tools
qcli/bsp2svg/converter.py
simplify_number
python
def simplify_number(number): return int(number) if int(number) == number else number
Convert the given number to an integer if it has no fractional part; otherwise return it unchanged. Args: number: The number to convert to an integer. Returns: A number.
https://github.com/joshuaskelly/quake-cli-tools/blob/3322e71724a7a13f899281e7acae6c9f2bf2ad55/qcli/bsp2svg/converter.py#L9-L19
import os import svgwrite from progress.bar import IncrementalBar from .api import Bsp
MIT License
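simplify_number is self-contained, so its behaviour is easy to demonstrate directly; the calls below are illustrative.

def simplify_number(number):
    # Return an int when the value has no fractional part, else return it unchanged.
    return int(number) if int(number) == number else number

print(simplify_number(4.0))    # 4     (collapsed to an int)
print(simplify_number(4.25))   # 4.25  (left as a float)
print(simplify_number(7))      # 7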
dmlc/dgl
examples/pytorch/ogb/deepwalk/model.py
init_empty_grad
python
def init_empty_grad(emb_dimension, walk_length, batch_size): grad_u = torch.zeros((batch_size * walk_length, emb_dimension)) grad_v = torch.zeros((batch_size * walk_length, emb_dimension)) return grad_u, grad_v
Initialize empty gradient matrices.
https://github.com/dmlc/dgl/blob/8341244a2dac850bd0c1153c7641c3b8a2bbfc30/examples/pytorch/ogb/deepwalk/model.py#L93-L98
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init import random import numpy as np import dgl.multiprocessing as mp from dgl.multiprocessing import Queue def init_emb2pos_index(walk_length, window_size, batch_size): idx_list_u = [] idx_list_v = [] for b in range(batch_size): for i in range(walk_length): for j in range(i-window_size, i): if j >= 0: idx_list_u.append(j + b * walk_length) idx_list_v.append(i + b * walk_length) for j in range(i + 1, i + 1 + window_size): if j < walk_length: idx_list_u.append(j + b * walk_length) idx_list_v.append(i + b * walk_length) index_emb_posu = torch.LongTensor(idx_list_u) index_emb_posv = torch.LongTensor(idx_list_v) return index_emb_posu, index_emb_posv def init_emb2neg_index(walk_length, window_size, negative, batch_size): idx_list_u = [] for b in range(batch_size): for i in range(walk_length): for j in range(i-window_size, i): if j >= 0: idx_list_u += [i + b * walk_length] * negative for j in range(i+1, i+1+window_size): if j < walk_length: idx_list_u += [i + b * walk_length] * negative idx_list_v = list(range(batch_size * walk_length)) * negative * window_size * 2 random.shuffle(idx_list_v) idx_list_v = idx_list_v[:len(idx_list_u)] index_emb_negu = torch.LongTensor(idx_list_u) index_emb_negv = torch.LongTensor(idx_list_v) return index_emb_negu, index_emb_negv def init_weight(walk_length, window_size, batch_size): weight = [] for b in range(batch_size): for i in range(walk_length): for j in range(i-window_size, i): if j >= 0: weight.append(1. - float(i - j - 1)/float(window_size)) for j in range(i + 1, i + 1 + window_size): if j < walk_length: weight.append(1. - float(j - i - 1)/float(window_size)) return torch.Tensor(weight).unsqueeze(1)
Apache License 2.0
yxgeee/bake
imagenet/pycls/core/builders.py
register_model
python
def register_model(name, ctor): _models[name] = ctor
Registers a model dynamically.
https://github.com/yxgeee/bake/blob/07c4f668ea19311d5b50121026e73d2f035d5765/imagenet/pycls/core/builders.py#L56-L58
import torch from pycls.core.config import cfg from pycls.models.anynet import AnyNet from pycls.models.effnet import EffNet from pycls.models.resnet import ResNet from pycls.models.mobilenetv2 import MobileNetV2 from pycls.models.resnest import ResNeSt _models = { "anynet": AnyNet, "effnet": EffNet, "resnet": ResNet, "mobilenetv2": MobileNetV2, "resnest": ResNeSt, } _loss_funs = {"cross_entropy": torch.nn.CrossEntropyLoss} def get_model(): err_str = "Model type '{}' not supported" assert cfg.MODEL.TYPE in _models.keys(), err_str.format(cfg.MODEL.TYPE) return _models[cfg.MODEL.TYPE] def get_loss_fun(): err_str = "Loss function type '{}' not supported" assert cfg.MODEL.LOSS_FUN in _loss_funs.keys(), err_str.format(cfg.TRAIN.LOSS) return _loss_funs[cfg.MODEL.LOSS_FUN] def build_model(): return get_model()() def build_loss_fun(): return get_loss_fun()()
MIT License
kjosib/booze-tools
boozetools/parsing/automata.py
find_conflicts
python
def find_conflicts(graph, follow_sets, grammar) -> List[ConflictData]: result = [] for q, state in enumerate(graph): degree = collections.Counter(state.shift.keys()) for rule_id in state.reduce: for token in follow_sets[q,rule_id]: prefer_shift = token in state.shift and grammar.decide_shift_reduce(token, rule_id) == context_free.RIGHT if not prefer_shift: degree[token] += 1 conflicted_tokens = set(token for token, count in degree.items() if count > 1) conflict = ConflictData({token: set() for token in conflicted_tokens}, {}) for rule_id in state.reduce: contribution = conflicted_tokens & follow_sets[q,rule_id] conflict.rules[rule_id] = contribution for token in contribution: conflict.tokens[token].add(rule_id) result.append(conflict) return result
This drives one of the central ideas of the Minimal-LR(1) algorithm: Learn which tokens are involved in conflicts, and which rules contribute to those conflicts for each token (as known to LALR). Subtleties: 1. If an S/R conflict is DECLARED to shift, then it does not impugn the token, but the token still refers to the rule in case some other rule MAY reduce. This routine (and the stuff that uses it) is coded with the idea that the grammar may leave certain things deliberately non-deterministic. Wherever that is the case, these algorithms will respect it. 2. There is a way to improve the treatment of R/R conflicts if it is known in advance that the table will be used deterministically, or if the R/R is resolved by rule precedence. It involves a pass over the LR(1) item cores considering the groups that eventually lead to a R/R conflict (they have the same suffix and follower): among those groups only the "winning" reduction item needs to stay in the core. This "normalizes" the LR(1) cores so that potentially fewer distinct ones might be generated. Alas, idea #2 is not yet implemented. Complex R/R conflicts may still lead to more states than strictly necessary for a deterministic table. In practice, this is unlikely to be a real problem: deterministic tables are usually made from grammars with few LALR conflicts, and in the non-deterministic case nothing is wasted. Nevertheless, this remains an avenue for improvement.
https://github.com/kjosib/booze-tools/blob/ed3333643e0df99231202c024da8c86a9bb5b2bc/boozetools/parsing/automata.py#L580-L623
import collections, sys from typing import NamedTuple, Iterable, TypeVar, Generic, List, Dict, Set, Tuple from ..support import foundation, pretty, interfaces from . import context_free T = TypeVar('T') class PurityError(ValueError): class HFA(Generic[T]): graph: List[T] initial: List[int] accept: List[int] grammar: context_free.ContextFreeGrammar bft: foundation.BreadthFirstTraversal def __init__(self, *, graph, initial, accept, grammar, bft): self.graph, self.initial,self.accept, self.grammar, self.bft = graph, initial, accept, grammar, bft def display_situation(self, q: int, lookahead: str): head, *tail = self.bft.shortest_path_to(q) print('==============\nIn language %r, consider:' % self.grammar.start[self.initial.index(head)]) print('\t' + ' '.join(map(self.bft.breadcrumbs.__getitem__, tail)), pretty.DOT, lookahead) def earley_core(self, q:int): return sorted(set((r,p) for r, p, *_ in self.bft.traversal[q])) def traverse(self, q: int, symbols:Iterable) -> int: for s in symbols: q = self.graph[q].shift[s] return q def make_dot_file(self, path): with open(path, 'w') as fh: fh.write("digraph {\n") for q, state in enumerate(self.graph): sym = self.bft.breadcrumbs[q] or '' sym = sym.replace('"', r'\"') if sym.endswith('\\'): sym = sym + ' ' fh.write("%d [label=\"%d: %s\"]\n"%(q, q, sym)) for i in state.shift.values(): fh.write("\t%d -> %d\n"%(q,i)) fh.write('}\n') pass def trial_parse(self, sentence: Iterable): language_index = 0 initial, accept = self.initial[language_index], self.accept[language_index] def reduce(stack, rule_id): rule = self.grammar.rules[rule_id] for i in range(len(rule.rhs)): stack = stack[1] return self.graph[stack[0]].shift[rule.lhs], stack root = (initial, None) alive = [root] for lexeme in sentence: next = [] for stack in alive: state = self.graph[stack[0]] if lexeme in state.shift: next.append((state.shift[lexeme], stack)) for rule_id in state.reductions_before(lexeme): alive.append(reduce(stack, rule_id)) alive = next if not alive: raise interfaces.GeneralizedParseError("Parser died midway at something ungrammatical.") for stack in alive: q = stack[0] if q == accept: return True for rule_id in self.graph[q].reductions_before(interfaces.END_OF_TOKENS): alive.append(reduce(stack, rule_id)) raise interfaces.GeneralizedParseError("Parser recognized a viable prefix, but not a complete sentence.") class LR0_State(NamedTuple): shift: Dict[str, int] reduce: Set[int] def reductions_before(self, lexeme): return self.reduce def lr0_construction(grammar: context_free.ContextFreeGrammar) -> HFA[LR0_State]: def build_state(core: frozenset): step, reduce = collections.defaultdict(set), set() def visit(item): rule_id, position = item if position < len(RHS[rule_id]): next_symbol = RHS[rule_id][position] step[next_symbol].add((rule_id,position+1)) return symbol_front.get(next_symbol) elif rule_id<len(grammar.rules): reduce.add(rule_id) foundation.transitive_closure(core, visit) graph.append(LR0_State(shift=ure.find_shifts(step), reduce=reduce, )) assert grammar.start RHS = grammar.augmented_rules() def front(rule_ids): return frozenset([(r,0) for r in rule_ids]) symbol_front = {symbol: front(rule_ids) for symbol, rule_ids in grammar.symbol_rule_ids.items()} bft = foundation.BreadthFirstTraversal() ure = UnitReductionEliminator(grammar, bft) graph = [] initial = [bft.lookup(front([rule_id])) for rule_id in grammar.initial()] bft.execute(build_state) accept = [graph[qi].shift[language] for qi, language in zip(initial, grammar.start)] return HFA(graph=graph, initial=initial, 
accept=accept, grammar=grammar, bft=bft) class UnitReductionEliminator: def __init__(self, grammar: context_free.ContextFreeGrammar, bft: foundation.BreadthFirstTraversal): self.bft = bft self.unit_rules = {} self.eligible_rhs = set() for rule_id, rule in enumerate(grammar.rules): if rule.is_rename(): self.unit_rules[rule_id] = rule.lhs self.eligible_rhs.add(rule.rhs[0]) def find_shifts(self, step: dict) -> dict: replace = {} for symbol in self.eligible_rhs & step.keys(): each_item = iter(step[symbol]) rule_id = next(each_item)[0] if rule_id in self.unit_rules and all(item[0] == rule_id for item in each_item): replace[symbol] = self.unit_rules[rule_id] shifts = {} for symbol in step.keys(): proxy = symbol while proxy in replace: proxy = replace[proxy] shifts[symbol] = self.bft.lookup(frozenset(step[proxy]), breadcrumb=proxy) return shifts class LA_State(NamedTuple): shift: Dict[str, int] reduce: Dict[str, List[int]] def reductions_before(self, lexeme): return self.reduce.get(lexeme, ()) def lalr_construction(grammar: context_free.ContextFreeGrammar) -> HFA[LA_State]: lr0 = lr0_construction(grammar) token_sets, follow = lalr_first_and_follow(lr0) def make_lalr_state(q:int, node:LR0_State) -> LA_State: reduce = {} for rule_id in node.reduce: for token in token_sets[follow[q, rule_id]]: assert isinstance(token, str) if token not in reduce: reduce[token] = [rule_id] else: reduce[token].append(rule_id) step = reachable(node.shift, reduce, grammar) return LA_State(step, reduce) return HFA( graph=[make_lalr_state(q, node) for q, node in enumerate(lr0.graph)], initial=lr0.initial, accept=lr0.accept, grammar=grammar, bft=lr0.bft ) def lalr_first_and_follow(lr0:HFA[LR0_State]) -> Tuple[list, dict]: grammar = lr0.grammar terminals = grammar.apparent_terminals() token_sets = [terminals.intersection(node.shift.keys()) for node in lr0.graph] for q in lr0.accept: token_sets[q].add(interfaces.END_OF_TOKENS) follow = {} for q, node in enumerate(lr0.graph): for rule_id in node.reduce: follow[q,rule_id] = foundation.allocate(token_sets, set()) inflow:List[Set[int]] = [set() for _ in token_sets] for q, node in enumerate(lr0.graph): for symbol, target in node.shift.items(): if symbol in grammar.symbol_rule_ids: for rule_id in grammar.symbol_rule_ids[symbol]: rule_end = lr0.traverse(q, grammar.rules[rule_id].rhs) if (rule_end, rule_id) in follow: inflow[rule_end].add(target) inflow[follow[rule_end, rule_id]].add(target) for component in foundation.strongly_connected_components_by_tarjan(inflow): tokens = set() for k in component: tokens.update(token_sets[k]) tokens.update(*[token_sets[j] for j in inflow[k]]) token_sets[k] = tokens return token_sets, follow def canonical_lr1(grammar: context_free.ContextFreeGrammar) -> HFA[LA_State]: def front(symbol, follower, goto_transparent, iso_q): iso_state = lr0.graph[iso_q] goto_state = lr0.graph[iso_state.shift[symbol]] after = set(goto_state.shift.keys()) & terminals if goto_transparent: after.add(follower) items = [] for rule_id in grammar.symbol_rule_ids[symbol]: items.extend((rule_id, 0, lookahead) for lookahead in after) return items lr0 = lr0_construction(grammar) terminals = grammar.apparent_terminals() return abstract_lr1_construction( grammar, front = front, note_reduce = lambda reduce, follower, rule_id, iso_q: reduce[follower].append(rule_id), initial_item = lambda rule_id: (rule_id, 0, interfaces.END_OF_TOKENS), lr0_catalog=lr0.bft.catalog, ) def abstract_lr1_construction( grammar: context_free.ContextFreeGrammar, *, front, note_reduce, initial_item, 
lr0_catalog, ) -> HFA[LA_State]: def build_state(core: frozenset): iso_q = lr0_catalog[frozenset((r, p) for (r, p, f) in core)] step, reduce = collections.defaultdict(set), collections.defaultdict(list) def visit(item): rule_id, position, follower = item rhs = RHS[rule_id] if position < len(rhs): next_symbol = rhs[position] step[next_symbol].add((rule_id, position+1, follower)) if next_symbol in grammar.symbol_rule_ids: return front(next_symbol, follower, transparent(rule_id, position + 1), iso_q) elif rule_id<len(grammar.rules): note_reduce(reduce, follower, rule_id, iso_q) foundation.transitive_closure(core, visit) graph.append(LA_State(shift=ure.find_shifts(reachable(step, reduce, grammar)), reduce=reduce,)) RHS = grammar.augmented_rules() transparent = find_transparent(grammar.find_epsilon(), RHS) bft = foundation.BreadthFirstTraversal() ure = UnitReductionEliminator(grammar, bft) graph = [] initial = [bft.lookup(frozenset([initial_item(rule_id)])) for rule_id in grammar.initial()] bft.execute(build_state) accept = [graph[qi].shift[language] for qi, language in zip(initial, grammar.start)] return HFA(graph=graph, initial=initial, accept=accept, grammar=grammar, bft=bft) def find_transparent(epsilon:Set[str], right_hand_sides:List[List[str]]): def transparency_threshold(rhs): position = len(rhs) while position > 0 and rhs[position-1] in epsilon: position -= 1 return position thresholds = list(map(transparency_threshold, right_hand_sides)) return lambda rule_id, position: thresholds[rule_id] <= position def minimal_lr1(grammar: context_free.ContextFreeGrammar) -> HFA[LA_State]: def front(symbol, follower, goto_transparent, iso_q): isostate = lr0.graph[iso_q] items = [] goto_q = isostate.shift[symbol] goto_conflict = conflict_data[goto_q].tokens.keys() for sub_rule_id in grammar.symbol_rule_ids[symbol]: reach = lr0.traverse(iso_q, grammar.rules[sub_rule_id].rhs) if follower is None: items.append((sub_rule_id, 0, None)) reach_conflict = conflict_data[reach].rules.get(sub_rule_id, EMPTY) possible_follow = reach_conflict & token_sets[goto_q] if reach != iso_q: possible_follow -= goto_conflict for token in possible_follow: items.append((sub_rule_id, 0, token)) else: if follower in conflict_data[reach].tokens and goto_transparent: assert follower in goto_conflict items.append((sub_rule_id, 0, follower)) return items def note_reduce(reduce, follower, rule_id, iso_q): if follower is None: for t in token_sets[follow[iso_q, rule_id]] - conflict_data[iso_q].rules[rule_id]: assert t not in reduce reduce[t] = [rule_id] else: assert follower in conflict_data[iso_q].rules[rule_id] if follower in reduce: assert rule_id not in reduce[follower] reduce[follower].append(rule_id) else: reduce[follower] = [rule_id] EMPTY = frozenset() lr0 = lr0_construction(grammar) token_sets, follow = lalr_first_and_follow(lr0) conflict_data = find_conflicts(lr0.graph, {(q,r):token_sets[i] for (q,r),i in follow.items()}, grammar) return abstract_lr1_construction( grammar, front = front, note_reduce = note_reduce, initial_item = lambda rule_id: (rule_id, 0, None), lr0_catalog=lr0.bft.catalog, ) def reachable(step:dict, reduce:dict, grammar) -> dict: for token, rule_id_list in list(reduce.items()): if token not in step: continue decide = [grammar.decide_shift_reduce(token, rule_id) for rule_id in rule_id_list] ways = set(decide) assert context_free.BOGUS not in ways, "This is guaranteed by grammar.validate(...), called earlier." 
if len(ways) == 1: decision = ways.pop() if decision == context_free.LEFT: del step[token] elif decision == context_free.RIGHT: del reduce[token] elif decision == context_free.NONASSOC: del step[token] reduce[token] = () else: assert decision is None elif ways == {context_free.LEFT, context_free.NONASSOC}: del step[token] reduce[token] = tuple(r for r,d in zip(rule_id_list, decide) if d == context_free.LEFT) elif ways == {context_free.RIGHT, None}: reduce[token] = tuple(r for r, d in zip(rule_id_list, decide) if d != context_free.RIGHT) else: print("Fair Warning:", token, "triggers a bizarre operator-precedence corner case.", file=sys.stderr) return step class ConflictData(NamedTuple): tokens: Dict[str, Set[int]] rules: Dict[int, Set[str]]
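The `reachable` pass above resolves shift/reduce conflicts by consulting the grammar's operator-precedence declarations and deleting the losing action. A self-contained sketch of that decision table, assuming the token really is in conflict; the constant names and the `decide` callback are illustrative stand-ins, not this library's API:

LEFT, RIGHT, NONASSOC = 'left', 'right', 'nonassoc'

def resolve_conflict(token, step, reduce, decide):
    # decide(token, rule_id) -> LEFT (the reduction wins), RIGHT (the shift wins),
    # NONASSOC (both become parse errors) or None (no declaration).
    decisions = {decide(token, rule_id) for rule_id in reduce[token]}
    if decisions == {LEFT}:
        del step[token]        # e.g. '-' is left-associative: reduce "a-b" before shifting
    elif decisions == {RIGHT}:
        del reduce[token]      # e.g. '**' is right-associative: keep shifting
    elif decisions == {NONASSOC}:
        del step[token]        # e.g. "a < b < c" is rejected outright
        reduce[token] = ()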
MIT License
html5rocks/updates.html5rocks.com
lib/pygments/lexer.py
Lexer.add_filter
python
def add_filter(self, filter_, **options):
    if not isinstance(filter_, Filter):
        filter_ = get_filter_by_name(filter_, **options)
    self.filters.append(filter_)
Add a new stream filter to this lexer.
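A short usage sketch for the method above; `keywordcase` is one of Pygments' built-in filters and takes a `case` option, and the lexer name is just an example:

from pygments.lexers import get_lexer_by_name
from pygments.filters import KeywordCaseFilter

lexer = get_lexer_by_name('python')
# register by name; extra keyword options are forwarded to the filter
lexer.add_filter('keywordcase', case='upper')
# or pass an already-constructed Filter instance
lexer.add_filter(KeywordCaseFilter(case='lower'))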
https://github.com/html5rocks/updates.html5rocks.com/blob/144b5ff9a36a7d37924d30c14810a0debdbd76ff/lib/pygments/lexer.py#L99-L105
import re try: set except NameError: from sets import Set as set from pygments.filter import apply_filters, Filter from pygments.filters import get_filter_by_name from pygments.token import Error, Text, Other, _TokenType from pygments.util import get_bool_opt, get_int_opt, get_list_opt, make_analysator __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', 'LexerContext', 'include', 'flags', 'bygroups', 'using', 'this'] _default_analyse = staticmethod(lambda x: 0.0) class LexerMeta(type): def __new__(cls, name, bases, d): if 'analyse_text' in d: d['analyse_text'] = make_analysator(d['analyse_text']) return type.__new__(cls, name, bases, d) class Lexer(object): name = None aliases = [] filenames = [] alias_filenames = [] mimetypes = [] __metaclass__ = LexerMeta def __init__(self, **options): self.options = options self.stripnl = get_bool_opt(options, 'stripnl', True) self.stripall = get_bool_opt(options, 'stripall', False) self.tabsize = get_int_opt(options, 'tabsize', 0) self.encoding = options.get('encoding', 'latin1') self.filters = [] for filter_ in get_list_opt(options, 'filters', ()): self.add_filter(filter_) def __repr__(self): if self.options: return '<pygments.lexers.%s with %r>' % (self.__class__.__name__, self.options) else: return '<pygments.lexers.%s>' % self.__class__.__name__
Apache License 2.0
petercorke/spatialmath-python
spatialmath/geom2d.py
Polygon2.__init__
python
def __init__(self, vertices=None):
    if isinstance(vertices, (list, tuple)):
        vertices = np.array(vertices).T
    elif isinstance(vertices, np.ndarray):
        if vertices.shape[0] != 2:
            raise ValueError('ndarray must be 2xN')
    elif vertices is None:
        return
    else:
        raise TypeError('expecting list of 2-tuples or ndarray(2,N)')
    vertices = np.hstack((vertices, vertices[:, 0:1]))
    self.path = Path(vertices.T, closed=True)
    self.path0 = self.path
Create planar polygon from vertices

:param vertices: vertices of polygon, defaults to None
:type vertices: ndarray(2, N), optional

Create a polygon from a set of points provided as columns of the 2D
array ``vertices``. A closed polygon is created so the last vertex
should not equal the first.

Example:

.. runblock:: pycon

    >>> from spatialmath import Polygon2
    >>> p = Polygon2([[1, 3, 2], [2, 2, 4]])

.. warning:: The points must be sequential around the perimeter and
    counter-clockwise.

.. note:: The polygon is represented by a Matplotlib ``Path``
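Equivalently, the vertices can be passed directly as a 2xN array; a minimal sketch based on the constructor above:

import numpy as np
from spatialmath import Polygon2

# columns are the vertices (1, 2), (3, 2) and (2, 4)
p = Polygon2(np.array([[1, 3, 2],
                       [2, 2, 4]]))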
https://github.com/petercorke/spatialmath-python/blob/a3116021b8bd95b4f6015b180053941599ebd6cc/spatialmath/geom2d.py#L27-L68
from functools import reduce from spatialmath.base.graphics import axes_logic from spatialmath import base, SE2 import matplotlib.pyplot as plt from matplotlib.path import Path from matplotlib.patches import PathPatch from matplotlib.transforms import Affine2D from matplotlib.collections import PatchCollection import numpy as np class Polygon2:
MIT License
rouge8/20questions
web/session.py
Session._generate_session_id
python
def _generate_session_id(self):
    while True:
        rand = os.urandom(16)
        now = time.time()
        secret_key = self._config.secret_key
        session_id = sha1("%s%s%s%s" % (rand, now, utils.safestr(web.ctx.ip), secret_key))
        session_id = session_id.hexdigest()
        if session_id not in self.store:
            break
    return session_id
Generate a random id for session
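The loop above mixes `os.urandom`, the current time, the client IP and a secret key through SHA-1 and retries on the unlikely chance the id already exists in the store. Outside web.py, a comparable generator in modern Python would typically lean on the standard-library `secrets` module; a minimal sketch of that alternative (not part of this codebase):

import secrets

def generate_session_id(store):
    while True:
        session_id = secrets.token_hex(16)   # 32 hex characters, cryptographically random
        if session_id not in store:
            return session_id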
https://github.com/rouge8/20questions/blob/8845184114109c7e34eecd69c2689d6ef6fc3084/web/session.py#L113-L124
import os, time, datetime, random, base64 try: import cPickle as pickle except ImportError: import pickle try: import hashlib sha1 = hashlib.sha1 except ImportError: import sha sha1 = sha.new import utils import webapi as web __all__ = [ 'Session', 'SessionExpired', 'Store', 'DiskStore', 'DBStore', ] web.config.session_parameters = utils.storage({ 'cookie_name': 'webpy_session_id', 'cookie_domain': None, 'timeout': 86400, 'ignore_expiry': True, 'ignore_change_ip': True, 'secret_key': 'fLjUfxqXtfNoIldA0A0J', 'expired_message': 'Session expired', }) class SessionExpired(web.HTTPError): def __init__(self, message): web.HTTPError.__init__(self, '200 OK', {}, data=message) class Session(utils.ThreadedDict): def __init__(self, app, store, initializer=None): self.__dict__['store'] = store self.__dict__['_initializer'] = initializer self.__dict__['_last_cleanup_time'] = 0 self.__dict__['_config'] = utils.storage(web.config.session_parameters) if app: app.add_processor(self._processor) def _processor(self, handler): self._cleanup() self._load() try: return handler() finally: self._save() def _load(self): cookie_name = self._config.cookie_name cookie_domain = self._config.cookie_domain self.session_id = web.cookies().get(cookie_name) if self.session_id and not self._valid_session_id(self.session_id): self.session_id = None self._check_expiry() if self.session_id: d = self.store[self.session_id] self.update(d) self._validate_ip() if not self.session_id: self.session_id = self._generate_session_id() if self._initializer: if isinstance(self._initializer, dict): self.update(self._initializer) elif hasattr(self._initializer, '__call__'): self._initializer() self.ip = web.ctx.ip def _check_expiry(self): if self.session_id and self.session_id not in self.store: if self._config.ignore_expiry: self.session_id = None else: return self.expired() def _validate_ip(self): if self.session_id and self.get('ip', None) != web.ctx.ip: if not self._config.ignore_change_ip: return self.expired() def _save(self): cookie_name = self._config.cookie_name cookie_domain = self._config.cookie_domain if not self.get('_killed'): web.setcookie(cookie_name, self.session_id, domain=cookie_domain) self.store[self.session_id] = dict(self) else: web.setcookie(cookie_name, self.session_id, expires=-1, domain=cookie_domain)
MIT License
replicahq/doppelganger
doppelganger/bayesnets.py
BayesianNetworkModel.train
python
def train(input_data, structure, fields, prior_data=None):
    type_to_network = {}
    for type_, data in input_data.type_to_data.items():
        if prior_data is not None:
            data = list(data) + list(prior_data)
        bayesian_network = BayesianNetwork.from_structure(data, structure)
        type_to_network[type_] = bayesian_network
    return BayesianNetworkModel(type_to_network, fields, segmenter=input_data.segmenter)
Creates Bayesian networks from the given data with the given structure.
The given data cannot contain any missing data. If called multiple times,
the old model will be replaced. To update the model with new data, see
`update`.

Args:
    input_data (SegmentedData): typed data to train on
    structure (iterable(iterable)): structure as returned from
        define_bayes_net_structure
    fields (list(unicode)): field names to learn
    prior_data (list(data)): optional list of training samples to use as
        a prior for each network.

Return:
    BayesianNetworkModel: A predictive model trained on the given data
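A usage sketch of the trainer above; the toy rows, field names and network structure are made up for illustration, and `SegmentedData` is the companion class defined alongside this model:

# two categorical fields per row, all rows in a single default segment
rows = [('young', 'low'), ('young', 'low'), ('old', 'high'), ('old', 'high')]
data = SegmentedData({'one_segment': rows})

structure = ((), (0,))   # field 1 ("income") depends on field 0 ("age")
model = BayesianNetworkModel.train(data, structure, fields=['age', 'income'])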
https://github.com/replicahq/doppelganger/blob/dc4709b87d48794b48c05d9776fb606fad4a8038/doppelganger/bayesnets.py#L172-L198
from __future__ import ( absolute_import, division, print_function, unicode_literals ) from builtins import range, str from collections import defaultdict, Counter import json import itertools import sys import pandas from pomegranate import BayesianNetwork def default_segmenter(x): return 'one_segment' class SegmentedData(object): def __init__(self, type_to_data, segmenter=None): self.type_to_data = type_to_data self.segmenter = segmenter @staticmethod def from_data(cleaned_data, fields, weight_field=None, segmenter=None): segmenter = segmenter or default_segmenter type_to_data = defaultdict(list) for _, row in cleaned_data.data.iterrows(): type_ = segmenter(row) weight = row[weight_field] if weight_field else 1 cleaned_row = tuple(row[fields]) for _ in range(weight): type_to_data[type_].append(cleaned_row) return SegmentedData(type_to_data, segmenter) def num_rows_data(self): return sum(len(data) for data in self.type_to_data.values()) def types(self): return self.type_to_data.keys() class BayesianNetworkModel(object): def __init__(self, type_to_network, fields, segmenter=None): self.type_to_network = type_to_network self.fields = fields self.distribution_cache = {} self.segmenter = segmenter or default_segmenter @staticmethod def from_file(filename, segmenter=None): with open(filename) as infile: json_string = infile.read() return BayesianNetworkModel.from_json(json_string, segmenter) def write(self, outfilename): with open(outfilename, 'w') as outfile: json_string = self.to_json() outfile.write(json_string) def to_json(self): blob = {'fieldnames': self.fields} blob['type_to_network'] = { type_: json.loads(network.to_json()) for type_, network in self.type_to_network.items() } return json.dumps(blob, indent=4, sort_keys=True) @staticmethod def _df_from_conditional(probabilities): state_map = defaultdict(dict) for row in probabilities: evidence = tuple(row[:-2]) value = row[-2] probability = float(row[-1]) state_map[evidence][value] = probability return pandas.DataFrame.from_dict(state_map).transpose() @staticmethod def _df_from_discrete(probabilities): return pandas.DataFrame(probabilities) def probabilities_as_dataframes(self): segment_to_states = {} for segment, network in self.type_to_network.items(): state_to_dataframes = [] for state in network.states: distribution = json.loads(str(state))['distribution'] if distribution['name'] == 'ConditionalProbabilityTable': probabilities = BayesianNetworkModel._df_from_conditional( distribution['table']) else: probabilities = BayesianNetworkModel._df_from_discrete( distribution['parameters'] ) state_to_dataframes.append(probabilities) segment_to_states[segment] = state_to_dataframes return segment_to_states @staticmethod def from_json(json_string, segmenter=None): json_blob = json.loads(json_string) type_to_network = {} for type_, network_json in json_blob['type_to_network'].items(): type_to_network[type_] = BayesianNetwork.from_json(json.dumps(network_json)) fields = list(json_blob['fieldnames']) return BayesianNetworkModel(type_to_network, fields, segmenter) @staticmethod
Apache License 2.0
talaia-labs/python-teos
teos/watcher.py
Watcher.get_all_responder_trackers
python
def get_all_responder_trackers(self):
    return self.db_manager.load_responder_trackers()
Returns a dictionary with all the trackers stored in the db for the responder.
https://github.com/talaia-labs/python-teos/blob/66bc075d2432c45691af77cde20cbecd46341107/teos/watcher.py#L729-L731
from queue import Queue from threading import Thread from collections import OrderedDict from readerwriterlock import rwlock from teos.logger import get_logger import common.receipts as receipts from common.appointment import AppointmentStatus from common.tools import compute_locator from common.exceptions import BasicException, EncryptionError, InvalidParameter, SignatureError from common.cryptographer import Cryptographer, hash_160 from teos.cleaner import Cleaner from teos.chain_monitor import ChainMonitor from teos.gatekeeper import SubscriptionExpired from teos.extended_appointment import ExtendedAppointment from teos.block_processor import InvalidTransactionFormat class AppointmentLimitReached(BasicException): class AppointmentAlreadyTriggered(BasicException): class AppointmentNotFound(BasicException): class LocatorCache: def __init__(self, blocks_in_cache): self.logger = get_logger(component=LocatorCache.__name__) self.cache = dict() self.blocks = OrderedDict() self.cache_size = blocks_in_cache self.rw_lock = rwlock.RWLockWrite() def init(self, last_known_block, block_processor): target_block_hash = last_known_block for _ in range(self.cache_size): if not target_block_hash: break target_block = block_processor.get_block(target_block_hash, blocking=True) if not target_block: break locator_txid_map = {compute_locator(txid): txid for txid in target_block.get("tx")} self.cache.update(locator_txid_map) self.blocks[target_block_hash] = list(locator_txid_map.keys()) target_block_hash = target_block.get("previousblockhash") self.blocks = OrderedDict(reversed((list(self.blocks.items())))) def get_txid(self, locator): with self.rw_lock.gen_rlock(): return self.cache.get(locator) def update(self, block_hash, locator_txid_map): with self.rw_lock.gen_wlock(): self.cache.update(locator_txid_map) self.blocks[block_hash] = list(locator_txid_map.keys()) self.logger.debug("Block added to cache", block_hash=block_hash) if self.is_full(): self.remove_oldest_block() def is_full(self): with self.rw_lock.gen_rlock(): full = len(self.blocks) > self.cache_size return full def remove_oldest_block(self): with self.rw_lock.gen_wlock(): block_hash, locators = self.blocks.popitem(last=False) for locator in locators: del self.cache[locator] self.logger.debug("Block removed from cache", block_hash=block_hash) def fix(self, last_known_block, block_processor): tmp_cache = LocatorCache(self.cache_size) target_block_hash = last_known_block for _ in range(tmp_cache.cache_size): target_block = block_processor.get_block(target_block_hash, blocking=True) if target_block: locator_txid_map = {compute_locator(txid): txid for txid in target_block.get("tx")} tmp_cache.cache.update(locator_txid_map) tmp_cache.blocks[target_block_hash] = list(locator_txid_map.keys()) target_block_hash = target_block.get("previousblockhash") with self.rw_lock.gen_wlock(): self.blocks = OrderedDict(reversed((list(tmp_cache.blocks.items())))) self.cache = tmp_cache.cache class Watcher: def __init__(self, db_manager, gatekeeper, block_processor, responder, sk, max_appointments, blocks_in_cache): self.logger = get_logger(component=Watcher.__name__) self.appointments = dict() self.locator_uuid_map = dict() self.block_queue = Queue() self.db_manager = db_manager self.gatekeeper = gatekeeper self.block_processor = block_processor self.responder = responder self.max_appointments = max_appointments self.signing_key = sk self.last_known_block = db_manager.load_last_block_hash_watcher() self.locator_cache = LocatorCache(blocks_in_cache) self.rw_lock = 
rwlock.RWLockWrite() @property def tower_id(self): return Cryptographer.get_compressed_pk(self.signing_key.public_key) @property def n_registered_users(self): return self.gatekeeper.n_registered_users @property def n_watcher_appointments(self): with self.rw_lock.gen_rlock(): return len(self.appointments) @property def n_responder_trackers(self): return self.responder.n_responder_trackers def awake(self): watcher_thread = Thread(target=self.do_watch, daemon=True) watcher_thread.start() return watcher_thread def register(self, user_id): available_slots, subscription_expiry, registration_receipt = self.gatekeeper.add_update_user(user_id) signature = Cryptographer.sign(registration_receipt, self.signing_key) return available_slots, subscription_expiry, signature def get_appointment(self, locator, user_signature): message = "get appointment {}".format(locator).encode("utf-8") user_id = self.gatekeeper.authenticate_user(message, user_signature) has_expired, expiry = self.gatekeeper.has_subscription_expired(user_id) if has_expired: raise SubscriptionExpired(f"Your subscription expired at block {expiry}") uuid = hash_160("{}{}".format(locator, user_id)) with self.rw_lock.gen_rlock(): if uuid in self.appointments: appointment_data = self.db_manager.load_watcher_appointment(uuid) status = AppointmentStatus.BEING_WATCHED elif self.responder.has_tracker(uuid): appointment_data = self.db_manager.load_responder_tracker(uuid) status = AppointmentStatus.DISPUTE_RESPONDED else: raise AppointmentNotFound("Cannot find {}".format(locator)) return appointment_data, status def add_appointment(self, appointment, user_signature): with self.rw_lock.gen_wlock(): if len(self.appointments) >= self.max_appointments: message = "Maximum appointments reached, appointment rejected" self.logger.info(message, locator=appointment.locator) raise AppointmentLimitReached(message) user_id = self.gatekeeper.authenticate_user(appointment.serialize(), user_signature) has_subscription_expired, expiry = self.gatekeeper.has_subscription_expired(user_id) if has_subscription_expired: raise SubscriptionExpired(f"Your subscription expired at block {expiry}") start_block = self.block_processor.get_block(self.last_known_block).get("height") extended_appointment = ExtendedAppointment( appointment.locator, appointment.encrypted_blob, appointment.to_self_delay, user_id, user_signature, start_block, ) uuid = hash_160("{}{}".format(extended_appointment.locator, user_id)) if self.responder.has_tracker(uuid): message = "Appointment already in Responder" self.logger.info(message) raise AppointmentAlreadyTriggered(message) available_slots = self.gatekeeper.add_update_appointment(user_id, uuid, extended_appointment) dispute_txid = self.locator_cache.get_txid(extended_appointment.locator) if dispute_txid: try: penalty_txid, penalty_rawtx = self.check_breach(uuid, extended_appointment, dispute_txid) receipt = self.responder.handle_breach( uuid, extended_appointment.locator, dispute_txid, penalty_txid, penalty_rawtx, user_id, self.last_known_block, ) if receipt.delivered: self.db_manager.store_watcher_appointment(uuid, extended_appointment.to_dict()) self.db_manager.create_triggered_appointment_flag(uuid) except (EncryptionError, InvalidTransactionFormat): pass else: self.appointments[uuid] = extended_appointment.get_summary() if extended_appointment.locator in self.locator_uuid_map: if uuid not in self.locator_uuid_map[extended_appointment.locator]: self.locator_uuid_map[extended_appointment.locator].append(uuid) else: 
self.locator_uuid_map[extended_appointment.locator] = [uuid] self.db_manager.store_watcher_appointment(uuid, extended_appointment.to_dict()) try: signature = Cryptographer.sign( receipts.create_appointment_receipt(user_signature, start_block), self.signing_key ) except (InvalidParameter, SignatureError): self.logger.error("Data couldn't be signed", appointment=extended_appointment.to_dict()) signature = None self.logger.info("New appointment accepted", locator=extended_appointment.locator) return { "locator": extended_appointment.locator, "start_block": extended_appointment.start_block, "signature": signature, "available_slots": available_slots, "subscription_expiry": self.gatekeeper.get_user_info(user_id).subscription_expiry, } def do_watch(self): if self.last_known_block is None: self.last_known_block = self.block_processor.get_best_block_hash(blocking=True) self.db_manager.store_last_block_hash_watcher(self.last_known_block) self.locator_cache.init(self.last_known_block, self.block_processor) while True: block_hash = self.block_queue.get() if block_hash == ChainMonitor.END_MESSAGE: break block = self.block_processor.get_block(block_hash, blocking=True) self.logger.info( "New block received", block_hash=block_hash, prev_block_hash=block.get("previousblockhash") ) if self.last_known_block != block.get("previousblockhash"): self.locator_cache.fix(block_hash, self.block_processor) txids = block.get("tx") locator_txid_map = {compute_locator(txid): txid for txid in txids} self.locator_cache.update(block_hash, locator_txid_map) with self.rw_lock.gen_wlock(): if len(self.appointments) > 0 and locator_txid_map: outdated_appointments = self.gatekeeper.get_outdated_appointments(block["height"]) outdated_appointments = list(set(outdated_appointments).intersection(self.appointments.keys())) Cleaner.delete_appointments( outdated_appointments, self.appointments, self.locator_uuid_map, self.db_manager, outdated=True ) valid_breaches, invalid_breaches = self.filter_breaches(self.get_breaches(locator_txid_map)) triggered_flags = [] appointments_to_delete = [] for uuid, breach in valid_breaches.items(): self.logger.info( "Notifying responder and deleting appointment", penalty_txid=breach["penalty_txid"], locator=breach["locator"], uuid=uuid, ) receipt = self.responder.handle_breach( uuid, breach["locator"], breach["dispute_txid"], breach["penalty_txid"], breach["penalty_rawtx"], self.appointments[uuid].get("user_id"), block_hash, ) if receipt.delivered: Cleaner.delete_appointment_from_memory(uuid, self.appointments, self.locator_uuid_map) triggered_flags.append(uuid) else: appointments_to_delete.append(uuid) appointments_to_delete.extend(invalid_breaches) appointments_to_delete_gatekeeper = { uuid: self.appointments[uuid].get("user_id") for uuid in appointments_to_delete } self.db_manager.batch_create_triggered_appointment_flag(triggered_flags) Cleaner.delete_appointments( appointments_to_delete, self.appointments, self.locator_uuid_map, self.db_manager ) self.gatekeeper.delete_appointments(appointments_to_delete_gatekeeper) if not self.appointments: self.logger.info("No more pending appointments") self.db_manager.store_last_block_hash_watcher(block_hash) self.last_known_block = block.get("hash") self.block_queue.task_done() def get_breaches(self, locator_txid_map): intersection = set(self.locator_uuid_map.keys()).intersection(locator_txid_map.keys()) breaches = {locator: locator_txid_map[locator] for locator in intersection} if len(breaches) > 0: self.logger.info("List of breaches", breaches=breaches) 
else: self.logger.info("No breaches found") return breaches def check_breach(self, uuid, appointment, dispute_txid): try: penalty_rawtx = Cryptographer.decrypt(appointment.encrypted_blob, dispute_txid) penalty_tx = self.block_processor.decode_raw_transaction(penalty_rawtx, blocking=True) except EncryptionError as e: self.logger.info("Transaction cannot be decrypted", uuid=uuid) raise e except InvalidTransactionFormat as e: self.logger.info("The breach contained an invalid transaction", uuid=uuid) raise e self.logger.info( "Breach found for locator", locator=appointment.locator, uuid=uuid, penalty_txid=penalty_tx.get("txid") ) return penalty_tx.get("txid"), penalty_rawtx def filter_breaches(self, breaches): valid_breaches = {} invalid_breaches = [] decrypted_blobs = {} for locator, dispute_txid in breaches.items(): for uuid in self.locator_uuid_map[locator]: appointment = ExtendedAppointment.from_dict(self.db_manager.load_watcher_appointment(uuid)) if appointment.encrypted_blob in decrypted_blobs: penalty_txid, penalty_rawtx = decrypted_blobs[appointment.encrypted_blob] valid_breaches[uuid] = { "locator": appointment.locator, "dispute_txid": dispute_txid, "penalty_txid": penalty_txid, "penalty_rawtx": penalty_rawtx, } else: try: penalty_txid, penalty_rawtx = self.check_breach(uuid, appointment, dispute_txid) valid_breaches[uuid] = { "locator": appointment.locator, "dispute_txid": dispute_txid, "penalty_txid": penalty_txid, "penalty_rawtx": penalty_rawtx, } decrypted_blobs[appointment.encrypted_blob] = (penalty_txid, penalty_rawtx) except (EncryptionError, InvalidTransactionFormat): invalid_breaches.append(uuid) return valid_breaches, invalid_breaches def get_registered_user_ids(self): return self.gatekeeper.user_ids def get_user_info(self, user_id): return self.gatekeeper.get_user_info(user_id) def get_subscription_info(self, signature): message = "get subscription info".encode("utf-8") user_id = self.gatekeeper.authenticate_user(message, signature) has_expired, expiry = self.gatekeeper.has_subscription_expired(user_id) if has_expired: raise SubscriptionExpired(f"Your subscription expired at block {expiry}") subscription_info = self.gatekeeper.get_user_info(user_id) with self.rw_lock.gen_rlock(): locators = [] for appt_uuid in subscription_info.appointments: if appt_uuid in self.appointments: locators.append(self.appointments.get(appt_uuid).get("locator")) elif self.responder.has_tracker(appt_uuid): locators.append(self.responder.get_tracker(appt_uuid).get("locator")) else: self.logger.debug("The appointment uuid was not found in the watcher or the responder.") return subscription_info, locators def get_all_watcher_appointments(self): return self.db_manager.load_watcher_appointments()
MIT License
stifael/offboard
src/bezier_mapping.py
start
python
def start():
    nh = rospy.init_node('beziermapping')
    mp = mapping(nh)
    rospy.spin()

    '''r = rospy.Rate(150)
    while not rospy.is_shutdown():
        if mp._run_bz_controller:
            mp._pub_thrust_sp_desired()
        r.sleep()'''
r = rospy.Rate(150)
while not rospy.is_shutdown():
    if mp._run_bz_controller:
        mp._pub_thrust_sp_desired()
    r.sleep()
https://github.com/stifael/offboard/blob/d8258cd81e3f51943f89ca6c23d5529dfe090313/src/bezier_mapping.py#L574-L593
import rospy from mavros_msgs.msg import State, AttitudeTarget, PositionTarget, AvoidanceTriplet from nav_msgs.msg import Odometry from geometry_msgs.msg import PoseStamped, TwistStamped, Vector3Stamped, Quaternion, Vector3, Point from sensor_msgs.msg import Imu from nav_msgs.msg import Path import time from tf.transformations import * import numpy as np import common_functions as cf import bezier_fn as bf import pub_bezier from dynamic_reconfigure.server import Server from offboard.cfg import PIDConfig from offboard.msg import ThreePointMsg import controller RATE_STATE = 1 class pidCoeff(): def __init__(self): self.Pp = np.zeros(3) self.Pv = np.zeros(3) self.Iv = np.zeros(3) self.Dv = np.zeros(3) self.Pa = np.zeros(3) self.Ia = np.zeros(3) self.Da = np.zeros(3) self.Mxy = 0.0 self.Mz = 0.0 class mapping(): def __init__(self, nh): self._run_bz_controller = False self._vel_pub = rospy.Publisher('mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=10 ) self._vel_msg = TwistStamped() self._accel_pub = rospy.Publisher('/mavros/setpoint_accel/accel', Vector3Stamped, queue_size=10 ) self._accel_msg = Vector3Stamped() self._att_pub = rospy.Publisher('/mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=10) self._att_msg = AttitudeTarget() self._att_msg.type_mask = 7 self._acc_yaw_pub = rospy.Publisher('/mavros/setpoint_raw/local', PositionTarget, queue_size= 10) self._acc_yaw_msg = PositionTarget() self._acc_yaw_msg.type_mask = 2048 + 32 + 16 + 8 + 4 + 2 + 1 self._vel_yaw_pub = rospy.Publisher('/mavros/setpoint_raw/local', PositionTarget, queue_size= 10) self._vel_yaw_msg = PositionTarget() self._vel_yaw_msg.type_mask = 1 + 2 + 4 + 64 + 128 + 256 + 2048 self._bezier_triplet_pub = rospy.Publisher('/mavros/avoidance_triplet', AvoidanceTriplet, queue_size=10) self._bezier_triplet_msg = AvoidanceTriplet() self._bezier_duration = 1.0 self._pub_visualize = pub_bezier.pub_bezier() self._dt = 0.0 self._pid_coeff = pidCoeff() Server(PIDConfig, self._pidcallback) self._ctr = controller.controller(self._pid_coeff, 9.91) self._rate_state = rospy.Rate(RATE_STATE) self._current_state = State() rospy.Subscriber('/mavros/state', State , self._current_state_cb) self._local_pose = PoseStamped() self._local_pose.pose.position = cf.p_numpy_to_ros([0.0,0.0,0.0]) rospy.Subscriber('/mavros/local_position/pose', PoseStamped, self._local_pose_cb) self._local_vel = TwistStamped() self._local_vel.twist.linear = cf.p_numpy_to_ros([0.0,0.0,0.0]) rospy.Subscriber('/mavros/local_position/velocity', TwistStamped, self._local_vel_cb) self._bezier_pt = [] rospy.Subscriber('/path/bezier_pt', Path, self._bezier_cb) rospy.Subscriber('/path/three_point_message', ThreePointMsg, self._three_point_msg_cb) self._linear_acc = Vector3() self._linear_acc = cf.p_numpy_to_ros_vector([0.0,0.0,0.0]) rospy.Subscriber('/mavros/imu/data', Imu, self._imu_cb) def _pub_thrust_sp_desired(self): p_c = cf.p_ros_to_numpy(self._local_pose.pose.position) q_c = cf.q_ros_to_numpy(self._local_pose.pose.orientation) v_c =cf.p_ros_to_numpy(self._local_vel.twist.linear) a_c = cf.p_ros_to_numpy(self._linear_acc) a_c = np.dot(cf.rotation_from_q_transpose(q_c), a_c) bz = [cf.p_ros_to_numpy(self._bezier_pt[0]), cf.p_ros_to_numpy(self._bezier_pt[1]), cf.p_ros_to_numpy(self._bezier_pt[2])] p_star, v_star, a_star = bf.point_closest_to_bezier(bz, p_c, self._bezier_duration) '''p_star = np.array([0.0,0.0,5.0]) v_star = np.array([0.0,0.0,0.0]) a_star = np.array([0.0,0.0,0])''' self._ctr.set_states(p_c, v_c, a_c, p_star, v_star, a_star, self._pid_coeff) 
thrust_des, v_sp, vc = self._ctr.update_thrust_old(time.time()) self._visualize_vel(p_c, vc) self._visualize_acc(p_c, v_sp ) self._visualize_x(p_c) self._visualize_target(p_star) yaw_desired = 0.0 v_star_norm= np.linalg.norm(v_star) z = np.array([0.0,0.0,1.0]) if (v_star_norm > 0.0) and not (np.array_equal(np.abs(v_star/v_star_norm), z)): yaw_desired = self.get_desired_yaw(v_star) - np.pi/2.0 self._acc_yaw_msg.acceleration_or_force = cf.p_numpy_to_ros_vector(thrust_des) self._acc_yaw_msg.yaw = yaw_desired self._acc_yaw_pub.publish(self._acc_yaw_msg) def _pub_att_desired(self): q = Quaternion() q.x =0.0 q.y = 0.0 q.z = 1.0 q.w = 0.0 self._att_msg.orientation = q self._att_msg.thrust =1.0 self._att_pub.publish(self._att_msg) def _pub_acc_yaw_desired(self): a = Vector3() a.x = 0.0 a.y = 0.0 a.z = 0.2 self._acc_yaw_msg.acceleration_or_force = a self._local_pub.publish(self._acc_yaw_msg) def _pub_v_desired(self): pose = cf.p_ros_to_numpy(self._local_pose.pose.position) bz = [cf.p_ros_to_numpy(self._bezier_pt[0]), cf.p_ros_to_numpy(self._bezier_pt[1]), cf.p_ros_to_numpy(self._bezier_pt[2])] p_des, v_des, a_des = bf.point_closest_to_bezier(bz, pose, self._bezier_duration) print a_des self._visualize_vel(p_des, v_des) self._visualize_x(pose) v_final = bf.vel_adjusted(p_des, v_des, pose) v_final *= min(np.linalg.norm(v_final), 3.0) / np.linalg.norm(v_final) theta = 0.0 v_des_norm= np.linalg.norm(v_des) z = np.array([0.0,0.0,1.0]) if (v_des_norm > 0.0) and not (np.array_equal(np.abs(v_des/v_des_norm), z)): theta = self.angle_error(v_des) yaw_desired = self.get_desired_yaw(v_des) - np.pi/2.0 self._vel_yaw_msg.velocity = cf.p_numpy_to_ros_vector(v_final) self._vel_yaw_msg.yaw = yaw_desired self._vel_yaw_pub.publish(self._vel_yaw_msg) def _visualize_x(self, pose): q_c = cf.q_ros_to_numpy(self._local_pose.pose.orientation) x_b = np.array([1.0,0.0,0.0]) x = np.dot(cf.rotation_from_q_transpose(q_c), x_b) pt = cf.p_numpy_to_ros(pose) pt2 = cf.p_numpy_to_ros(pose + x) pts = [pt, pt2] self._pub_visualize.pub_x_vec(pts) def _visualize_target(self, p): pt = cf.p_numpy_to_ros(p) self._pub_visualize.pub_target(pt) def _visualize_vel(self, p, v): pt = cf.p_numpy_to_ros(p) pt2 = cf.p_numpy_to_ros(v + p) points = [pt, pt2] self._pub_visualize.pub_velocity(points) def _visualize_acc(self, p, a): pt = cf.p_numpy_to_ros(p) pt2 = cf.p_numpy_to_ros( p + a) points = [pt, pt2] self._pub_visualize.pub_a_vec(points) def _pub_a_desired(self): pose = cf.p_ros_to_numpy(self._local_pose.pose.position) velocity = cf.p_ros_to_numpy(self._local_vel.twist.linear) bz = [cf.p_ros_to_numpy(self._bezier_pt[0]), cf.p_ros_to_numpy(self._bezier_pt[1]), cf.p_ros_to_numpy(self._bezier_pt[2])] p_des, v_des, a_des = bf.point_closest_to_bezier(bz, pose, self._bezier_duration) print a_des '''theta = 0.0 v_des_norm= np.linalg.norm(v_des) z = np.array([0.0,0.0,1.0]) if (v_des_norm > 0.0) and not (np.array_equal(np.abs(v_des/v_des_norm), z)): #yaw not defined if norm(v_des) or v_des == z theta = self.angle_error(v_des)''' self._accel_msg.vector = cf.p_numpy_to_ros(a_des) self._accel_pub.publish(self._accel_msg) def angle_error(self, v_des): q_c = cf.q_ros_to_numpy(self._local_pose.pose.orientation) vb_des = np.dot(cf.rotation_from_q(q_c), v_des) z = np.array([0.0,0.0,1.0]) x = np.array([1.0,0.0,0.0]) vb_des_proj = vb_des - z * np.dot(z, np.transpose(vb_des)) vb_proj_n = vb_des_proj / np.linalg.norm(vb_des_proj) theta = np.arccos(np.dot(x, np.transpose(vb_proj_n))) cross = np.cross(x, vb_proj_n) if ( cross[2] < 0.0 ): theta *= -1.0 return theta def 
get_desired_yaw(self, v_des): z = np.array([0.0,0.0,1.0]) x = np.array([1.0,0.0,0.0]) v_des_proj = v_des - z * np.dot(z, np.transpose(v_des)) v_des_p_n = v_des_proj / np.linalg.norm(v_des_proj) angle = np.arccos(np.dot(x, np.transpose(v_des_p_n))) cross = np.cross(x, v_des_p_n) if (cross[2] < 0.0): angle *= -1 return angle def get_current_yaw(self): q_c = cf.q_ros_to_numpy(self._local_pose.pose.orientation) x_b = np.array([1.0,0.0,0.0]) x_w = np.dot(cf.rotation_from_q_transpose(q_c), x_b) z = np.array([0.0,0.0,1.0]) x = np.array([1.0,0.0,0.0]) x_w_proj = x_w - z * np.dot(z, np.transpose(x_w)) x_w_proj_n = x_w_proj / np.linalg.norm(x_w_proj) yaw = np.arccos(np.dot(x, np.transpose(x_w_proj_n))) cross = np.cross(x, x_w_proj_n) if (cross[2] < 0.0): yaw *= -1.0 return yaw def send_bezier_triplet(self): self._bezier_triplet_msg.prev = self._bezier_pt[0] self._bezier_triplet_msg.ctrl = self._bezier_pt[1] self._bezier_triplet_msg.next = self._bezier_pt[2] self._bezier_triplet_msg.acc_per_err = 0.0 self._bezier_triplet_msg.duration = 1.0 self._bezier_triplet_msg.max_acc = 5.0 print self._bezier_pt self._bezier_triplet_pub.publish(self._bezier_triplet_msg) def _current_state_cb(self, data): self._current_state = data def _local_pose_cb(self, data): self._local_pose = data def _local_vel_cb(self, data): self._local_vel = data def _imu_cb(self, data): self._linear_acc = data.linear_acceleration if self._run_bz_controller: self._pub_thrust_sp_desired() def _bezier_cb(self, data): self._bezier_pt = [pose.pose.position for pose in data.poses] self._run_bz_controller = True def _three_point_msg_cb(self, data): self._bezier_pt = [data.prev, data.ctrl, data.next] self._bezier_duration = data.duration self._run_bz_controller = True def _pidcallback(self, config, level): rospy.loginfo("""Reconfigure Request: {Pxy_p}, {Pz_p},\ #{Pxy_v}, {Pz_v}, {Ixy_v}, {Ixy_v}, {Iz_v}, {Dxy_v}, {Dz_v}, {Mz}, {Mxy}""".format(**config)) pid = pidCoeff() pid.Pp[0] = config.Pxy_p pid.Pp[1] = config.Pxy_p pid.Pp[2] = config.Pz_p pid.Pv[0] = config.Pxy_v pid.Pv[1] = config.Pxy_v pid.Pv[2] = config.Pz_v pid.Iv[0] = config.Ixy_v pid.Iv[1] = config.Ixy_v pid.Iv[2] = config.Iz_v pid.Dv[0] = config.Dxy_v pid.Dv[1] = config.Dxy_v pid.Dv[2] = config.Dz_v pid.Pa[0] = config.Pxy_a pid.Pa[1] = config.Pxy_a pid.Pa[2] = config.Pz_a pid.Ia[0] = config.Ixy_a pid.Ia[1] = config.Ixy_a pid.Ia[2] = config.Iz_a pid.Da[0] = config.Dxy_a pid.Da[1] = config.Dxy_a pid.Da[2] = config.Dz_a pid.Mxy= config.Mxy pid.Mz = config.Mz self._pid_coeff = pid print self._pid_coeff.Pp return config
BSD 3-Clause New or Revised License
reliaqualassociates/ramstk
src/ramstk/views/gtk3/validation/view.py
ValidationGeneralDataView._do_request_calculate_all
python
def _do_request_calculate_all(self, __button: Gtk.ToolButton) -> None:
    super().do_set_cursor_busy()
    pub.sendMessage(
        "request_calculate_validation_tasks",
    )
Request to calculate program cost and time.

:param __button: the Gtk.ToolButton() that called this method.
:return: None
:rtype: None
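The handler above only publishes a pypubsub message; whatever controller owns the calculation is expected to subscribe to the same topic. A minimal sketch of the listening side (the listener function is illustrative, not part of RAMSTK):

from pubsub import pub

def _on_calculate_all():
    # perform the actual cost/time roll-up here
    print("calculating all verification tasks")

pub.subscribe(_on_calculate_all, "request_calculate_validation_tasks")
pub.sendMessage("request_calculate_validation_tasks")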
https://github.com/reliaqualassociates/ramstk/blob/ffec5a107424914cf0026c6dfe26369c221f79f9/src/ramstk/views/gtk3/validation/view.py#L301-L311
from typing import Any, Dict from pubsub import pub from ramstk.configuration import RAMSTKUserConfiguration from ramstk.logger import RAMSTKLogManager from ramstk.views.gtk3 import Gtk, _ from ramstk.views.gtk3.widgets import ( RAMSTKMessageDialog, RAMSTKModuleView, RAMSTKPanel, RAMSTKWorkView, ) from . import ( ValidationTaskDescriptionPanel, ValidationTaskEffortPanel, ValidationTreePanel, ) class ValidationModuleView(RAMSTKModuleView): _tag: str = "validation" _tablabel: str = "Verification" _tabtooltip: str = _( "Displays the list of verification tasks for the selected Revision." ) def __init__( self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager ) -> None: super().__init__(configuration, logger) self._dic_icons["tab"] = ( self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/validation.png" ) self._lst_mnu_labels = [ _("Add Verification Task"), _("Delete Selected Task"), _("Save Selected Task"), _("Save All Tasks"), ] self._lst_tooltips = [ _("Add a new verification task."), _("Remove the currently selected verification task."), _("Save changes to the currently selected verification task."), _("Save changes to all verification tasks."), ] self._pnlPanel = ValidationTreePanel() self.__make_ui() pub.subscribe(self._do_set_record_id, f"selected_{self._tag}") def do_request_delete(self, __button: Gtk.ToolButton) -> None: _parent = self.get_parent().get_parent().get_parent().get_parent().get_parent() _prompt = _( "You are about to delete Validation {0:d} and all " "data associated with it. Is this really what " "you want to do?" ).format(self._record_id) _dialog: RAMSTKMessageDialog = RAMSTKMessageDialog(parent=_parent) _dialog.do_set_message(_prompt) _dialog.do_set_message_type("question") if _dialog.do_run() == Gtk.ResponseType.YES: super().do_set_cursor_busy() pub.sendMessage("request_delete_validation", node_id=self._record_id) _dialog.do_destroy() def _do_set_record_id(self, attributes: Dict[str, Any]) -> None: self._record_id = attributes["validation_id"] def __make_ui(self) -> None: super().make_ui() self._pnlPanel.do_load_measurement_units( self.RAMSTK_USER_CONFIGURATION.RAMSTK_MEASUREMENT_UNITS ) self._pnlPanel.do_load_verification_types( self.RAMSTK_USER_CONFIGURATION.RAMSTK_VALIDATION_TYPE ) self._pnlPanel.do_set_cell_callbacks( "mvw_editing_validation", [ "acceptable_maximum", "acceptable_mean", "acceptable_minimum", "acceptable_variance", "confidence", "cost_average", "cost_maximum", "cost_minimum", "date_end", "date_start", "description", "measurement_unit", "name", "status", "task_specification", "task_type", "time_average", "time_maximum", "time_minimum", ], ) self._pnlPanel.tvwTreeView.dic_handler_id[ "button-press" ] = self._pnlPanel.tvwTreeView.connect( "button_press_event", super().on_button_press ) class ValidationGeneralDataView(RAMSTKWorkView): _tag: str = "validation" _tablabel: str = _("General\nData") _tabtooltip: str = _( "Displays general information for the selected Verification task." ) def __init__( self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager ) -> None: super().__init__(configuration, logger) self._lst_callbacks = [ self._do_request_calculate, self._do_request_calculate_all, super().do_request_update, super().do_request_update_all, ] self._lst_icons = ["calculate", "calculate_all", "save", "save-all"] self._lst_mnu_labels = [ _("Calculate Task"), _("Calculate Program"), _("Save"), _("Save All"), ] self._lst_tooltips = [ _( "Calculate the expected cost and time of the selected " "Validation task." 
), _( "Calculate the cost and time of the program (i.e., all " "Validation tasks)." ), _("Save changes to the selected Validation task."), _("Save changes to all Validation tasks."), ] self._pnlTaskDescription: RAMSTKPanel = ValidationTaskDescriptionPanel() self._pnlTaskEffort: RAMSTKPanel = ValidationTaskEffortPanel() self.__make_ui() pub.subscribe(super().do_set_cursor_active, "succeed_calculate_validation_task") pub.subscribe( super().do_set_cursor_active_on_fail, "fail_calculate_validation_task" ) pub.subscribe(self._do_set_record_id, "selected_validation") def _do_request_calculate(self, __button: Gtk.ToolButton) -> None: super().do_set_cursor_busy() pub.sendMessage( "request_calculate_validation_task", node_id=self._record_id, )
BSD 3-Clause New or Revised License
alontalmor/leapofthought
LeapOfThought/common/file_utils.py
s3_get
python
def s3_get(url: str, temp_file: IO) -> None:
    s3_resource = get_s3_resource()
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
Pull a file directly from S3.
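A usage sketch for the helper above, streaming an object into a temporary file; the bucket and key in the URL are placeholders:

import tempfile

with tempfile.NamedTemporaryFile() as temp_file:
    s3_get("s3://my-bucket/path/to/object", temp_file)
    temp_file.seek(0)          # rewind before reading what was just downloaded
    payload = temp_file.read()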
https://github.com/alontalmor/leapofthought/blob/8912a34bebf25dac4397db011613a25b62b3d4ae/LeapOfThought/common/file_utils.py#L173-L177
import os import re import gzip import logging import shutil import tempfile import json from urllib.parse import urlparse from pathlib import Path from typing import Optional, Tuple, Union, IO, Callable, Set from hashlib import sha256 from functools import wraps import boto3 import botocore from botocore.exceptions import ClientError import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry from allennlp.common.tqdm import Tqdm logger = logging.getLogger(__name__) CACHE_ROOT = Path(os.getenv('LEAPOFTHOUGHT_CACHE_ROOT', Path.home() / '.teachyourai')) CACHE_DIRECTORY = str(CACHE_ROOT / "cache") DEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / "datasets") DATASET_CACHE = CACHE_DIRECTORY if os.path.exists(DEPRECATED_CACHE_DIRECTORY): logger = logging.getLogger(__name__) logger.warning(f"Deprecated cache directory found ({DEPRECATED_CACHE_DIRECTORY}). " f"Please remove this directory from your system to free up space.") def url_to_filename(url: str, etag: str = None) -> str: url_bytes = url.encode('utf-8') url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode('utf-8') etag_hash = sha256(etag_bytes) filename += '.' + etag_hash.hexdigest() return filename def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]: if cache_dir is None: cache_dir = CACHE_DIRECTORY cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise FileNotFoundError("file {} not found".format(cache_path)) meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise FileNotFoundError("file {} not found".format(meta_path)) with open(meta_path) as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return url, etag def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str: if cache_dir is None: cache_dir = CACHE_DIRECTORY if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) url_or_filename = os.path.expanduser(url_or_filename) parsed = urlparse(url_or_filename) if parsed.scheme in ('http', 'https', 's3'): return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): return url_or_filename elif parsed.scheme == '': raise FileNotFoundError("file {} not found".format(url_or_filename)) else: raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) def is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool: if url_or_filename is None: return False url_or_filename = os.path.expanduser(str(url_or_filename)) parsed = urlparse(url_or_filename) return parsed.scheme in ('http', 'https', 's3') or os.path.exists(url_or_filename) def split_s3_path(url: str) -> Tuple[str, str]: parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func: Callable): @wraps(func) def wrapper(url: str, *args, **kwargs): try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise FileNotFoundError("file {} not found".format(url)) else: raise return wrapper def get_s3_resource(): session = boto3.session.Session() if session.get_credentials() is None: s3_resource = session.resource("s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)) else: s3_resource = session.resource("s3") return 
s3_resource @s3_request def s3_etag(url: str) -> Optional[str]: s3_resource = get_s3_resource() bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request
MIT License
pytorch/fairseq
examples/MMPT/mmpt/models/mmfusion.py
MMFusionMTM.__init__
python
def __init__(self, config, **kwargs):
    super().__init__(config)
    from .transformermodel import MMBertForMTM
    model_config = AutoConfig.from_pretrained(config.dataset.bert_name)
    model_config.max_video_len = config.dataset.max_video_len
    model_config.use_seg_emb = config.model.use_seg_emb
    self.mm_encoder = MMBertForMTM.from_pretrained(
        config.dataset.bert_name, config=model_config)
For reproducibility: self.mm_encoder will be initialized then discarded.
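The constructor follows a common Hugging Face pattern: load the backbone's config, attach the extra attributes the custom subclass expects, then hand the augmented config to `from_pretrained`. A generic sketch of that pattern, using a plain `BertModel` as a stand-in for `MMBertForMTM`:

from transformers import AutoConfig, BertModel

config = AutoConfig.from_pretrained("bert-base-uncased")
config.max_video_len = 32     # custom fields read by the multimodal subclass
config.use_seg_emb = True

model = BertModel.from_pretrained("bert-base-uncased", config=config)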
https://github.com/pytorch/fairseq/blob/fcca32258c8e8bcc9f9890bf4714fa2f96b6b3e1/examples/MMPT/mmpt/models/mmfusion.py#L352-L363
import torch from torch import nn try: from transformers import AutoConfig, AutoTokenizer except ImportError: pass from . import transformermodel class MMPTModel(nn.Module): @classmethod def from_pretrained(cls, config, checkpoint="checkpoint_best.pt"): import os from ..utils import recursive_config from ..tasks import Task config = recursive_config(config) mmtask = Task.config_task(config) checkpoint_path = os.path.join(config.eval.save_path, checkpoint) mmtask.build_model(checkpoint=checkpoint_path) from ..processors.models.s3dg import S3D video_encoder = S3D('pretrained_models/s3d_dict.npy', 512) video_encoder.load_state_dict( torch.load('pretrained_models/s3d_howto100m.pth')) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name, use_fast=config.dataset.use_fast ) from ..processors import Aligner aligner = Aligner(config.dataset) return ( MMPTModel(config, mmtask.model, video_encoder), tokenizer, aligner ) def __init__(self, config, model, video_encoder, **kwargs): super().__init__() self.max_video_len = config.dataset.max_video_len self.video_encoder = video_encoder self.model = model def forward(self, video_frames, caps, cmasks, return_score=False): bsz = video_frames.size(0) assert bsz == 1, "only bsz=1 is supported now." seq_len = video_frames.size(1) video_frames = video_frames.view(-1, *video_frames.size()[2:]) vfeats = self.video_encoder(video_frames.permute(0, 4, 1, 2, 3)) vfeats = vfeats['video_embedding'] vfeats = vfeats.view(bsz, seq_len, vfeats.size(-1)) padding = torch.zeros( bsz, self.max_video_len - seq_len, vfeats.size(-1)) vfeats = torch.cat([vfeats, padding], dim=1) vmasks = torch.cat([ torch.ones((bsz, seq_len), dtype=torch.bool), torch.zeros((bsz, self.max_video_len - seq_len), dtype=torch.bool) ], dim=1 ) output = self.model(caps, cmasks, vfeats, vmasks) if return_score: output = {"score": torch.bmm( output["pooled_video"][:, None, :], output["pooled_text"][:, :, None] ).squeeze(-1).squeeze(-1)} return output class MMFusion(nn.Module): def __init__(self, config, **kwargs): super().__init__() transformer_config = AutoConfig.from_pretrained( config.dataset.bert_name) self.hidden_size = transformer_config.hidden_size self.is_train = False if config.dataset.train_path is not None: self.is_train = True self.num_hidden_layers = transformer_config.num_hidden_layers self.last_iso_layer = 0 if config.dataset.num_iso_layer is not None: self.last_iso_layer = config.dataset.num_iso_layer - 1 + 1 if config.model.mm_encoder_cls is not None: mm_encoder_cls = getattr(transformermodel, config.model.mm_encoder_cls) model_config = AutoConfig.from_pretrained(config.dataset.bert_name) model_config.max_video_len = config.dataset.max_video_len model_config.use_seg_emb = config.model.use_seg_emb self.mm_encoder = mm_encoder_cls.from_pretrained( config.dataset.bert_name, config=model_config) elif config.model.video_encoder_cls is not None and config.model.text_encoder_cls is not None: video_encoder_cls = getattr(transformermodel, config.model.video_encoder_cls) model_config = AutoConfig.from_pretrained(config.dataset.bert_name) model_config.max_video_len = config.dataset.max_video_len if hasattr(model_config, "num_layers"): model_config.num_layers = config.model.num_hidden_video_layers else: model_config.num_hidden_layers = config.model.num_hidden_video_layers self.video_encoder = video_encoder_cls.from_pretrained( config.dataset.bert_name, config=model_config) text_encoder_cls = getattr(transformermodel, config.model.text_encoder_cls) 
self.text_encoder = text_encoder_cls.from_pretrained( config.dataset.bert_name) else: raise ValueError("the encoder must be either MM or two backbones.") def forward( self, caps, cmasks, vfeats, vmasks, **kwargs ): raise NotImplementedError( "Please derive MMFusion module." ) def _mm_on_the_fly( self, cmasks, vmasks, attention_mask ): if attention_mask is None: attention_mask = self._mm_attention_mask(cmasks, vmasks) """ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | """ token_type_ids = torch.cat( [ torch.zeros( (vmasks.size(0), vmasks.size(1) + 2), dtype=torch.long, device=vmasks.device, ), torch.ones( (cmasks.size(0), cmasks.size(1) - 2), dtype=torch.long, device=cmasks.device, ), ], dim=1, ) return attention_mask, token_type_ids def _mm_attention_mask(self, cmasks, vmasks): assert cmasks.size(0) == vmasks.size(0), "{}, {}, {}, {}".format( str(cmasks.size()), str(vmasks.size()), str(cmasks.size(0)), str(vmasks.size(0)), ) mm_mask = torch.cat([cmasks[:, :1], vmasks, cmasks[:, 1:]], dim=1) if self.last_iso_layer == 0: return mm_mask else: batch_size = cmasks.size(0) iso_mask = self._make_iso_mask(batch_size, cmasks, vmasks) mm_mask = mm_mask[:, None, :].repeat(1, mm_mask.size(-1), 1) iso_mm_masks = [] iso_mask = iso_mask[:, None, :, :].repeat( 1, self.last_iso_layer, 1, 1) iso_mm_masks.append(iso_mask) if self.last_iso_layer < self.num_hidden_layers: mm_mask = mm_mask[:, None, :, :].repeat( 1, self.num_hidden_layers - self.last_iso_layer, 1, 1 ) iso_mm_masks.append(mm_mask) iso_mm_masks = torch.cat(iso_mm_masks, dim=1) return iso_mm_masks def _make_iso_mask(self, batch_size, cmasks, vmasks): cls_self_mask = torch.cat( [ torch.ones( (batch_size, 1), dtype=torch.bool, device=cmasks.device), torch.zeros( (batch_size, cmasks.size(1) + vmasks.size(1) - 1), dtype=torch.bool, device=cmasks.device) ], dim=1) iso_video_mask = torch.cat( [ torch.zeros( (batch_size, 1), dtype=torch.bool, device=cmasks.device ), vmasks, cmasks[:, 1:2], torch.zeros( (batch_size, cmasks.size(1) - 2), dtype=torch.bool, device=cmasks.device, ), ], dim=1, ) iso_text_mask = torch.cat( [ torch.zeros( (batch_size, 2 + vmasks.size(1)), dtype=torch.bool, device=cmasks.device, ), cmasks[:, 2:], ], dim=1, ) cls_self_mask = cls_self_mask[:, None, :] iso_video_mask = iso_video_mask[:, None, :].repeat( 1, vmasks.size(1) + 1, 1) iso_text_mask = iso_text_mask[:, None, :].repeat( 1, cmasks.size(1) - 2, 1) return torch.cat([cls_self_mask, iso_video_mask, iso_text_mask], dim=1) def _pooling_vt_layer( self, layered_sequence_output, cmasks, vmasks ): layer_idx = self.last_iso_layer if self.last_iso_layer > 0 else self.num_hidden_layers hidden_state = layered_sequence_output[layer_idx] batch_size = cmasks.size(0) text_offset = vmasks.size(1) + 2 video_outputs = hidden_state[:, 1:text_offset] video_attention_mask = torch.cat( [ vmasks, torch.ones( (batch_size, 1), dtype=torch.bool, device=vmasks.device), ], dim=1, ) assert video_outputs.size(1) == video_attention_mask.size(1) pooled_video = torch.sum( video_outputs * video_attention_mask.unsqueeze(-1), dim=1 ) / video_attention_mask.sum(1, keepdim=True) text_attention_mask = cmasks[:, 2:] text_outputs = hidden_state[:, text_offset:] assert text_outputs.size(1) == text_attention_mask.size(1) pooled_text = torch.sum( text_outputs * text_attention_mask.unsqueeze(-1), dim=1 ) / text_attention_mask.sum(1, keepdim=True) return pooled_video, pooled_text class MMFusionMFMMLM(MMFusion): def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, 
video_label=None, text_label=None, **kwargs ): output_hidden_states = False if self.is_train else True target_vfeats, non_masked_frame_mask = None, None if video_label is not None: target_vfeats = vfeats.masked_select( video_label.unsqueeze(-1)).view( -1, vfeats.size(-1) ) vfeats[video_label] = 0.0 non_masked_frame_mask = vmasks.clone() non_masked_frame_mask[video_label] = False attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) outputs = self.mm_encoder( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, masked_frame_labels=video_label, target_video_hidden_states=target_vfeats, non_masked_frame_mask=non_masked_frame_mask, masked_lm_labels=text_label, output_hidden_states=output_hidden_states, ) video_logits, text_logits = outputs[0], outputs[1] if self.is_train: return { "video_logits": video_logits, "text_logits": text_logits, } pooled_video, pooled_text = self._pooling_vt_layer( outputs[2], cmasks, vmasks) return {"pooled_video": pooled_video, "pooled_text": pooled_text} class MMFusionMTM(MMFusionMFMMLM):
MIT License
arsenlosenko/python-ripple-lib
ripple_api/data_api.py
RippleDataAPIClient.get_validations
python
def get_validations(self, **query_params) -> dict:
    url_params = 'network', 'validations'
    return self._call(url_params, query_params)
Retrieve validation votes, including votes for ledger versions that are
outside the main ledger chain.

Reference: https://developers.ripple.com/data-api.html#get-validations
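A usage sketch for the wrapper above; the keyword arguments are simply URL-encoded and forwarded by `_call`, so any query parameter the Data API documents (for example `limit`) can be passed through:

client = RippleDataAPIClient('https://data.ripple.com')
response = client.get_validations(limit=10)
print(response.get('validations', response))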
https://github.com/arsenlosenko/python-ripple-lib/blob/3f04e7a47dbae8ad6bd76c61cefb6d9a1541c3ec/ripple_api/data_api.py#L275-L281
import json from urllib.request import Request, urlopen from urllib.parse import urljoin, urlencode from urllib.error import HTTPError, URLError class RippleDataAPIClient(object): def __init__(self, node: str = 'https://data.ripple.com'): self.node = node def __repr__(self): return '<RippleDataAPIClient node=%r>' % self.node def _call(self, url_params: tuple, params: dict) -> dict: api_version = "/v2/" endpoint = "/".join(url_params) api_url = "".join((api_version, endpoint)) url = urljoin(self.node, api_url) url = url + "?" + urlencode(params) req = Request(method='GET', url=url) try: with urlopen(req) as res: res_json = json.loads(res.fp.read().decode('utf-8')) return res_json except HTTPError as err: return {"status": "error", "msg": err} except URLError as err: return {"status": "error", "msg": err} def get_ledger(self, ledger_identifier: str, **query_params) -> dict: url_params = 'ledgers', ledger_identifier return self._call(url_params, query_params) def get_ledger_validations(self, ledger_hash: str, **query_params) -> dict: endpoint = 'ledgers', ledger_hash, 'validations' return self._call(endpoint, query_params) def get_ledger_validation(self, ledger_hash: str, pubkey: str, **query_params) -> dict: url_params = 'ledgers', ledger_hash, 'validations', pubkey return self._call(url_params, query_params) def get_transaction(self, hash: str, **query_params) -> dict: url_params = 'transactions', hash return self._call(url_params, query_params) def get_transactions(self, **query_params) -> dict: return self._call(('transactions', ), query_params) def get_payments(self, currency: str = None, **query_params) -> dict: url_params = ('payments', ) if currency: url_params = 'payments', currency return self._call(url_params, query_params) def get_exchanges(self, base: str, counter: str, **query_params) -> dict: url_params = 'exchanges', base, counter return self._call(url_params, query_params) def get_exchange_rates(self, base: str, counter: str, **query_params) -> dict: url_params = 'exchange_rates', base, counter return self._call(url_params, query_params) def normalize(self, **query_params) -> dict: return self._call(('normalize', ), query_params) def get_daily_reports(self, date: str = None, **query_params) -> dict: url_params = ('reports', ) if date: url_params = 'reports', date return self._call(url_params, query_params) def get_stats(self, **query_params) -> dict: return self._call(('stats', ), query_params) def get_active_accounts(self, base: str, counter: str, **query_params) -> dict: url_params = 'active_accounts', base, counter return self._call(url_params, query_params) def get_exchange_volume(self, **query_params) -> dict: url_params = 'network', 'exchange_volume' return self._call(url_params, query_params) def get_payment_volume(self, **query_params) -> dict: url_params = 'network', 'payment_volume' return self._call(url_params, query_params) def get_external_markets(self, **query_params) -> dict: url_params = 'network', 'external_markets' return self._call(url_params, query_params) def get_xrp_distribution(self, **query_params) -> dict: url_params = 'network', 'xrp_distribution' return self._call(url_params, query_params) def get_top_currencies(self, date: str = None, **query_params) -> dict: url_params = 'network', 'top_currencies' if date: url_params = 'network', 'top_currencies', date return self._call(url_params, query_params) def get_top_markets(self, date: str = None, **query_params) -> dict: url_params = 'network', 'top_markets' if date: url_params = 'network', 
'top_markets', date return self._call(url_params, query_params) def get_transaction_costs(self, **query_params) -> dict: url_params = 'network', 'fees' return self._call(url_params, query_params) def get_fee_stats(self, **query_params) -> dict: url_params = 'network', 'fee_stats' return self._call(url_params, query_params) def get_topology(self, **query_params) -> dict: url_params = 'network', 'topology' return self._call(url_params, query_params) def get_topology_nodes(self, **query_params) -> dict: url_params = 'network', 'topology', 'nodes' return self._call(url_params, query_params) def get_topology_node(self, pubkey: str, **query_params) -> dict: url_params = 'network', 'topology', 'nodes', pubkey return self._call(url_params, query_params) def get_topology_links(self, **query_params) -> dict: url_params = 'network', 'topology', 'links' return self._call(url_params, query_params) def get_validator(self, pubkey: str, **query_params) -> dict: url_params = 'network', 'validators', pubkey return self._call(url_params, query_params) def get_validators(self, **query_params) -> dict: url_params = 'network', 'validators' return self._call(url_params, query_params) def get_validator_validations(self, pubkey: str, **query_params) -> dict: url_params = 'network', 'validators', pubkey, 'validations' return self._call(url_params, query_params)
MIT License
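The context field of the record above defines a small urllib-based REST client for the Ripple Data API. As an editor-added illustration only, here is a hedged usage sketch: the import path is inferred from the record's file path, the ledger index is an arbitrary placeholder, and a live connection to data.ripple.com is assumed.

from ripple_api.data_api import RippleDataAPIClient  # module path inferred from the record's function_path

client = RippleDataAPIClient('https://data.ripple.com')
# Keyword arguments are forwarded as URL query parameters by _call().
ledger = client.get_ledger('38129917', transactions='true')   # placeholder ledger index
recent = client.get_transactions(limit=5)
print(ledger.get('result'), recent.get('result'))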
seetaresearch/dragon
dragon/python/core/autograph/context.py
Context.mode
python
def mode(self, mode):
    ctx = self._thread_local_data
    old_mode = ctx.mode
    old_is_eager = ctx.is_eager
    ctx.mode = mode
    ctx.is_eager = mode == 'EAGER_MODE'
    try:
        yield
    finally:
        ctx.mode = old_mode
        ctx.is_eager = old_is_eager
Context-manager to allow setting the mode to EAGER/GRAPH.
https://github.com/seetaresearch/dragon/blob/3dfb6ea55d90d2fb2da9b1b471f5e1e7d7667810/dragon/python/core/autograph/context.py#L45-L56
from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import threading class _ThreadLocalData(threading.local): def __init__(self): super(_ThreadLocalData, self).__init__() self.mode = 'EAGER_MODE' self.is_eager = self.mode == 'EAGER_MODE' class Context(object): def __init__(self): self._thread_local_data = _ThreadLocalData() def executing_eagerly(self): return self._thread_local_data.is_eager @contextlib.contextmanager
BSD 2-Clause Simplified License
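The record above describes a thread-local execution-mode switch. A minimal, editor-added usage sketch follows; the import path is only assumed from the record's file layout, and the mode strings come from the record's context field.

from dragon.core.autograph.context import Context  # import path assumed from the file layout

ctx = Context()
print(ctx.executing_eagerly())        # True: the default mode is 'EAGER_MODE'
with ctx.mode('GRAPH_MODE'):
    # Inside the block the thread-local mode is switched to graph execution.
    print(ctx.executing_eagerly())    # False
# On exit the previous mode and is_eager flag are restored.
print(ctx.executing_eagerly())        # True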
mila-iqia/babyai
babyai/rl/algos/base.py
BaseAlgo.collect_experiences
python
def collect_experiences(self):
    for i in range(self.num_frames_per_proc):
        # Do one agent-environment interaction for every parallel process
        preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
        with torch.no_grad():
            model_results = self.acmodel(preprocessed_obs, self.memory * self.mask.unsqueeze(1))
            dist = model_results['dist']
            value = model_results['value']
            memory = model_results['memory']
            extra_predictions = model_results['extra_predictions']
        action = dist.sample()

        obs, reward, done, env_info = self.env.step(action.cpu().numpy())
        if self.aux_info:
            env_info = self.aux_info_collector.process(env_info)

        # Update the experience buffers
        self.obss[i] = self.obs
        self.obs = obs
        self.memories[i] = self.memory
        self.memory = memory
        self.masks[i] = self.mask
        self.mask = 1 - torch.tensor(done, device=self.device, dtype=torch.float)
        self.actions[i] = action
        self.values[i] = value
        if self.reshape_reward is not None:
            self.rewards[i] = torch.tensor([
                self.reshape_reward(obs_, action_, reward_, done_)
                for obs_, action_, reward_, done_ in zip(obs, action, reward, done)
            ], device=self.device)
        else:
            self.rewards[i] = torch.tensor(reward, device=self.device)
        self.log_probs[i] = dist.log_prob(action)

        if self.aux_info:
            self.aux_info_collector.fill_dictionaries(i, env_info, extra_predictions)

        # Update the per-episode log values
        self.log_episode_return += torch.tensor(reward, device=self.device, dtype=torch.float)
        self.log_episode_reshaped_return += self.rewards[i]
        self.log_episode_num_frames += torch.ones(self.num_procs, device=self.device)

        for i, done_ in enumerate(done):
            if done_:
                self.log_done_counter += 1
                self.log_return.append(self.log_episode_return[i].item())
                self.log_reshaped_return.append(self.log_episode_reshaped_return[i].item())
                self.log_num_frames.append(self.log_episode_num_frames[i].item())

        self.log_episode_return *= self.mask
        self.log_episode_reshaped_return *= self.mask
        self.log_episode_num_frames *= self.mask

    # Add advantage and return to experiences (GAE backward recursion)
    preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
    with torch.no_grad():
        next_value = self.acmodel(preprocessed_obs, self.memory * self.mask.unsqueeze(1))['value']

    for i in reversed(range(self.num_frames_per_proc)):
        next_mask = self.masks[i+1] if i < self.num_frames_per_proc - 1 else self.mask
        next_value = self.values[i+1] if i < self.num_frames_per_proc - 1 else next_value
        next_advantage = self.advantages[i+1] if i < self.num_frames_per_proc - 1 else 0

        delta = self.rewards[i] + self.discount * next_value * next_mask - self.values[i]
        self.advantages[i] = delta + self.discount * self.gae_lambda * next_advantage * next_mask

    # Flatten the data so each environment's frames form one contiguous block
    exps = DictList()
    exps.obs = [self.obss[i][j]
                for j in range(self.num_procs)
                for i in range(self.num_frames_per_proc)]
    exps.memory = self.memories.transpose(0, 1).reshape(-1, *self.memories.shape[2:])
    exps.mask = self.masks.transpose(0, 1).reshape(-1).unsqueeze(1)
    exps.action = self.actions.transpose(0, 1).reshape(-1)
    exps.value = self.values.transpose(0, 1).reshape(-1)
    exps.reward = self.rewards.transpose(0, 1).reshape(-1)
    exps.advantage = self.advantages.transpose(0, 1).reshape(-1)
    exps.returnn = exps.value + exps.advantage
    exps.log_prob = self.log_probs.transpose(0, 1).reshape(-1)

    if self.aux_info:
        exps = self.aux_info_collector.end_collection(exps)

    exps.obs = self.preprocess_obss(exps.obs, device=self.device)

    # Log some values and reset the rolling per-episode logs
    keep = max(self.log_done_counter, self.num_procs)
    log = {
        "return_per_episode": self.log_return[-keep:],
        "reshaped_return_per_episode": self.log_reshaped_return[-keep:],
        "num_frames_per_episode": self.log_num_frames[-keep:],
        "num_frames": self.num_frames,
        "episodes_done": self.log_done_counter,
    }
    self.log_done_counter = 0
    self.log_return = self.log_return[-self.num_procs:]
    self.log_reshaped_return = self.log_reshaped_return[-self.num_procs:]
    self.log_num_frames = self.log_num_frames[-self.num_procs:]

    return exps, log
Collects rollouts and computes advantages.

Runs several environments concurrently. The next actions are computed
in a batch mode for all environments at the same time. The rollouts
and advantages from all environments are concatenated together.

Returns
-------
exps : DictList
    Contains actions, rewards, advantages etc as attributes.
    Each attribute, e.g. `exps.reward` has a shape
    (self.num_frames_per_proc * num_envs, ...). k-th block
    of consecutive `self.num_frames_per_proc` frames contains
    data obtained from the k-th environment. Be careful not to mix
    data from different environments!
logs : dict
    Useful stats about the training process, including the average
    reward, policy loss, value loss, etc.
https://github.com/mila-iqia/babyai/blob/863f3529371ba45ef0148a48b48f5ae6e61e06cc/babyai/rl/algos/base.py#L110-L251
from abc import ABC, abstractmethod import torch import numpy from babyai.rl.format import default_preprocess_obss from babyai.rl.utils import DictList, ParallelEnv from babyai.rl.utils.supervised_losses import ExtraInfoCollector class BaseAlgo(ABC): def __init__(self, envs, acmodel, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef, value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward, aux_info): self.env = ParallelEnv(envs) self.acmodel = acmodel self.acmodel.train() self.num_frames_per_proc = num_frames_per_proc self.discount = discount self.lr = lr self.gae_lambda = gae_lambda self.entropy_coef = entropy_coef self.value_loss_coef = value_loss_coef self.max_grad_norm = max_grad_norm self.recurrence = recurrence self.preprocess_obss = preprocess_obss or default_preprocess_obss self.reshape_reward = reshape_reward self.aux_info = aux_info self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.num_procs = len(envs) self.num_frames = self.num_frames_per_proc * self.num_procs assert self.num_frames_per_proc % self.recurrence == 0 shape = (self.num_frames_per_proc, self.num_procs) self.obs = self.env.reset() self.obss = [None]*(shape[0]) self.memory = torch.zeros(shape[1], self.acmodel.memory_size, device=self.device) self.memories = torch.zeros(*shape, self.acmodel.memory_size, device=self.device) self.mask = torch.ones(shape[1], device=self.device) self.masks = torch.zeros(*shape, device=self.device) self.actions = torch.zeros(*shape, device=self.device, dtype=torch.int) self.values = torch.zeros(*shape, device=self.device) self.rewards = torch.zeros(*shape, device=self.device) self.advantages = torch.zeros(*shape, device=self.device) self.log_probs = torch.zeros(*shape, device=self.device) if self.aux_info: self.aux_info_collector = ExtraInfoCollector(self.aux_info, shape, self.device) self.log_episode_return = torch.zeros(self.num_procs, device=self.device) self.log_episode_reshaped_return = torch.zeros(self.num_procs, device=self.device) self.log_episode_num_frames = torch.zeros(self.num_procs, device=self.device) self.log_done_counter = 0 self.log_return = [0] * self.num_procs self.log_reshaped_return = [0] * self.num_procs self.log_num_frames = [0] * self.num_procs
BSD 3-Clause New or Revised License
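The core of the advantage computation in the `collect_experiences` record above is a standard GAE(lambda) backward recursion. The following standalone sketch, added for illustration, reproduces just that recursion in NumPy with toy rewards, values and masks; none of the numbers come from the record, and `final_mask`/`next_value` stand in for the post-rollout bootstrap terms.

import numpy as np

T, discount, gae_lambda = 4, 0.99, 0.95
rewards = np.array([0.0, 0.0, 1.0, 0.0])
values = np.array([0.5, 0.6, 0.7, 0.4])
masks = np.array([1.0, 1.0, 0.0, 1.0])   # masks[i] is 0 when the previous frame ended an episode
final_mask = 1.0                          # mask after the last collected frame
next_value = 0.3                          # bootstrap value for the state after the rollout

advantages = np.zeros(T)
for i in reversed(range(T)):
    next_mask = masks[i + 1] if i < T - 1 else final_mask
    nv = values[i + 1] if i < T - 1 else next_value
    next_adv = advantages[i + 1] if i < T - 1 else 0.0
    delta = rewards[i] + discount * nv * next_mask - values[i]
    advantages[i] = delta + discount * gae_lambda * next_adv * next_mask

returns = values + advantages             # analogous to exps.returnn in the record
print(advantages, returns)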
sqlfluff/sqlfluff
src/sqlfluff/core/linter/linter.py
Linter.lint_paths
python
def lint_paths(
    self,
    paths: Tuple[str, ...],
    fix: bool = False,
    ignore_non_existent_files: bool = False,
    ignore_files: bool = True,
    processes: int = 1,
) -> LintingResult:
    if len(paths) == 0:
        paths = (os.getcwd(),)
    result = LintingResult()
    for path in paths:
        result.add(
            self.lint_path(
                path,
                fix=fix,
                ignore_non_existent_files=ignore_non_existent_files,
                ignore_files=ignore_files,
                processes=processes,
            )
        )
    result.stop_timer()
    return result
Lint an iterable of paths.
https://github.com/sqlfluff/sqlfluff/blob/ce4e5a344526f7ee61a0950adc079e4d3b5af438/src/sqlfluff/core/linter/linter.py#L833-L860
import os import time import logging from typing import ( Any, List, Sequence, Optional, Tuple, Union, cast, Iterable, Iterator, ) import pathspec from sqlfluff.core.errors import ( SQLBaseError, SQLLexError, SQLLintError, SQLParseError, SQLTemplaterSkipFile, ) from sqlfluff.core.parser import Lexer, Parser from sqlfluff.core.file_helpers import get_encoding from sqlfluff.core.templaters import TemplatedFile from sqlfluff.core.rules import get_ruleset from sqlfluff.core.config import FluffConfig, ConfigLoader from sqlfluff.core.linter.runner import get_runner from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.parser.segments.meta import MetaSegment from sqlfluff.core.parser.segments.raw import RawSegment from sqlfluff.core.rules.base import BaseRule from sqlfluff.core.linter.common import ( RuleTuple, ParsedString, NoQaDirective, RenderedFile, ) from sqlfluff.core.linter.linted_file import LintedFile from sqlfluff.core.linter.linted_dir import LintedDir from sqlfluff.core.linter.linting_result import LintingResult WalkableType = Iterable[Tuple[str, Optional[List[str]], List[str]]] linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") class Linter: allow_process_parallelism = True def __init__( self, config: Optional[FluffConfig] = None, formatter: Any = None, dialect: Optional[str] = None, rules: Optional[Union[str, List[str]]] = None, user_rules: Optional[Union[str, List[str]]] = None, ) -> None: self.config = FluffConfig.from_kwargs( config=config, dialect=dialect, rules=rules ) self.dialect = self.config.get("dialect_obj") self.templater = self.config.get("templater_obj") self.formatter = formatter self.user_rules = user_rules or [] def get_ruleset(self, config: Optional[FluffConfig] = None) -> List[BaseRule]: rs = get_ruleset() for rule in self.user_rules: rs.register(rule) cfg = config or self.config return rs.get_rulelist(config=cfg) def rule_tuples(self) -> List[RuleTuple]: rs = self.get_ruleset() return [RuleTuple(rule.code, rule.description) for rule in rs] @staticmethod def _load_raw_file_and_config(fname, root_config): file_config = root_config.make_child_from_path(fname) encoding = get_encoding(fname=fname, config=file_config) with open(fname, encoding=encoding, errors="backslashreplace") as target_file: raw_file = target_file.read() file_config.process_raw_file_for_config(raw_file) return raw_file, file_config, encoding @staticmethod def _lex_templated_file( templated_file: TemplatedFile, config: FluffConfig ) -> Tuple[Optional[Sequence[BaseSegment]], List[SQLLexError], FluffConfig]: violations = [] linter_logger.info("LEXING RAW (%s)", templated_file.fname) lexer = Lexer(config=config) try: tokens, lex_vs = lexer.lex(templated_file) violations += lex_vs linter_logger.info( "Lexed tokens: %s", [seg.raw for seg in tokens] if tokens else None ) except SQLLexError as err: linter_logger.info("LEXING FAILED! 
(%s): %s", templated_file.fname, err) violations.append(err) return None, violations, config if not tokens: return None, violations, config templating_blocks_indent = config.get("template_blocks_indent", "indentation") if isinstance(templating_blocks_indent, str): force_block_indent = templating_blocks_indent.lower().strip() == "force" else: force_block_indent = False templating_blocks_indent = bool(templating_blocks_indent) if templating_blocks_indent and not force_block_indent: indent_balance = sum( getattr(elem, "indent_val", 0) for elem in cast(Tuple[BaseSegment, ...], tokens) ) if indent_balance != 0: linter_logger.debug( "Indent balance test failed for %r. Template indents will not be linted for this file.", templated_file.fname, ) templating_blocks_indent = False new_tokens = [] for token in cast(Tuple[BaseSegment, ...], tokens): if token.is_meta: token = cast(MetaSegment, token) if token.indent_val != 0: if not templating_blocks_indent: continue new_tokens.append(token) return new_tokens, violations, config @staticmethod def _parse_tokens( tokens: Sequence[BaseSegment], config: FluffConfig, recurse: bool = True, fname: Optional[str] = None, ) -> Tuple[Optional[BaseSegment], List[SQLParseError]]: parser = Parser(config=config) violations = [] try: parsed: Optional[BaseSegment] = parser.parse( tokens, recurse=recurse, fname=fname ) except SQLParseError as err: linter_logger.info("PARSING FAILED! : %s", err) violations.append(err) return None, violations if parsed: linter_logger.info("\n###\n#\n# {}\n#\n###".format("Parsed Tree:")) linter_logger.info("\n" + parsed.stringify()) for unparsable in parsed.iter_unparsables(): violations.append( SQLParseError( "Line {0[0]}, Position {0[1]}: Found unparsable section: {1!r}".format( unparsable.pos_marker.working_loc, unparsable.raw if len(unparsable.raw) < 40 else unparsable.raw[:40] + "...", ), segment=unparsable, ) ) linter_logger.info("Found unparsable segment...") linter_logger.info(unparsable.stringify()) return parsed, violations @staticmethod def parse_noqa(comment: str, line_no: int): if comment.startswith("noqa"): comment_remainder = comment[4:] if comment_remainder: if not comment_remainder.startswith(":"): return SQLParseError( "Malformed 'noqa' section. Expected 'noqa: <rule>[,...]", line_no=line_no, ) comment_remainder = comment_remainder[1:].strip() if comment_remainder: action: Optional[str] if "=" in comment_remainder: action, rule_part = comment_remainder.split("=", 1) if action not in {"disable", "enable"}: return SQLParseError( "Malformed 'noqa' section. " "Expected 'noqa: enable=<rule>[,...] | all' " "or 'noqa: disable=<rule>[,...] | all", line_no=line_no, ) else: action = None rule_part = comment_remainder if rule_part in {"disable", "enable"}: return SQLParseError( "Malformed 'noqa' section. " "Expected 'noqa: enable=<rule>[,...] | all' " "or 'noqa: disable=<rule>[,...] 
| all", line_no=line_no, ) rules: Optional[Tuple[str, ...]] if rule_part != "all": rules = tuple(r.strip() for r in rule_part.split(",")) else: rules = None return NoQaDirective(line_no, rules, action) return NoQaDirective(line_no, None, None) return None @staticmethod def remove_templated_errors( linting_errors: List[SQLBaseError], ) -> List[SQLBaseError]: result: List[SQLBaseError] = [] for e in linting_errors: if isinstance(e, SQLLintError): if ( e.segment.pos_marker.is_literal() or e.rule.targets_templated ): result.append(e) else: result.append(e) return result @staticmethod def _warn_unfixable(code: str): linter_logger.warning( f"One fix for {code} not applied, it would re-cause the same error." ) @classmethod def parse_rendered(cls, rendered: RenderedFile, recurse: bool = True): t0 = time.monotonic() violations = cast(List[SQLBaseError], rendered.templater_violations) tokens: Optional[Sequence[BaseSegment]] if rendered.templated_file: tokens, lvs, config = cls._lex_templated_file( rendered.templated_file, rendered.config ) violations += lvs else: tokens = None t1 = time.monotonic() linter_logger.info("PARSING (%s)", rendered.fname) if tokens: parsed, pvs = cls._parse_tokens( tokens, rendered.config, recurse=recurse, fname=rendered.fname ) violations += pvs else: parsed = None time_dict = { **rendered.time_dict, "lexing": t1 - t0, "parsing": time.monotonic() - t1, } return ParsedString( parsed, violations, time_dict, rendered.templated_file, rendered.config, rendered.fname, ) @classmethod def extract_ignore_from_comment(cls, comment: RawSegment): comment_content = comment.raw_trimmed().strip() comment_line, _ = comment.pos_marker.source_position() result = cls.parse_noqa(comment_content, comment_line) if isinstance(result, SQLParseError): result.segment = comment return result @classmethod def extract_ignore_mask( cls, tree: BaseSegment ) -> Tuple[List[NoQaDirective], List[SQLBaseError]]: ignore_buff: List[NoQaDirective] = [] violations: List[SQLBaseError] = [] for comment in tree.recursive_crawl("comment"): if comment.name == "inline_comment": ignore_entry = cls.extract_ignore_from_comment(comment) if isinstance(ignore_entry, SQLParseError): violations.append(ignore_entry) elif ignore_entry: ignore_buff.append(ignore_entry) if ignore_buff: linter_logger.info("Parsed noqa directives from file: %r", ignore_buff) return ignore_buff, violations @classmethod def lint_fix_parsed( cls, tree: BaseSegment, config: FluffConfig, rule_set: List[BaseRule], fix: bool = False, fname: Optional[str] = None, templated_file: Optional[TemplatedFile] = None, formatter: Any = None, ) -> Tuple[BaseSegment, List[SQLBaseError], List[NoQaDirective]]: all_linting_errors = [] last_fixes = None previous_versions = {tree.raw} loop_limit = config.get("runaway_limit") if fix else 1 if formatter: formatter.dispatch_lint_header(fname) ignore_buff, ivs = cls.extract_ignore_mask(tree) all_linting_errors += ivs for loop in range(loop_limit): changed = False for crawler in rule_set: linting_errors, _, fixes, _ = crawler.crawl( tree, ignore_mask=ignore_buff, dialect=config.get("dialect_obj"), fname=fname, templated_file=templated_file, ) all_linting_errors += linting_errors if fix and fixes: linter_logger.info(f"Applying Fixes [{crawler.code}]: {fixes}") if fixes == last_fixes: cls._warn_unfixable(crawler.code) else: last_fixes = fixes new_tree, _ = tree.apply_fixes(fixes) if new_tree.raw not in previous_versions: tree = new_tree previous_versions.add(tree.raw) changed = True continue else: 
cls._warn_unfixable(crawler.code) if loop == 0: initial_linting_errors = all_linting_errors.copy() if fix and not changed: linter_logger.info( f"Fix loop complete. Stability achieved after {loop}/{loop_limit} loops." ) break if fix and loop + 1 == loop_limit: linter_logger.warning(f"Loop limit on fixes reached [{loop_limit}].") if config.get("ignore_templated_areas", default=True): initial_linting_errors = cls.remove_templated_errors(initial_linting_errors) return tree, initial_linting_errors, ignore_buff @classmethod def lint_parsed( cls, parsed: ParsedString, rule_set: List[BaseRule], fix: bool = False, formatter: Any = None, encoding: str = "utf8", ): violations = parsed.violations time_dict = parsed.time_dict tree: Optional[BaseSegment] if parsed.tree: t0 = time.monotonic() linter_logger.info("LINTING (%s)", parsed.fname) tree, initial_linting_errors, ignore_buff = cls.lint_fix_parsed( parsed.tree, config=parsed.config, rule_set=rule_set, fix=fix, fname=parsed.fname, templated_file=parsed.templated_file, formatter=formatter, ) time_dict["linting"] = time.monotonic() - t0 violations += initial_linting_errors else: tree = None ignore_buff = [] for violation in violations: violation.ignore_if_in(parsed.config.get("ignore")) linted_file = LintedFile( parsed.fname, violations, time_dict, tree, ignore_mask=ignore_buff, templated_file=parsed.templated_file, encoding=encoding, ) if formatter: formatter.dispatch_file_violations( parsed.fname, linted_file, only_fixable=fix ) if parsed.config.get("dialect") == "ansi" and linted_file.get_violations( fixable=True if fix else None, types=SQLParseError ): if formatter: formatter.dispatch_dialect_warning() return linted_file @classmethod def lint_rendered( cls, rendered: RenderedFile, rule_set: List[BaseRule], fix: bool = False, formatter: Any = None, ) -> LintedFile: parsed = cls.parse_rendered(rendered) return cls.lint_parsed( parsed, rule_set=rule_set, fix=fix, formatter=formatter, encoding=rendered.encoding, ) def render_string( self, in_str: str, fname: str, config: FluffConfig, encoding: str ) -> RenderedFile: linter_logger.info("TEMPLATING RAW [%s] (%s)", self.templater.name, fname) t0 = time.monotonic() if not config.get("templater_obj") == self.templater: linter_logger.warning( ( f"Attempt to set templater to {config.get('templater_obj').name} failed. Using {self.templater.name} " "templater. Templater cannot be set in a .sqlfluff file in a subdirectory of the current working " "directory. It can be set in a .sqlfluff in the current working directory. See Nesting section of the " "docs for more details." 
) ) try: templated_file, templater_violations = self.templater.process( in_str=in_str, fname=fname, config=config, formatter=self.formatter ) except SQLTemplaterSkipFile as s: linter_logger.warning(str(s)) templated_file = None templater_violations = [] if not templated_file: linter_logger.info("TEMPLATING FAILED: %s", templater_violations) time_dict = {"templating": time.monotonic() - t0} return RenderedFile( templated_file, templater_violations, config, time_dict, fname, encoding ) def render_file(self, fname: str, root_config: FluffConfig) -> RenderedFile: raw_file, config, encoding = self._load_raw_file_and_config(fname, root_config) return self.render_string(raw_file, fname, config, encoding) def parse_string( self, in_str: str, fname: str = "<string>", recurse: bool = True, config: Optional[FluffConfig] = None, encoding: str = "utf-8", ) -> ParsedString: violations: List[SQLBaseError] = [] if self.formatter: self.formatter.dispatch_template_header(fname, self.config, config) config = config or self.config config.process_raw_file_for_config(in_str) rendered = self.render_string(in_str, fname, config, encoding) violations += rendered.templater_violations if self.formatter: self.formatter.dispatch_parse_header(fname) return self.parse_rendered(rendered, recurse=recurse) def fix( self, tree: BaseSegment, config: Optional[FluffConfig] = None, fname: Optional[str] = None, templated_file: Optional[TemplatedFile] = None, ) -> Tuple[BaseSegment, List[SQLBaseError]]: config = config or self.config rule_set = self.get_ruleset(config=config) fixed_tree, violations, _ = self.lint_fix_parsed( tree, config, rule_set, fix=True, fname=fname, templated_file=templated_file, formatter=self.formatter, ) return fixed_tree, violations def lint( self, tree: BaseSegment, config: Optional[FluffConfig] = None, fname: Optional[str] = None, templated_file: Optional[TemplatedFile] = None, ) -> List[SQLBaseError]: config = config or self.config rule_set = self.get_ruleset(config=config) _, violations, _ = self.lint_fix_parsed( tree, config, rule_set, fix=False, fname=fname, templated_file=templated_file, formatter=self.formatter, ) return violations def lint_string( self, in_str: str = "", fname: str = "<string input>", fix: bool = False, config: Optional[FluffConfig] = None, encoding: str = "utf8", ) -> LintedFile: config = config or self.config parsed = self.parse_string(in_str=in_str, fname=fname, config=config) rule_set = self.get_ruleset(config=config) return self.lint_parsed( parsed, rule_set, fix=fix, formatter=self.formatter, encoding=encoding ) def paths_from_path( self, path: str, ignore_file_name: str = ".sqlfluffignore", ignore_non_existent_files: bool = False, ignore_files: bool = True, working_path: str = os.getcwd(), ) -> List[str]: if not os.path.exists(path): if ignore_non_existent_files: return [] else: raise OSError("Specified path does not exist") is_exact_file = os.path.isfile(path) if is_exact_file: dirpath = os.path.dirname(path) files = [os.path.basename(path)] ignore_file_paths = ConfigLoader.find_ignore_config_files( path=path, working_path=working_path, ignore_file_name=ignore_file_name ) path_walk_ignore_file = [ ( os.path.dirname(ignore_file_path), None, [os.path.basename(ignore_file_path)], ) for ignore_file_path in ignore_file_paths ] path_walk: WalkableType = [(dirpath, None, files)] + path_walk_ignore_file else: path_walk = os.walk(path) buffer = [] ignores = {} for dirpath, _, filenames in path_walk: for fname in filenames: fpath = os.path.join(dirpath, fname) if ignore_files and 
fname == ignore_file_name: with open(fpath) as fh: spec = pathspec.PathSpec.from_lines("gitwildmatch", fh) ignores[dirpath] = spec continue for ext in self.config.get("sql_file_exts", default=".sql").split(","): if fname.endswith(ext): buffer.append(fpath) if not ignore_files: return sorted(buffer) filtered_buffer = [] for fpath in buffer: abs_fpath = os.path.abspath(fpath) for ignore_base, ignore_spec in ignores.items(): abs_ignore_base = os.path.abspath(ignore_base) if abs_fpath.startswith( abs_ignore_base + os.sep ) and ignore_spec.match_file( os.path.relpath(abs_fpath, abs_ignore_base) ): if is_exact_file: linter_logger.warning( "Exact file path %s was given but " "it was ignored by a %s pattern in %s, " "re-run with `--disregard-sqlfluffignores` to " "skip %s" % ( path, ignore_file_name, ignore_base, ignore_file_name, ) ) break else: filtered_buffer.append(os.path.normpath(fpath)) return sorted(filtered_buffer) def lint_string_wrapped( self, string: str, fname: str = "<string input>", fix: bool = False ) -> LintingResult: result = LintingResult() linted_path = LintedDir(fname) linted_path.add(self.lint_string(string, fname=fname, fix=fix)) result.add(linted_path) result.stop_timer() return result def lint_path( self, path: str, fix: bool = False, ignore_non_existent_files: bool = False, ignore_files: bool = True, processes: int = 1, ) -> LintedDir: linted_path = LintedDir(path) if self.formatter: self.formatter.dispatch_path(path) fnames = list( self.paths_from_path( path, ignore_non_existent_files=ignore_non_existent_files, ignore_files=ignore_files, ) ) runner = get_runner( self, self.config, processes=processes, allow_process_parallelism=self.allow_process_parallelism, ) for linted_file in runner.run(fnames, fix): linted_path.add(linted_file) if any(v.fatal for v in linted_file.violations): linter_logger.error("Fatal linting error. Halting further linting.") break return linted_path
MIT License
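As an editor-added illustration of the `Linter.lint_paths` record above, here is a hedged usage sketch. The constructor's `dialect` keyword is taken from the record's context field, the public import path is assumed, and "queries/" is a placeholder directory.

from sqlfluff.core import Linter  # public import path assumed

linter = Linter(dialect="ansi")
result = linter.lint_paths(("queries/",), fix=False, processes=1)
# `result` is a LintingResult aggregating one LintedDir per input path; its
# reporting helpers (not shown in this record) summarise the violations.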
mabuchilab/qnet
src/qnet/algebra/toolbox/commutator_manipulation.py
expand_commutators_leibniz
python
def expand_commutators_leibniz(expr, expand_expr=True):
    recurse = partial(expand_commutators_leibniz, expand_expr=expand_expr)
    A = wc('A', head=Operator)
    C = wc('C', head=Operator)
    AB = wc('AB', head=OperatorTimes)
    BC = wc('BC', head=OperatorTimes)

    def leibniz_right(A, BC):
        B = BC.operands[0]
        C = OperatorTimes.create(*BC.operands[1:])
        return Commutator.create(A, B) * C + B * Commutator.create(A, C)

    def leibniz_left(AB, C):
        A = AB.operands[0]
        B = OperatorTimes(*AB.operands[1:])
        return A * Commutator.create(B, C) + Commutator.create(A, C) * B

    rules = OrderedDict([
        ('leibniz1', (
            pattern(Commutator, A, BC),
            lambda A, BC: recurse(leibniz_right(A, BC).expand()))),
        ('leibniz2', (
            pattern(Commutator, AB, C),
            lambda AB, C: recurse(leibniz_left(AB, C).expand())))])
    if expand_expr:
        res = _apply_rules(expr.expand(), rules).expand()
    else:
        res = _apply_rules(expr, rules)
    return res
Recursively expand commutators in `expr` according to the Leibniz rule.

.. math::

    [A B, C] = A [B, C] + [A, C] B

.. math::

    [A, B C] = [A, B] C + B [A, C]

If `expand_expr` is True, expand products of sums in `expr`, as well as in
the result.
https://github.com/mabuchilab/qnet/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/toolbox/commutator_manipulation.py#L14-L58
from collections.__init__ import OrderedDict from functools import partial from ..core.abstract_algebra import _apply_rules from ..core.operator_algebra import ( Operator, OperatorTimes, Commutator, ) from ..pattern_matching import wc, pattern __all__ = ['expand_commutators_leibniz']
MIT License
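As an editor-added worked instance of the two Leibniz rules quoted in the docstring above, applying them in sequence expands a commutator with a product on each side (plain LaTeX, not part of the original record):

\begin{align*}
[AB, CD] &= A\,[B, CD] + [A, CD]\,B \\
         &= A\,[B, C]\,D + A\,C\,[B, D] + [A, C]\,D\,B + C\,[A, D]\,B
\end{align*}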
faucetsdn/daq
subset/dot1x/authenticator/radius.py
RadiusAttributesList.extract_attributes
python
def extract_attributes(cls, attributes_data, attributes, attributes_to_concat):
    total_length = len(attributes_data)
    pos = 0
    index = -1
    last_attribute = -1
    while pos < total_length:
        # Each attribute starts with a 1-byte type and 1-byte length header.
        try:
            type_, attr_length = struct.unpack(
                "!BB", attributes_data[pos:pos + Attribute.HEADER_SIZE])
        except struct.error as exception:
            raise MessageParseError(
                'Unable to unpack first 2 bytes of attribute header') from exception
        data = attributes_data[pos + Attribute.HEADER_SIZE: pos + attr_length]
        pos += attr_length
        packed_value = data[:attr_length - Attribute.HEADER_SIZE]
        try:
            attribute = ATTRIBUTE_TYPES[type_].parse(packed_value)
        except KeyError as exception:
            raise MessageParseError(
                'Cannot find parser for RADIUS attribute %s' % type_) from exception
        # Only advance the ordering index when this is not a continuation of a
        # Concat-type attribute of the same TYPE as the previous one.
        if attribute.DATA_TYPE != Concat or last_attribute != attribute.TYPE:
            index += 1
        last_attribute = attribute.TYPE
        if attribute.DATA_TYPE.DATA_TYPE_VALUE == Concat.DATA_TYPE_VALUE:
            if attribute.TYPE not in attributes_to_concat:
                attributes_to_concat[attribute.TYPE] = []
            attributes_to_concat[attribute.TYPE].append((attribute, index))
        attributes.append(attribute)
Extracts Radius Attributes from a packed payload. Keeps track of attribute ordering.
Args:
    attributes_data (): data to extract from (input).
    attributes: attributes extracted (output variable).
    attributes_to_concat (dict): (output variable).
Raises:
    MessageParseError: RadiusAttribute.parse will raise error if it cannot parse the attribute's data
https://github.com/faucetsdn/daq/blob/5a5f46d76bb36519bd2ee44ee63c51f2f19fce0f/subset/dot1x/authenticator/radius.py#L282-L325
import copy import hashlib import hmac import struct import binascii from radius_attributes import ATTRIBUTE_TYPES, Attribute, MessageAuthenticator from radius_datatypes import Concat from utils import MessageParseError RADIUS_HEADER_LENGTH = 1 + 1 + 2 + 16 PACKET_TYPE_PARSERS = {} class InvalidResponseAuthenticatorError(Exception): pass class InvalidMessageAuthenticatorError(Exception): pass class Radius: ACCESS_REQUEST = 1 ACCESS_ACCEPT = 2 ACCESS_REJECT = 3 ACCOUNTING_REQUEST = 4 ACCOUNTING_RESPONSE = 5 ACCESS_CHALLENGE = 11 STATUS_SERVER = 12 STATUS_CLIENT = 13 @staticmethod def parse(packed_message, secret, packet_id_to_request_authenticator): try: code, packet_id, length, authenticator = struct.unpack( "!BBH16s", packed_message[:RADIUS_HEADER_LENGTH]) except struct.error as exception: raise MessageParseError('Unable to unpack first 20 bytes of RADIUS header') from exception if code in PACKET_TYPE_PARSERS.keys(): radius_packet = PACKET_TYPE_PARSERS[code](packet_id, authenticator, RadiusAttributesList.parse( packed_message[RADIUS_HEADER_LENGTH:])) if code == Radius.ACCESS_REQUEST: request_authenticator = authenticator else: try: request_authenticator = packet_id_to_request_authenticator[packet_id] except KeyError as exception: raise MessageParseError('Unknown RAIDUS packet_id: %s' % packet_id, ) from exception try: return radius_packet.validate_packet(secret, request_authenticator=request_authenticator, code=code) except (InvalidMessageAuthenticatorError, InvalidResponseAuthenticatorError) as exception: raise MessageParseError("Unable to validate Radius packet") from exception raise MessageParseError("Unable to parse radius code: %d" % code) def pack(self): pass def register_packet_type_parser(cls): PACKET_TYPE_PARSERS[cls.CODE] = cls.parse return cls class RadiusPacket(Radius): CODE = None packed = None def __init__(self, packet_id, authenticator, attributes): self.packet_id = packet_id self.authenticator = authenticator self.attributes = attributes @classmethod def parse(cls, packet_id, request_authenticator, attributes): return cls(packet_id, request_authenticator, attributes) def pack(self): header = struct.pack("!BBH16s", self.CODE, self.packet_id, RADIUS_HEADER_LENGTH + self.attributes.__len__(), self.authenticator) packed_attributes = self.attributes.pack() self.packed = bytearray(header + packed_attributes) return self.packed def build(self, secret=None): if not self.packed: self.pack() try: position = self.attributes.indexof(MessageAuthenticator.DESCRIPTION) + RADIUS_HEADER_LENGTH + Attribute.HEADER_SIZE except ValueError as err: print(err) return self.packed if secret: message_authenticator = bytearray(hmac.new(secret.encode(), self.packed, 'md5') .digest()) for i in range(16): self.packed[i + position] = message_authenticator[i] return self.packed def validate_packet(self, secret, request_authenticator=None, code=None): radius_packet = copy.deepcopy(self) if not secret: raise ValueError("secret cannot be None for hashing") self.validate_response_authenticator(radius_packet, request_authenticator, secret, code) self.validate_message_authenticator(radius_packet, secret, request_authenticator) return self @staticmethod def validate_response_authenticator(radius_packet, request_authenticator, secret, code): if request_authenticator and code in [Radius.ACCESS_REJECT, Radius.ACCESS_ACCEPT, Radius.ACCESS_CHALLENGE]: response_authenticator = radius_packet.authenticator radius_packet.authenticator = request_authenticator radius_packet.pack() calculated_response_authenticator = 
hashlib.md5(radius_packet.packed + bytearray(secret, 'utf-8')).digest() if calculated_response_authenticator != response_authenticator: raise InvalidResponseAuthenticatorError( "Original ResponseAuthenticator: '%s', does not match calculated: '%s' %s" % ( response_authenticator, calculated_response_authenticator, binascii.hexlify(radius_packet.packed))) @staticmethod def validate_message_authenticator(radius_packet, secret, request_authenticator): message_authenticator = radius_packet.attributes.find(MessageAuthenticator.DESCRIPTION) if message_authenticator: radius_packet.authenticator = request_authenticator original_ma = message_authenticator.bytes_data message_authenticator.bytes_data = bytes.fromhex( "00000000000000000000000000000000") radius_packet.pack() new_ma = hmac.new(secret.encode(), radius_packet.packed, 'md5').digest() if original_ma != new_ma: raise InvalidMessageAuthenticatorError( "Original Message-Authenticator: '%s', does not match calculated: '%s'" % (binascii.hexlify(original_ma), binascii.hexlify(new_ma))) @register_packet_type_parser class RadiusAccessRequest(RadiusPacket): CODE = Radius.ACCESS_REQUEST @register_packet_type_parser class RadiusAccessAccept(RadiusPacket): CODE = Radius.ACCESS_ACCEPT @register_packet_type_parser class RadiusAccessReject(RadiusPacket): CODE = Radius.ACCESS_REJECT @register_packet_type_parser class RadiusAccessChallenge(RadiusPacket): CODE = Radius.ACCESS_CHALLENGE class RadiusAttributesList: def __init__(self, attributes): self.attributes = attributes @classmethod def parse(cls, attributes_data): attributes = [] attributes_to_concat = {} cls.extract_attributes(attributes_data, attributes, attributes_to_concat) attributes = cls.merge_concat_attributes(attributes, attributes_to_concat) return cls(attributes) @classmethod def merge_concat_attributes(cls, attributes, attributes_to_concat): concatenated_attributes = [] for value, list_ in attributes_to_concat.items(): concatenated_data = b"" for d, i in list_: concatenated_data += d.bytes_data concatenated_attributes.append(tuple((ATTRIBUTE_TYPES[value].parse(concatenated_data), i))) for ca, _ in concatenated_attributes: attributes = [x for x in attributes if x.TYPE != ca.TYPE] for ca, i in concatenated_attributes: attributes.insert(i, ca) return attributes @classmethod
Apache License 2.0
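The record above walks a RADIUS attribute list as a type/length/value (TLV) stream. The following standalone, editor-added sketch illustrates just that walk with plain structs and a toy payload rather than the project's Attribute classes (attribute type 79 is the EAP-Message type, a Concat-style attribute).

import struct

HEADER_SIZE = 2
# Two toy attributes: (type=1, length=6, b"user") and (type=79, length=5, b"EAP")
payload = bytes([1, 6]) + b"user" + bytes([79, 5]) + b"EAP"

pos = 0
while pos < len(payload):
    # 1-byte type, 1-byte total length (header + value), then the value bytes.
    type_, attr_length = struct.unpack("!BB", payload[pos:pos + HEADER_SIZE])
    value = payload[pos + HEADER_SIZE:pos + attr_length]
    print(type_, attr_length, value)
    pos += attr_length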
datera/rtslib
rtslib/node.py
CFSNode.list_auth_attrs
python
def list_auth_attrs(self, writable=None):
    self._check_self()
    path = "%s/auth" % self.path
    return self._list_files(path, writable)
@param writable: If None (default), returns all auth attrs, if True,
    returns read-write auth attrs, if False, returns just
    the read-only auth attrs.
@type writable: bool or None
@return: A list of existing attribute names as strings.
https://github.com/datera/rtslib/blob/6fd0bbfc20947143eb2e4c3bfd34c65bf8551468/rtslib/node.py#L160-L170
import os import stat from utils import fread, fwrite, RTSLibError, RTSLibNotInCFS class CFSNode(object): configfs_dir = '/sys/kernel/config/target' alua_metadata_dir = "/var/target/alua/iSCSI" def __init__(self): self._path = self.configfs_dir def __nonzero__(self): if os.path.isdir(self.path): return True else: return False def __str__(self): return self.path def _get_path(self): return self._path def _create_in_cfs_ine(self, mode): if mode not in ['any', 'lookup', 'create']: raise RTSLibError("Invalid mode: %s" % mode) if self and mode == 'create': raise RTSLibError("This %s already exists in configFS." % self.__class__.__name__) elif not self and mode == 'lookup': raise RTSLibNotInCFS("No such %s in configfs: %s." % (self.__class__.__name__, self.path)) if self: self._fresh = False return try: os.mkdir(self.path) self._fresh = True except Exception as e: raise RTSLibError("Could not create %s: %s" % (self.path, e)) def _exists(self): return bool(self) def _check_self(self): if not self: raise RTSLibNotInCFS("This %s does not exist in configFS." % self.__class__.__name__) def _is_fresh(self): return self._fresh def _list_files(self, path, writable=None): if not os.path.isdir(path): return [] if writable is None: names = os.listdir(path) elif writable: names = [name for name in os.listdir(path) if (os.stat("%s/%s" % (path, name))[stat.ST_MODE] & stat.S_IWUSR)] else: names = [os.path.basename(name) for name in os.listdir(path) if not (os.stat("%s/%s" % (path, name))[stat.ST_MODE] & stat.S_IWUSR)] names.sort() return names def list_parameters(self, writable=None): self._check_self() path = "%s/param" % self.path return self._list_files(path, writable) def list_attributes(self, writable=None): self._check_self() path = "%s/attrib" % self.path attributes = self._list_files(path, writable) backstore = getattr(self, "backstore", None) plugin = getattr(backstore, "plugin", None) edited_attributes = [] force_ro_attrs = ["block_size", "emulate_fua_write", "optimal_sectors"] if writable is True and plugin == "pscsi": edited_attributes = [attr for attr in attributes if attr not in force_ro_attrs] elif writable is False and plugin == "pscsi": edited_attributes = list(set(attributes + force_ro_attrs)) else: edited_attributes = attributes return edited_attributes
Apache License 2.0
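`list_auth_attrs` above only prefixes the node path with "/auth" and delegates to `_list_files`, whose writability filter is visible in the record's context. The sketch below, added by the editor, restates that filter as a standalone function and runs it against an ordinary directory instead of a configfs "<node>/auth" path, since a live target setup cannot be assumed.

import os
import stat

def list_files(path, writable=None):
    # Mirrors CFSNode._list_files: filter directory entries by the owner-write bit.
    if not os.path.isdir(path):
        return []
    names = os.listdir(path)
    if writable is None:
        return sorted(names)
    def is_writable(name):
        return bool(os.stat(os.path.join(path, name))[stat.ST_MODE] & stat.S_IWUSR)
    return sorted(name for name in names if is_writable(name) == writable)

print(list_files("/etc", writable=False))  # read-only entries, as an example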
aparo/pyes
pyes/tests.py
ESTestCase.dump
python
def dump(self, result):
    pprint(result)
Dump the result to stdout.
https://github.com/aparo/pyes/blob/96965174760cb5aa5c92eac7ccff346fb5d53cf1/pyes/tests.py#L109-L113
from __future__ import absolute_import import os import logging import unittest from pprint import pprint from pyes.es import ES from pyes.helpers import SettingsBuilder def get_conn(*args, **kwargs): return ES(("http", "127.0.0.1", 9200), *args, **kwargs) DEFAULT_TEST_MAPPING = { u'parsedtext': { 'store': 'true', 'type': u'text', "term_vector": "with_positions_offsets"}, u'name': { 'store': 'true', 'type': u'text', "term_vector": "with_positions_offsets"}, u'title': { 'store': 'true', 'type': u'text', "term_vector": "with_positions_offsets"}, u'pos': { 'store': 'true', 'type': u'integer'}, u'position': { 'store': 'true', 'type': u'integer'}, u'doubles': { 'store': 'true', 'type': u'double'}, u'uuid': { 'store': 'true', 'type': u'keyword'}, u'tag': {'store': 'true', 'type': u'keyword'}, u'array': {'store': 'true', 'type': u'integer'}, u'inserted': {'store': 'true', 'type': u'date'}, u'date': {'store': 'true', 'type': u'date'}, u'resellers': { 'type': 'nested', 'properties': { 'name': {'type': 'text'}, 'price': {'type': 'double'} }}, } class ESTestCase(unittest.TestCase): def setUp(self): self.log = open("/tmp/%s.sh" % self._testMethodName, "wb") self.conn = get_conn(timeout=300.0, log_curl=True, dump_curl=self.log) self.index_name = "test-index" self.document_type = "test-type" self.conn.indices.delete_index_if_exists(self.index_name) def tearDown(self): self.conn.indices.delete_index_if_exists(self.index_name) if self.log: self.log.close() def assertResultContains(self, result, expected): for (key, value) in expected.items(): found = False try: found = value == result[key] except KeyError: if result.has_key('meta'): found = value == result['meta'][key] self.assertEqual(True, found) def checkRaises(self, excClass, callableObj, *args, **kwargs): try: callableObj(*args, **kwargs) except excClass as e: return e else: raise self.failureException("Expected exception %s not raised" % excClass) def get_datafile(self, filename): return open(os.path.join(os.path.dirname(__file__), "data", filename), "rb").read() def get_datafile_path(self, filename): return os.path.join(os.path.dirname(__file__), "data", filename)
BSD 3-Clause New or Revised License
liqunchen0606/graph-optimal-transport
NLP/TexarTransformer/utils/preprocess.py
make_array
python
def make_array(word_id, words):
    ids = [word_id.get(word, unk_token_id) for word in words]
    return np.array(ids, 'i')
Generate an id numpy array from plain-text words.
https://github.com/liqunchen0606/graph-optimal-transport/blob/950c31d362afd3993f276a9503a251ea245cb057/NLP/TexarTransformer/utils/preprocess.py#L74-L77
from __future__ import unicode_literals import collections import re import json import os import numpy as np import pickle import argparse from io import open split_pattern = re.compile(r'([.,!?"\':;)(])') digit_pattern = re.compile(r'\d') pad_token_id, bos_token_id, eos_token_id, unk_token_id = 0, 1, 2, 3 def split_sentence(s, tok=False): if tok: s = s.lower() s = s.replace('\u2019', "'") s = digit_pattern.sub('0', s) words = [] for word in s.split(): if tok: words.extend(split_pattern.split(word)) else: words.append(word) words = [w for w in words if w] return words def open_file(path): return open(path, encoding='utf-8') def read_file(path, tok=False): with open_file(path) as f: for line in f.readlines(): words = split_sentence(line.strip(), tok) yield words def count_words(path, max_vocab_size=40000, tok=False): counts = collections.Counter() for words in read_file(path, tok): for word in words: counts[word] += 1 vocab = [word for (word, _) in counts.most_common(max_vocab_size)] return vocab
MIT License
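A small worked example of `make_array` above, added by the editor. The `unk_token_id == 3` convention comes from the record's context field; the vocabulary and words are made up, and the function is restated inline so the snippet is self-contained.

import numpy as np

unk_token_id = 3
word_id = {'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3, 'hello': 4, 'world': 5}

def make_array(word_id, words):
    ids = [word_id.get(word, unk_token_id) for word in words]
    return np.array(ids, 'i')

print(make_array(word_id, ['hello', 'brave', 'world']))  # -> [4 3 5], 'brave' maps to <unk>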
pocket/recommendation-api
app/models/topic.py
TopicModel.get_topic
python
async def get_topic(slug: str) -> Optional['TopicModel']:
    async with aioboto3.resource('dynamodb', endpoint_url=dynamodb_config['endpoint_url']) as dynamodb:
        table = await dynamodb.Table(dynamodb_config['metadata']['table'])
        response = await table.query(IndexName='slug', Limit=1, KeyConditionExpression=Key('slug').eq(slug))
        if response['Items']:
            return TopicModel.parse_obj(response['Items'][0])
        raise ValueError('Topic not found')
Retrieves a single topic from dynamo db

:param slug: string slug of the topic to be retrieved
:return: a TopicModel object
https://github.com/pocket/recommendation-api/blob/7bac8144fd3b896a021cb7e8e443eef5200498fb/app/models/topic.py#L53-L65
import aioboto3 from aws_xray_sdk.core import xray_recorder from boto3.dynamodb.conditions import Key from enum import Enum from pydantic import BaseModel from typing import Optional from app.config import dynamodb as dynamodb_config class PageType(str, Enum): editorial_collection = 'editorial_collection' topic_page = 'topic_page' class TopicModel(BaseModel): id: str display_name: str page_type: PageType slug: str query: str curator_label: str is_displayed: bool is_promoted: bool display_note: str = None social_title: str = None social_description: str = None social_image: str = None custom_feed_id: str = None @staticmethod @xray_recorder.capture_async('models_topic_get_all') async def get_all() -> ['TopicModel']: async with aioboto3.resource('dynamodb', endpoint_url=dynamodb_config['endpoint_url']) as dynamodb: table = await dynamodb.Table(dynamodb_config['metadata']['table']) response = await table.scan() return sorted(list(map(TopicModel.parse_obj, response['Items'])), key=lambda topic: topic.slug) @staticmethod @xray_recorder.capture_async('models_topic_get_topic')
Apache License 2.0
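A hedged, editor-added usage sketch for the `TopicModel.get_topic` coroutine above. It assumes a reachable DynamoDB endpoint configured via app.config.dynamodb, an active X-Ray recorder context, and an existing "technology" slug; none of these come from the record itself.

import asyncio

from app.models.topic import TopicModel  # import path taken from the record's function_path

async def main():
    try:
        topic = await TopicModel.get_topic('technology')   # placeholder slug
        print(topic.display_name, topic.page_type)
    except ValueError:
        print('Topic not found')

asyncio.run(main())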
vincent-lg/tsunami
src/secondaires/calendrier/commandes/calendrier/__init__.py
CmdCalendrier.__init__
python
def __init__(self):
    Commande.__init__(self, "calendrier", "calendar")
    self.groupe = "administrateur"
    self.aide_courte = "permet de consulter le calendrier"
    self.aide_longue = \
        "Cette commande permet de consulter le calendrier et de " \
        "créer des évènements, de les éditer ou commenter."
Constructor of the command
https://github.com/vincent-lg/tsunami/blob/36b3b974f6eefbf15cd5d5f099fc14630e66570b/src/secondaires/calendrier/commandes/calendrier/__init__.py#L44-L51
from primaires.interpreteur.commande.commande import Commande from .editer import PrmEditer from .creer import PrmCreer from .detail import PrmDetail from .voir import PrmVoir class CmdCalendrier(Commande):
BSD 3-Clause New or Revised License
jobovy/galpy
galpy/df/jeans.py
sigmar
python
def sigmar(Pot, r, dens=None, beta=0.):
    Pot = flatten_pot(Pot)
    if dens is None:
        dens = lambda r: evaluateDensities(Pot, r*_INVSQRTTWO, r*_INVSQRTTWO,
                                           phi=numpy.pi/4.,
                                           use_physical=False)
    if callable(beta):
        intFactor = lambda x: numpy.exp(2.*integrate.quad(lambda y: beta(y)/y,
                                                          1., x)[0])
    else:
        intFactor = lambda x: x**(2.*beta)
    return numpy.sqrt(integrate.quad(lambda x: -intFactor(x)*dens(x)
                                     *evaluaterforces(Pot, x*_INVSQRTTWO,
                                                      x*_INVSQRTTWO,
                                                      phi=numpy.pi/4.,
                                                      use_physical=False),
                                     r, numpy.inf)[0]
                      / dens(r) / intFactor(r))
NAME:
    sigmar
PURPOSE:
    Compute the radial velocity dispersion using the spherical Jeans equation
INPUT:
    Pot - potential or list of potentials (evaluated at R=r/sqrt(2), z=r/sqrt(2), sphericity not checked)
    r - Galactocentric radius (can be Quantity)
    dens= (None) tracer density profile (function of r); if None, the density is assumed to be that corresponding to the potential
    beta= (0.) anisotropy; can be a constant or a function of r
OUTPUT:
    sigma_r(r)
HISTORY:
    2018-07-05 - Written - Bovy (UofT)
https://github.com/jobovy/galpy/blob/0470fa3e990f44319e9340497f669699d1bf1008/galpy/df/jeans.py#L11-L57
import numpy from scipy import integrate from ..potential.Potential import evaluateDensities, evaluaterforces, evaluateSurfaceDensities from ..potential.Potential import flatten as flatten_pot from ..util.conversion import physical_conversion, potential_physical_input _INVSQRTTWO= 1./numpy.sqrt(2.) @potential_physical_input @physical_conversion('velocity',pop=True)
BSD 3-Clause New or Revised License
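To make explicit what the quadrature in `sigmar` above computes, here is the standard spherical-Jeans solution it implements, written out by the editor in LaTeX. Here \nu is the tracer density `dens`, and `evaluaterforces` returns -d\Phi/dr, which is where the minus sign in the integrand comes from.

\sigma_r^2(r) \;=\; \frac{1}{\nu(r)\,f(r)} \int_r^{\infty} f(x)\,\nu(x)\,
    \frac{\mathrm{d}\Phi}{\mathrm{d}x}\,\mathrm{d}x,
\qquad
f(x) \;=\; \exp\!\left(2\int_1^{x}\frac{\beta(y)}{y}\,\mathrm{d}y\right)
       \;=\; x^{2\beta}\ \text{for constant }\beta .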
timothyb0912/pylogit
tests/test_construct_estimator.py
ConstructorTests.setUp
python
def setUp(self):
    self.asym_model = self.make_asym_model()
    self.uneven_model, self.scobit_model = self.make_uneven_and_scobit_models()
    self.clog_model, self.mnl_model = self.make_clog_and_mnl_models()
    self.mixed_model = self.make_mixed_model()
    self.nested_model = self.make_nested_model()
    return None
Create the real model objects.
https://github.com/timothyb0912/pylogit/blob/cffc9c523b5368966ef2481c7dc30f0a5d296de8/tests/test_construct_estimator.py#L563-L573
import unittest from collections import OrderedDict from copy import deepcopy import numpy as np import numpy.testing as npt import pandas as pd from scipy.sparse import csr_matrix, eye import pylogit.asym_logit as asym import pylogit.conditional_logit as mnl import pylogit.clog_log as clog import pylogit.scobit as scobit import pylogit.uneven_logit as uneven import pylogit.mixed_logit_calcs as mlc import pylogit.mixed_logit as mixed_logit import pylogit.nested_logit as nested_logit import pylogit.construct_estimator as constructor class ConstructorTests(unittest.TestCase): def make_asym_model(self): fake_betas = np.array([-0.6]) fake_intercepts = np.array([1, 0.5]) fake_intercept_names = ["ASC 1", "ASC 2"] fake_intercept_ref_pos = 2 fake_shapes = np.array([-1, 1]) fake_shape_names = ["Shape 1", "Shape 2"] fake_shape_ref_pos = 2 natural_shapes = asym._convert_eta_to_c(fake_shapes, fake_shape_ref_pos) fake_all_params = np.concatenate((fake_shapes, fake_intercepts, fake_betas)) fake_rows_to_obs = csr_matrix(np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1]])) fake_design = np.array([[1], [2], [3], [1.5], [3.5]]) fake_index = fake_design.dot(fake_betas) fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2], "alt_id": [1, 2, 3, 1, 3], "choice": [0, 1, 0, 0, 1], "x": fake_design[:, 0], "intercept": [1 for i in range(5)]}) alt_id_col = "alt_id" obs_id_col = "obs_id" choice_col = "choice" fake_specification = OrderedDict() fake_names = OrderedDict() fake_specification["x"] = [[1, 2, 3]] fake_names["x"] = ["x (generic coefficient)"] constructor_args = [fake_df, alt_id_col, obs_id_col, choice_col, fake_specification] constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos, "shape_ref_pos": fake_shape_ref_pos, "names": fake_names, "intercept_names": fake_intercept_names, "shape_names": fake_shape_names} model_obj = asym.MNAL(*constructor_args, **constructor_kwargs) model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"]) model_obj.intercepts = pd.Series(fake_intercepts, index=fake_intercept_names) model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names) model_obj.nests = None model_obj.params = pd.concat([model_obj.shapes, model_obj.intercepts, model_obj.coefs], axis=0, ignore_index=False) return model_obj def make_uneven_and_scobit_models(self): fake_betas = np.array([-0.6]) fake_intercepts = np.array([1, 0.5]) fake_intercept_names = ["ASC 1", "ASC 2"] fake_intercept_ref_pos = 2 fake_shapes = np.array([-1, 1, 2]) fake_shape_names = ["Shape 1", "Shape 2", "Shape 3"] fake_all_params = np.concatenate((fake_shapes, fake_intercepts, fake_betas)) fake_rows_to_obs = csr_matrix(np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1]])) fake_design = np.array([[1], [2], [3], [1.5], [3.5]]) fake_index = fake_design.dot(fake_betas) fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2], "alt_id": [1, 2, 3, 1, 3], "choice": [0, 1, 0, 0, 1], "x": fake_design[:, 0], "intercept": [1 for i in range(5)]}) alt_id_col = "alt_id" obs_id_col = "obs_id" choice_col = "choice" fake_specification = OrderedDict() fake_names = OrderedDict() fake_specification["x"] = [[1, 2, 3]] fake_names["x"] = ["x (generic coefficient)"] constructor_args = [fake_df, alt_id_col, obs_id_col, choice_col, fake_specification] constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos, "names": fake_names, "intercept_names": fake_intercept_names, "shape_names": fake_shape_names} uneven_obj = uneven.MNUL(*constructor_args, **constructor_kwargs) scobit_obj = scobit.MNSL(*constructor_args, **constructor_kwargs) for model_obj in 
[uneven_obj, scobit_obj]: model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"]) model_obj.intercepts = pd.Series(fake_intercepts, index=fake_intercept_names) model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names) model_obj.nests = None model_obj.params = pd.concat([model_obj.shapes, model_obj.intercepts, model_obj.coefs], axis=0, ignore_index=False) return uneven_obj, scobit_obj def make_clog_and_mnl_models(self): fake_betas = np.array([-0.6]) fake_intercepts = np.array([1, 0.5]) fake_intercept_names = ["ASC 1", "ASC 2"] fake_intercept_ref_pos = 2 fake_all_params = np.concatenate((fake_intercepts, fake_betas)) fake_rows_to_obs = csr_matrix(np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1]])) fake_design = np.array([[1], [2], [3], [1.5], [3.5]]) fake_index = fake_design.dot(fake_betas) fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2], "alt_id": [1, 2, 3, 1, 3], "choice": [0, 1, 0, 0, 1], "x": fake_design[:, 0], "intercept": [1 for i in range(5)]}) alt_id_col = "alt_id" obs_id_col = "obs_id" choice_col = "choice" fake_specification = OrderedDict() fake_names = OrderedDict() fake_specification["x"] = [[1, 2, 3]] fake_names["x"] = ["x (generic coefficient)"] mnl_spec = OrderedDict() mnl_names = OrderedDict() mnl_spec["intercept"] =[1, 2] mnl_names["intercept"] = fake_intercept_names mnl_spec["x"] = fake_specification["x"] mnl_names["x"] = fake_names["x"] clog_args = [fake_df, alt_id_col, obs_id_col, choice_col, fake_specification] mnl_args = deepcopy(clog_args) mnl_args[-1] = mnl_spec clog_kwargs = {"names": fake_names, "intercept_ref_pos": fake_intercept_ref_pos, "intercept_names": fake_intercept_names} mnl_kwargs = {"names": mnl_names} clog_obj = clog.MNCL(*clog_args, **clog_kwargs) mnl_obj = mnl.MNL(*mnl_args, **mnl_kwargs) clog_obj.coefs = pd.Series(fake_betas, index=fake_names["x"]) clog_obj.intercepts = pd.Series(fake_intercepts, index=fake_intercept_names) clog_obj.shapes = None clog_obj.nests = None clog_obj.params = pd.concat([clog_obj.intercepts, clog_obj.coefs], axis=0, ignore_index=False) mnl_obj.params = clog_obj.params.copy() mnl_obj.coefs = mnl_obj.params.copy() mnl_obj.intercepts = None mnl_obj.shapes = None mnl_obj.nests = None return clog_obj, mnl_obj def make_mixed_model(self): fake_draws = mlc.get_normal_draws(2, 2, 1, seed=1)[0] fake_betas = np.array([0.3, -0.6, 0.2]) fake_std = 1 fake_betas_ext = np.concatenate((fake_betas, np.array([fake_std])), axis=0) fake_design = np.array([[1, 0, 1], [0, 1, 2], [0, 0, 3], [1, 0, 1.5], [0, 1, 2.5], [0, 0, 3.5], [1, 0, 0.5], [0, 1, 1.0], [0, 0, 1.5]]) mixing_pos = [2] situation_ids = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]) individual_ids = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2]) alternative_ids = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) choice_array = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0]) obs_1_rows = np.ones(fake_design.shape[0]) obs_1_rows[-3:] = 0 obs_2_rows = 1 - obs_1_rows fake_rows_to_mixers = csr_matrix(obs_1_rows[:, None] == np.array([1, 0])[None, :]) fake_rows_to_obs = csr_matrix(situation_ids[:, None] == np.arange(1, 4)[None, :]) arrays_to_join = (fake_design.copy(), fake_design.copy()[:, -1][:, None]) fake_design_draw_1 = np.concatenate(arrays_to_join, axis=1) fake_design_draw_2 = fake_design_draw_1.copy() fake_design_draw_1[:, -1] *= (obs_1_rows * fake_draws[0, 0] + obs_2_rows * fake_draws[1, 0]) fake_design_draw_2[:, -1] *= (obs_1_rows * fake_draws[0, 1] + obs_2_rows * fake_draws[1, 1]) extended_design_draw_1 = fake_design_draw_1[:, None, :] extended_design_draw_2 = fake_design_draw_2[:, None, :] 
fake_design_3d = np.concatenate((extended_design_draw_1, extended_design_draw_2), axis=1) sys_utilities_draw_1 = fake_design_draw_1.dot(fake_betas_ext) sys_utilities_draw_2 = fake_design_draw_2.dot(fake_betas_ext) long_exp_draw_1 = np.exp(sys_utilities_draw_1) long_exp_draw_2 = np.exp(sys_utilities_draw_2) ind_exp_sums_draw_1 = fake_rows_to_obs.T.dot(long_exp_draw_1) ind_exp_sums_draw_2 = fake_rows_to_obs.T.dot(long_exp_draw_2) long_exp_sum_draw_1 = fake_rows_to_obs.dot(ind_exp_sums_draw_1) long_exp_sum_draw_2 = fake_rows_to_obs.dot(ind_exp_sums_draw_2) long_probs_draw_1 = long_exp_draw_1 / long_exp_sum_draw_1 long_probs_draw_2 = long_exp_draw_2 / long_exp_sum_draw_2 prob_array = np.concatenate((long_probs_draw_1[:, None], long_probs_draw_2[:, None]), axis=1) alt_id_column = "alt_id" situation_id_column = "situation_id" obs_id_column = "observation_id" choice_column = "choice" data = {"x": fake_design[:, 2], alt_id_column: alternative_ids, situation_id_column: situation_ids, obs_id_column: individual_ids, choice_column: choice_array} fake_old_df = pd.DataFrame(data) fake_old_df["intercept"] = 1 fake_spec = OrderedDict() fake_names = OrderedDict() fake_spec["intercept"] = [1, 2] fake_names["intercept"] = ["ASC 1", "ASC 2"] fake_spec["x"] = [[1, 2, 3]] fake_names["x"] = ["beta_x"] fake_mixing_vars = ["beta_x"] args = [fake_old_df, alt_id_column, situation_id_column, choice_column, fake_spec] kwargs = {"names": fake_names, "mixing_id_col": obs_id_column, "mixing_vars": fake_mixing_vars} mixl_obj = mixed_logit.MixedLogit(*args, **kwargs) mixl_obj.design_3d = fake_design_3d mixl_obj.ind_var_names += ["Sigma X"] mixl_obj.coefs = pd.Series(fake_betas_ext, index=mixl_obj.ind_var_names) mixl_obj.intercepts = None mixl_obj.shapes = None mixl_obj.nests = None mixl_obj.params = mixl_obj.coefs.copy() return mixl_obj def make_nested_model(self): fake_betas = np.array([0.3, -0.6, 0.2]) natural_nest_coefs = np.array([1 - 1e-16, 0.5]) fake_all_params = np.concatenate((natural_nest_coefs, fake_betas)) fake_rows_to_nests = csr_matrix(np.array([[1, 0], [1, 0], [0, 1], [1, 0], [0, 1]])) fake_rows_to_obs = csr_matrix(np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1]])) fake_design = np.array([[1, 0, 1], [0, 1, 2], [0, 0, 3], [1, 0, 1.5], [0, 0, 3.5]]) fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2], "alt_id": [1, 2, 3, 1, 3], "choice": [0, 1, 0, 0, 1], "x": range(5), "intercept": [1 for i in range(5)]}) alt_id_col = "alt_id" obs_id_col = "obs_id" choice_col = "choice" choice_array = fake_df[choice_col].values fake_chosen_rows_to_obs = csr_matrix(np.array([[0, 0], [1, 0], [0, 0], [0, 0], [0, 1]])) fake_specification = OrderedDict() fake_specification["intercept"] = [1, 2] fake_specification["x"] = [[1, 2, 3]] fake_names = OrderedDict() fake_names["intercept"] = ["ASC 1", "ASC 2"] fake_names["x"] = ["x (generic coefficient)"] fake_nest_spec = OrderedDict() fake_nest_spec["Nest 1"] = [1, 2] fake_nest_spec["Nest 2"] = [3] args = [fake_df, alt_id_col, obs_id_col, choice_col, fake_specification] kwargs = {"names": fake_names, "nest_spec": fake_nest_spec} model_obj = nested_logit.NestedLogit(*args, **kwargs) model_obj.coefs = pd.Series(fake_betas, index=model_obj.ind_var_names) model_obj.intercepts = None model_obj.shapes = None def logit(x): return np.log(x / (1 - x)) model_obj.nests = pd.Series(logit(natural_nest_coefs), index=fake_nest_spec.keys()) model_obj.params = pd.concat([model_obj.nests, model_obj.coefs], axis=0, ignore_index=False) return model_obj
BSD 3-Clause New or Revised License
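A note on the mixed logit fixture above: prob_array stacks one column of logit probabilities per random draw. The sketch below is illustrative only and not part of the test file; it shows the usual next step of averaging those per-draw probabilities across draws to obtain simulated choice probabilities. The helper name simulate_choice_probs and the toy numbers are assumptions, and each row is treated as an independent choice situation.

import numpy as np

def simulate_choice_probs(prob_array):
    # prob_array[i, d] = probability of row i's alternative under draw d,
    # e.g. the (rows x 2) prob_array built in make_mixed_model above.
    # Averaging over the draw axis gives the simulated choice probability.
    return prob_array.mean(axis=1)

# Toy check with two rows and two draws.
toy_probs = np.array([[0.6, 0.4],
                      [0.4, 0.6]])
print(simulate_choice_probs(toy_probs))  # -> [0.5 0.5]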
facebookincubator/reindeer
build/fbcode_builder/CMake/make_fbpy_archive.py
build_install_dir
python
def build_install_dir(args, path_map):
    dest_dir = os.path.dirname(args.output)
    with tempfile.TemporaryDirectory(prefix="make_fbpy.", dir=dest_dir) as tmpdir:
        inst_dir = os.path.join(tmpdir, "tree")
        populate_install_tree(inst_dir, path_map)
        create_main_module(args, inst_dir, path_map)
        os.rename(inst_dir, args.output)
Create a directory that contains all of the sources, with a __main__ module to run the program.
https://github.com/facebookincubator/reindeer/blob/d3a70b069cd6774f2be374fa19bea68a3cb6142c/build/fbcode_builder/CMake/make_fbpy_archive.py#L167-L179
import argparse
import collections
import errno
import os
import shutil
import sys
import tempfile
import zipapp

MANIFEST_SEPARATOR = " :: "
MANIFEST_HEADER_V1 = "FBPY_MANIFEST 1\n"


class UsageError(Exception):
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message


class BadManifestError(UsageError):
    def __init__(self, path, line_num, message):
        full_msg = "%s:%s: %s" % (path, line_num, message)
        super().__init__(full_msg)
        self.path = path
        self.line_num = line_num
        self.raw_message = message


PathInfo = collections.namedtuple(
    "PathInfo", ("src", "dest", "manifest_path", "manifest_line")
)


def parse_manifest(manifest, path_map):
    bad_prefix = ".." + os.path.sep
    manifest_dir = os.path.dirname(manifest)
    with open(manifest, "r") as f:
        line_num = 1
        line = f.readline()
        if line != MANIFEST_HEADER_V1:
            raise BadManifestError(
                manifest, line_num, "Unexpected manifest file header"
            )
        for line in f:
            line_num += 1
            if line.startswith("#"):
                continue
            line = line.rstrip("\n")
            parts = line.split(MANIFEST_SEPARATOR)
            if len(parts) != 2:
                msg = "line must be of the form SRC %s DEST" % MANIFEST_SEPARATOR
                raise BadManifestError(manifest, line_num, msg)
            src, dest = parts
            dest = os.path.normpath(dest)
            if dest.startswith(bad_prefix):
                msg = "destination path starts with %s: %s" % (bad_prefix, dest)
                raise BadManifestError(manifest, line_num, msg)
            if not os.path.isabs(src):
                src = os.path.normpath(os.path.join(manifest_dir, src))
            if dest in path_map:
                prev_info = path_map[dest]
                msg = (
                    "multiple source paths specified for destination "
                    "path %s. Previous source was %s from %s:%s"
                    % (
                        dest,
                        prev_info.src,
                        prev_info.manifest_path,
                        prev_info.manifest_line,
                    )
                )
                raise BadManifestError(manifest, line_num, msg)
            info = PathInfo(
                src=src,
                dest=dest,
                manifest_path=manifest,
                manifest_line=line_num,
            )
            path_map[dest] = info


def populate_install_tree(inst_dir, path_map):
    os.mkdir(inst_dir)
    dest_dirs = {"": False}

    def make_dest_dir(path):
        if path in dest_dirs:
            return
        parent = os.path.dirname(path)
        make_dest_dir(parent)
        abs_path = os.path.join(inst_dir, path)
        os.mkdir(abs_path)
        dest_dirs[path] = False

    def install_file(info):
        dir_name, base_name = os.path.split(info.dest)
        make_dest_dir(dir_name)
        if base_name == "__init__.py":
            dest_dirs[dir_name] = True
        abs_dest = os.path.join(inst_dir, info.dest)
        shutil.copy2(info.src, abs_dest)

    for info in path_map.values():
        install_file(info)

    for dir_path, has_init in dest_dirs.items():
        if has_init:
            continue
        init_path = os.path.join(inst_dir, dir_path, "__init__.py")
        with open(init_path, "w"):
            pass


def build_zipapp(args, path_map):
    dest_dir = os.path.dirname(args.output)
    with tempfile.TemporaryDirectory(prefix="make_fbpy.", dir=dest_dir) as tmpdir:
        inst_dir = os.path.join(tmpdir, "tree")
        populate_install_tree(inst_dir, path_map)
        tmp_output = os.path.join(tmpdir, "output.exe")
        zipapp.create_archive(
            inst_dir, target=tmp_output, interpreter=args.python, main=args.main
        )
        os.replace(tmp_output, args.output)


def create_main_module(args, inst_dir, path_map):
    if not args.main:
        assert "__main__.py" in path_map
        return
    dest_path = os.path.join(inst_dir, "__main__.py")
    main_module, main_fn = args.main.split(":")
    main_contents = """\
#!{python}
if __name__ == "__main__":
    import {main_module}
    {main_module}.{main_fn}()
""".format(
        python=args.python, main_module=main_module, main_fn=main_fn
    )
    with open(dest_path, "w") as f:
        f.write(main_contents)
    os.chmod(dest_path, 0o755)
MIT License
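For context on the make_fbpy_archive.py helpers above: the functions shown expect an args object with output, python, and main attributes, plus a path_map of PathInfo records keyed by destination path. The driver below is a hypothetical sketch, not the script's actual command-line entry point; the manifest path, output name, and the "mypkg.cli:main" entry are made-up examples.

import types

def make_archive(manifest_path, output, python="/usr/bin/python3",
                 main="mypkg.cli:main", use_zipapp=False):
    # Hypothetical driver: collect sources from one manifest, then build
    # either a plain install tree or a zipapp-style single-file archive.
    path_map = {}  # dest path -> PathInfo, filled in by parse_manifest()
    parse_manifest(manifest_path, path_map)
    args = types.SimpleNamespace(output=output, python=python, main=main)
    if use_zipapp:
        build_zipapp(args, path_map)
    else:
        build_install_dir(args, path_map)

# e.g. make_archive("out/manifest.txt", "out/mytool.par")  # example paths only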