Dataset columns:
- repository_name: string, lengths 7 to 107
- function_path: string, lengths 4 to 190
- function_identifier: string, lengths 1 to 236
- language: string, 1 distinct value
- function: string, lengths 9 to 647k
- docstring: string, lengths 5 to 488k
- function_url: string, lengths 71 to 285
- context: string, lengths 0 to 2.51M
- license: string, 5 distinct values
blurstudio/cross3d
cross3d/abstract/abstractscenelayer.py
AbstractSceneLayer.currentAltPropSet
python
def currentAltPropSet(self):
    return self.altPropSetAt(self._altPropIndex)
Retrieve the alternate object property set at the inputted index. :rtype: :class:`cross3d.ScenePropSet`
https://github.com/blurstudio/cross3d/blob/277968d1227de740fc87ef61005c75034420eadf/cross3d/abstract/abstractscenelayer.py#L279-L285
import cross3d from cross3d import abstractmethod from abstractcontainer import AbstractContainer class AbstractSceneLayer(AbstractContainer): def __init__(self, scene, nativeLayer): AbstractContainer.__init__(self, scene, nativeLayer) self._altMtlIndex = -1 self._altPropIndex = -1 @abstractmethod def _nativeAltMaterials(self): return [] @abstractmethod def _nativeLayerGroup(self): return None @abstractmethod def _setNativeAltMaterialAt(self, index, nativeMaterial): return False @abstractmethod def _setNativeAltMaterials(self, nativeMaterials): return False @abstractmethod def _setNativeLayerGroup(self, nativeLayerGroup): return False @abstractmethod def _setNativeWireColor(self, nativeColor): return False @abstractmethod def _nativeWireColor(self): return None def addAltMaterial(self, material): altMtls = self.altMaterials() altMtls.append(material) return self.setAltMaterials(altMtls) def addAltPropSet(self, propSet): propSets = self.altPropSets() propSets.append(propSet) return self.setAltPropSets(propSets) @abstractmethod def advancedAltMaterialStateAt(self, index): return {} def altMaterialAt(self, index): mtls = self._nativeAltMaterials() if (0 <= index and index < len(mtls)): mtl = mtls[index] if (mtl): from cross3d import SceneMaterial return SceneMaterial(self._scene, mtl) else: return None return None def altMaterialCount(self): return len(self._nativeAltMaterials()) def altMaterials(self): from cross3d import SceneMaterial output = [] for mtl in self._nativeAltMaterials(): if (mtl): output.append(SceneMaterial(self._scene, mtl)) else: output.append(None) return output @abstractmethod def altMaterialFlags(self): return [] def altMaterialFlagsAt(self, index): flags = self.altMaterialFlags() if (0 <= index and index < len(flags)): return flags[index] return 0 def altPropSetAt(self, index): propsets = self.altPropSets() if (0 <= index and index < len(propsets)): return propsets[index] return None def altPropSetCount(self): return len(self.altPropSets()) @abstractmethod def altPropSets(self): return [] def defineAltMaterialAt(self, index, material): existing = self.altMaterialAt(index) if (not existing): return self.setAltMaterialAt(index, material) return False def defineAltPropSetAt(self, index, propSet): existing = self.altPropSetAt(index) if (not (existing and existing.isActive())): return self.setAltPropSetAt(index, propSet) return False def currentAltMaterialIndex(self): return self._altMtlIndex def currentAltMaterial(self): return self.altMaterialAt(self._altMtlIndex) def currentAltPropSetIndex(self): return self._altPropIndex
MIT License
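A minimal sketch of the lookup pattern behind currentAltPropSet(): the method defers to altPropSetAt() with the stored _altPropIndex, which defaults to -1 (no alternate set active). The Layer class below is a hypothetical stand-in, not part of cross3d.

# Toy illustration of the index-guarded lookup; Layer is not a cross3d class.
class Layer:
    def __init__(self, prop_sets):
        self._prop_sets = list(prop_sets)
        self._altPropIndex = -1          # -1 means "no alternate set active"

    def altPropSetAt(self, index):
        # Return the set at `index`, or None when the index is out of range.
        if 0 <= index < len(self._prop_sets):
            return self._prop_sets[index]
        return None

    def currentAltPropSet(self):
        return self.altPropSetAt(self._altPropIndex)

layer = Layer(["propsetA", "propsetB"])
print(layer.currentAltPropSet())   # None, since _altPropIndex is still -1
layer._altPropIndex = 1
print(layer.currentAltPropSet())   # 'propsetB'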
ai4finance-llc/neofinrl
stable_baselines3/common/off_policy_algorithm.py
OffPolicyAlgorithm._convert_train_freq
python
def _convert_train_freq(self) -> None:
    if not isinstance(self.train_freq, TrainFreq):
        train_freq = self.train_freq
        if not isinstance(train_freq, tuple):
            train_freq = (train_freq, "step")
        try:
            train_freq = (train_freq[0], TrainFrequencyUnit(train_freq[1]))
        except ValueError:
            raise ValueError(f"The unit of the `train_freq` must be either 'step' or 'episode' not '{train_freq[1]}'!")
        if not isinstance(train_freq[0], int):
            raise ValueError(f"The frequency of `train_freq` must be an integer and not {train_freq[0]}")
        self.train_freq = TrainFreq(*train_freq)
Convert `train_freq` parameter (int or tuple) to a TrainFreq object.
https://github.com/ai4finance-llc/neofinrl/blob/51338dbb0ec86f74e4fc6cce90bc385a4639de79/stable_baselines3/common/off_policy_algorithm.py#L155-L175
import io import pathlib import time import warnings from typing import Any, Dict, List, Optional, Tuple, Type, Union import gym import numpy as np import torch as th from stable_baselines3.common.base_class import BaseAlgorithm from stable_baselines3.common.buffers import DictReplayBuffer, ReplayBuffer from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.save_util import load_from_pkl, save_to_pkl from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule, TrainFreq, TrainFrequencyUnit from stable_baselines3.common.utils import safe_mean, should_collect_more_steps from stable_baselines3.common.vec_env import VecEnv from stable_baselines3.her.her_replay_buffer import HerReplayBuffer class OffPolicyAlgorithm(BaseAlgorithm): def __init__( self, policy: Type[BasePolicy], env: Union[GymEnv, str], policy_base: Type[BasePolicy], learning_rate: Union[float, Schedule], buffer_size: int = 1000000, learning_starts: int = 100, batch_size: int = 256, tau: float = 0.005, gamma: float = 0.99, train_freq: Union[int, Tuple[int, str]] = (1, "step"), gradient_steps: int = 1, action_noise: Optional[ActionNoise] = None, replay_buffer_class: Optional[ReplayBuffer] = None, replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, policy_kwargs: Optional[Dict[str, Any]] = None, tensorboard_log: Optional[str] = None, verbose: int = 0, device: Union[th.device, str] = "auto", support_multi_env: bool = False, create_eval_env: bool = False, monitor_wrapper: bool = True, seed: Optional[int] = None, use_sde: bool = False, sde_sample_freq: int = -1, use_sde_at_warmup: bool = False, sde_support: bool = True, remove_time_limit_termination: bool = False, supported_action_spaces: Optional[Tuple[gym.spaces.Space, ...]] = None, ): super(OffPolicyAlgorithm, self).__init__( policy=policy, env=env, policy_base=policy_base, learning_rate=learning_rate, policy_kwargs=policy_kwargs, tensorboard_log=tensorboard_log, verbose=verbose, device=device, support_multi_env=support_multi_env, create_eval_env=create_eval_env, monitor_wrapper=monitor_wrapper, seed=seed, use_sde=use_sde, sde_sample_freq=sde_sample_freq, supported_action_spaces=supported_action_spaces, ) self.buffer_size = buffer_size self.batch_size = batch_size self.learning_starts = learning_starts self.tau = tau self.gamma = gamma self.gradient_steps = gradient_steps self.action_noise = action_noise self.optimize_memory_usage = optimize_memory_usage self.replay_buffer_class = replay_buffer_class if replay_buffer_kwargs is None: replay_buffer_kwargs = {} self.replay_buffer_kwargs = replay_buffer_kwargs self._episode_storage = None self.remove_time_limit_termination = remove_time_limit_termination self.train_freq = train_freq self.actor = None self.replay_buffer = None if sde_support: self.policy_kwargs["use_sde"] = self.use_sde self.use_sde_at_warmup = use_sde_at_warmup
MIT License
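A self-contained sketch of the normalization this method performs, assuming simplified stand-ins for stable_baselines3's TrainFreq and TrainFrequencyUnit types: a bare int is treated as a per-step frequency, while a (count, unit) tuple has its unit validated.

# Stand-ins for the real SB3 type aliases, simplified for this sketch.
from collections import namedtuple
from enum import Enum

class TrainFrequencyUnit(Enum):
    STEP = "step"
    EPISODE = "episode"

TrainFreq = namedtuple("TrainFreq", ["frequency", "unit"])

def convert_train_freq(train_freq):
    if not isinstance(train_freq, tuple):
        train_freq = (train_freq, "step")            # bare int means "every N steps"
    try:
        train_freq = (train_freq[0], TrainFrequencyUnit(train_freq[1]))
    except ValueError:
        raise ValueError(f"The unit of `train_freq` must be 'step' or 'episode', not '{train_freq[1]}'!")
    if not isinstance(train_freq[0], int):
        raise ValueError(f"The frequency of `train_freq` must be an integer, not {train_freq[0]}")
    return TrainFreq(*train_freq)

print(convert_train_freq(4))               # TrainFreq(frequency=4, unit=TrainFrequencyUnit.STEP)
print(convert_train_freq((1, "episode")))  # TrainFreq(frequency=1, unit=TrainFrequencyUnit.EPISODE)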
mozilla/relman-auto-nag
auto_nag/bzcleaner.py
BzCleaner.get_bz_params
python
def get_bz_params(self, date):
    return {}
Get the Bugzilla parameters for the search query
https://github.com/mozilla/relman-auto-nag/blob/d9729ce52abdc90a377ce038cc49b10c87bd384a/auto_nag/bzcleaner.py#L153-L155
import argparse import os import sys import time import six from dateutil.relativedelta import relativedelta from jinja2 import Environment, FileSystemLoader from libmozdata import utils as lmdutils from libmozdata.bugzilla import Bugzilla from auto_nag import db, logger, mail, utils from auto_nag.cache import Cache from auto_nag.nag_me import Nag class BzCleaner(object): def __init__(self): super(BzCleaner, self).__init__() self._set_tool_name() self.has_autofix = False self.no_manager = set() self.auto_needinfo = {} self.has_flags = False self.cache = Cache(self.name(), self.max_days_in_cache()) self.test_mode = utils.get_config("common", "test", False) self.versions = None logger.info("Run tool {}".format(self.get_tool_path())) def _set_tool_name(self): module = sys.modules[self.__class__.__module__] base = os.path.dirname(__file__) scripts = os.path.join(base, "scripts") self.__tool_path__ = os.path.relpath(module.__file__, scripts) name = os.path.basename(module.__file__) name = os.path.splitext(name)[0] self.__tool_name__ = name def init_versions(self): self.versions = utils.get_checked_versions() return bool(self.versions) def max_days_in_cache(self): return self.get_config("max_days_in_cache", -1) def preamble(self): return None def description(self): return "" def name(self): return self.__tool_name__ def get_tool_path(self): return self.__tool_path__ def needinfo_template(self): return self.name() + "_needinfo.txt" def template(self): return self.name() + ".html" def subject(self): return self.description() def get_email_subject(self, date): af = "[autofix]" if self.has_autofix else "" if date: return "[autonag]{} {} for the {}".format(af, self.subject(), date) return "[autonag]{} {}".format(af, self.subject()) def ignore_date(self): return False def must_run(self, date): return True def has_enough_data(self): if self.versions is None: return True return bool(self.versions) def filter_no_nag_keyword(self): return True def add_no_manager(self, bugid): self.no_manager.add(str(bugid)) def has_assignee(self): return False def has_needinfo(self): return False def get_mail_to_auto_ni(self, bug): return None def all_include_fields(self): return False def get_max_ni(self): return -1 def ignore_meta(self): return False def columns(self): return ["id", "summary"] def sort_columns(self): return None def get_dates(self, date): date = lmdutils.get_date_ymd(date) lookup = self.get_config("days_lookup", 7) start_date = date - relativedelta(days=lookup) end_date = date + relativedelta(days=1) return start_date, end_date def get_extra_for_template(self): return {} def get_extra_for_needinfo_template(self): return {} def get_config(self, entry, default=None): return utils.get_config(self.name(), entry, default=default)
BSD 3-Clause New or Revised License
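get_bz_params() is a hook that subclasses override to build the Bugzilla search query. The sketch below mimics that pattern with toy classes; the UnconfirmedBugs subclass and its query fields are hypothetical, not taken from auto_nag.

class ToyCleaner:
    def get_bz_params(self, date):
        # Base class contributes no search parameters.
        return {}

class UnconfirmedBugs(ToyCleaner):
    def get_bz_params(self, date):
        # Hypothetical query: unconfirmed bugs changed since `date`.
        return {"bug_status": "UNCONFIRMED", "chfieldfrom": date}

print(UnconfirmedBugs().get_bz_params("2023-01-01"))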
cpwood/pico-stub
dist/micropy-cli/frozen/uio.py
_IOBase.write
python
def write(self, b: Any) -> int: ...
Writes the given bytes-like object, ``b``, to the underlying raw stream, and returns the number of bytes written. :param b: Bytes-like object to write in the stream. :return: The number of bytes written.
https://github.com/cpwood/pico-stub/blob/176af2962b4701805c81afed2e540d39e1adad82/dist/micropy-cli/frozen/uio.py#L93-L102
from typing import Any, List def open(file: str, *, mode: str="r", buffering: int =-1, encoding: str=None) -> Any: ... class _IOBase(object): def read(self, size: int=-1) -> bytes: ... def readinto(self, b: Any) -> int: ... def readline(self, size: int=-1) -> bytes: ...
Apache License 2.0
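The stub above only records the signature; the same write() contract (return the number of bytes written) can be observed on CPython with io.BytesIO, which this small example uses in place of the MicroPython stream.

import io

buf = io.BytesIO()
n = buf.write(b"hello pico")   # write() reports how many bytes were accepted
print(n)                       # 10
print(buf.getvalue())          # b'hello pico'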
brython-dev/brython
www/src/Lib/_pydecimal.py
Decimal._isnan
python
def _isnan(self):
    if self._is_special:
        exp = self._exp
        if exp == 'n':
            return 1
        elif exp == 'N':
            return 2
    return 0
Returns whether the number is not actually one: 0 if a number, 1 if NaN, 2 if sNaN.
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/_pydecimal.py#L717-L730
__all__ = [ 'Decimal', 'Context', 'DecimalTuple', 'DefaultContext', 'BasicContext', 'ExtendedContext', 'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero', 'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow', 'FloatOperation', 'DivisionImpossible', 'InvalidContext', 'ConversionSyntax', 'DivisionUndefined', 'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING', 'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP', 'setcontext', 'getcontext', 'localcontext', 'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY', 'HAVE_THREADS', 'HAVE_CONTEXTVAR' ] __xname__ = __name__ __name__ = 'decimal' __version__ = '1.70' __libmpdec_version__ = "2.4.2" import math as _math import numbers as _numbers import sys try: from collections import namedtuple as _namedtuple DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent') except ImportError: DecimalTuple = lambda *args: args ROUND_DOWN = 'ROUND_DOWN' ROUND_HALF_UP = 'ROUND_HALF_UP' ROUND_HALF_EVEN = 'ROUND_HALF_EVEN' ROUND_CEILING = 'ROUND_CEILING' ROUND_FLOOR = 'ROUND_FLOOR' ROUND_UP = 'ROUND_UP' ROUND_HALF_DOWN = 'ROUND_HALF_DOWN' ROUND_05UP = 'ROUND_05UP' HAVE_THREADS = True HAVE_CONTEXTVAR = True if sys.maxsize == 2**63-1: MAX_PREC = 999999999999999999 MAX_EMAX = 999999999999999999 MIN_EMIN = -999999999999999999 else: MAX_PREC = 425000000 MAX_EMAX = 425000000 MIN_EMIN = -425000000 MIN_ETINY = MIN_EMIN - (MAX_PREC-1) class DecimalException(ArithmeticError): def handle(self, context, *args): pass class Clamped(DecimalException): class InvalidOperation(DecimalException): def handle(self, context, *args): if args: ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True) return ans._fix_nan(context) return _NaN class ConversionSyntax(InvalidOperation): def handle(self, context, *args): return _NaN class DivisionByZero(DecimalException, ZeroDivisionError): def handle(self, context, sign, *args): return _SignedInfinity[sign] class DivisionImpossible(InvalidOperation): def handle(self, context, *args): return _NaN class DivisionUndefined(InvalidOperation, ZeroDivisionError): def handle(self, context, *args): return _NaN class Inexact(DecimalException): class InvalidContext(InvalidOperation): def handle(self, context, *args): return _NaN class Rounded(DecimalException): class Subnormal(DecimalException): class Overflow(Inexact, Rounded): def handle(self, context, sign, *args): if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_HALF_DOWN, ROUND_UP): return _SignedInfinity[sign] if sign == 0: if context.rounding == ROUND_CEILING: return _SignedInfinity[sign] return _dec_from_triple(sign, '9'*context.prec, context.Emax-context.prec+1) if sign == 1: if context.rounding == ROUND_FLOOR: return _SignedInfinity[sign] return _dec_from_triple(sign, '9'*context.prec, context.Emax-context.prec+1) class Underflow(Inexact, Rounded, Subnormal): class FloatOperation(DecimalException, TypeError): _signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded, Underflow, InvalidOperation, Subnormal, FloatOperation] _condition_map = {ConversionSyntax:InvalidOperation, DivisionImpossible:InvalidOperation, DivisionUndefined:InvalidOperation, InvalidContext:InvalidOperation} _rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING, ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP) import contextvars _current_context_var = contextvars.ContextVar('decimal_context') def getcontext(): try: return _current_context_var.get() except LookupError: context = Context() _current_context_var.set(context) return 
context def setcontext(context): if context in (DefaultContext, BasicContext, ExtendedContext): context = context.copy() context.clear_flags() _current_context_var.set(context) del contextvars def localcontext(ctx=None): if ctx is None: ctx = getcontext() return _ContextManager(ctx) class Decimal(object): __slots__ = ('_exp','_int','_sign', '_is_special') def __new__(cls, value="0", context=None): self = object.__new__(cls) if isinstance(value, str): m = _parser(value.strip().replace("_", "")) if m is None: if context is None: context = getcontext() return context._raise_error(ConversionSyntax, "Invalid literal for Decimal: %r" % value) if m.group('sign') == "-": self._sign = 1 else: self._sign = 0 intpart = m.group('int') if intpart is not None: fracpart = m.group('frac') or '' exp = int(m.group('exp') or '0') self._int = str(int(intpart+fracpart)) self._exp = exp - len(fracpart) self._is_special = False else: diag = m.group('diag') if diag is not None: self._int = str(int(diag or '0')).lstrip('0') if m.group('signal'): self._exp = 'N' else: self._exp = 'n' else: self._int = '0' self._exp = 'F' self._is_special = True return self if isinstance(value, int): if value >= 0: self._sign = 0 else: self._sign = 1 self._exp = 0 self._int = str(abs(value)) self._is_special = False return self if isinstance(value, Decimal): self._exp = value._exp self._sign = value._sign self._int = value._int self._is_special = value._is_special return self if isinstance(value, _WorkRep): self._sign = value.sign self._int = str(value.int) self._exp = int(value.exp) self._is_special = False return self if isinstance(value, (list,tuple)): if len(value) != 3: raise ValueError('Invalid tuple size in creation of Decimal ' 'from list or tuple. The list or tuple ' 'should have exactly three elements.') if not (isinstance(value[0], int) and value[0] in (0,1)): raise ValueError("Invalid sign. 
The first value in the tuple " "should be an integer; either 0 for a " "positive number or 1 for a negative number.") self._sign = value[0] if value[2] == 'F': self._int = '0' self._exp = value[2] self._is_special = True else: digits = [] for digit in value[1]: if isinstance(digit, int) and 0 <= digit <= 9: if digits or digit != 0: digits.append(digit) else: raise ValueError("The second value in the tuple must " "be composed of integers in the range " "0 through 9.") if value[2] in ('n', 'N'): self._int = ''.join(map(str, digits)) self._exp = value[2] self._is_special = True elif isinstance(value[2], int): self._int = ''.join(map(str, digits or [0])) self._exp = value[2] self._is_special = False else: raise ValueError("The third value in the tuple must " "be an integer, or one of the " "strings 'F', 'n', 'N'.") return self if isinstance(value, float): if context is None: context = getcontext() context._raise_error(FloatOperation, "strict semantics for mixing floats and Decimals are " "enabled") value = Decimal.from_float(value) self._exp = value._exp self._sign = value._sign self._int = value._int self._is_special = value._is_special return self raise TypeError("Cannot convert %r to Decimal" % value) @classmethod def from_float(cls, f): if isinstance(f, int): sign = 0 if f >= 0 else 1 k = 0 coeff = str(abs(f)) elif isinstance(f, float): if _math.isinf(f) or _math.isnan(f): return cls(repr(f)) if _math.copysign(1.0, f) == 1.0: sign = 0 else: sign = 1 n, d = abs(f).as_integer_ratio() k = d.bit_length() - 1 coeff = str(n*5**k) else: raise TypeError("argument must be int or float.") result = _dec_from_triple(sign, coeff, -k) if cls is Decimal: return result else: return cls(result)
BSD 3-Clause New or Revised License
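_isnan() is an internal helper; the same three-way classification is visible through the public decimal API, which this example relies on instead.

from decimal import Decimal

print(Decimal("1.5").is_nan())    # False -> _isnan() == 0, an ordinary number
print(Decimal("NaN").is_nan())    # True  -> _isnan() == 1, quiet NaN
print(Decimal("sNaN").is_snan())  # True  -> _isnan() == 2, signaling NaN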
ikostrikov/pytorch-flows
datasets/util.py
isposint
python
def isposint(n):
    return isinstance(n, int) and n > 0
Determines whether number n is a positive integer. :param n: number :return: bool
https://github.com/ikostrikov/pytorch-flows/blob/bf12ed91b86867b38d74982f5e2d44c248604df9/datasets/util.py#L9-L15
import os import pickle as pickle import matplotlib.pyplot as plt import numpy as np import numpy.random as rng
MIT License
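A few quick calls showing the helper's behaviour; note that booleans slip through the isinstance(n, int) check because bool is a subclass of int.

def isposint(n):
    return isinstance(n, int) and n > 0

print(isposint(3))      # True
print(isposint(0))      # False, zero is not positive
print(isposint(2.0))    # False, floats are rejected
print(isposint(True))   # True, since bool subclasses int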
nextcord/nextcord
examples/basic_voice.py
Music.join
python
async def join(self, ctx, *, channel: nextcord.VoiceChannel):
    if ctx.voice_client is not None:
        return await ctx.voice_client.move_to(channel)
    await channel.connect()
Joins a voice channel
https://github.com/nextcord/nextcord/blob/5b2c64cf4fd0e593f032ec6c2465682e9b67f767/examples/basic_voice.py#L59-L65
import asyncio import nextcord import youtube_dl from nextcord.ext import commands youtube_dl.utils.bug_reports_message = lambda: '' ytdl_format_options = { 'format': 'bestaudio/best', 'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s', 'restrictfilenames': True, 'noplaylist': True, 'nocheckcertificate': True, 'ignoreerrors': False, 'logtostderr': False, 'quiet': True, 'no_warnings': True, 'default_search': 'auto', 'source_address': '0.0.0.0' } ffmpeg_options = { 'options': '-vn' } ytdl = youtube_dl.YoutubeDL(ytdl_format_options) class YTDLSource(nextcord.PCMVolumeTransformer): def __init__(self, source, *, data, volume=0.5): super().__init__(source, volume) self.data = data self.title = data.get('title') self.url = data.get('url') @classmethod async def from_url(cls, url, *, loop=None, stream=False): loop = loop or asyncio.get_event_loop() data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream)) if 'entries' in data: data = data['entries'][0] filename = data['url'] if stream else ytdl.prepare_filename(data) return cls(nextcord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data) class Music(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command()
MIT License
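A minimal mock of the branching in join(): reuse an existing voice client by moving it, otherwise connect to the channel. The Fake* classes are hypothetical stand-ins for nextcord objects so the sketch runs without a bot or token.

import asyncio

class FakeVoiceClient:
    async def move_to(self, channel):
        print(f"moved existing voice client to {channel}")

class FakeChannel:
    def __init__(self, name):
        self.name = name
    async def connect(self):
        print(f"connected to {self.name}")
    def __str__(self):
        return self.name

async def join(voice_client, channel):
    # Same decision as Music.join: move an existing client, else connect fresh.
    if voice_client is not None:
        return await voice_client.move_to(channel)
    await channel.connect()

asyncio.run(join(None, FakeChannel("music")))               # fresh connect
asyncio.run(join(FakeVoiceClient(), FakeChannel("music")))  # move instead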
llvm-mirror/zorg
zorg/jenkins/build.py
should_exclude
python
def should_exclude(base_path, repo_path):
    if base_path == repo_path:
        return False
    if not base_path:
        return True
    if repo_path.startswith(base_path + "/"):
        return True
    return False
Check whether a repo should be excluded in a given rsync.
https://github.com/llvm-mirror/zorg/blob/b78b0c96bff39702d901a22a4198dfa7f02e9907/zorg/jenkins/build.py#L728-L736
import sys import logging import os import subprocess import datetime import time import argparse import shutil import math import re import xml.etree.ElementTree as ET from contextlib import contextmanager from urllib2 import urlopen, URLError, HTTPError SERVER = "labmaster2.lab.llvm.org" NINJA = "/usr/local/bin/ninja" here = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.abspath(here + "/../../dep/")) import dep def readme_name(repo): if repo == "libcxx": return "LICENSE.TXT" return "README.txt" def next_section(name): footer() header(name) def header(name): print "@@@", name, "@@@" def footer(): print "Completed at: " + time.strftime("%FT%T") print "@@@@@@" def quote_sh_string(string): return "\\'".join("'" + p + "'" for p in string.split("'")) class Configuration(object): def __init__(self, args): super(Configuration, self).__init__() self._args = args self.workspace = os.environ.get('WORKSPACE', os.getcwd()) self._src_dir = os.environ.get('SRC_DIR', 'llvm') self._lldb_src_dir = os.environ.get('LLDB_SRC_DIR', 'lldb') self._build_dir = os.environ.get('BUILD_DIR', 'clang-build') self._lldb_build_dir = os.environ.get('LLDB_BUILD_DIR', 'lldb-build') self._install_dir = os.environ.get('INSTALL_DIR', 'clang-install') self.j_level = os.environ.get('J_LEVEL', None) self.max_parallel_tests = os.environ.get('MAX_PARALLEL_TESTS', None) self.max_parallel_links = os.environ.get('MAX_PARALLEL_LINKS', None) self.host_compiler_url = os.environ.get('HOST_URL', 'http://labmaster2.local/artifacts/') self.artifact_url = os.environ.get('ARTIFACT', 'NONE') self.job_name = os.environ.get('JOB_NAME', 'NONE') self.build_id = os.environ.get('BUILD_ID', 'NONE') self.build_number = os.environ.get('BUILD_NUMBER', 'NONE') self.svn_rev = os.environ.get('LLVM_REV', 'NONE') self.nobootstrap = True self.device = None self._svn_url_cache = None self.node_name = os.environ.get('NODE_NAME', None) self.lldb_test_archs = os.environ.get('LLDB_TEST_ARCHS', 'x86_64').split() self.__dict__.update(vars(args)) def builddir(self): return os.path.join(self.workspace, self._build_dir) def srcdir(self): return os.path.join(self.workspace, self._src_dir) def lldbbuilddir(self): return os.path.join(self.workspace, self._lldb_build_dir) def lldbsrcdir(self): return os.path.join(self.workspace, self._lldb_src_dir) def installdir(self): return os.path.join(self.workspace, self._install_dir) def CC(self): cc_basedir = os.path.join(self.workspace, 'host-compiler/') if os.path.exists(cc_basedir): clang_exec_path = os.path.join(cc_basedir, 'bin/clang') assert os.path.exists(clang_exec_path), "host-compiler present," " but has no clang executable." return clang_exec_path else: return False def liblto(self): cc_basedir = os.path.join(self.workspace, 'host-compiler/') if os.path.exists(cc_basedir): clang_liblto_path = os.path.join(cc_basedir, 'lib/') assert os.path.exists(clang_liblto_path), "host-compiler present," " but has no liblto." 
return clang_liblto_path else: return False def branch(self): try: return os.environ['BRANCH'] except: assert self._svn_url is not None BRANCH_MARKER = "/branches/" if BRANCH_MARKER in self._svn_url: wo_branch = self._svn_url.split(BRANCH_MARKER, 1)[1] branch = wo_branch.rsplit("@", 1)[0] return branch else: return "master" @property def _svn_url(self): if self._svn_url_cache: return self._svn_url_cache svn_url = os.environ.get('SVN_URL', os.environ.get('SVN_URL_1', None)) if svn_url is None: svn_url = self.grab_svn_url() self._svn_url_cache = svn_url return svn_url def grab_svn_url(self): if os.environ.get('TESTING', False): return '/foo/workspace/llvm.src' cmd = ['svn', 'info', '--xml', os.path.join(self.workspace, 'llvm.src')] out = run_collect_output(cmd) x = ET.fromstring(out) url = x.find('entry').find('url').text return url def link_memory_usage(self): usages = {'master': 3.5} if self.branch() == 'master': return usages['master'] else: raise NotImplementedError( "Unknown link memory usage." + self.branch()) conf = None def update_svn_checkout(working_dir): next_section("SVN upgrade") out = "" try: run_collect_output(["/usr/bin/xcrun", "svn", "upgrade"], working_dir=working_dir) except subprocess.CalledProcessError as e: msg = """Process return code: {}\n The working path was: {}\n The error was: {}.\n""" msg = msg.format(e.returncode, working_dir, out) print msg def cmake_builder(target): check_repo_state(conf.workspace) if not os.getenv("TESTING"): dep.parse_dependencies([here + "/clang_build_dependencies.dep"]) env = [] dyld_path = "" if conf.lto and conf.liblto(): dyld_path = conf.liblto() env.extend(["env", "DYLD_LIBRARY_PATH=" + dyld_path]) cmake_cmd = env + ["/usr/local/bin/cmake", "-G", "Ninja", '-DCMAKE_MAKE_PROGRAM=' + NINJA, "-DCMAKE_INSTALL_PREFIX=" + conf.installdir(), conf.srcdir()] compiler_flags = conf.compiler_flags max_parallel_links = conf.max_parallel_links if conf.lto: if conf.thinlto: cmake_cmd += ["-DLLVM_PARALLEL_LINK_JOBS=1"] else: cmake_cmd += ["-DLLVM_PARALLEL_LINK_JOBS=" + str(max_link_jobs())] cmake_cmd += ['-DLLVM_BUILD_EXAMPLES=Off'] if not max_parallel_links: max_parallel_links = 1 if dyld_path: cmake_cmd += ['-DDYLD_LIBRARY_PATH=' + dyld_path] else: cmake_cmd += ['-DLLVM_ENABLE_LTO=Off'] cmake_cmd += ['-DLLVM_BUILD_EXAMPLES=On'] cmake_cmd += ["-DCMAKE_MACOSX_RPATH=On"] libtool_path = query_sys_tool("macosx", "libtool") if libtool_path: cmake_cmd += ['-DCMAKE_LIBTOOL=' + libtool_path] if compiler_flags: cmake_cmd += ["-DCMAKE_C_FLAGS={}".format(' '.join(compiler_flags)), "-DCMAKE_CXX_FLAGS={}".format(' '.join(compiler_flags))] if max_parallel_links is not None: cmake_cmd += ["-DLLVM_PARALLEL_LINK_JOBS={}".format(max_parallel_links)] if conf.CC(): cmake_cmd += ['-DCMAKE_C_COMPILER=' + conf.CC(), '-DCMAKE_CXX_COMPILER=' + conf.CC() + "++"] if conf.cmake_build_type: cmake_cmd += ["-DCMAKE_BUILD_TYPE=" + conf.cmake_build_type] elif conf.debug: cmake_cmd += ["-DCMAKE_BUILD_TYPE=Debug"] else: cmake_cmd += ["-DCMAKE_BUILD_TYPE=Release"] cmake_cmd += ["-DLLVM_BUILD_EXTERNAL_COMPILER_RT=On"] for flag in conf.cmake_flags: cmake_cmd += [flag] if conf.assertions: cmake_cmd += ["-DLLVM_ENABLE_ASSERTIONS=On"] else: cmake_cmd += ["-DLLVM_ENABLE_ASSERTIONS=Off"] if conf.globalisel: cmake_cmd += ["-DLLVM_BUILD_GLOBAL_ISEL=ON"] if conf.svn_rev != 'NONE': cmake_cmd += ["-DSVN_REVISION={}".format(conf.svn_rev)] lit_flags = ['--xunit-xml-output=testresults.xunit.xml', '-v', '--timeout=600'] if conf.max_parallel_tests: lit_flags += ['-j', conf.max_parallel_tests] cmake_cmd 
+= ['-DLLVM_LIT_ARGS={}'.format(' '.join(lit_flags))] ninja_cmd = env + ["/usr/local/bin/ninja", '-v'] if conf.j_level is not None: ninja_cmd += ["-j", conf.j_level] if target == 'all' or target == 'build': header("Cmake") run_cmd(conf.builddir(), cmake_cmd) footer() header("Ninja build") passed_target = conf.cmake_build_targets build_target = passed_target if passed_target else ['all'] run_cmd(conf.builddir(), ninja_cmd + build_target) footer() if conf.noinstall: header("Skip install") else: header("Ninja install") run_cmd(conf.builddir(), ninja_cmd + ['install']) build_upload_artifact() footer() ninja_cmd.extend(['-k', '0']) if target == 'all' or target == 'test' or target == 'testlong': header("Ninja test") targets = [ 'check-all'] if target == 'testlong' or target == 'all' else conf.cmake_test_targets if not targets: targets = ['check', 'check-clang'] run_cmd(conf.builddir(), ninja_cmd + targets) footer() def clang_builder(target): check_repo_state(conf.workspace) run_ws(['sh', '-c', 'rm -rfv *gz']) if target == "all" or target == "build": run_ws(['rm', '-rf', 'clang.roots']) debug_src_dir = 'debuginfo-tests.src' sdk_name = 'macosx' sdkroot = query_sdk_path(sdk_name) libtool_path = query_sys_tool(sdk_name, "libtool") next_section("Setup debug-info tests") run_ws(['rm', '-rf', 'llvm/tools/clang/test/debuginfo-tests']) run_cmd(os.path.join(conf.workspace, 'llvm/tools/clang/test'), ['ln', '-sf', os.path.join(conf.workspace, debug_src_dir), 'debuginfo-tests']) project = 'clang' clang_br = os.path.join(conf.workspace, conf._build_dir) next_section("Build Directory") run_ws(["mkdir", "-p", clang_br]) toolchain = '/Applications/Xcode.app/Contents/Developer' '/Toolchains/XcodeDefault.xctoolchain' env = [] dyld_path = "" if conf.lto and conf.liblto(): dyld_path = conf.liblto() env.extend(["env", "DYLD_LIBRARY_PATH=" + dyld_path]) next_section("Build Clang") if conf.nobootstrap: if conf.debug or conf.device: assert False, "Invalid parameter for clang-builder." 
run_cmd(clang_br, ['mkdir', './Build', './Root']) install_prefix = conf.installdir() cmake_cachefile_thinlto = '' if conf.thinlto: cmake_cachefile_thinlto = '-ThinLTO' cmake_cachefile = '{}/llvm/tools/clang/cmake/caches/Apple-stage2{}.cmake'.format( conf.workspace, cmake_cachefile_thinlto) cmake_command = env + ["/usr/local/bin/cmake", '-G', 'Ninja', '-C', cmake_cachefile, '-DLLVM_ENABLE_ASSERTIONS:BOOL={}'.format( "TRUE" if conf.assertions else "FALSE"), '-DCMAKE_BUILD_TYPE=RelWithDebInfo', '-DCMAKE_MAKE_PROGRAM=' + NINJA, '-DLLVM_VERSION_PATCH=99', '-DLLVM_VERSION_SUFFIX=""', '-DLLVM_BUILD_EXTERNAL_COMPILER_RT=On', '-DCLANG_COMPILER_RT_CMAKE_ARGS={}/llvm/projects/compiler-rt/cmake/caches/Apple.cmake'.format( conf.workspace), '-DCOMPILER_RT_BUILD_SANITIZERS=On', '-DCMAKE_INSTALL_PREFIX={}'.format( install_prefix), '-DLLVM_REPOSITORY={}'.format(conf._svn_url), '-DCLANG_REPOSITORY_STRING={}'.format( conf.branch()), '-DCLANG_APPEND_VC_REV=On', '-DSVN_REVISION={}'.format(conf.svn_rev), '-DLLVM_BUILD_TESTS=On', '-DLLVM_INCLUDE_TESTS=On', '-DCLANG_INCLUDE_TESTS=On', '-DLLVM_INCLUDE_UTILS=On', '-DLIBCXX_INSTALL_HEADERS=On', '-DLIBCXX_OVERRIDE_DARWIN_INSTALL=On', '-DLIBCXX_INSTALL_LIBRARY=Off', '-DCMAKE_MACOSX_RPATH=On', ] if dyld_path: cmake_command += ['-DDYLD_LIBRARY_PATH=' + dyld_path] if libtool_path: cmake_command += ['-DCMAKE_LIBTOOL=' + libtool_path] if conf.CC(): cmake_command.extend(['-DCMAKE_C_COMPILER=' + conf.CC(), '-DCMAKE_CXX_COMPILER=' + conf.CC() + "++"]) lit_flags = ['--xunit-xml-output=testresults.xunit.xml', '-v', '--timeout=600'] if conf.max_parallel_tests: lit_flags += ['-j', conf.max_parallel_tests] cmake_command.extend( ['-DLLVM_LIT_ARGS={}'.format(' '.join(lit_flags))]) if conf.thinlto: cmake_command.extend(["-DLLVM_PARALLEL_LINK_JOBS=1"]) elif conf.lto: cmake_command.extend( ["-DLLVM_PARALLEL_LINK_JOBS=" + str(max_link_jobs())]) else: cmake_command.extend(['-DLLVM_ENABLE_LTO=Off']) cmake_command.extend([ '-DCMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -gline-tables-only -DNDEBUG', '-DCMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -gline-tables-only -DNDEBUG']) for flag in conf.cmake_flags: cmake_command += [flag] cmake_command.append("{}/llvm".format(conf.workspace)) run_cmd(os.path.join(clang_br, 'Build'), cmake_command) next_section("Ninja") run_cmd(os.path.join(clang_br, 'Build'), [NINJA, '-v', 'install']) build_upload_artifact() else: print 'Stage two compile TBD in near future' if not conf.device and (target == "test" or target == "all"): next_section("Tests") obj_dir = os.path.join(conf._build_dir, 'Objects/obj-llvm/tools/clang/stage2-bins/') if not os.path.exists(obj_dir): obj_dir = os.path.join(conf._build_dir, 'Build/') obj_dir = os.path.join(conf.workspace, obj_dir) cmd = [NINJA, '-v', '-k', '0', 'check-all'] if conf.assertions: cmd[-1] += ' --param use_gmalloc=1 ' '--param gmalloc_path=$(xcodebuild -find-library' ' libgmalloc.dylib)' run_cmd(obj_dir, cmd, env={'MALLOC_LOG_FILE': '/dev/null'}) def parse_settings_from_output(working_dir, cmd): old_dir = os.getcwd() try: os.chdir(working_dir) assignment_regex = re.compile(r"^\s+([^\s=]+)\s*=\s*(.+)$") settings = {} for line in subprocess.check_output(cmd).splitlines(True): match = assignment_regex.match(line) if match: settings[match.group(1)] = match.group(2) return settings finally: os.chdir(old_dir) def lldb_builder(): header("Clean LLDB build directory") if os.path.exists(conf.lldbbuilddir()): shutil.rmtree(conf.lldbbuilddir()) footer() build_configuration = "Release" xcodebuild_cmd = [ "xcodebuild", "-arch", "x86_64", 
"-configuration", build_configuration, "-scheme", "desktop", "-derivedDataPath", conf.lldbbuilddir() ] header("Build Xcode desktop scheme") run_cmd("lldb", xcodebuild_cmd) footer() header("Gather Xcode build settings") xcodebuild_cmd.append("-showBuildSettings") settings = parse_settings_from_output("lldb", xcodebuild_cmd) footer() build_dir = settings.get("BUILD_DIR", None) built_products_dir = settings.get("BUILT_PRODUCTS_DIR", None) if build_dir is None or built_products_dir is None: raise Exception("failed to retrieve build-related directories " "from Xcode") llvm_build_dir = settings.get("LLVM_BUILD_DIR", None) llvm_build_dir_arch = settings.get("LLVM_BUILD_DIR_ARCH", None) if llvm_build_dir is None or llvm_build_dir_arch is None: raise Exception("failed to retrieve LLVM build-related settings " "from Xcode") llvm_build_bin_dir = os.path.join(llvm_build_dir, llvm_build_dir_arch, "bin") built_clang_path = os.path.join(llvm_build_bin_dir, "clang") built_filecheck_path = os.path.join(llvm_build_bin_dir, "FileCheck") effective_clang = os.environ.get("LLDB_PYTHON_TESTSUITE_CC", built_clang_path) xcodebuild_cmd = [ "xcodebuild", "-arch", "x86_64", "-configuration", build_configuration, "-scheme", "lldb-gtest", "-derivedDataPath", conf.lldbbuilddir(), ] header("Build Xcode lldb-gtest scheme") run_cmd("lldb", xcodebuild_cmd) footer() for arch in conf.lldb_test_archs: results_file = os.path.join(build_dir, "test-results-{}.xml".format(arch)) python_testsuite_cmd = [ "/usr/bin/python", "test/dotest.py", "--executable", os.path.join(built_products_dir, "lldb"), "-C", effective_clang, "--arch", arch, "--results-formatter", "lldbsuite.test_event.formatter.xunit.XunitFormatter", "--results-file", results_file, "--rerun-all-issues", "--env", "TERM=vt100", "-O--xpass=ignore", "--dsymutil="+os.path.join(os.path.dirname(effective_clang), 'dsymutil'), "--filecheck="+built_filecheck_path ] header("Run LLDB Python-based test suite ({} targets)".format(arch)) print repr(python_testsuite_cmd) run_cmd_errors_okay("lldb", python_testsuite_cmd) footer() def lldb_cmake_builder(): test_dir = os.path.join(conf.workspace, 'test') log_dir = os.path.join(test_dir, 'logs') results_file = os.path.join(test_dir, 'results.xml') dest_dir = os.path.join(conf.workspace, 'results', 'lldb') run_ws(["mkdir", "-p", conf.lldbbuilddir()]) cmake_build_type = conf.cmake_build_type if conf.cmake_build_type else 'RelWithDebInfo' header("Configure") dotest_args=['--arch', 'x86_64', '--build-dir', conf.lldbbuilddir()+'/lldb-test-build.noindex', '-s='+log_dir, '-t', '--env', 'TERM=vt100'] dotest_args.extend(conf.dotest_flags) cmake_cmd = ["/usr/local/bin/cmake", '-G', 'Ninja', conf.srcdir(), '-DLLVM_ENABLE_ASSERTIONS:BOOL={}'.format( "TRUE" if conf.assertions else "FALSE"), '-DCMAKE_BUILD_TYPE='+cmake_build_type, '-DCMAKE_MAKE_PROGRAM=' + NINJA, '-DLLVM_VERSION_PATCH=99', '-DLLVM_ENABLE_MODULES=On', '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON', '-DCMAKE_INSTALL_PREFIX="%s"'%dest_dir, '-DLLDB_TEST_USER_ARGS='+';'.join(dotest_args), '-DLLVM_LIT_ARGS=--xunit-xml-output=%s -v'%results_file] cmake_cmd.extend(conf.cmake_flags) if conf.CC(): cmake_cmd.extend(['-DCMAKE_C_COMPILER=' + conf.CC(), '-DCMAKE_CXX_COMPILER=' + conf.CC() + "++"]) run_cmd(conf.lldbbuilddir(), cmake_cmd) footer() header("Build") run_cmd(conf.lldbbuilddir(), [NINJA, '-v']) footer() header("Run Tests") run_cmd(conf.lldbbuilddir(), [NINJA, '-v', 'check-debuginfo']) run_cmd(conf.lldbbuilddir(), ['/usr/bin/env', 'TERM=vt100', NINJA, '-v', 'check-lldb']) footer() def 
static_analyzer_benchmarks_builder(): header("Static Analyzer Benchmarks") benchmark_script = conf.workspace + "/utils-analyzer/SATestBuild.py" benchmarks_dir = conf.workspace + "/test-suite-ClangAnalyzer/" compiler_bin_dir = conf.workspace + "/host-compiler/bin/" scanbuild_bin_dir = conf.workspace + "/tools-scan-build/bin/" old_path = os.environ.get("PATH", "") env = dict(os.environ, PATH=compiler_bin_dir + os.pathsep + scanbuild_bin_dir + os.pathsep + old_path) benchmark_cmd = [benchmark_script, "--strictness", "0" ] run_cmd(benchmarks_dir, benchmark_cmd, env=env) footer() def check_repo_state(path): if os.environ.get('TESTING', False): return logging.info("Detecting repos in {}".format(path)) for r in ['llvm', 'clang', 'clang-tools-extra', 'debuginfo-tests', 'compiler-rt', 'libcxx', 'debuginfo-tests']: detected_path = derived_path('llvm', tree_path(tree='llvm', repo=r)) readme = os.path.join(path, detected_path, readme_name(repo=r)) if os.path.exists(readme): logging.info(" - {} found at {}".format(r, detected_path)) else: logging.info(" - {} not found".format(r)) def checkout_path(workspace, repo): return workspace + "/" + repo + ".src" def tree_path(tree, repo): if tree == "llvm": if repo == "llvm": return "" if repo == "clang": return "tools/clang" if repo == "clang-tools-extra": return "tools/clang/tools/extra" if repo == "debuginfo-tests": return "tools/clang/test/debuginfo-tests" if repo == "compiler-rt": return "projects/compiler-rt" if repo == "libcxx": return "projects/libcxx" if repo == "lldb": return "tools/lldb" elif tree == "lldb": if repo == "lldb": return "" if repo == "llvm": return "llvm" if repo == "clang": return "llvm/tools/clang" if repo == "compiler-rt": return "llvm/projects/compiler-rt" if repo == "libcxx": return "llvm/projects/libcxx" else: logging.error("Unknown tree '{}'".format(tree)) sys.exit(1) logging.error("Unknown repo '{}' in tree '{}".format(repo, tree)) sys.exit(1) def tree_srcdir(conf, tree): if tree == "llvm": return conf.srcdir() if tree == "lldb": return conf.lldbsrcdir() logging.error("Unknown tree '{}'".format(tree)) sys.exit(1) def derived_path(srcdir, tree_path): if tree_path: return srcdir + "/" + tree_path return srcdir
Apache License 2.0
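A few example calls illustrating the exclusion rule: identical paths are kept, an empty base path excludes everything, and anything nested under base_path is excluded. The repo paths used are illustrative only.

def should_exclude(base_path, repo_path):
    if base_path == repo_path:
        return False
    if not base_path:
        return True
    if repo_path.startswith(base_path + "/"):
        return True
    return False

print(should_exclude("llvm", "llvm"))              # False, the repo itself is kept
print(should_exclude("", "llvm/tools/clang"))      # True, empty base excludes everything
print(should_exclude("llvm", "llvm/tools/clang"))  # True, nested under the base path
print(should_exclude("llvm", "clang"))             # False, unrelated path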
lbryio/torba
torba/server/text.py
groups_lines
python
def groups_lines(data):
    fmt = ('{:<6} {:>9} {:>9} {:>6} {:>6} {:>8}'
           '{:>7} {:>9} {:>7} {:>9}')
    yield fmt.format('ID', 'Sessions', 'Bwidth KB', 'Reqs', 'Txs', 'Subs',
                     'Recv', 'Recv KB', 'Sent', 'Sent KB')
    for (id_, session_count, bandwidth, reqs, txs_sent, subs,
         recv_count, recv_size, send_count, send_size) in data:
        yield fmt.format(id_,
                         '{:,d}'.format(session_count),
                         '{:,d}'.format(bandwidth // 1024),
                         '{:,d}'.format(reqs),
                         '{:,d}'.format(txs_sent),
                         '{:,d}'.format(subs),
                         '{:,d}'.format(recv_count),
                         '{:,d}'.format(recv_size // 1024),
                         '{:,d}'.format(send_count),
                         '{:,d}'.format(send_size // 1024))
A generator returning lines for a list of groups. data is the return value of rpc_groups().
https://github.com/lbryio/torba/blob/190304344c0ff68f8a24cf50272307a11bf7f62b/torba/server/text.py#L28-L48
import time from torba.server import util def sessions_lines(data): fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} {:>5} ' '{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}') yield fmt.format('ID', 'Flags', 'Client', 'Proto', 'Reqs', 'Txs', 'Subs', 'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer') for (id_, flags, peer, client, proto, reqs, txs_sent, subs, recv_count, recv_size, send_count, send_size, time) in data: yield fmt.format(id_, flags, client, proto, '{:,d}'.format(reqs), '{:,d}'.format(txs_sent), '{:,d}'.format(subs), '{:,d}'.format(recv_count), '{:,d}'.format(recv_size // 1024), '{:,d}'.format(send_count), '{:,d}'.format(send_size // 1024), util.formatted_time(time, sep=''), peer)
MIT License
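A hypothetical stats row fed through groups_lines(), assuming torba is installed so the function can be imported from torba.server.text; the field order follows the tuple unpacking in the function, with sizes given in bytes.

from torba.server.text import groups_lines

# One group: (id, sessions, bandwidth, reqs, txs, subs,
#             recv_count, recv_size, send_count, send_size) -- sizes in bytes.
sample = [(1, 12, 4 * 1024 * 1024, 250, 3, 40, 500, 2 * 1024 * 1024, 480, 1024 * 1024)]
for line in groups_lines(sample):
    print(line)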
kuri65536/python-for-android
python-modules/twisted/twisted/spread/pb.py
Broker.localObjectForID
python
def localObjectForID(self, luid):
    lob = self.localObjects.get(luid)
    if lob is None:
        return
    return lob.object
Get a local object for a locally unique ID. @return: An object previously stored with L{registerReference} or C{None} if there is no object which corresponds to the given identifier.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/spread/pb.py#L634-L645
import random import new import types from zope.interface import implements, Interface from twisted.python import log, failure, reflect from twisted.python.versions import Version from twisted.python.deprecate import deprecated from twisted.python.hashlib import md5 from twisted.internet import defer, protocol from twisted.cred.portal import Portal from twisted.cred.credentials import IAnonymous, ICredentials from twisted.cred.credentials import IUsernameHashedPassword, Anonymous from twisted.persisted import styles from twisted.python.components import registerAdapter from twisted.spread.interfaces import IJellyable, IUnjellyable from twisted.spread.jelly import jelly, unjelly, globalSecurity from twisted.spread import banana from twisted.spread.flavors import Serializable from twisted.spread.flavors import Referenceable, NoSuchMethod from twisted.spread.flavors import Root, IPBRoot from twisted.spread.flavors import ViewPoint from twisted.spread.flavors import Viewable from twisted.spread.flavors import Copyable from twisted.spread.flavors import Jellyable from twisted.spread.flavors import Cacheable from twisted.spread.flavors import RemoteCopy from twisted.spread.flavors import RemoteCache from twisted.spread.flavors import RemoteCacheObserver from twisted.spread.flavors import copyTags from twisted.spread.flavors import setUnjellyableForClass from twisted.spread.flavors import setUnjellyableFactoryForClass from twisted.spread.flavors import setUnjellyableForClassTree from twisted.spread.flavors import setCopierForClass from twisted.spread.flavors import setFactoryForClass from twisted.spread.flavors import setCopierForClassTree MAX_BROKER_REFS = 1024 portno = 8787 class ProtocolError(Exception): class DeadReferenceError(ProtocolError): class Error(Exception): class RemoteMethod: def __init__(self, obj, name): self.obj = obj self.name = name def __cmp__(self, other): return cmp((self.obj, self.name), other) def __hash__(self): return hash((self.obj, self.name)) def __call__(self, *args, **kw): return self.obj.broker._sendMessage('',self.obj.perspective, self.obj.luid, self.name, args, kw) def noOperation(*args, **kw): noOperation = deprecated(Version("twisted", 8, 2, 0))(noOperation) class PBConnectionLost(Exception): pass def printTraceback(tb): log.msg('Perspective Broker Traceback:' ) log.msg(tb) printTraceback = deprecated(Version("twisted", 8, 2, 0))(printTraceback) class IPerspective(Interface): def perspectiveMessageReceived(broker, message, args, kwargs): class Avatar: implements(IPerspective) def perspectiveMessageReceived(self, broker, message, args, kw): args = broker.unserialize(args, self) kw = broker.unserialize(kw, self) method = getattr(self, "perspective_%s" % message) try: state = method(*args, **kw) except TypeError: log.msg("%s didn't accept %s and %s" % (method, args, kw)) raise return broker.serialize(state, self, method, args, kw) class AsReferenceable(Referenceable): def __init__(self, object, messageType="remote"): self.remoteMessageReceived = getattr( object, messageType + "MessageReceived") class RemoteReference(Serializable, styles.Ephemeral): implements(IUnjellyable) def __init__(self, perspective, broker, luid, doRefCount): self.luid = luid self.broker = broker self.doRefCount = doRefCount self.perspective = perspective self.disconnectCallbacks = [] def notifyOnDisconnect(self, callback): assert callable(callback) self.disconnectCallbacks.append(callback) if len(self.disconnectCallbacks) == 1: self.broker.notifyOnDisconnect(self._disconnected) def 
dontNotifyOnDisconnect(self, callback): self.disconnectCallbacks.remove(callback) if not self.disconnectCallbacks: self.broker.dontNotifyOnDisconnect(self._disconnected) def _disconnected(self): for callback in self.disconnectCallbacks: callback(self) self.disconnectCallbacks = None def jellyFor(self, jellier): if jellier.invoker: assert self.broker == jellier.invoker, "Can't send references to brokers other than their own." return "local", self.luid else: return "unpersistable", "References cannot be serialized" def unjellyFor(self, unjellier, unjellyList): self.__init__(unjellier.invoker.unserializingPerspective, unjellier.invoker, unjellyList[1], 1) return self def callRemote(self, _name, *args, **kw): return self.broker._sendMessage('',self.perspective, self.luid, _name, args, kw) def remoteMethod(self, key): return RemoteMethod(self, key) def __cmp__(self,other): if isinstance(other, RemoteReference): if other.broker == self.broker: return cmp(self.luid, other.luid) return cmp(self.broker, other) def __hash__(self): return self.luid def __del__(self): if self.doRefCount: self.broker.sendDecRef(self.luid) setUnjellyableForClass("remote", RemoteReference) class Local: def __init__(self, object, perspective=None): self.object = object self.perspective = perspective self.refcount = 1 def __repr__(self): return "<pb.Local %r ref:%s>" % (self.object, self.refcount) def incref(self): self.refcount = self.refcount + 1 return self.refcount def decref(self): self.refcount = self.refcount - 1 return self.refcount class CopyableFailure(failure.Failure, Copyable): unsafeTracebacks = 0 def getStateToCopy(self): state = self.__dict__.copy() state['tb'] = None state['frames'] = [] state['stack'] = [] if isinstance(self.value, failure.Failure): state['value'] = failure2Copyable(self.value, self.unsafeTracebacks) else: state['value'] = str(self.value) if isinstance(self.type, str): state['type'] = self.type else: state['type'] = reflect.qual(self.type) if self.unsafeTracebacks: state['traceback'] = self.getTraceback() else: state['traceback'] = 'Traceback unavailable\n' return state class CopiedFailure(RemoteCopy, failure.Failure): def printTraceback(self, file=None, elideFrameworkCode=0, detail='default'): if file is None: file = log.logfile file.write("Traceback from remote host -- ") file.write(self.traceback) printBriefTraceback = printTraceback printDetailedTraceback = printTraceback setUnjellyableForClass(CopyableFailure, CopiedFailure) def failure2Copyable(fail, unsafeTracebacks=0): f = new.instance(CopyableFailure, fail.__dict__) f.unsafeTracebacks = unsafeTracebacks return f class Broker(banana.Banana): version = 6 username = None factory = None def __init__(self, isClient=1, security=globalSecurity): banana.Banana.__init__(self, isClient) self.disconnected = 0 self.disconnects = [] self.failures = [] self.connects = [] self.localObjects = {} self.security = security self.pageProducers = [] self.currentRequestID = 0 self.currentLocalID = 0 self.luids = {} self.remotelyCachedObjects = {} self.remotelyCachedLUIDs = {} self.locallyCachedObjects = {} self.waitingForAnswers = {} self._localCleanup = {} def resumeProducing(self): for pageridx in xrange(len(self.pageProducers)-1, -1, -1): pager = self.pageProducers[pageridx] pager.sendNextPage() if not pager.stillPaging(): del self.pageProducers[pageridx] if not self.pageProducers: self.transport.unregisterProducer() def pauseProducing(self): pass def stopProducing(self): pass def registerPageProducer(self, pager): self.pageProducers.append(pager) if 
len(self.pageProducers) == 1: self.transport.registerProducer(self, 0) def expressionReceived(self, sexp): if isinstance(sexp, types.ListType): command = sexp[0] methodName = "proto_%s" % command method = getattr(self, methodName, None) if method: method(*sexp[1:]) else: self.sendCall("didNotUnderstand", command) else: raise ProtocolError("Non-list expression received.") def proto_version(self, vnum): if vnum != self.version: raise ProtocolError("Version Incompatibility: %s %s" % (self.version, vnum)) def sendCall(self, *exp): self.sendEncoded(exp) def proto_didNotUnderstand(self, command): log.msg("Didn't understand command: %r" % command) def connectionReady(self): self.sendCall("version", self.version) for notifier in self.connects: try: notifier() except: log.deferr() self.connects = None if self.factory: self.factory.clientConnectionMade(self) def connectionFailed(self): for notifier in self.failures: try: notifier() except: log.deferr() self.failures = None waitingForAnswers = None def connectionLost(self, reason): self.disconnected = 1 self.luids = None if self.waitingForAnswers: for d in self.waitingForAnswers.values(): try: d.errback(failure.Failure(PBConnectionLost(reason))) except: log.deferr() for lobj in self.remotelyCachedObjects.values(): cacheable = lobj.object perspective = lobj.perspective try: cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective)) except: log.deferr() for notifier in self.disconnects[:]: try: notifier() except: log.deferr() self.disconnects = None self.waitingForAnswers = None self.localSecurity = None self.remoteSecurity = None self.remotelyCachedObjects = None self.remotelyCachedLUIDs = None self.locallyCachedObjects = None self.localObjects = None def notifyOnDisconnect(self, notifier): assert callable(notifier) self.disconnects.append(notifier) def notifyOnFail(self, notifier): assert callable(notifier) self.failures.append(notifier) def notifyOnConnect(self, notifier): assert callable(notifier) if self.connects is None: try: notifier() except: log.err() else: self.connects.append(notifier) def dontNotifyOnDisconnect(self, notifier): try: self.disconnects.remove(notifier) except ValueError: pass
Apache License 2.0
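A stand-alone sketch of the lookup pattern: the broker keeps a dict of Local wrappers keyed by locally unique ID and unwraps .object on retrieval. The Local class here is a simplified stand-in for twisted.spread.pb.Local, and the luid values are made up.

class Local:
    def __init__(self, obj):
        self.object = obj
        self.refcount = 1

local_objects = {}                        # plays the role of broker.localObjects
local_objects[7] = Local("my referenceable")

def local_object_for_id(luid):
    # Mirror of localObjectForID: return the wrapped object or None.
    lob = local_objects.get(luid)
    if lob is None:
        return None
    return lob.object

print(local_object_for_id(7))    # 'my referenceable'
print(local_object_for_id(99))   # None, unknown luid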
arun1729/cog
cog/database.py
Cog.init_instance
python
def init_instance(self, namespace):
    instance_id = str(uuid.uuid4())
    if not os.path.exists(self.config.cog_instance_sys_dir()):
        os.makedirs(self.config.cog_instance_sys_dir())
    m_file = dict()
    m_file["m_instance_id"] = instance_id
    m_file["host_name"] = socket.gethostname()
    m_file["host_ip"] = socket.gethostname()
    f = open(self.config.cog_instance_sys_file(), 'wb')
    pickle.dump(m_file, f)
    f.close()
    self.logger.info("Cog sys file created.")
    os.mkdir(self.config.cog_data_dir(namespace))
    self.logger.info("Database created: " + namespace)
    self.logger.info("done.")
    return instance_id
Initiates cog instance - called the 'c instance' for the first time :param namespace: :return:
https://github.com/arun1729/cog/blob/242dbc9bb188263158223e79bc9da339e03da111/cog/database.py#L65-L87
from cog.core import Record import logging import os import os.path from os import listdir from os.path import isfile from os.path import join import pickle import socket import uuid from .core import Table from . import config import xxhash import csv def out_nodes(v): return (v + "__:out:__") def in_nodes(v): return (v + "__:in:__") def hash_predicate(predicate): return str(xxhash.xxh32(predicate,seed=2).intdigest()) class Cog: def __init__(self): self.logger = logging.getLogger('database') self.config = config self.logger.info("Cog init.") self.namespaces = {} self.current_table = None if os.path.exists(self.config.cog_instance_sys_file()): f=open(self.config.cog_instance_sys_file(),"rb") self.m_info=pickle.load(f) self.instance_id=self.m_info["m_instance_id"] f.close() else: self.instance_id = self.init_instance(config.COG_DEFAULT_NAMESPACE) '''Create default namespace and table.''' self.create_namespace(self.config.COG_DEFAULT_NAMESPACE) '''Load all table names but lazy load actual tables on request.''' for name in self.list_tables(): if name not in self.namespaces: self.namespaces[name] = None
MIT License
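A sketch of the bootstrap pattern init_instance() follows, generating an id, pickling a small metadata dict to a sys file, and creating the namespace directory; the temporary paths below are stand-ins for the cog config values, not the real configuration.

import os, pickle, socket, tempfile, uuid

root = tempfile.mkdtemp()                        # stand-in for cog_instance_sys_dir()
sys_file = os.path.join(root, "cog.sys")         # stand-in for cog_instance_sys_file()
namespace_dir = os.path.join(root, "default")    # stand-in for cog_data_dir(namespace)

instance_id = str(uuid.uuid4())
meta = {"m_instance_id": instance_id, "host_name": socket.gethostname()}
with open(sys_file, "wb") as f:                  # the original uses explicit open()/close()
    pickle.dump(meta, f)
os.mkdir(namespace_dir)

with open(sys_file, "rb") as f:
    print(pickle.load(f)["m_instance_id"] == instance_id)   # True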
hunch/hunch-gift-app
django/db/models/sql/query.py
Query.get_initial_alias
python
def get_initial_alias(self):
    if self.tables:
        alias = self.tables[0]
        self.ref_alias(alias)
    else:
        alias = self.join((None, self.model._meta.db_table, None, None))
    return alias
Returns the first alias for this query, after increasing its reference count.
https://github.com/hunch/hunch-gift-app/blob/8c7cad24cc0d9900deb4175e6b768c64a3d7adcf/django/db/models/sql/query.py#L767-L777
from django.utils.copycompat import deepcopy from django.utils.tree import Node from django.utils.datastructures import SortedDict from django.utils.encoding import force_unicode from django.db import connections, DEFAULT_DB_ALIAS from django.db.models import signals from django.db.models.fields import FieldDoesNotExist from django.db.models.query_utils import select_related_descend, InvalidQuery from django.db.models.sql import aggregates as base_aggregates_module from django.db.models.sql.constants import * from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin from django.db.models.sql.expressions import SQLEvaluator from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode, ExtraWhere, AND, OR) from django.core.exceptions import FieldError __all__ = ['Query', 'RawQuery'] class RawQuery(object): def __init__(self, sql, using, params=None): self.validate_sql(sql) self.params = params or () self.sql = sql self.using = using self.cursor = None self.low_mark, self.high_mark = 0, None self.extra_select = {} self.aggregate_select = {} def clone(self, using): return RawQuery(self.sql, using, params=self.params) def convert_values(self, value, field, connection): return connection.ops.convert_values(value, field) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.table_name_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def validate_sql(self, sql): if not sql.lower().strip().startswith('select'): raise InvalidQuery('Raw queries are limited to SELECT queries. Use ' 'connection.cursor directly for other types of queries.') def __iter__(self): self._execute_query() if not connections[self.using].features.can_use_chunked_reads: result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<RawQuery: %r>" % (self.sql % self.params) def _execute_query(self): self.cursor = connections[self.using].cursor() self.cursor.execute(self.sql, self.params) class Query(object): INNER = 'INNER JOIN' LOUTER = 'LEFT OUTER JOIN' alias_prefix = 'T' query_terms = QUERY_TERMS aggregates_module = base_aggregates_module compiler = 'SQLCompiler' def __init__(self, model, where=WhereNode): self.model = model self.alias_refcount = {} self.alias_map = {} self.table_map = {} self.join_map = {} self.rev_join_map = {} self.quote_cache = {} self.default_cols = True self.default_ordering = True self.standard_ordering = True self.ordering_aliases = [] self.select_fields = [] self.related_select_fields = [] self.dupe_avoidance = {} self.used_aliases = set() self.filter_is_sticky = False self.included_inherited_models = {} self.select = [] self.tables = [] self.where = where() self.where_class = where self.group_by = None self.having = where() self.order_by = [] self.low_mark, self.high_mark = 0, None self.distinct = False self.select_related = False self.related_select_cols = [] self.aggregates = SortedDict() self.aggregate_select_mask = None self._aggregate_select_cache = None self.max_depth = 5 self.extra = SortedDict() self.extra_select_mask = None self._extra_select_cache = None self.extra_tables = () self.extra_order_by = () self.deferred_loading = (set(), True) def __str__(self): sql, params = self.get_compiler(DEFAULT_DB_ALIAS).as_sql() return sql % params def __deepcopy__(self, memo): result = self.clone(memo=memo) memo[id(self)] = result return result def __getstate__(self): obj_dict = self.__dict__.copy() 
obj_dict['related_select_fields'] = [] obj_dict['related_select_cols'] = [] obj_dict['select_fields'] = [ f is not None and f.name or None for f in obj_dict['select_fields'] ] return obj_dict def __setstate__(self, obj_dict): obj_dict['select_fields'] = [ name is not None and obj_dict['model']._meta.get_field(name) or None for name in obj_dict['select_fields'] ] self.__dict__.update(obj_dict) def prepare(self): return self def get_compiler(self, using=None, connection=None): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] for alias, aggregate in self.aggregate_select.items(): connection.ops.check_aggregate_support(aggregate) return connection.ops.compiler(self.compiler)(self, connection, using) def get_meta(self): return self.model._meta def clone(self, klass=None, memo=None, **kwargs): obj = Empty() obj.__class__ = klass or self.__class__ obj.model = self.model obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.table_map = self.table_map.copy() obj.join_map = self.join_map.copy() obj.rev_join_map = self.rev_join_map.copy() obj.quote_cache = {} obj.default_cols = self.default_cols obj.default_ordering = self.default_ordering obj.standard_ordering = self.standard_ordering obj.included_inherited_models = self.included_inherited_models.copy() obj.ordering_aliases = [] obj.select_fields = self.select_fields[:] obj.related_select_fields = self.related_select_fields[:] obj.dupe_avoidance = self.dupe_avoidance.copy() obj.select = self.select[:] obj.tables = self.tables[:] obj.where = deepcopy(self.where, memo=memo) obj.where_class = self.where_class if self.group_by is None: obj.group_by = None else: obj.group_by = self.group_by[:] obj.having = deepcopy(self.having, memo=memo) obj.order_by = self.order_by[:] obj.low_mark, obj.high_mark = self.low_mark, self.high_mark obj.distinct = self.distinct obj.select_related = self.select_related obj.related_select_cols = [] obj.aggregates = deepcopy(self.aggregates, memo=memo) if self.aggregate_select_mask is None: obj.aggregate_select_mask = None else: obj.aggregate_select_mask = self.aggregate_select_mask.copy() obj._aggregate_select_cache = None obj.max_depth = self.max_depth obj.extra = self.extra.copy() if self.extra_select_mask is None: obj.extra_select_mask = None else: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is None: obj._extra_select_cache = None else: obj._extra_select_cache = self._extra_select_cache.copy() obj.extra_tables = self.extra_tables obj.extra_order_by = self.extra_order_by obj.deferred_loading = deepcopy(self.deferred_loading, memo=memo) if self.filter_is_sticky and self.used_aliases: obj.used_aliases = self.used_aliases.copy() else: obj.used_aliases = set() obj.filter_is_sticky = False obj.__dict__.update(kwargs) if hasattr(obj, '_setup_query'): obj._setup_query() return obj def convert_values(self, value, field, connection): return connection.ops.convert_values(value, field) def resolve_aggregate(self, value, aggregate, connection): if value is None: if aggregate.is_ordinal: return 0 return value elif aggregate.is_ordinal: return int(value) elif aggregate.is_computed: return float(value) else: return self.convert_values(value, aggregate.field, connection) def get_aggregation(self, using): if not self.aggregate_select: return {} if self.group_by is not None: from subqueries import AggregateQuery query = AggregateQuery(self.model) obj = self.clone() for alias, aggregate 
in self.aggregate_select.items(): if aggregate.is_summary: query.aggregate_select[alias] = aggregate del obj.aggregate_select[alias] query.add_subquery(obj, using) else: query = self self.select = [] self.default_cols = False self.extra = {} self.remove_inherited_models() query.clear_ordering(True) query.clear_limits() query.select_related = False query.related_select_cols = [] query.related_select_fields = [] result = query.get_compiler(using).execute_sql(SINGLE) if result is None: result = [None for q in query.aggregate_select.items()] return dict([ (alias, self.resolve_aggregate(val, aggregate, connection=connections[using])) for (alias, aggregate), val in zip(query.aggregate_select.items(), result) ]) def get_count(self, using): obj = self.clone() if len(self.select) > 1 or self.aggregate_select: from subqueries import AggregateQuery subquery = obj subquery.clear_ordering(True) subquery.clear_limits() obj = AggregateQuery(obj.model) obj.add_subquery(subquery, using=using) obj.add_count_column() number = obj.get_aggregation(using=using)[None] number = max(0, number - self.low_mark) if self.high_mark is not None: number = min(number, self.high_mark - self.low_mark) return number def has_results(self, using): q = self.clone() q.select = [] q.select_fields = [] q.default_cols = False q.select_related = False q.set_aggregate_mask(()) q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def combine(self, rhs, connector): assert self.model == rhs.model, "Cannot combine queries on two different base models." assert self.can_filter(), "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, "Cannot combine a unique query with a non-unique query." self.remove_inherited_models() change_map = {} used = set() conjunction = (connector == AND) first = True for alias in rhs.tables: if not rhs.alias_refcount[alias]: continue promote = (rhs.alias_map[alias][JOIN_TYPE] == self.LOUTER) new_alias = self.join(rhs.rev_join_map[alias], (conjunction and not first), used, promote, not conjunction) used.add(new_alias) change_map[alias] = new_alias first = False if not conjunction: for alias in self.tables[1:]: if self.alias_refcount[alias] == 1: self.promote_alias(alias, True) break if rhs.where: w = deepcopy(rhs.where) w.relabel_aliases(change_map) if not self.where: self.where.add(EverythingNode(), AND) elif self.where: w = self.where_class() w.add(EverythingNode(), AND) else: w = self.where_class() self.where.add(w, connector) self.select = [] for col in rhs.select: if isinstance(col, (list, tuple)): self.select.append((change_map.get(col[0], col[0]), col[1])) else: item = deepcopy(col) item.relabel_aliases(change_map) self.select.append(item) self.select_fields = rhs.select_fields[:] if connector == OR: if self.extra and rhs.extra: raise ValueError("When merging querysets using 'or', you " "cannot have extra(select=...) 
on both sides.") self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def deferred_to_data(self, target, callback): field_names, defer = self.deferred_loading if not field_names: return columns = set() orig_opts = self.model._meta seen = {} must_include = {self.model: set([orig_opts.pk])} for field_name in field_names: parts = field_name.split(LOOKUP_SEP) cur_model = self.model opts = orig_opts for name in parts[:-1]: old_model = cur_model source = opts.get_field_by_name(name)[0] cur_model = opts.get_field_by_name(name)[0].rel.to opts = cur_model._meta must_include[old_model].add(source) add_to_dict(must_include, cur_model, opts.pk) field, model, _, _ = opts.get_field_by_name(parts[-1]) if model is None: model = cur_model add_to_dict(seen, model, field) if defer: workset = {} for model, values in seen.iteritems(): for field, m in model._meta.get_fields_with_model(): if field in values: continue add_to_dict(workset, m or model, field) for model, values in must_include.iteritems(): if model in workset: workset[model].update(values) for model, values in workset.iteritems(): callback(target, model, values) else: for model, values in must_include.iteritems(): if model in seen: seen[model].update(values) else: seen[model] = values for model in orig_opts.get_parent_list(): if model not in seen: seen[model] = set() for model, values in seen.iteritems(): callback(target, model, values) def deferred_to_columns_cb(self, target, model, fields): table = model._meta.db_table if table not in target: target[table] = set() for field in fields: target[table].add(field.column) def table_alias(self, table_name, create=False): current = self.table_map.get(table_name) if not create and current: alias = current[0] self.alias_refcount[alias] += 1 return alias, False if current: alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1) current.append(alias) else: alias = table_name self.table_map[alias] = [alias] self.alias_refcount[alias] = 1 self.tables.append(alias) return alias, True def ref_alias(self, alias): self.alias_refcount[alias] += 1 def unref_alias(self, alias): self.alias_refcount[alias] -= 1 def promote_alias(self, alias, unconditional=False): if ((unconditional or self.alias_map[alias][NULLABLE]) and self.alias_map[alias][JOIN_TYPE] != self.LOUTER): data = list(self.alias_map[alias]) data[JOIN_TYPE] = self.LOUTER self.alias_map[alias] = tuple(data) return True return False def promote_alias_chain(self, chain, must_promote=False): for alias in chain: if self.promote_alias(alias, must_promote): must_promote = True def promote_unused_aliases(self, initial_refcounts, used_aliases): considered = {} for alias in self.tables: if alias not in used_aliases: continue if (alias not in initial_refcounts or self.alias_refcount[alias] == initial_refcounts[alias]): parent = self.alias_map[alias][LHS_ALIAS] must_promote = considered.get(parent, False) promoted = self.promote_alias(alias, must_promote) considered[alias] = must_promote or promoted def change_aliases(self, change_map): assert set(change_map.keys()).intersection(set(change_map.values())) == set() self.where.relabel_aliases(change_map) 
self.having.relabel_aliases(change_map) for columns in (self.select, self.aggregates.values(), self.group_by or []): for pos, col in enumerate(columns): if isinstance(col, (list, tuple)): old_alias = col[0] columns[pos] = (change_map.get(old_alias, old_alias), col[1]) else: col.relabel_aliases(change_map) for old_alias, new_alias in change_map.iteritems(): alias_data = list(self.alias_map[old_alias]) alias_data[RHS_ALIAS] = new_alias t = self.rev_join_map[old_alias] data = list(self.join_map[t]) data[data.index(old_alias)] = new_alias self.join_map[t] = tuple(data) self.rev_join_map[new_alias] = t del self.rev_join_map[old_alias] self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] self.alias_map[new_alias] = tuple(alias_data) del self.alias_map[old_alias] table_aliases = self.table_map[alias_data[TABLE_NAME]] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break for pos, alias in enumerate(self.tables): if alias == old_alias: self.tables[pos] = new_alias break for key, alias in self.included_inherited_models.items(): if alias in change_map: self.included_inherited_models[key] = change_map[alias] for alias, data in self.alias_map.iteritems(): lhs = data[LHS_ALIAS] if lhs in change_map: data = list(data) data[LHS_ALIAS] = change_map[lhs] self.alias_map[alias] = tuple(data) def bump_prefix(self, exceptions=()): current = ord(self.alias_prefix) assert current < ord('Z') prefix = chr(current + 1) self.alias_prefix = prefix change_map = {} for pos, alias in enumerate(self.tables): if alias in exceptions: continue new_alias = '%s%d' % (prefix, pos) change_map[alias] = new_alias self.tables[pos] = new_alias self.change_aliases(change_map)
MIT License
dogoncouch/logdissect
logdissect/parsers/linejson.py
ParseModule.parse_line
python
def parse_line(self, line): return json.loads(line)
Convert a line of json into a Python object
https://github.com/dogoncouch/logdissect/blob/4fbb96a1717d277bbfdabff37ffecfe5f9c02464/logdissect/parsers/linejson.py#L61-L63
import json from logdissect.parsers.type import ParseModule as OurModule class ParseModule(OurModule): def __init__(self): self.name = 'linejson' self.desc = 'logdissect object-per-line JSON parsing module' self.data_format = '' self.fields = [] self.backup_format_regex = None self.backup_fields = [] self.tzone = None self.datestamp_type = None def parse_file(self, sourcepath): with open(sourcepath, 'r') as logfile: jsonlist = logfile.readlines() data = {} data['entries'] = [] for line in jsonlist: entry = self.parse_line(line) data['entries'].append(entry) if self.tzone: for e in data['entries']: e['tzone'] = self.tzone return data
MIT License
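A minimal usage sketch of the parser above (the import path is inferred from the function_path field of this record; it assumes logdissect is installed):

from logdissect.parsers.linejson import ParseModule

parser = ParseModule()
# Each input line is expected to be a standalone JSON object.
entry = parser.parse_line('{"message": "login failed", "severity": "warn"}')
print(entry["message"])  # -> login failed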
karimbahgat/pycrs
pycrs/elements/parameters.py
DatumShift.__init__
python
def __init__(self, value): self.value = value
The WGS84 Datum shift parameter. Args: - **value**: A list of 3 or 7 term datum transform parameters.
https://github.com/karimbahgat/pycrs/blob/c5180a2d81525a299e4139cf7cd5608afcab778d/pycrs/elements/parameters.py#L367-L375
from . import directions def find(paramname, crstype, strict=False): if not strict: paramname = paramname.lower() for itemname,item in globals().items(): if itemname.startswith("_"): continue try: if hasattr(item, crstype): itemname = getattr(item, crstype) if not strict: itemname = itemname.lower() if paramname == itemname: return item except: pass else: return None class SemiMajorRadius: proj4 = "+a" def __init__(self, value): self.value = value def to_proj4(self): return "%s=%s" % (self.proj4, self.value) def to_esri_wkt(self): return str(self.value) def to_ogc_wkt(self): return str(self.value) class SemiMinorRadius: proj4 = "+b" def __init__(self, value): self.value = value def to_proj4(self): return "%s=%s" % (self.proj4, self.value) def to_esri_wkt(self): return str(self.value) def to_ogc_wkt(self): return str(self.value) class Flattening: proj4 = "+f" def __init__(self, value): self.value = value def to_proj4(self): return "%s=%s" % (self.proj4, self.value) def to_esri_wkt(self): return str(self.value) def to_ogc_wkt(self): return str(self.value) class InverseFlattening: proj4 = "+rf" def __init__(self, value): self.value = value def to_proj4(self): return "%s=%s" % (self.proj4, self.value) def to_esri_wkt(self): return str(self.value) def to_ogc_wkt(self): return str(self.value) class Azimuth: proj4 = "+alpha" esri_wkt = "azimuth" ogc_wkt = "azimuth" geotiff = "AzimuthAngle" def __init__(self, value): self.value = value def to_proj4(self): return "+alpha=%s" % self.value def to_ogc_wkt(self): return 'PARAMETER["Azimuth",%s]' % self.value def to_esri_wkt(self): return 'PARAMETER["Azimuth",%s]' % self.value class ScalingFactor: proj4 = "+k" esri_wkt = "Scale_Factor" ogc_wkt = "scale_factor" def __init__(self, value): self.value = value def to_proj4(self): return "+k_0=%s" %self.value def to_ogc_wkt(self): return 'PARAMETER["scale_factor", %s]' %self.value def to_esri_wkt(self): return 'PARAMETER["Scale_Factor", %s]' %self.value def to_geotiff(self): pass class LatitudeOrigin: proj4 = "+lat_0" ogc_wkt = "latitude_of_origin" esri_wkt = "Latitude_Of_Origin" def __init__(self, value): self.value = value def to_proj4(self): return "+lat_0=%s" %self.value def to_ogc_wkt(self): return 'PARAMETER["latitude_of_origin", %s]' %self.value def to_esri_wkt(self): return 'PARAMETER["Latitude_Of_Origin", %s]' %self.value def to_geotiff(self): pass class LatitudeFirstStndParallel: proj4 = "+lat_1" ogc_wkt = "standard_parallel_1" esri_wkt = "Standard_Parallel_1" def __init__(self, value): self.value = value def to_proj4(self): return "+lat_1=%s" %self.value def to_ogc_wkt(self): return 'PARAMETER["standard_parallel_1", %s]' %self.value def to_esri_wkt(self): return 'PARAMETER["Standard_Parallel_1", %s]' %self.value def to_geotiff(self): pass class LatitudeSecondStndParallel: proj4 = "+lat_2" ogc_wkt = "standard_parallel_2" esri_wkt = "Standard_Parallel_2" def __init__(self, value): self.value = value def to_proj4(self): return "+lat_2=%s" %self.value def to_ogc_wkt(self): return 'PARAMETER["standard_parallel_2", %s]' %self.value def to_esri_wkt(self): return 'PARAMETER["Standard_Parallel_2", %s]' %self.value def to_geotiff(self): pass class LatitudeTrueScale: proj4 = "lat_ts" ogc_wkt = "Standard_Parallel_1" esri_wkt = "Standard_Parallel_1" def __init__(self, value): self.value = value def to_proj4(self): return "+lat_ts=%s" %self.value def to_ogc_wkt(self): return 'PARAMETER["Standard_Parallel_1", %s]' %self.value def to_esri_wkt(self): return 'PARAMETER["Standard_Parallel_1", %s]' %self.value def 
to_geotiff(self): pass class CentralMeridian: proj4 = "+lon_0" ogc_wkt = "Central_Meridian" esri_wkt = "Central_Meridian" def __init__(self, value): self.value = value def to_proj4(self): return "+lon_0=%s" %self.value def to_ogc_wkt(self): return 'PARAMETER["Central_Meridian", %s]' %self.value def to_esri_wkt(self): return 'PARAMETER["Central_Meridian", %s]' %self.value def to_geotiff(self): pass class LongitudeCenter: proj4 = "+lonc" ogc_wkt = "Longitude_Of_Center" esri_wkt = "Longitude_Of_Center" def __init__(self, value): self.value = value def to_proj4(self): return "+lonc=%s" %self.value def to_ogc_wkt(self): return 'PARAMETER["Longitude_Of_Center", %s]' %self.value def to_esri_wkt(self): return 'PARAMETER["Longitude_Of_Center", %s]' %self.value class PrimeMeridian: proj4 = "+pm" ogc_wkt = "PRIMEM" esri_wkt = "PRIMEM" cities = { 'greenwich': 0, 'lisbon': -9.131906111111112, 'paris': 2.337229166666667, 'bogota': -74.08091666666667, 'madrid': -3.687911111111111, 'rome': 12.452333333333332, 'bern': 7.439583333333333, 'jakarta': 106.80771944444444, 'ferro': -17.666666666666668, 'brussels': 4.3679749999999995, 'stockholm': 18.05827777777778, 'athens': 23.716337499999998, 'oslo': 10.722916666666666, } def __init__(self, value): .format(', '.join(self.cities.keys())) self.value = value def get_value(self): value = self.value try: value = float(value) except: value = value.lower() if value not in self.cities: raise Exception("Prime meridian value {} must be a number or the name of one of these cities: {}".format(value, ', '.join(self.cities.keys()))) value = self.cities[value] value = int(value) if value.is_integer() else value return value def to_proj4(self): return "+pm=%s" %self.get_value() def to_ogc_wkt(self): return 'PRIMEM["Greenwich", %s]' %self.get_value() def to_esri_wkt(self): return 'PRIMEM["Greenwich", %s]' %self.get_value() class DatumShift: proj4 = "+towgs84" ogc_wkt = "TOWGS84"
MIT License
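A minimal construction sketch for the class above (the import path is inferred from the record's file path; the seven-term ordering in the comment is the common +towgs84 convention and is an assumption, not taken from this file):

from pycrs.elements.parameters import DatumShift

# Commonly dx, dy, dz, rx, ry, rz, scale; the values here are purely illustrative.
shift = DatumShift([-87, -98, -121, 0, 0, 0, 0])
print(shift.value)       # the stored parameter list
print(DatumShift.proj4)  # "+towgs84"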
a2i2/surround
surround_cli/surround_cli/split/cli.py
reset_directory
python
def reset_directory(args): if not args.extension: files = os.listdir(os.path.join(args.reset, "test")) _, args.extension = os.path.splitext(files[0]) if args.extension: args.extension = args.extension[1:] else: print("Failed to guess which extension was used during the initial split") return undo_split_directory(args.reset, args.extension)
Reset a directory that has been split using the split tool. :param args: the arguments parsed from the user :type args: :class:`argparse.Namespace`
https://github.com/a2i2/surround/blob/6aa6f7939bb06856e20bba12d5f7329129dd61de/surround_cli/surround_cli/split/cli.py#L104-L124
import sys import os import argparse from .split_data import split_directory, undo_split_directory, split_file, undo_split_file def is_valid_dir(arg_parser, arg): if not os.path.isdir(arg): arg_parser.error("Invalid directory %s" % arg) return arg return arg def is_valid_file(arg_parser, arg): if not os.path.isfile(arg): arg_parser.error("Invalid file path %s" % arg) return arg return arg def get_split_parser(): parser = argparse.ArgumentParser(description='Randomly assign data to test, training, and validate sets') group = parser.add_mutually_exclusive_group(required=True) group.add_argument("-t", "--text-file", help="Split text file into train/test/validate sets", type=lambda x: is_valid_file(parser, x)) group.add_argument("-d", "--directory", help="Split directory into train/test/validate sets", type=lambda x: is_valid_dir(parser, x)) group.add_argument("-r", "--reset", help="Path to directory containing train/test/validate folders to reset", type=lambda x: is_valid_dir(parser, x)) parser.add_argument("-e", "--extension", help="File extension of the files to process (default: *)", type=str, default="*") parser.add_argument("-tr", "--train", type=int, help="Percentage of files for training (default: 80%%)", default=80) parser.add_argument("-te", "--test", type=int, help="Percentage of files for test (default: 10%%)", default=10) parser.add_argument("-va", "--validate", type=int, help="Percentage of files for validate (default: 10%%)", default=10) parser.add_argument("-nv", "--no-validate", action="store_true", help="Don't produce a validation set when splitting") parser.add_argument("-ns", "--no-shuffle", action="store_true", help="Don't randomise when splitting data") parser.add_argument("-nh", '--no-header', action="store_true", help="Use this flag when the text file has no headers") return parser def validate_args(args): if args.no_validate and args.train == 80 and args.test == 10: args.test = 20 if args.no_validate and args.train + args.test != 100: print("Test and train proportions should add up to 100.") return False if not args.no_validate and args.train + args.test + args.validate != 100: print("Test, train and validate proportions should add up to 100.") return False if args.reset: dirs = [path for path in os.listdir(args.reset) if os.path.isdir(os.path.join(args.reset, path))] expected = ['train', 'test'] if not all([exp in dirs for exp in expected]): print("Cannot reset this folder since there are no test/train/validate folders!") return False return True
BSD 3-Clause New or Revised License
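A hypothetical invocation of the reset path, reusing the argument parser defined in the same module (the import path is inferred from the file path above, and "data" is an illustrative directory that already contains the train/test folders produced by a previous split):

from surround_cli.split.cli import get_split_parser, validate_args, reset_directory

parser = get_split_parser()
args = parser.parse_args(["--reset", "data"])
if validate_args(args):
    reset_directory(args)  # guesses the extension from the test folder, then undoes the split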
qutip/qutip-qip
src/qutip_qip/compiler/scheduler.py
qubit_constraint
python
def qubit_constraint(ind1, ind2, instructions): if instructions[ind1].used_qubits & instructions[ind2].used_qubits: return False else: return True
Determine if two instructions have overlap in the used qubits.
https://github.com/qutip/qutip-qip/blob/bd854026f2ece5732f3187cbdbc10c0a61432619/src/qutip_qip/compiler/scheduler.py#L630-L637
from collections import deque from copy import deepcopy from functools import cmp_to_key from random import shuffle from ..circuit import QubitCircuit from ..operations import Gate from .instruction import Instruction class InstructionsGraph: def __init__(self, instructions): instructions = deepcopy(instructions) self.nodes = [] for instruction in instructions: if isinstance(instruction, Gate): self.nodes.append(Instruction(instruction)) else: self.nodes.append(instruction) for node in self.nodes: if node.duration is None: node.duration = 1 self.start = None self.end = None def generate_dependency_graph(self, commuting): for node in self.nodes: node.predecessors = set() node.successors = set() num_qubits = ( max( set().union( *[instruction.used_qubits for instruction in self.nodes] ) ) + 1 ) qubits_instructions_dependency = [[set()] for i in range(num_qubits)] for current_ind, instruction in enumerate(self.nodes): for qubit in instruction.used_qubits: dependent = False for dependent_ind in qubits_instructions_dependency[qubit][-1]: if not commuting(current_ind, dependent_ind, self.nodes): dependent = True if not dependent: qubits_instructions_dependency[qubit][-1].add(current_ind) else: qubits_instructions_dependency[qubit].append({current_ind}) for instructions_cycles in qubits_instructions_dependency: for cycle_ind1 in range(len(instructions_cycles) - 1): for instruction_ind1 in instructions_cycles[cycle_ind1]: for instruction_ind2 in instructions_cycles[ cycle_ind1 + 1 ]: self.nodes[instruction_ind1].successors.add( instruction_ind2 ) self.nodes[instruction_ind2].predecessors.add( instruction_ind1 ) start = [] end = [] for i, instruction in enumerate(self.nodes): if not instruction.successors: end.append(i) if not instruction.predecessors: start.append(i) self.start = start self.end = end def reverse_graph(self): for node in self.nodes: node.predecessors, node.successors = ( node.successors, node.predecessors, ) try: self.distance_to_start, self.distance_to_end = ( self.distance_to_end, self.distance_to_start, ) except AttributeError: pass self.start, self.end = self.end, self.start def find_topological_order( self, priority=True, apply_constraint=None, random=False ): graph = deepcopy(self.nodes) cycles_list = [] available_nodes = list(self.start) constraint_dependency = set() while available_nodes: if random: shuffle(available_nodes) if priority: available_nodes.sort(key=cmp_to_key(self._compare_priority)) current_cycle = [] if apply_constraint is None: current_cycle = deepcopy(available_nodes) else: for node1 in available_nodes: approval = True for node2 in current_cycle: if not apply_constraint(node1, node2, graph): approval = False constraint_dependency.add((node2, node1)) if approval: current_cycle.append(node1) cycles_list.append(current_cycle) for node in current_cycle: available_nodes.remove(node) for node in current_cycle: for successor_ind in graph[node].successors: graph[successor_ind].predecessors.remove(node) if not graph[successor_ind].predecessors: available_nodes.append(successor_ind) graph[node].successors = set() return cycles_list, constraint_dependency def compute_distance(self, cycles_list): cycles_list = deepcopy(cycles_list) for cycle in cycles_list: for ind in cycle: if not self.nodes[ind].predecessors: self.nodes[ind].distance_to_start = self.nodes[ ind ].duration else: self.nodes[ind].distance_to_start = ( max( [ self.nodes[predecessor_ind].distance_to_start for predecessor_ind in self.nodes[ ind ].predecessors ] ) + self.nodes[ind].duration ) 
cycles_list.reverse() self.reverse_graph() for cycle in cycles_list: for ind in cycle: if not self.nodes[ind].predecessors: self.nodes[ind].distance_to_end = self.nodes[ind].duration else: self.nodes[ind].distance_to_end = ( max( [ self.nodes[predecessor_ind].distance_to_end for predecessor_ind in self.nodes[ ind ].predecessors ] ) + self.nodes[ind].duration ) self.longest_distance = max( [self.nodes[i].distance_to_end for i in self.end] ) self.reverse_graph() def _compare_priority(self, ind1, ind2): if ( self.nodes[ind1].distance_to_end == self.nodes[ind2].distance_to_end ): return ( self.nodes[ind1].distance_to_start - self.nodes[ind2].distance_to_start ) else: return ( self.nodes[ind2].distance_to_end - self.nodes[ind1].distance_to_end ) def add_constraint_dependency(self, constraint_dependency): for ind1, ind2 in constraint_dependency: self.nodes[ind1].successors.add(ind2) self.nodes[ind2].predecessors.add(ind1) start = [] end = [] for i, instruction in enumerate(self.nodes): if not instruction.successors: end.append(i) if not instruction.predecessors: start.append(i) self.start = start self.end = end class Scheduler: def __init__(self, method="ALAP", constraint_functions=None): self.method = method if constraint_functions is None: self.constraint_functions = [qubit_constraint] else: return constraint_functions def schedule( self, circuit, gates_schedule=False, return_cycles_list=False, random_shuffle=False, repeat_num=0, ): circuit = deepcopy(circuit) if repeat_num > 0: random_shuffle = True result = [0] max_length = 4294967296 for i in range(repeat_num): gate_cycle_indices = self.schedule( circuit, gates_schedule=gates_schedule, return_cycles_list=return_cycles_list, random_shuffle=random_shuffle, repeat_num=0, ) current_length = max(gate_cycle_indices) if current_length < max_length: result = gate_cycle_indices max_length = current_length return result if isinstance(circuit, QubitCircuit): gates = circuit.gates else: gates = circuit instructions_graph = InstructionsGraph(gates) instructions_graph.generate_dependency_graph( commuting=self.commutation_rules ) if self.method == "ALAP": instructions_graph.reverse_graph() cycles_list, _ = instructions_graph.find_topological_order( priority=False, apply_constraint=None, random=random_shuffle ) instructions_graph.compute_distance(cycles_list=cycles_list) ( cycles_list, constraint_dependency, ) = instructions_graph.find_topological_order( priority=True, apply_constraint=self.apply_constraint, random=random_shuffle, ) if gates_schedule or return_cycles_list: if self.method == "ALAP": cycles_list.reverse() if return_cycles_list: return cycles_list gate_cycles_indices = [0] * len(gates) for cycle_ind, cycle in enumerate(cycles_list): for instruction_ind in cycle: gate_cycles_indices[instruction_ind] = cycle_ind return gate_cycles_indices instructions_graph.add_constraint_dependency(constraint_dependency) instructions_graph.compute_distance(cycles_list=cycles_list) instruction_start_time = [] if self.method == "ASAP": for instruction in instructions_graph.nodes: instruction_start_time.append( instruction.distance_to_start - instruction.duration ) elif self.method == "ALAP": for instruction in instructions_graph.nodes: instruction_start_time.append( instructions_graph.longest_distance - instruction.distance_to_start ) return instruction_start_time def commutation_rules(self, ind1, ind2, instructions): instruction1 = instructions[ind1] instruction2 = instructions[ind2] if instruction1.name != instruction2.name: return False if 
(instruction1.controls) and ( instruction1.controls == instruction2.controls ): return True elif instruction1.targets == instruction2.targets: return True else: return False def apply_constraint(self, ind1, ind2, instructions): result = [] for constraint_function in self.constraint_functions: result.append(constraint_function(ind1, ind2, instructions)) return all(result)
BSD 3-Clause New or Revised License
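A small sketch of the constraint in isolation (assuming qutip-qip is installed; the import paths follow the module locations referenced in this record):

from qutip_qip.operations import Gate
from qutip_qip.compiler.instruction import Instruction
from qutip_qip.compiler.scheduler import qubit_constraint

instructions = [
    Instruction(Gate("X", targets=[0])),
    Instruction(Gate("CNOT", controls=[0], targets=[1])),
    Instruction(Gate("X", targets=[2])),
]
print(qubit_constraint(0, 1, instructions))  # False: both touch qubit 0
print(qubit_constraint(0, 2, instructions))  # True: disjoint qubits, so they may share a cycle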
forseti-security/forseti-security
google/cloud/forseti/services/inventory/base/gcp.py
ApiClient.iter_stackdriver_organization_sinks
python
def iter_stackdriver_organization_sinks(self, org_id):
Iterate Organization logging sinks from GCP API. Args: org_id (str): id of the organization to query.
https://github.com/forseti-security/forseti-security/blob/de5d0f4d047c293a2a72545a76c3783980865551/google/cloud/forseti/services/inventory/base/gcp.py#L999-L1004
from builtins import object import abc from future.utils import with_metaclass from google.cloud.forseti.common.gcp_api import admin_directory from google.cloud.forseti.common.gcp_api import appengine from google.cloud.forseti.common.gcp_api import bigquery from google.cloud.forseti.common.gcp_api import cloud_resource_manager from google.cloud.forseti.common.gcp_api import cloudbilling from google.cloud.forseti.common.gcp_api import cloudsql from google.cloud.forseti.common.gcp_api import compute from google.cloud.forseti.common.gcp_api import container from google.cloud.forseti.common.gcp_api import groups_settings from google.cloud.forseti.common.gcp_api import iam from google.cloud.forseti.common.gcp_api import servicemanagement from google.cloud.forseti.common.gcp_api import serviceusage from google.cloud.forseti.common.gcp_api import stackdriver_logging from google.cloud.forseti.common.gcp_api import storage class AssetMetadata(object): def __init__(self, cai_name='', cai_type=''): self.cai_name = cai_name self.cai_type = cai_type def __eq__(self, other): return (self.cai_name == other.cai_name and self.cai_type == other.cai_type) def __repr__(self): return 'cai_name: {}, cai_type: {}'.format( self.cai_name, self.cai_type) class ResourceNotSupported(Exception): class ApiClient(with_metaclass(abc.ABCMeta, object)): @abc.abstractmethod def iter_crm_organization_access_levels(self, access_policy_id): @abc.abstractmethod def iter_crm_org_access_policies(self, org_id): @abc.abstractmethod def fetch_crm_organization_service_perimeter(self, access_policy_id): @abc.abstractmethod def fetch_bigquery_dataset_policy(self, project_id, project_number, dataset_id): @abc.abstractmethod def fetch_bigquery_iam_policy(self, project_id, project_number, dataset_id): @abc.abstractmethod def iter_bigquery_datasets(self, project_number): @abc.abstractmethod def iter_bigquery_tables(self, dataset_reference): @abc.abstractmethod def iter_bigtable_clusters(self, project_id, instance_id): @abc.abstractmethod def iter_bigtable_instances(self, project_number): @abc.abstractmethod def iter_bigtable_tables(self, project_id, instance_id): @abc.abstractmethod def fetch_billing_account_iam_policy(self, account_id): @abc.abstractmethod def fetch_billing_project_info(self, project_number): @abc.abstractmethod def iter_billing_accounts(self): @abc.abstractmethod def iter_cloudsql_instances(self, project_id, project_number): @abc.abstractmethod def is_compute_api_enabled(self, project_number): @abc.abstractmethod def fetch_compute_ig_instances(self, project_number, instance_group_name, region=None, zone=None): @abc.abstractmethod def fetch_compute_project(self, project_number): @abc.abstractmethod def iter_compute_address(self, project_number): @abc.abstractmethod def iter_compute_autoscalers(self, project_number): @abc.abstractmethod def iter_compute_backendbuckets(self, project_number): @abc.abstractmethod def iter_compute_backendservices(self, project_number): @abc.abstractmethod def iter_compute_disks(self, project_number): @abc.abstractmethod def iter_compute_firewalls(self, project_number): @abc.abstractmethod def iter_compute_forwardingrules(self, project_number): @abc.abstractmethod def iter_compute_healthchecks(self, project_number): @abc.abstractmethod def iter_compute_httphealthchecks(self, project_number): @abc.abstractmethod def iter_compute_httpshealthchecks(self, project_number): @abc.abstractmethod def iter_compute_ig_managers(self, project_number): @abc.abstractmethod def iter_compute_images(self, 
project_number): @abc.abstractmethod def iter_compute_instancegroups(self, project_number): @abc.abstractmethod def iter_compute_instances(self, project_number): @abc.abstractmethod def iter_compute_instancetemplates(self, project_number): @abc.abstractmethod def iter_compute_interconnects(self, project_number): @abc.abstractmethod def iter_compute_interconnect_attachments(self, project_number): @abc.abstractmethod def iter_compute_licenses(self, project_number): @abc.abstractmethod def iter_compute_networks(self, project_number): @abc.abstractmethod def iter_compute_project(self, project_number): @abc.abstractmethod def iter_compute_routers(self, project_number): @abc.abstractmethod def iter_compute_securitypolicies(self, project_number): @abc.abstractmethod def iter_compute_snapshots(self, project_number): @abc.abstractmethod def iter_compute_sslcertificates(self, project_number): @abc.abstractmethod def iter_compute_subnetworks(self, project_number): @abc.abstractmethod def iter_compute_targethttpproxies(self, project_number): @abc.abstractmethod def iter_compute_targethttpsproxies(self, project_number): @abc.abstractmethod def iter_compute_targetinstances(self, project_number): @abc.abstractmethod def iter_compute_targetpools(self, project_number): @abc.abstractmethod def iter_compute_targetsslproxies(self, project_number): @abc.abstractmethod def iter_compute_targettcpproxies(self, project_number): @abc.abstractmethod def iter_compute_targetvpngateways(self, project_number): @abc.abstractmethod def iter_compute_urlmaps(self, project_number): @abc.abstractmethod def iter_compute_vpntunnels(self, project_number): @abc.abstractmethod def fetch_container_serviceconfig(self, project_id, zone=None, location=None): @abc.abstractmethod def iter_container_clusters(self, project_number): @abc.abstractmethod def fetch_crm_folder(self, folder_id): @abc.abstractmethod def fetch_crm_folder_iam_policy(self, folder_id): @abc.abstractmethod def fetch_crm_organization(self, org_id): @abc.abstractmethod def fetch_crm_organization_iam_policy(self, org_id): @abc.abstractmethod def fetch_crm_project(self, project_number): @abc.abstractmethod def fetch_crm_project_iam_policy(self, project_number): @abc.abstractmethod def iter_crm_folder_org_policies(self, folder_id): @abc.abstractmethod def iter_crm_folders(self, parent_id): @abc.abstractmethod def iter_crm_organization_org_policies(self, org_id): @abc.abstractmethod def iter_crm_project_liens(self, project_number): @abc.abstractmethod def iter_crm_project_org_policies(self, project_number): @abc.abstractmethod def iter_crm_projects(self, parent_type, parent_id): @abc.abstractmethod def fetch_dataproc_cluster_iam_policy(self, cluster): @abc.abstractmethod def iter_dataproc_clusters(self, project_id, region=None): @abc.abstractmethod def iter_dns_managedzones(self, project_number): @abc.abstractmethod def iter_dns_policies(self, project_number): @abc.abstractmethod def fetch_gae_app(self, project_id): @abc.abstractmethod def iter_gae_instances(self, project_id, service_id, version_id): @abc.abstractmethod def iter_gae_services(self, project_id): @abc.abstractmethod def iter_gae_versions(self, project_id, service_id): @abc.abstractmethod def iter_gsuite_group_members(self, group_key): @abc.abstractmethod def fetch_gsuite_groups_settings(self, group_email): @abc.abstractmethod def iter_gsuite_groups(self, gsuite_id): @abc.abstractmethod def iter_gsuite_users(self, gsuite_id): @abc.abstractmethod def fetch_iam_serviceaccount_iam_policy(self, name, unique_id): 
@abc.abstractmethod def iter_iam_curated_roles(self): @abc.abstractmethod def iter_iam_organization_roles(self, org_id): @abc.abstractmethod def iter_iam_project_roles(self, project_id, project_number): @abc.abstractmethod def iter_iam_serviceaccount_exported_keys(self, name): @abc.abstractmethod def iter_iam_serviceaccounts(self, project_id, project_number): @abc.abstractmethod def iter_iam_serviceaccount_keys(self, project_id, serviceaccount_id): @abc.abstractmethod def fetch_kms_cryptokey_iam_policy(self, cryptokey): @abc.abstractmethod def fetch_kms_keyring_iam_policy(self, keyring): @abc.abstractmethod def iter_kms_cryptokeys(self, parent): @abc.abstractmethod def iter_kms_cryptokeyversions(self, parent): @abc.abstractmethod def iter_kms_keyrings(self, project_id, location=None): @abc.abstractmethod def iter_kubernetes_nodes(self, project_id, zone, cluster): @abc.abstractmethod def iter_kubernetes_pods(self, project_id, zone, cluster, namespace): @abc.abstractmethod def iter_kubernetes_services(self, project_id, zone, cluster, namespace): @abc.abstractmethod def iter_kubernetes_namespaces(self, project_id, zone, cluster): @abc.abstractmethod def iter_kubernetes_roles(self, project_id, zone, cluster, namespace): @abc.abstractmethod def iter_kubernetes_rolebindings(self, project_id, zone, cluster, namespace): @abc.abstractmethod def iter_kubernetes_clusterroles(self, project_id, zone, cluster): @abc.abstractmethod def iter_kubernetes_clusterrolebindings(self, project_id, zone, cluster): @abc.abstractmethod def fetch_pubsub_subscription_iam_policy(self, name): @abc.abstractmethod def fetch_pubsub_topic_iam_policy(self, name): @abc.abstractmethod def iter_pubsub_subscriptions(self, project_id, project_number): @abc.abstractmethod def iter_pubsub_topics(self, project_id, project_number): @abc.abstractmethod def fetch_services_enabled_apis(self, project_number): @abc.abstractmethod def iter_serviceusage_services(self, project_number): @abc.abstractmethod def iter_spanner_instances(self, project_number): @abc.abstractmethod def iter_spanner_databases(self, parent): @abc.abstractmethod def iter_stackdriver_billing_account_sinks(self, acct_id): @abc.abstractmethod def iter_stackdriver_folder_sinks(self, folder_id): @abc.abstractmethod
Apache License 2.0
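The method above is abstract, so a concrete ApiClient implementation supplies the iteration itself. A hypothetical consumption sketch (the client object and the organization id are placeholders, and the structure of each yielded sink is an assumption, not taken from this file):

# `client` stands in for any concrete ApiClient subclass wired up to GCP credentials.
for sink in client.iter_stackdriver_organization_sinks(org_id="1234567890"):
    print(sink)  # one logging sink resource per iteration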
avast/retdec-regression-tests-framework
regression_tests/logging.py
disable_logging
python
def disable_logging(): logging.disable(logging.CRITICAL)
Disables the logging facilities.
https://github.com/avast/retdec-regression-tests-framework/blob/a8d024475bf76cd6acdee3c9df3a3d38a2ec63df/regression_tests/logging.py#L32-L34
import logging import os def setup_logging(config, script_name): if not config['logging'].getboolean('enabled'): disable_logging() return root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) log_file_handler = _create_log_file_handler(config, script_name) root_logger.addHandler(log_file_handler) if config['logging'].getboolean('log_also_to_stderr'): stderr_handler = _create_stderr_handler(config) root_logger.addHandler(stderr_handler)
MIT License
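A minimal sketch of the effect (the import path follows the record's file path):

import logging
from regression_tests.logging import disable_logging

logging.warning("emitted as usual")
disable_logging()
logging.warning("silently dropped")  # logging.disable(logging.CRITICAL) mutes all standard levels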
azure/autorest.python
test/vanilla/legacy/Expected/AcceptanceTests/Xml/xmlservice/models/_models.py
AppleBarrel.__init__
python
def __init__(self, **kwargs): super(AppleBarrel, self).__init__(**kwargs) self.good_apples = kwargs.get("good_apples", None) self.bad_apples = kwargs.get("bad_apples", None)
:keyword good_apples: :paramtype good_apples: list[str] :keyword bad_apples: :paramtype bad_apples: list[str]
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/test/vanilla/legacy/Expected/AcceptanceTests/Xml/xmlservice/models/_models.py#L67-L76
from azure.core.exceptions import HttpResponseError import msrest.serialization class AccessPolicy(msrest.serialization.Model): _validation = { "start": {"required": True}, "expiry": {"required": True}, "permission": {"required": True}, } _attribute_map = { "start": {"key": "Start", "type": "iso-8601"}, "expiry": {"key": "Expiry", "type": "iso-8601"}, "permission": {"key": "Permission", "type": "str"}, } def __init__(self, **kwargs): super(AccessPolicy, self).__init__(**kwargs) self.start = kwargs["start"] self.expiry = kwargs["expiry"] self.permission = kwargs["permission"] class AppleBarrel(msrest.serialization.Model): _attribute_map = { "good_apples": {"key": "GoodApples", "type": "[str]", "xml": {"wrapped": True, "itemsName": "Apple"}}, "bad_apples": {"key": "BadApples", "type": "[str]", "xml": {"wrapped": True, "itemsName": "Apple"}}, }
MIT License
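A usage sketch of the model above (assuming the generated xmlservice package re-exports AppleBarrel from models/_models.py, which is the usual autorest layout):

from xmlservice.models import AppleBarrel

barrel = AppleBarrel(good_apples=["Fuji", "Gala"], bad_apples=["Bramley"])
print(barrel.good_apples)  # ['Fuji', 'Gala']
print(barrel.bad_apples)   # ['Bramley']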
dstl/stone-soup
stonesoup/metricgenerator/tracktotruthmetrics.py
SIAPMetrics.S_single_time
python
def S_single_time(self, manager, timestamp): numerator = self._n_t(manager, timestamp) - self._na_t(manager, timestamp) try: S = numerator / self._n_t(manager, timestamp) except ZeroDivisionError: S = 0 return SingleTimeMetric(title="SIAP S at timestamp", value=S, timestamp=timestamp, generator=self)
r"""SIAP metric S "Spuriousness" at a specific time Returns an assessment of the number of tracks that are deemed to be spurious, i.e. unassigned to true objects, at a specific timestamp, :math:`{t}`. The output is a percentage, range :math:`0:1`, with a target score of 0. .. math:: S_{t} = \frac{N({t}) - N{A}({t})}{N({t})} where :math:`N{A}({t})` is the number of tracks assigned to true objects at timestamp :math:`{t}` and :math:`N({t})` is the number of tracks timestamp :math:`{t}`. Parameters ---------- manager : MetricManager containing the data to be used to create the metric(s) timestamp: datetime.datetime timestamp at which to compute the metric Returns ------- SingleTimeMetric Contains the metric information
https://github.com/dstl/stone-soup/blob/1ec7db239bc0ada0e59f2ff3f0249987f107e33f/stonesoup/metricgenerator/tracktotruthmetrics.py#L368-L405
import datetime import warnings from operator import attrgetter import numpy as np from .base import MetricGenerator from ..base import Property from ..measures import EuclideanWeighted from ..types.metric import SingleTimeMetric, TimeRangeMetric from ..types.time import TimeRange from ..types.track import Track class SIAPMetrics(MetricGenerator): position_weighting: np.ndarray = Property(default=None, doc="Weighting(s) to be used by euclidean measure " "in position kinematic accuracy calculations. " "If None, weights are all 1") velocity_weighting: np.ndarray = Property(default=None, doc="Weighting(s) to be used by euclidean measure " "in velocity kinematic accuracy calculations. " "If None, weights are all 1") position_mapping: np.ndarray = Property(default=None, doc="Mapping array which specifies which elements " "within state space state vectors correspond to " "position") velocity_mapping: np.ndarray = Property(default=None, doc="Mapping array which specifies which elements " "within state space state vectors correspond to " "velocity") position_mapping2: np.ndarray = Property(default=None, doc="Mapping array which specifies which elements " "within the ground truth state space state " "vectors correspond to position. Default is " "same as position_mapping") velocity_mapping2: np.ndarray = Property(default=None, doc="Mapping array which specifies which elements " "within the ground truth state space state " "vectors correspond to velocity. Default is " "same as velocity_mapping") truth_id: str = Property(default=None, doc="Metadata key for ID of each ground truth path in dataset") track_id: str = Property(default=None, doc="Metadata key for ID of each track in dataset") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.position_mapping2 is not None and self.position_mapping is None: raise ValueError("Cannot set position_mapping2 if position_mapping is None. " "If this is really what you meant to do, then" " set position_mapping to include all dimensions.") if self.velocity_mapping2 is not None and self.velocity_mapping is None: raise ValueError("Cannot set velocity_mapping2 if velocity_mapping is None. 
" "If this is really what you meant to do, then" " set velocity_mapping to include all dimensions.") if self.position_mapping2 is None and self.position_mapping is not None: self.position_mapping2 = self.position_mapping if self.velocity_mapping2 is None and self.velocity_mapping is not None: self.velocity_mapping2 = self.velocity_mapping def compute_metric(self, manager, *args, **kwargs): C = self.C_time_range(manager) A = self.A_time_range(manager) S = self.S_time_range(manager) LT = self.LT(manager) LS = self.LS(manager) nt = self.num_tracks(manager) nj = self.num_truths(manager) metrics = [C, A, S, LT, LS, nt, nj] timestamped_metrics = {'time-based SIAP C': [], 'time-based SIAP A': [], 'time-based SIAP S': []} timestamps = manager.list_timestamps() for timestamp in timestamps: timestamped_metrics['time-based SIAP C'].append(self.C_single_time(manager, timestamp)) timestamped_metrics['time-based SIAP A'].append(self.A_single_time(manager, timestamp)) timestamped_metrics['time-based SIAP S'].append(self.S_single_time(manager, timestamp)) t_metrics = [TimeRangeMetric(title=key, value=value, time_range=TimeRange(min(timestamps), max(timestamps)), generator=self) for key, value in timestamped_metrics.items()] if self.position_mapping is not None: PA = self.PA(manager) metrics.append(PA) t_PA = [] for timestamp in timestamps: t_PA.append(self.PA_single_time(manager, timestamp)) metrics.append(TimeRangeMetric(title='time-based SIAP PA', value=t_PA, time_range=TimeRange(min(timestamps), max(timestamps)), generator=self)) if self.velocity_mapping is not None: VA = self.VA(manager) metrics.append(VA) t_VA = [] for timestamp in timestamps: t_VA.append(self.VA_single_time(manager, timestamp)) metrics.append(TimeRangeMetric(title='time-based SIAP VA', value=t_VA, time_range=TimeRange(min(timestamps), max(timestamps)), generator=self)) metrics.extend(t_metrics) if self.track_id is not None: CID = self.CID_time_range(manager) metrics.append(CID) t_CID = [] for timestamp in timestamps: t_CID.append(self.CID_single_time(manager, timestamp)) metrics.append(TimeRangeMetric(title='time-based SIAP CID', value=t_CID, time_range=TimeRange(min(timestamps), max(timestamps)), generator=self)) if self.truth_id is not None: IDC = self.IDC_time_range(manager) IDA = self.IDA_time_range(manager) metrics.extend([IDC, IDA]) t_IDC = [] t_IDA = [] for timestamp in timestamps: t_IDC.append(self.IDC_single_time(manager, timestamp)) t_IDA.append(self.IDA_single_time(manager, timestamp)) metrics.append(TimeRangeMetric(title='time-based SIAP IDC', value=t_IDC, time_range=TimeRange(min(timestamps), max(timestamps)), generator=self)) metrics.append(TimeRangeMetric(title='time-based SIAP IDA', value=t_IDA, time_range=TimeRange(min(timestamps), max(timestamps)), generator=self)) return metrics @staticmethod def _warn_no_truth(manager): if len(manager.groundtruth_paths) == 0: warnings.warn("No truth to generate SIAP Metric", stacklevel=2) @staticmethod def _warn_no_tracks(manager): if len(manager.tracks) == 0: warnings.warn("No tracks to generate SIAP Metric", stacklevel=2) def C_single_time(self, manager, timestamp): numerator = self._jt_t(manager, timestamp) try: C = numerator / self._j_t(manager, timestamp) except ZeroDivisionError: C = 0 return SingleTimeMetric(title="SIAP C at timestamp", value=C, timestamp=timestamp, generator=self) def C_time_range(self, manager): timestamps = manager.list_timestamps() try: C = self._jt_sum(manager, timestamps) / self._j_sum( manager, timestamps) except ZeroDivisionError: 
self._warn_no_truth(manager) C = 0 return TimeRangeMetric( title="SIAP C", value=C, time_range=TimeRange(min(timestamps), max(timestamps)), generator=self) def A_single_time(self, manager, timestamp): try: A = self._na_t(manager, timestamp) / self._jt_t(manager, timestamp) except ZeroDivisionError: A = 1 return SingleTimeMetric(title="SIAP A at timestamp", value=A, timestamp=timestamp, generator=self) def A_time_range(self, manager): timestamps = manager.list_timestamps() try: A = self._na_sum(manager, timestamps) / self._jt_sum(manager, timestamps) except ZeroDivisionError: self._warn_no_truth(manager) self._warn_no_tracks(manager) A = 1 return TimeRangeMetric( title="SIAP A", value=A, time_range=TimeRange(min(timestamps), max(timestamps)), generator=self)
MIT License
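A worked illustration of the S formula from the docstring above (the counts are invented for the example):

n_tracks = 5       # N(t): tracks present at timestamp t
n_assigned = 4     # NA(t): tracks assigned to true objects at t
spuriousness = (n_tracks - n_assigned) / n_tracks
print(spuriousness)  # 0.2, i.e. one in five tracks is spurious; the target score is 0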
googleapis/python-asset
google/cloud/asset_v1/services/asset_service/client.py
AssetServiceClient.get_feed
python
def get_feed( self, request: Union[asset_service.GetFeedRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> asset_service.Feed: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) if not isinstance(request, asset_service.GetFeedRequest): request = asset_service.GetFeedRequest(request) if name is not None: request.name = name rpc = self._transport._wrapped_methods[self._transport.get_feed] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response
r"""Gets details about an asset feed. Args: request (Union[google.cloud.asset_v1.types.GetFeedRequest, dict]): The request object. Get asset feed request. name (str): Required. The name of the Feed and it must be in the format of: projects/project_number/feeds/feed_id folders/folder_number/feeds/feed_id organizations/organization_number/feeds/feed_id This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.asset_v1.types.Feed: An asset feed used to export asset updates to a destinations. An asset feed filter controls what updates are exported. The asset feed must be created within a project, organization, or folder. Supported destinations are: Pub/Sub topics.
https://github.com/googleapis/python-asset/blob/5f7a7b44254103df0649bc7f991748a1b29723d3/google/cloud/asset_v1/services/asset_service/client.py#L704-L779
from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account from google.api_core import operation from google.api_core import operation_async from google.cloud.asset_v1.services.asset_service import pagers from google.cloud.asset_v1.types import asset_service from google.cloud.asset_v1.types import assets from google.type import expr_pb2 from .transports.base import AssetServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import AssetServiceGrpcTransport from .transports.grpc_asyncio import AssetServiceGrpcAsyncIOTransport class AssetServiceClientMeta(type): _transport_registry = OrderedDict() _transport_registry["grpc"] = AssetServiceGrpcTransport _transport_registry["grpc_asyncio"] = AssetServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[AssetServiceTransport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values())) class AssetServiceClient(metaclass=AssetServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "cloudasset.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> AssetServiceTransport: return self._transport @staticmethod def access_level_path(access_policy: str, access_level: str,) -> str: return "accessPolicies/{access_policy}/accessLevels/{access_level}".format( access_policy=access_policy, access_level=access_level, ) @staticmethod def parse_access_level_path(path: str) -> Dict[str, str]: m = re.match( r"^accessPolicies/(?P<access_policy>.+?)/accessLevels/(?P<access_level>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def access_policy_path(access_policy: str,) -> str: return "accessPolicies/{access_policy}".format(access_policy=access_policy,) @staticmethod def parse_access_policy_path(path: str) -> Dict[str, str]: m = re.match(r"^accessPolicies/(?P<access_policy>.+?)$", path) return m.groupdict() if m else {} @staticmethod def asset_path() -> str: return 
"*".format() @staticmethod def feed_path(project: str, feed: str,) -> str: return "projects/{project}/feeds/{feed}".format(project=project, feed=feed,) @staticmethod def parse_feed_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/feeds/(?P<feed>.+?)$", path) return m.groupdict() if m else {} @staticmethod def inventory_path(project: str, location: str, instance: str,) -> str: return "projects/{project}/locations/{location}/instances/{instance}/inventory".format( project=project, location=location, instance=instance, ) @staticmethod def parse_inventory_path(path: str) -> Dict[str, str]: m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/instances/(?P<instance>.+?)/inventory$", path, ) return m.groupdict() if m else {} @staticmethod def service_perimeter_path(access_policy: str, service_perimeter: str,) -> str: return "accessPolicies/{access_policy}/servicePerimeters/{service_perimeter}".format( access_policy=access_policy, service_perimeter=service_perimeter, ) @staticmethod def parse_service_perimeter_path(path: str) -> Dict[str, str]: m = re.match( r"^accessPolicies/(?P<access_policy>.+?)/servicePerimeters/(?P<service_perimeter>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, AssetServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: 
is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " "values: never, auto, always" ) if isinstance(transport, AssetServiceTransport): if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, ) def export_assets( self, request: Union[asset_service.ExportAssetsRequest, dict] = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: if not isinstance(request, asset_service.ExportAssetsRequest): request = asset_service.ExportAssetsRequest(request) rpc = self._transport._wrapped_methods[self._transport.export_assets] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = operation.from_gapic( response, self._transport.operations_client, asset_service.ExportAssetsResponse, metadata_type=asset_service.ExportAssetsRequest, ) return response def list_assets( self, request: Union[asset_service.ListAssetsRequest, dict] = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAssetsPager: has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) if not isinstance(request, asset_service.ListAssetsRequest): request = asset_service.ListAssetsRequest(request) if parent is not None: request.parent = parent rpc = self._transport._wrapped_methods[self._transport.list_assets] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = pagers.ListAssetsPager( method=rpc, request=request, response=response, metadata=metadata, ) return response def batch_get_assets_history( self, request: Union[asset_service.BatchGetAssetsHistoryRequest, dict] = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> asset_service.BatchGetAssetsHistoryResponse: if not isinstance(request, asset_service.BatchGetAssetsHistoryRequest): request = asset_service.BatchGetAssetsHistoryRequest(request) rpc = self._transport._wrapped_methods[self._transport.batch_get_assets_history] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response def create_feed( self, request: Union[asset_service.CreateFeedRequest, dict] = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> asset_service.Feed: has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) if not isinstance(request, asset_service.CreateFeedRequest): request = asset_service.CreateFeedRequest(request) if parent is not None: request.parent = parent rpc = self._transport._wrapped_methods[self._transport.create_feed] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response
Apache License 2.0
bukun/torcms
torcms/model/user_model.py
MUser.update_user_name
python
def update_user_name(user_email, user_name): out_dic = {'success': False, 'code': '00'} entry = TabMember.update(user_name=user_name).where( TabMember.user_email == user_email) entry.execute() out_dic['success'] = True return out_dic
Update the user_name of a user.
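A minimal usage sketch, assuming the peewee database behind TabMember is already initialised and a member row with the given email exists; the email address and new name below are hypothetical.

# Hypothetical values; requires an initialised peewee database for TabMember.
result = MUser.update_user_name('alice@example.com', 'alice_renamed')
if result['success']:
    print('user_name updated')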
https://github.com/bukun/torcms/blob/5d7480865fd46e706b84f5f65a5c24cd03bb2142/torcms/model/user_model.py#L150-L163
import time from config import CMS_CFG from torcms.core import tools from torcms.model.core_tab import TabMember class MUser(): @staticmethod def query_all(limit=50): return TabMember.select().limit(limit) @staticmethod def get_by_uid(uid): try: return TabMember.get(TabMember.uid == uid) except Exception as err: print(repr(err)) return None @staticmethod def get_by_name(uname): try: return TabMember.get(user_name=uname) except Exception as err: print(repr(err)) return None @staticmethod def set_sendemail_time(uid): entry = TabMember.update( time_email=tools.timestamp(), ).where(TabMember.uid == uid) entry.execute() @staticmethod def get_by_email(useremail): try: return TabMember.get(user_email=useremail) except Exception as err: print(repr(err)) return None @staticmethod def check_user(user_id, u_pass): user_count = TabMember.select().where(TabMember.uid == user_id).count() if user_count == 0: return -1 the_user = TabMember.get(uid=user_id) if the_user.user_pass == tools.md5(u_pass): return 1 return 0 @staticmethod def check_user_by_name(user_name, u_pass): the_query = TabMember.select().where(TabMember.user_name == user_name) if the_query.count() == 0: return -1 the_user = the_query.get() failed_times = the_user.failed_times time_failed = the_user.time_failed c_tiemstamp = tools.timestamp() if c_tiemstamp - time_failed > 1 * 60 * 60: entry2 = TabMember.update(failed_times=0).where( TabMember.user_name == user_name) try: entry2.execute() except Exception as err: print(repr(err)) elif failed_times > 4: return 2 else: pass if the_user.user_pass == tools.md5(u_pass): return 1 return 0 @staticmethod def update_pass(user_id, newpass): out_dic = {'success': False, 'code': '00'} entry = TabMember.update(user_pass=tools.md5(newpass)).where( TabMember.uid == user_id) entry.execute() out_dic['success'] = True return out_dic @staticmethod
MIT License
mrgambal/vulyk
vulyk/blueprints/gamification/models/events.py
EventModel.amount_of_money_earned
python
def amount_of_money_earned(cls, user: Optional[User]) -> float: query = Q(coins__gt=0) if user is not None: query &= Q(user=user) return cls.objects(query).sum('coins')
Amount of money earned by the given user, or the total amount earned across all users if None is passed. :param user: User instance :type user: Optional[User] :return: Amount of money :rtype: float
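A usage sketch, assuming a mongoengine connection is configured and `some_user` is an already-loaded User document (both are assumptions, not part of the snippet above).

# `some_user` is a hypothetical, already-loaded User document.
total_earned = EventModel.amount_of_money_earned(None)        # across all users
user_earned = EventModel.amount_of_money_earned(some_user)    # for a single user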
https://github.com/mrgambal/vulyk/blob/4ea617bb9a1c4778ce6dfa084c53e2667d037f67/vulyk/blueprints/gamification/models/events.py#L187-L204
from typing import Generator, Iterator, Optional from flask_mongoengine import Document from mongoengine import ( DecimalField, ComplexDateTimeField, ReferenceField, BooleanField, ListField, IntField, Q ) from vulyk.models.tasks import AbstractAnswer, Batch from vulyk.models.user import User from .foundations import FundModel from .rules import RuleModel from ..core.events import Event __all__ = [ 'EventModel' ] class EventModel(Document): timestamp = ComplexDateTimeField(required=True) user = ReferenceField( document_type=User, db_field='user', required=True) answer = ReferenceField( document_type=AbstractAnswer, db_field='answer', required=False) points_given = DecimalField(min_value=0, required=True, db_field='points') coins = DecimalField(required=True) achievements = ListField( field=ReferenceField(document_type=RuleModel, required=False)) acceptor_fund = ReferenceField( document_type=FundModel, required=False, db_field='acceptorFund') level_given = IntField(min_value=1, required=False, db_field='level') viewed = BooleanField(default=False) meta = { 'collection': 'gamification.events', 'allow_inheritance': True, 'indexes': [ 'user', { 'fields': ['answer'], 'unique': True, 'sparse': True }, 'acceptor_fund', 'timestamp' ] } def to_event(self) -> Event: return Event.build( timestamp=self.timestamp, user=self.user, answer=self.answer, points_given=self.points_given, coins=self.coins, achievements=[a.to_rule() for a in self.achievements if hasattr(a, "to_rule")], acceptor_fund=None if self.acceptor_fund is None else self.acceptor_fund.to_fund(), level_given=self.level_given, viewed=self.viewed ) @classmethod def from_event(cls, event: Event): return cls( timestamp=event.timestamp, user=event.user, answer=event.answer, points_given=event.points_given, coins=event.coins, achievements=RuleModel.objects( id__in=[r.id for r in event.achievements]), acceptor_fund=None if event.acceptor_fund is None else FundModel.objects.get(id=event.acceptor_fund.id), level_given=event.level_given, viewed=event.viewed ) @classmethod def get_unread_events(cls, user: User) -> Generator[Event, None, None]: for ev in cls.objects(user=user, viewed=False): yield ev.to_event() @classmethod def mark_events_as_read(cls, user: User) -> None: cls.objects(user=user, viewed=False).update(set__viewed=True) @classmethod def get_all_events(cls, user: User) -> Iterator: for ev in cls.objects(user=user): yield ev.to_event() @classmethod def count_of_tasks_done_by_user(cls, user: User) -> int: return cls.objects(user=user, answer__exists=True).count() @classmethod def amount_of_money_donated(cls, user: Optional[User]) -> float: query = Q(acceptor_fund__ne=None) if user is not None: query &= Q(user=user) return -cls.objects(query).sum('coins') @classmethod
BSD 3-Clause New or Revised License
myriadrf/pylms7002soapy
pyLMS7002Soapy/LMS7002_SX.py
LMS7002_SX.EN_SDM_CLK
python
def EN_SDM_CLK(self): return self._readReg('CFG0', 'EN_SDM_CLK')
Get the value of EN_SDM_CLK
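Reading the property goes through the register helper in LMS7002_base; a small sketch assuming `chip` is an already-initialised LMS7002 object (hypothetical here).

# `chip` is a hypothetical, already-initialised LMS7002 instance.
sx = LMS7002_SX(chip, 'T')   # 'T' selects the SXT channel
print(sx.EN_SDM_CLK)         # reads the EN_SDM_CLK field of register CFG0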
https://github.com/myriadrf/pylms7002soapy/blob/4f828eb9282c302dc6b187d91df5e77c8a6f2d61/pyLMS7002Soapy/LMS7002_SX.py#L202-L206
from pyLMS7002Soapy.LMS7002_base import LMS7002_base from time import sleep from math import floor class LMS7002_SX(LMS7002_base): __slots__ = [] def __init__(self, chip, Channel): if Channel not in ['R', 'T']: raise ValueError("Parameter Channel must be 'R' or 'T'") self.chip = chip self.channel = Channel self.prefix = "SXT_SXR_" @property def EN_DIR(self): prefix = self.prefix self.prefix = "" en_dir = self._readReg('TRX_EN_DIR', 'EN_DIR') self.prefix = prefix return en_dir @EN_DIR.setter def EN_DIR(self, value): if value not in [0, 1]: raise ValueError("Value must be [0,1]") prefix = self.prefix self.prefix = "" self._writeReg('TRX_EN_DIR', 'EN_DIR', value) self.prefix = prefix @property def RESET_N(self): return self._readReg('CFG0', 'RESET_N') @RESET_N.setter def RESET_N(self, value): if value not in [0, 1, 'RESET', 'NORMAL']: raise ValueError("Value must be [0,1,'RESET', 'NORMAL']") if value == 0 or value == 'RESET': val = 0 else: val = 1 self._writeReg('CFG0', 'RESET_N', val) @property def SPDUP_VCO(self): return self._readReg('CFG0', 'SPDUP_VCO') @SPDUP_VCO.setter def SPDUP_VCO(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'SPDUP_VCO', val) @property def BYPLDO_VCO(self): return self._readReg('CFG0', 'BYPLDO_VCO') @BYPLDO_VCO.setter def BYPLDO_VCO(self, value): if value not in [0, 1, 'BYP', 'ACT']: raise ValueError("Value must be [0,1,'BYP', 'ACT']") if value == 0 or value == 'ACT': val = 0 else: val = 1 self._writeReg('CFG0', 'BYPLDO_VCO', val) @property def EN_COARSEPLL(self): return self._readReg('CFG0', 'EN_COARSEPLL') @EN_COARSEPLL.setter def EN_COARSEPLL(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'EN_COARSEPLL', val) @property def CURLIM_VCO(self): return self._readReg('CFG0', 'CURLIM_VCO') @CURLIM_VCO.setter def CURLIM_VCO(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'CURLIM_VCO', val) @property def EN_DIV2_DIVPROG(self): return self._readReg('CFG0', 'EN_DIV2_DIVPROG') @EN_DIV2_DIVPROG.setter def EN_DIV2_DIVPROG(self, value): if value not in [0, 1, 'ON', 'OFF']: raise ValueError("Value must be [0,1,'ON', 'OFF']") if value == 0 or value == 'OFF': val = 0 else: val = 1 self._writeReg('CFG0', 'EN_DIV2_DIVPROG', val) @property def EN_INTONLY_SDM(self): return self._readReg('CFG0', 'EN_INTONLY_SDM') @EN_INTONLY_SDM.setter def EN_INTONLY_SDM(self, value): if value not in [0, 1, 'FRACN', 'INTN']: raise ValueError("Value must be [0,1,'FRACN', 'INTN']") if value == 0 or value == 'FRACN': val = 0 else: val = 1 self._writeReg('CFG0', 'EN_INTONLY_SDM', val) @property
Apache License 2.0
ocha-dap/hdx-python-country
src/hdx/location/country.py
Country.get_country_info_from_iso2
python
def get_country_info_from_iso2( cls, iso2: str, use_live: bool = True, exception: Optional[ExceptionUpperBound] = None, ) -> Optional[Dict[str,str]]: iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception) if iso3 is not None: return cls.get_country_info_from_iso3( iso3, use_live=use_live, exception=exception ) return None
Get country information from ISO2 code Args: iso2 (str): ISO2 code for which to get country information use_live (bool): Try to use the latest data from the web rather than the file in the package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[Dict[str,str]]: Country information
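A usage sketch; `use_live=False` keeps the lookup on the data file bundled with the package, and the ISO2 code is just an example.

# Assumes the hdx-python-country package is installed; import path inferred from the module layout.
from hdx.location.country import Country

info = Country.get_country_info_from_iso2('af', use_live=False)
if info is not None:
    print(info.get('#country+name+preferred'))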
https://github.com/ocha-dap/hdx-python-country/blob/7bcbef84030282a485e300df4845d4cff1c0ac11/src/hdx/location/country.py#L408-L429
import copy import logging import re from typing import Dict, List, Optional, Tuple, TypeVar, Union import hxl from hdx.utilities.path import script_dir_plus_file from hdx.utilities.text import get_words_in_sentence ExceptionUpperBound = TypeVar("T", bound="Exception") logger = logging.getLogger(__name__) class CountryError(Exception): pass class Country: abbreviations = { "DEM.": "DEMOCRATIC", "FMR.": "FORMER", "PROV.": "PROVINCE", "REP.": "REPUBLIC", "ST.": "SAINT", "UTD.": "UNITED", "U.": "UNITED", "N.": "NORTH", "E.": "EAST", "W.": "WEST", "K.": "KINGDOM", } major_differentiators = ["DEMOCRATIC", "NORTH", "SOUTH", "EAST", "WEST", "STATES"] multiple_abbreviations = { "FED.": ["FEDERATION", "FEDERAL", "FEDERATED"], "ISL.": ["ISLAND", "ISLANDS"], "S.": ["SOUTH", "STATES"], "TERR.": ["TERRITORY", "TERRITORIES"], } simplifications = [ "THE", "OF", "ISLAMIC", "STATES", "BOLIVARIAN", "PLURINATIONAL", "PEOPLE'S", "DUTCH PART", "FRENCH PART", "MALVINAS", "YUGOSLAV", "KINGDOM", "PROTECTORATE", ] _countriesdata = None _ochaurl_int = "https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596" _ochaurl = _ochaurl_int _country_name_overrides = dict() _country_name_mappings = dict() @classmethod def _add_countriesdata(cls, iso3: str, hxlcountry: hxl.Row) -> Dict: country = hxlcountry.dictionary for value in hxlcountry.get_all("#country+name"): if value: cls._countriesdata["countrynames2iso3"][value.upper()] = iso3 countryname = cls._country_name_overrides.get(iso3) if countryname is not None: country["#country+name+override"] = countryname iso2 = hxlcountry.get("#country+code+v_iso2") if iso2: cls._countriesdata["iso2iso3"][iso2] = iso3 cls._countriesdata["iso2iso3"][iso3] = iso2 m49 = hxlcountry.get("#country+code+num+v_m49") if m49: m49 = int(m49) cls._countriesdata["m49iso3"][m49] = iso3 cls._countriesdata["m49iso3"][iso3] = m49 cls._countriesdata["aliases"][iso3] = re.compile( hxlcountry.get("#country+regex"), re.IGNORECASE ) regionname = hxlcountry.get("#region+main+name+preferred") sub_regionname = hxlcountry.get("#region+name+preferred+sub") intermediate_regionname = hxlcountry.get("#region+intermediate+name+preferred") regionid = hxlcountry.get("#region+code+main") if regionid: regionid = int(regionid) sub_regionid = hxlcountry.get("#region+code+sub") if sub_regionid: sub_regionid = int(sub_regionid) intermediate_regionid = hxlcountry.get("#region+code+intermediate") if intermediate_regionid: intermediate_regionid = int(intermediate_regionid) def add_country_to_set(colname, idval, iso3): value = cls._countriesdata[colname].get(idval) if value is None: value = set() cls._countriesdata["regioncodes2countries"][idval] = value value.add(iso3) if regionname: add_country_to_set("regioncodes2countries", regionid, iso3) cls._countriesdata["regioncodes2names"][regionid] = regionname cls._countriesdata["regionnames2codes"][regionname.upper()] = regionid if sub_regionname: add_country_to_set("regioncodes2countries", sub_regionid, iso3) cls._countriesdata["regioncodes2names"][sub_regionid] = sub_regionname cls._countriesdata["regionnames2codes"][ sub_regionname.upper() ] = sub_regionid if intermediate_regionname: add_country_to_set("regioncodes2countries", intermediate_regionid, iso3) cls._countriesdata["regioncodes2names"][ intermediate_regionid ] = intermediate_regionname cls._countriesdata["regionnames2codes"][ intermediate_regionname.upper() ] = intermediate_regionid currency = hxlcountry.get("#currency+code") 
cls._countriesdata["currencies"][iso3] = currency return country @classmethod def set_countriesdata(cls, countries: str) -> None: cls._countriesdata = dict() cls._countriesdata["countries"] = dict() cls._countriesdata["iso2iso3"] = dict() cls._countriesdata["m49iso3"] = dict() cls._countriesdata["countrynames2iso3"] = dict() cls._countriesdata["regioncodes2countries"] = dict() cls._countriesdata["regioncodes2names"] = dict() cls._countriesdata["regionnames2codes"] = dict() cls._countriesdata["aliases"] = dict() cls._countriesdata["currencies"] = dict() for key, value in cls._country_name_mappings.items(): cls._countriesdata["countrynames2iso3"][key.upper()] = value.upper() for country in countries: iso3 = country.get("#country+code+v_iso3") if not iso3: continue iso3 = iso3.upper() countrydict = cls._add_countriesdata(iso3, country) cls._countriesdata["countries"][iso3] = countrydict def sort_list(colname): for idval in cls._countriesdata[colname]: cls._countriesdata[colname][idval] = sorted( list(cls._countriesdata[colname][idval]) ) sort_list("regioncodes2countries") @classmethod def countriesdata( cls, use_live: bool = True, country_name_overrides: Dict = None, country_name_mappings: Dict = None, ) -> List[Dict[str,Dict]]: if cls._countriesdata is None: countries = None if country_name_overrides is not None: cls.set_country_name_overrides(country_name_overrides) if country_name_mappings is not None: cls.set_country_name_mappings(country_name_mappings) if use_live: try: countries = hxl.data(cls._ochaurl) except OSError: logger.exception( "Download from OCHA feed failed! Falling back to stored file." ) if countries is None: countries = hxl.data( script_dir_plus_file( "Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv", Country, ), allow_local=True, ) cls.set_countriesdata(countries) return cls._countriesdata @classmethod def set_ocha_url(cls, url: str = None) -> None: if url is None: url = cls._ochaurl_int cls._ochaurl = url @classmethod def set_country_name_overrides(cls, country_name_overrides: Dict) -> None: cls._country_name_overrides = country_name_overrides @classmethod def set_country_name_mappings(cls, country_name_mappings: Dict) -> None: cls._country_name_mappings = country_name_mappings @classmethod def get_country_info_from_iso3( cls, iso3: str, use_live: bool = True, exception: Optional[ExceptionUpperBound] = None, ) -> Optional[Dict[str,str]]: countriesdata = cls.countriesdata(use_live=use_live) country = countriesdata["countries"].get(iso3.upper()) if country is not None: return country if exception is not None: raise exception return None @classmethod def get_country_name_from_iso3( cls, iso3: str, use_live: bool = True, exception: Optional[ExceptionUpperBound] = None, shortname: bool = False, ) -> Optional[str]: countryinfo = cls.get_country_info_from_iso3( iso3, use_live=use_live, exception=exception ) if countryinfo is not None: countryname = countryinfo.get("#country+name+override") if countryname is not None: return countryname if shortname: return countryinfo.get("#country+alt+i_en+name+v_unterm") else: return countryinfo.get("#country+name+preferred") return None @classmethod def get_currency_from_iso3( cls, iso3: str, use_live: bool = True, exception: Optional[ExceptionUpperBound] = None, ) -> Optional[int]: countriesdata = cls.countriesdata(use_live=use_live) currency = countriesdata["currencies"].get(iso3.upper()) if currency is not None: return currency if exception is not None: raise exception return None @classmethod def get_iso2_from_iso3( 
cls, iso3: str, use_live: bool = True, exception: Optional[ExceptionUpperBound] = None, ) -> Optional[str]: countriesdata = cls.countriesdata(use_live=use_live) iso2 = countriesdata["iso2iso3"].get(iso3.upper()) if iso2 is not None: return iso2 if exception is not None: raise exception return None @classmethod def get_iso3_from_iso2( cls, iso2: str, use_live: bool = True, exception: Optional[ExceptionUpperBound] = None, ) -> Optional[str]: countriesdata = cls.countriesdata(use_live=use_live) iso3 = countriesdata["iso2iso3"].get(iso2.upper()) if iso3 is not None: return iso3 if exception is not None: raise exception return None @classmethod
MIT License
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_persistent_volume_spec.py
V1PersistentVolumeSpec.host_path
python
def host_path(self, host_path): self._host_path = host_path
Sets the host_path of this V1PersistentVolumeSpec. :param host_path: The host_path of this V1PersistentVolumeSpec. # noqa: E501 :type: V1HostPathVolumeSource
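The setter is normally exercised through attribute assignment on the generated model; a sketch assuming the kubernetes_asyncio package is installed and re-exports these generated models from its client package.

# Assumes kubernetes_asyncio is installed and exposes these generated model classes.
from kubernetes_asyncio.client import V1HostPathVolumeSource, V1PersistentVolumeSpec

spec = V1PersistentVolumeSpec(capacity={'storage': '1Gi'})
spec.host_path = V1HostPathVolumeSource(path='/mnt/data')  # invokes the setter above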
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_persistent_volume_spec.py#L509-L517
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1PersistentVolumeSpec(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'access_modes': 'list[str]', 'aws_elastic_block_store': 'V1AWSElasticBlockStoreVolumeSource', 'azure_disk': 'V1AzureDiskVolumeSource', 'azure_file': 'V1AzureFilePersistentVolumeSource', 'capacity': 'dict(str, str)', 'cephfs': 'V1CephFSPersistentVolumeSource', 'cinder': 'V1CinderPersistentVolumeSource', 'claim_ref': 'V1ObjectReference', 'csi': 'V1CSIPersistentVolumeSource', 'fc': 'V1FCVolumeSource', 'flex_volume': 'V1FlexPersistentVolumeSource', 'flocker': 'V1FlockerVolumeSource', 'gce_persistent_disk': 'V1GCEPersistentDiskVolumeSource', 'glusterfs': 'V1GlusterfsPersistentVolumeSource', 'host_path': 'V1HostPathVolumeSource', 'iscsi': 'V1ISCSIPersistentVolumeSource', 'local': 'V1LocalVolumeSource', 'mount_options': 'list[str]', 'nfs': 'V1NFSVolumeSource', 'node_affinity': 'V1VolumeNodeAffinity', 'persistent_volume_reclaim_policy': 'str', 'photon_persistent_disk': 'V1PhotonPersistentDiskVolumeSource', 'portworx_volume': 'V1PortworxVolumeSource', 'quobyte': 'V1QuobyteVolumeSource', 'rbd': 'V1RBDPersistentVolumeSource', 'scale_io': 'V1ScaleIOPersistentVolumeSource', 'storage_class_name': 'str', 'storageos': 'V1StorageOSPersistentVolumeSource', 'volume_mode': 'str', 'vsphere_volume': 'V1VsphereVirtualDiskVolumeSource' } attribute_map = { 'access_modes': 'accessModes', 'aws_elastic_block_store': 'awsElasticBlockStore', 'azure_disk': 'azureDisk', 'azure_file': 'azureFile', 'capacity': 'capacity', 'cephfs': 'cephfs', 'cinder': 'cinder', 'claim_ref': 'claimRef', 'csi': 'csi', 'fc': 'fc', 'flex_volume': 'flexVolume', 'flocker': 'flocker', 'gce_persistent_disk': 'gcePersistentDisk', 'glusterfs': 'glusterfs', 'host_path': 'hostPath', 'iscsi': 'iscsi', 'local': 'local', 'mount_options': 'mountOptions', 'nfs': 'nfs', 'node_affinity': 'nodeAffinity', 'persistent_volume_reclaim_policy': 'persistentVolumeReclaimPolicy', 'photon_persistent_disk': 'photonPersistentDisk', 'portworx_volume': 'portworxVolume', 'quobyte': 'quobyte', 'rbd': 'rbd', 'scale_io': 'scaleIO', 'storage_class_name': 'storageClassName', 'storageos': 'storageos', 'volume_mode': 'volumeMode', 'vsphere_volume': 'vsphereVolume' } def __init__(self, access_modes=None, aws_elastic_block_store=None, azure_disk=None, azure_file=None, capacity=None, cephfs=None, cinder=None, claim_ref=None, csi=None, fc=None, flex_volume=None, flocker=None, gce_persistent_disk=None, glusterfs=None, host_path=None, iscsi=None, local=None, mount_options=None, nfs=None, node_affinity=None, persistent_volume_reclaim_policy=None, photon_persistent_disk=None, portworx_volume=None, quobyte=None, rbd=None, scale_io=None, storage_class_name=None, storageos=None, volume_mode=None, vsphere_volume=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._access_modes = None self._aws_elastic_block_store = None self._azure_disk = None self._azure_file = None self._capacity = None self._cephfs = None self._cinder = None self._claim_ref = None self._csi = None self._fc = None self._flex_volume = None self._flocker = None self._gce_persistent_disk = None self._glusterfs = None self._host_path = None self._iscsi = None 
self._local = None self._mount_options = None self._nfs = None self._node_affinity = None self._persistent_volume_reclaim_policy = None self._photon_persistent_disk = None self._portworx_volume = None self._quobyte = None self._rbd = None self._scale_io = None self._storage_class_name = None self._storageos = None self._volume_mode = None self._vsphere_volume = None self.discriminator = None if access_modes is not None: self.access_modes = access_modes if aws_elastic_block_store is not None: self.aws_elastic_block_store = aws_elastic_block_store if azure_disk is not None: self.azure_disk = azure_disk if azure_file is not None: self.azure_file = azure_file if capacity is not None: self.capacity = capacity if cephfs is not None: self.cephfs = cephfs if cinder is not None: self.cinder = cinder if claim_ref is not None: self.claim_ref = claim_ref if csi is not None: self.csi = csi if fc is not None: self.fc = fc if flex_volume is not None: self.flex_volume = flex_volume if flocker is not None: self.flocker = flocker if gce_persistent_disk is not None: self.gce_persistent_disk = gce_persistent_disk if glusterfs is not None: self.glusterfs = glusterfs if host_path is not None: self.host_path = host_path if iscsi is not None: self.iscsi = iscsi if local is not None: self.local = local if mount_options is not None: self.mount_options = mount_options if nfs is not None: self.nfs = nfs if node_affinity is not None: self.node_affinity = node_affinity if persistent_volume_reclaim_policy is not None: self.persistent_volume_reclaim_policy = persistent_volume_reclaim_policy if photon_persistent_disk is not None: self.photon_persistent_disk = photon_persistent_disk if portworx_volume is not None: self.portworx_volume = portworx_volume if quobyte is not None: self.quobyte = quobyte if rbd is not None: self.rbd = rbd if scale_io is not None: self.scale_io = scale_io if storage_class_name is not None: self.storage_class_name = storage_class_name if storageos is not None: self.storageos = storageos if volume_mode is not None: self.volume_mode = volume_mode if vsphere_volume is not None: self.vsphere_volume = vsphere_volume @property def access_modes(self): return self._access_modes @access_modes.setter def access_modes(self, access_modes): self._access_modes = access_modes @property def aws_elastic_block_store(self): return self._aws_elastic_block_store @aws_elastic_block_store.setter def aws_elastic_block_store(self, aws_elastic_block_store): self._aws_elastic_block_store = aws_elastic_block_store @property def azure_disk(self): return self._azure_disk @azure_disk.setter def azure_disk(self, azure_disk): self._azure_disk = azure_disk @property def azure_file(self): return self._azure_file @azure_file.setter def azure_file(self, azure_file): self._azure_file = azure_file @property def capacity(self): return self._capacity @capacity.setter def capacity(self, capacity): self._capacity = capacity @property def cephfs(self): return self._cephfs @cephfs.setter def cephfs(self, cephfs): self._cephfs = cephfs @property def cinder(self): return self._cinder @cinder.setter def cinder(self, cinder): self._cinder = cinder @property def claim_ref(self): return self._claim_ref @claim_ref.setter def claim_ref(self, claim_ref): self._claim_ref = claim_ref @property def csi(self): return self._csi @csi.setter def csi(self, csi): self._csi = csi @property def fc(self): return self._fc @fc.setter def fc(self, fc): self._fc = fc @property def flex_volume(self): return self._flex_volume @flex_volume.setter def flex_volume(self, 
flex_volume): self._flex_volume = flex_volume @property def flocker(self): return self._flocker @flocker.setter def flocker(self, flocker): self._flocker = flocker @property def gce_persistent_disk(self): return self._gce_persistent_disk @gce_persistent_disk.setter def gce_persistent_disk(self, gce_persistent_disk): self._gce_persistent_disk = gce_persistent_disk @property def glusterfs(self): return self._glusterfs @glusterfs.setter def glusterfs(self, glusterfs): self._glusterfs = glusterfs @property def host_path(self): return self._host_path @host_path.setter
Apache License 2.0
apcode/tensorflow_fasttext
process_input.py
WriteExamples
python
def WriteExamples(examples, outputfile, num_shards): shard = 0 num_per_shard = len(examples) // num_shards + 1 for n, example in enumerate(examples): if n % num_per_shard == 0: shard += 1 writer = tf.python_io.TFRecordWriter(outputfile + '-%d-of-%d' % (shard, num_shards)) record = inputs.BuildTextExample( example["text"], example.get("ngrams", None), example["label"]) writer.write(record.SerializeToString())
Write examples in TFRecord format. Args: examples: list of feature dicts. {'text': [words], 'label': [labels]} outputfile: full pathname of output file num_shards: number of output shards to split the examples across
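A usage sketch, assuming TensorFlow 1.x (for tf.python_io.TFRecordWriter) and the repository's inputs module are importable; the example records and output path are hypothetical.

# Hypothetical records; requires TF 1.x and the repo's `inputs` helper module.
examples = [
    {"text": ["hello", "world"], "label": "greeting"},
    {"text": ["goodbye"], "label": "farewell"},
]
WriteExamples(examples, "/tmp/corpus.tfrecord", 1)  # writes corpus.tfrecord-1-of-1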
https://github.com/apcode/tensorflow_fasttext/blob/8120b5f7f68f009002f1d5875a031e6bb9318d6a/process_input.py#L81-L97
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path import re import sys import tensorflow as tf import inputs import text_utils from collections import Counter from six.moves import zip tf.flags.DEFINE_string("facebook_input", None, "Input file in facebook train|test format") tf.flags.DEFINE_string("text_input", None, """Input text file containing one text phrase per line. Must have --labels defined Used instead of --facebook_input""") tf.flags.DEFINE_string("labels", None, """Input text file containing one label for classification per line. Must have --text_input defined. Used instead of --facebook_input""") tf.flags.DEFINE_string("ngrams", None, "list of ngram sizes to create, e.g. --ngrams=2,3,4,5") tf.flags.DEFINE_string("output_dir", ".", "Directory to store resulting vector models and checkpoints in") tf.flags.DEFINE_integer("num_shards", 1, "Number of outputfiles to create") FLAGS = tf.flags.FLAGS def ParseFacebookInput(inputfile, ngrams): examples = [] for line in open(inputfile): words = line.split() match = re.match(r'__label__(.+)', words[0]) label = match.group(1) if match else None first = 2 if words[1] == "," else 1 words = words[first:] examples.append({ "text": words, "label": label }) if ngrams: examples[-1]["ngrams"] = text_utils.GenerateNgrams(words, ngrams) return examples def ParseTextInput(textfile, labelsfie, ngrams): examples = [] with open(textfile) as f1, open(labelsfile) as f2: for text, label in zip(f1, f2): words = text_utils.TokenizeText(text) examples.append({ "text": words, "label": label, }) if ngrams: examples[-1]["ngrams"] = text_utils.GenerateNgrams(words, ngrams) return examples
MIT License
tecnalia-advancedmanufacturing-robotics/ros_pkg_gen
package_generator/src/package_generator/package_xml_parser.py
PackageXMLParser.__repr__
python
def __repr__(self): msg = "object_state: \n" msg += "package spec: {}\n".format(self.data_pack_) msg += "dependencies: {}\n".format(self.data_depend_) msg += "components: {} \n".format(len(self.data_comp_)) for num, item in enumerate(self.data_comp_): msg += "item {}\n".format(num) msg += "attributes: {}\n".format(item['attributes']) msg += "interface:\n" for elt in item['interface'].items(): msg += "\t {}\n".format(elt) return msg
Return a string describing the object state
https://github.com/tecnalia-advancedmanufacturing-robotics/ros_pkg_gen/blob/d2ed7f17abbc5e9352bea45981d895d46ef44f24/package_generator/src/package_generator/package_xml_parser.py#L96-L110
import sys import os import xml.etree.cElementTree as ET from xml.dom import minidom import rospkg from package_generator.enhanced_object import EnhancedObject from package_generator.template_spec import TemplateSpec from termcolor import colored def remove_empty_line(text): res = list() for line in text.splitlines(): if line.strip(): res.append(line) return res class PackageXMLParser(EnhancedObject): def get_template(self, filename): try: tree = ET.ElementTree(file=filename) except IOError: self.log_error("Prb while opening file {}".format(filename)) return None except ET.ParseError as error: self.log_error("Prb while parsing file: {}:".format(error)) return None root = tree.getroot() if 'template' not in root.attrib.keys(): self.log_error("Missing template tag in file {}".format(filename)) return None return root.attrib['template'] def __init__(self, name="PackageXMLParser"): super(PackageXMLParser, self).__init__(name) self.root_ = None self.spec_ = None self.data_pack_ = dict() self.data_depend_ = list() self.data_comp_ = list() self.active_comp_ = -1 self.is_dependency_complete_ = True
Apache License 2.0
openstack/oslo.privsep
oslo_privsep/capabilities.py
get_caps
python
def get_caps(): header = ffi.new('cap_user_header_t', {'version': crt._LINUX_CAPABILITY_VERSION_2, 'pid': 0}) data = ffi.new('struct __user_cap_data_struct[2]') ret = _capget(header, data) if ret != 0: errno = ffi.errno raise OSError(errno, os.strerror(errno)) return ( _mask_to_caps(data[0].effective | (data[1].effective << 32)), _mask_to_caps(data[0].permitted | (data[1].permitted << 32)), _mask_to_caps(data[0].inheritable | (data[1].inheritable << 32)), )
Return (effective, permitted, inheritable) as lists of caps
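A Linux-only usage sketch; it maps the numeric capability values back to names with the module's CAPS_BYVALUE table (values newer than the table fall back to the raw number).

# Linux only; assumes oslo.privsep is installed.
from oslo_privsep.capabilities import CAPS_BYVALUE, get_caps

effective, permitted, inheritable = get_caps()
print([CAPS_BYVALUE.get(c, c) for c in effective])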
https://github.com/openstack/oslo.privsep/blob/fa138406f76964949708932683d93967975f88db/oslo_privsep/capabilities.py#L172-L190
import enum import os import platform import sys import cffi class Capabilities(enum.IntEnum): CAP_CHOWN = 0 CAP_DAC_OVERRIDE = 1 CAP_DAC_READ_SEARCH = 2 CAP_FOWNER = 3 CAP_FSETID = 4 CAP_KILL = 5 CAP_SETGID = 6 CAP_SETUID = 7 CAP_SETPCAP = 8 CAP_LINUX_IMMUTABLE = 9 CAP_NET_BIND_SERVICE = 10 CAP_NET_BROADCAST = 11 CAP_NET_ADMIN = 12 CAP_NET_RAW = 13 CAP_IPC_LOCK = 14 CAP_IPC_OWNER = 15 CAP_SYS_MODULE = 16 CAP_SYS_RAWIO = 17 CAP_SYS_CHROOT = 18 CAP_SYS_PTRACE = 19 CAP_SYS_PACCT = 20 CAP_SYS_ADMIN = 21 CAP_SYS_BOOT = 22 CAP_SYS_NICE = 23 CAP_SYS_RESOURCE = 24 CAP_SYS_TIME = 25 CAP_SYS_TTY_CONFIG = 26 CAP_MKNOD = 27 CAP_LEASE = 28 CAP_AUDIT_WRITE = 29 CAP_AUDIT_CONTROL = 30 CAP_SETFCAP = 31 CAP_MAC_OVERRIDE = 32 CAP_MAC_ADMIN = 33 CAP_SYSLOG = 34 CAP_WAKE_ALARM = 35 CAP_BLOCK_SUSPEND = 36 CAP_AUDIT_READ = 37 CAPS_BYNAME = {} CAPS_BYVALUE = {} module = sys.modules[__name__] for c in Capabilities: CAPS_BYNAME[c.name] = c.value CAPS_BYVALUE[c.value] = c.name setattr(module, c.name, c.value) CDEF = ''' /* Edited highlights from `echo '#include <sys/capability.h>' | gcc -E -` */ #define _LINUX_CAPABILITY_VERSION_2 0x20071026 #define _LINUX_CAPABILITY_U32S_2 2 typedef unsigned int __u32; typedef struct __user_cap_header_struct { __u32 version; int pid; } *cap_user_header_t; typedef struct __user_cap_data_struct { __u32 effective; __u32 permitted; __u32 inheritable; } *cap_user_data_t; int capset(cap_user_header_t header, const cap_user_data_t data); int capget(cap_user_header_t header, cap_user_data_t data); /* Edited highlights from `echo '#include <sys/prctl.h>' | gcc -E -` */ #define PR_GET_KEEPCAPS 7 #define PR_SET_KEEPCAPS 8 int prctl (int __option, ...); ''' ffi = cffi.FFI() ffi.cdef(CDEF) if platform.system() == 'Linux': crt = ffi.dlopen(None) _prctl = crt.prctl _capget = crt.capget _capset = crt.capset else: _prctl = None _capget = None _capset = None def set_keepcaps(enable): ret = _prctl(crt.PR_SET_KEEPCAPS, ffi.cast('unsigned long', bool(enable))) if ret != 0: errno = ffi.errno raise OSError(errno, os.strerror(errno)) def drop_all_caps_except(effective, permitted, inheritable): eff = _caps_to_mask(effective) prm = _caps_to_mask(permitted) inh = _caps_to_mask(inheritable) header = ffi.new('cap_user_header_t', {'version': crt._LINUX_CAPABILITY_VERSION_2, 'pid': 0}) data = ffi.new('struct __user_cap_data_struct[2]') data[0].effective = eff & 0xffffffff data[1].effective = eff >> 32 data[0].permitted = prm & 0xffffffff data[1].permitted = prm >> 32 data[0].inheritable = inh & 0xffffffff data[1].inheritable = inh >> 32 ret = _capset(header, data) if ret != 0: errno = ffi.errno raise OSError(errno, os.strerror(errno)) def _mask_to_caps(mask): return [i for i in range(64) if (1 << i) & mask] def _caps_to_mask(caps): mask = 0 for cap in caps: mask |= 1 << cap return mask
Apache License 2.0
brython-dev/brython
www/src/Lib/difflib.py
SequenceMatcher.get_grouped_opcodes
python
def get_grouped_opcodes(self, n=3): codes = self.get_opcodes() if not codes: codes = [("equal", 0, 1, 0, 1)] if codes[0][0] == 'equal': tag, i1, i2, j1, j2 = codes[0] codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2 if codes[-1][0] == 'equal': tag, i1, i2, j1, j2 = codes[-1] codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n) nn = n + n group = [] for tag, i1, i2, j1, j2 in codes: if tag == 'equal' and i2-i1 > nn: group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n))) yield group group = [] i1, j1 = max(i1, i2-n), max(j1, j2-n) group.append((tag, i1, i2, j1 ,j2)) if group and not (len(group)==1 and group[0][0] == 'equal'): yield group
Isolate change clusters by eliminating ranges with no changes. Return a generator of groups with up to n lines of context. Each group is in the same format as returned by get_opcodes(). >>> from pprint import pprint >>> a = list(map(str, range(1,40))) >>> b = a[:] >>> b[8:8] = ['i'] # Make an insertion >>> b[20] += 'x' # Make a replacement >>> b[23:28] = [] # Make a deletion >>> b[30] += 'y' # Make another replacement >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes())) [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)], [('equal', 16, 19, 17, 20), ('replace', 19, 20, 20, 21), ('equal', 20, 22, 21, 23), ('delete', 22, 27, 23, 23), ('equal', 27, 30, 23, 26)], [('equal', 31, 34, 27, 30), ('replace', 34, 35, 30, 31), ('equal', 35, 38, 31, 34)]]
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/difflib.py#L547-L595
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher', 'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff', 'unified_diff', 'diff_bytes', 'HtmlDiff', 'Match'] from heapq import nlargest as _nlargest from collections import namedtuple as _namedtuple from types import GenericAlias Match = _namedtuple('Match', 'a b size') def _calculate_ratio(matches, length): if length: return 2.0 * matches / length return 1.0 class SequenceMatcher: def __init__(self, isjunk=None, a='', b='', autojunk=True): self.isjunk = isjunk self.a = self.b = None self.autojunk = autojunk self.set_seqs(a, b) def set_seqs(self, a, b): self.set_seq1(a) self.set_seq2(b) def set_seq1(self, a): if a is self.a: return self.a = a self.matching_blocks = self.opcodes = None def set_seq2(self, b): if b is self.b: return self.b = b self.matching_blocks = self.opcodes = None self.fullbcount = None self.__chain_b() def __chain_b(self): b = self.b self.b2j = b2j = {} for i, elt in enumerate(b): indices = b2j.setdefault(elt, []) indices.append(i) self.bjunk = junk = set() isjunk = self.isjunk if isjunk: for elt in b2j.keys(): if isjunk(elt): junk.add(elt) for elt in junk: del b2j[elt] self.bpopular = popular = set() n = len(b) if self.autojunk and n >= 200: ntest = n // 100 + 1 for elt, idxs in b2j.items(): if len(idxs) > ntest: popular.add(elt) for elt in popular: del b2j[elt] def find_longest_match(self, alo=0, ahi=None, blo=0, bhi=None): a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__ if ahi is None: ahi = len(a) if bhi is None: bhi = len(b) besti, bestj, bestsize = alo, blo, 0 j2len = {} nothing = [] for i in range(alo, ahi): j2lenget = j2len.get newj2len = {} for j in b2j.get(a[i], nothing): if j < blo: continue if j >= bhi: break k = newj2len[j] = j2lenget(j-1, 0) + 1 if k > bestsize: besti, bestj, bestsize = i-k+1, j-k+1, k j2len = newj2len while besti > alo and bestj > blo and not isbjunk(b[bestj-1]) and a[besti-1] == b[bestj-1]: besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 while besti+bestsize < ahi and bestj+bestsize < bhi and not isbjunk(b[bestj+bestsize]) and a[besti+bestsize] == b[bestj+bestsize]: bestsize += 1 while besti > alo and bestj > blo and isbjunk(b[bestj-1]) and a[besti-1] == b[bestj-1]: besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 while besti+bestsize < ahi and bestj+bestsize < bhi and isbjunk(b[bestj+bestsize]) and a[besti+bestsize] == b[bestj+bestsize]: bestsize = bestsize + 1 return Match(besti, bestj, bestsize) def get_matching_blocks(self): if self.matching_blocks is not None: return self.matching_blocks la, lb = len(self.a), len(self.b) queue = [(0, la, 0, lb)] matching_blocks = [] while queue: alo, ahi, blo, bhi = queue.pop() i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi) if k: matching_blocks.append(x) if alo < i and blo < j: queue.append((alo, i, blo, j)) if i+k < ahi and j+k < bhi: queue.append((i+k, ahi, j+k, bhi)) matching_blocks.sort() i1 = j1 = k1 = 0 non_adjacent = [] for i2, j2, k2 in matching_blocks: if i1 + k1 == i2 and j1 + k1 == j2: k1 += k2 else: if k1: non_adjacent.append((i1, j1, k1)) i1, j1, k1 = i2, j2, k2 if k1: non_adjacent.append((i1, j1, k1)) non_adjacent.append( (la, lb, 0) ) self.matching_blocks = list(map(Match._make, non_adjacent)) return self.matching_blocks def get_opcodes(self): if self.opcodes is not None: return self.opcodes i = j = 0 self.opcodes = answer = [] for ai, bj, size in self.get_matching_blocks(): tag = '' if i < ai and j < bj: tag = 'replace' elif i < ai: tag = 'delete' elif j < bj: tag 
= 'insert' if tag: answer.append( (tag, i, ai, j, bj) ) i, j = ai+size, bj+size if size: answer.append( ('equal', ai, i, bj, j) ) return answer
BSD 3-Clause New or Revised License
lace/polliwog
polliwog/box/_box_object.py
Box.mid_z
python
def mid_z(self): return self.origin[2] + self.size[2] / 2
The `z` coordinate of the box's center.
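A small sketch of the arithmetic: mid_z is the origin's z plus half the z extent. It assumes Box is importable from the polliwog package (import path assumed) and that numpy and vg are installed.

import numpy as np
from polliwog import Box  # assumed public import path

box = Box(origin=np.array([0.0, 0.0, 1.0]), size=np.array([2.0, 2.0, 4.0]))
print(box.mid_z)  # 1.0 + 4.0 / 2 == 3.0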
https://github.com/lace/polliwog/blob/dfe3b57f57331899a8bd6e4da4b577474bc375ef/polliwog/box/_box_object.py#L111-L115
import numpy as np from vg.compat import v2 as vg from ..plane._plane_object import Plane class Box(object): def __init__(self, origin, size): vg.shape.check(locals(), "origin", (3,)) vg.shape.check(locals(), "size", (3,)) if any(np.less(size, 0)): raise ValueError("Shape should be zero or positive") self.origin = origin self.size = size @classmethod def from_points(cls, points): k = vg.shape.check(locals(), "points", (-1, 3)) if k == 0: raise ValueError("Need at least 1 point") return cls(np.min(points, axis=0), np.ptp(points, axis=0)) @property def ranges(self): ranges = np.array([self.origin, self.origin + self.size]).T return np.vstack([ranges.min(axis=1), ranges.max(axis=1)]).T @property def min_x(self): return self.origin[0] @property def min_y(self): return self.origin[1] @property def min_z(self): return self.origin[2] @property def max_x(self): return self.origin[0] + self.size[0] @property def max_y(self): return self.origin[1] + self.size[1] @property def max_z(self): return self.origin[2] + self.size[2] @property def mid_x(self): return self.origin[0] + self.size[0] / 2 @property def mid_y(self): return self.origin[1] + self.size[1] / 2 @property
BSD 2-Clause Simplified License
windelbouwman/ppci
ppci/lang/python/python2ir.py
PythonToIrCompiler.emit
python
def emit(self, instruction): self.builder.emit(instruction) return instruction
Emit an instruction
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/lang/python/python2ir.py#L602-L605
import logging import ast import contextlib import inspect from ... import ir, irutils from ...common import SourceLocation, CompilerError from ...binutils import debuginfo def python_to_ir(f, imports=None): mod = PythonToIrCompiler().compile(f, imports=imports) return mod class Var: def __init__(self, value, lvalue, ty): self.value = value self.lvalue = lvalue self.ty = ty class PythonToIrCompiler: logger = logging.getLogger("p2p") def __init__(self): self.type_mapping = {"int": ir.i64, "float": ir.f64, "str": ir.ptr} def compile(self, f, imports=None): self.debug_db = debuginfo.DebugDb() src = f.read() self._filename = getattr(f, "name", None) x = ast.parse(src) self.function_map = {} self.builder = irutils.Builder() self.builder.prepare() self.builder.set_module(ir.Module("foo", debug_db=self.debug_db)) if imports: for name, signature in imports.items(): self.gen_import(name, signature) for df in x.body: self.logger.debug("Processing %s", df) if isinstance(df, ast.FunctionDef): self.gen_function(df) else: self.not_impl(df) mod = self.builder.module irutils.verify_module(mod) return mod def gen_import(self, name, signature): if isinstance(signature, tuple): return_type, arg_types = signature else: signature = inspect.signature(signature) if signature.return_annotation is inspect.Signature.empty: return_type = None else: return_type = signature.return_annotation arg_types = [p.annotation for p in signature.parameters.values()] ir_arg_types = [self.get_ty(t) for t in arg_types] if return_type: ir_function = ir.ExternalFunction( name, ir_arg_types, self.get_ty(return_type) ) else: ir_function = ir.ExternalProcedure(name, ir_arg_types) self.builder.module.add_external(ir_function) self.function_map[name] = ir_function, return_type, arg_types def gen_function(self, df): self.local_map = {} function_name = df.name binding = ir.Binding.GLOBAL dbg_int = debuginfo.DebugBaseType("int", 8, 1) return_type = self.get_ty(df.returns) if return_type: ir_function = self.builder.new_function( function_name, binding, return_type ) else: ir_function = self.builder.new_procedure(function_name, binding) dbg_args = [] arg_types = [] for arg in df.args.args: if not arg.annotation: self.error(arg, "Need type annotation for {}".format(arg.arg)) aty = self.get_ty(arg.annotation) arg_types.append(aty) arg_name = arg.arg param = ir.Parameter(arg_name, aty) dbg_args.append(debuginfo.DebugParameter(arg_name, dbg_int)) ir_function.add_parameter(param) self.function_map[function_name] = ir_function, return_type, arg_types self.logger.debug("Created function %s", ir_function) self.builder.block_number = 0 self.builder.set_function(ir_function) dfi = debuginfo.DebugFunction( ir_function.name, SourceLocation("foo.py", 1, 1, 1), dbg_int, dbg_args, ) self.debug_db.enter(ir_function, dfi) first_block = self.builder.new_block() self.builder.set_block(first_block) ir_function.entry = first_block for parameter in ir_function.arguments: para_var = self.get_variable(df, parameter.name, ty=parameter.ty) self.emit(ir.Store(parameter, para_var.value)) self.block_stack = [] self.gen_statement(df.body) assert not self.block_stack if not self.builder.block.is_closed: if return_type: if self.builder.block.is_empty: pass else: raise NotImplementedError() else: self.emit(ir.Exit()) ir_function.delete_unreachable() def gen_statement(self, statement): if isinstance(statement, list): for inner_statement in statement: self.gen_statement(inner_statement) else: with self.use_location(statement): if isinstance(statement, ast.Pass): pass elif 
isinstance(statement, ast.Return): self.gen_return(statement) elif isinstance(statement, ast.If): self.gen_if(statement) elif isinstance(statement, ast.While): self.gen_while(statement) elif isinstance(statement, ast.Break): self.gen_break(statement) elif isinstance(statement, ast.Continue): self.gen_continue(statement) elif isinstance(statement, ast.For): self.gen_for(statement) elif isinstance(statement, ast.Assign): self.gen_assign(statement) elif isinstance(statement, ast.Expr): self.gen_expr(statement.value) elif isinstance(statement, ast.AugAssign): self.gen_aug_assign(statement) else: self.not_impl(statement) def gen_break(self, statement): break_block = self.block_stack[-1][1] self.builder.emit_jump(break_block) unreachable_block = self.builder.new_block() self.builder.set_block(unreachable_block) def gen_continue(self, statement): continue_block = self.block_stack[-1][0] self.builder.emit_jump(continue_block) unreachable_block = self.builder.new_block() self.builder.set_block(unreachable_block) def gen_return(self, statement): if self.builder.function.is_procedure: if statement.value: self.error( statement, "Cannot return a value from a function without return type.", ) self.builder.emit_exit() else: if not statement.value: self.error( statement, "Must return a value from this function." ) value = self.gen_expr(statement.value) self.builder.emit_return(value) void_block = self.builder.new_block() self.builder.set_block(void_block) def gen_if(self, statement): ja_block = self.builder.new_block() else_block = self.builder.new_block() continue_block = self.builder.new_block() self.gen_cond(statement.test, ja_block, else_block) self.builder.set_block(ja_block) self.gen_statement(statement.body) self.builder.emit_jump(continue_block) self.builder.set_block(else_block) self.gen_statement(statement.orelse) self.builder.emit_jump(continue_block) self.builder.set_block(continue_block) def gen_while(self, statement): if statement.orelse: self.error(statement, "while-else not supported") test_block = self.builder.new_block() body_block = self.builder.new_block() final_block = self.builder.new_block() self.builder.emit_jump(test_block) self.builder.set_block(test_block) self.gen_cond(statement.test, body_block, final_block) self.enter_loop(test_block, final_block) self.builder.set_block(body_block) self.gen_statement(statement.body) self.builder.emit_jump(test_block) self.leave_loop() self.builder.set_block(final_block) def gen_for(self, statement): if statement.orelse: self.error(statement, "for-else not supported") if not isinstance(statement.iter, ast.Call): self.error(statement.iter, "Only range supported in for loops") if statement.iter.func.id != "range": self.error(statement.iter, "Only range supported in for loops") ra = statement.iter.args if len(ra) == 1: i_init = self.builder.emit_const(0, ir.i64) n2 = self.gen_expr(ra[0]) elif len(ra) == 2: i_init = self.gen_expr(ra[0]) n2 = self.gen_expr(ra[1]) else: self.error( statement.iter, "Does not support {} arguments".format(len(ra)), ) entry_block = self.builder.block test_block = self.builder.new_block() body_block = self.builder.new_block() final_block = self.builder.new_block() self.emit(ir.Jump(test_block)) self.builder.set_block(test_block) i_phi = self.emit(ir.Phi("i_phi", ir.i64)) i_phi.set_incoming(entry_block, i_init) self.emit(ir.CJump(i_phi, "<", n2, body_block, final_block)) self.local_map[statement.target.id] = Var(i_phi, False, ir.i64) self.enter_loop(test_block, final_block) self.builder.set_block(body_block) 
self.gen_statement(statement.body) self.leave_loop() one = self.builder.emit_const(1, ir.i64) i_inc = self.builder.emit_add(i_phi, one, ir.i64) i_phi.set_incoming(body_block, i_inc) self.builder.emit_jump(test_block) self.builder.set_block(final_block) def gen_assign(self, statement): if len(statement.targets) == 1: target = statement.targets[0] else: self.error( statement, "Only a single assignment target is supported." ) if isinstance(target, ast.Name): value = self.gen_expr(statement.value) self.store_value(target, value) elif isinstance(target, ast.Tuple): assert isinstance(statement.value, ast.Tuple) values = statement.value.elts targets = target.elts assert len(statement.value.elts) == len(targets) values = [self.gen_expr(v) for v in values] for target, value in zip(targets, values): self.store_value(target, value) else: self.not_impl(statement) def gen_aug_assign(self, statement): target = statement.target if isinstance(target, ast.Name): name = target.id assert isinstance(name, str) var = self.get_variable(target, name) assert var.lvalue lhs = self.builder.emit_load(var.value, var.ty) rhs = self.gen_expr(statement.value) op = self.binop_map[type(statement.op)] value = self.emit(ir.Binop(lhs, op, rhs, "augassign", var.ty)) self.emit(ir.Store(value, var.value)) else: self.not_impl(statement) def store_value(self, target, value): assert isinstance(target, ast.Name) name = target.id var = self.get_variable(target, name, ty=value.ty) assert var.lvalue self.emit(ir.Store(value, var.value)) def gen_cond(self, condition, yes_block, no_block): if isinstance(condition, ast.Compare): self.gen_compare(condition, yes_block, no_block) elif isinstance(condition, ast.BoolOp): self.gen_bool_op(condition, yes_block, no_block) else: self.not_impl(condition) def gen_compare(self, condition, yes_block, no_block): assert len(condition.ops) == len(condition.comparators) assert len(condition.ops) == 1 op_map = { ast.Gt: ">", ast.GtE: ">=", ast.Lt: "<", ast.LtE: "<=", ast.Eq: "==", ast.NotEq: "!=", } a = self.gen_expr(condition.left) op = op_map[type(condition.ops[0])] b = self.gen_expr(condition.comparators[0]) if a.ty is not b.ty: self.error(condition, "Type mismatch, types must be the same.") self.emit(ir.CJump(a, op, b, yes_block, no_block)) def gen_bool_op(self, condition, yes_block, no_block): assert len(condition.values) >= 1 first_values = condition.values[:-1] last_value = condition.values[-1] if isinstance(condition.op, ast.And): for value in first_values: all_true_block = self.builder.new_block() self.gen_cond(value, all_true_block, no_block) self.builder.set_block(all_true_block) self.gen_cond(last_value, yes_block, no_block) elif isinstance(condition.op, ast.Or): for value in first_values: all_false_block = self.builder.new_block() self.gen_cond(value, yes_block, all_false_block) self.builder.set_block(all_false_block) self.gen_cond(last_value, yes_block, no_block) else: self.not_impl(condition) def gen_expr(self, expr): with self.use_location(expr): if isinstance(expr, ast.BinOp): value = self.gen_binop(expr) elif isinstance(expr, ast.Name): value = self.gen_name(expr) elif isinstance(expr, ast.Call): value = self.gen_call(expr) elif hasattr(ast, "Constant") and isinstance(expr, ast.Constant): value = expr.value if isinstance(value, str): value = self.gen_string_constant(expr, value) elif isinstance(value, (int, float)): value = self.gen_num(expr, value) else: self.not_impl(condition) elif isinstance(expr, ast.Num): value = self.gen_num(expr, expr.n) elif isinstance(expr, ast.Str): value = 
self.gen_string_constant(expr, expr.s) else: self.not_impl(expr) return value def gen_name(self, expr): var = self.local_map[expr.id] if var.lvalue: value = self.builder.emit_load(var.value, var.ty) else: value = var.value return value binop_map = { ast.Add: "+", ast.Sub: "-", ast.Mult: "*", ast.Div: "/", ast.FloorDiv: "/", } def gen_binop(self, expr): a = self.gen_expr(expr.left) b = self.gen_expr(expr.right) if a.ty is not b.ty: self.error(expr, "Type mismatch, types must be the same.") ty = a.ty op_typ = type(expr.op) if op_typ in self.binop_map: op = self.binop_map[op_typ] else: self.not_impl(expr) value = self.builder.emit_binop(a, op, b, ty) return value def gen_call(self, expr): assert isinstance(expr.func, ast.Name) name = expr.func.id ir_function, return_type, arg_types = self.function_map[name] self.logger.warning("Function arguments not type checked!") args = [self.gen_expr(a) for a in expr.args] if return_type: value = self.emit( ir.FunctionCall(ir_function, args, "res", return_type) ) else: self.emit(ir.ProcedureCall(ir_function, args)) value = None return value def gen_num(self, expr, num): if isinstance(num, int): value = self.builder.emit_const(num, ir.i64) elif isinstance(num, float): value = self.builder.emit_const(num, ir.f64) else: self.not_impl(expr) return value def gen_string_constant(self, expr, value): data = value.encode("utf8") + bytes([0]) string_constant = self.emit(ir.LiteralData(data, "string_constant")) value = self.emit(ir.AddressOf(string_constant, "string_constant_ptr")) return value def get_variable(self, node, name, ty=None): if name in self.local_map: var = self.local_map[name] else: if ty is None: self.error(node, "Undefined variable") else: mem = self.emit(ir.Alloc("alloc_{}".format(name), 8, 8)) addr = self.emit(ir.AddressOf(mem, "addr_{}".format(name))) var = Var(addr, True, ty) self.local_map[name] = var return var def common_type(self, ty1, ty2): type_ranks = { float: 10, int: 5, } pass def coerce(self, value, ty): return value def not_impl(self, node): print(dir(node)) self.error(node, "Cannot do {}".format(node)) def node_location(self, node): location = SourceLocation( self._filename, node.lineno, node.col_offset + 1, 1 ) return location def error(self, node, message): location = self.node_location(node) raise CompilerError(message, location)
BSD 2-Clause Simplified License
numba/numba
numba/core/compiler_machinery.py
CompilerPass.pass_id
python
def pass_id(self, val): self._pass_id = val
Sets the ID of the pass
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/core/compiler_machinery.py#L52-L56
import timeit from abc import abstractmethod, ABCMeta from collections import namedtuple, OrderedDict import inspect from numba.core.compiler_lock import global_compiler_lock from numba.core import errors, config, transforms, utils from numba.core.tracing import event from numba.core.postproc import PostProcessor from numba.core.ir_utils import enforce_no_dels, legalize_single_scope _termcolor = errors.termcolor() class SimpleTimer(object): def __enter__(self): self.ts = timeit.default_timer() return self def __exit__(self, *exc): self.elapsed = timeit.default_timer() - self.ts class CompilerPass(metaclass=ABCMeta): @abstractmethod def __init__(self, *args, **kwargs): self._analysis = None self._pass_id = None @classmethod def name(cls): return cls._name @property def pass_id(self): return self._pass_id @pass_id.setter
BSD 2-Clause Simplified License
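A minimal usage sketch for the `pass_id` setter above, written only against the `CompilerPass` excerpt shown in this record (the abstract `__init__`, the `name()` classmethod reading `cls._name`, and the `pass_id` property/setter); `MyPass` is a hypothetical subclass for illustration, not part of numba, and the real class may declare further abstract methods not visible here.

```python
# Hypothetical subclass, relying only on the CompilerPass excerpt above.
class MyPass(CompilerPass):
    _name = "my_pass"            # read back by CompilerPass.name()

    def __init__(self):          # concrete override of the abstract __init__
        super().__init__()       # initialises _analysis and _pass_id to None

p = MyPass()
p.pass_id = 7                    # goes through the @pass_id.setter shown in this record
print(MyPass.name(), p.pass_id)  # -> my_pass 7
```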
hfaran/tornado-json
tornado_json/utils.py
is_handler_subclass
python
def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")): if isinstance(cls, list): return any(is_handler_subclass(c) for c in cls) elif isinstance(cls, type): return any(c.__name__ in classnames for c in inspect.getmro(cls)) else: raise TypeError( "Unexpected type `{}` for class `{}`".format( type(cls), cls ) )
Determines if ``cls`` is indeed a subclass of ``classnames``
https://github.com/hfaran/tornado-json/blob/fcc551e4b78cac9245e36376329d84afda296284/tornado_json/utils.py#L60-L72
import inspect import types from functools import wraps from collections.abc import Mapping def deep_update(source, overrides): for key, value in overrides.items(): if isinstance(value, Mapping) and value: returned = deep_update(source.get(key, {}), value) source[key] = returned else: source[key] = overrides[key] return source def container(dec): @wraps(dec) def meta_decorator(f): decorator = dec(f) decorator.orig_func = f return decorator return meta_decorator def extract_method(wrapped_method): return wrapped_method.orig_func if hasattr(wrapped_method, "orig_func") else wrapped_method def is_method(method): method = extract_method(method) return type(method) in [types.MethodType, types.FunctionType]
MIT License
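A small usage sketch for `is_handler_subclass` above; `MyHandler` and the local `APIHandler` are hypothetical stand-ins (the function only compares class names along the MRO, so a real tornado-json handler class is not needed for the illustration).

```python
from tornado_json.utils import is_handler_subclass  # module path per the record

class APIHandler:            # stand-in: only the class *name* matters to the check
    pass

class MyHandler(APIHandler):
    pass

print(is_handler_subclass(MyHandler))            # True  - "APIHandler" appears in the MRO names
print(is_handler_subclass([MyHandler, object]))  # True  - any() over a list of classes
print(is_handler_subclass(object))               # False
# is_handler_subclass("oops") raises TypeError for a non-class, non-list input
```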
morriswmz/doatools.py
doatools/estimation/music.py
f_music
python
def f_music(A, En): v = En.T.conj() @ A return np.reciprocal(np.sum(v * v.conj(), axis=0).real)
r"""Computes the classical MUSIC spectrum This is a vectorized implementation of the spectrum function: .. math:: P_{\mathrm{MUSIC}}(\theta) = \frac{1}{\mathbf{a}^H(\theta) \mathbf{E}_\mathrm{n} \mathbf{E}_\mathrm{n}^H \mathbf{a}(\theta)} Args: A: m x k steering matrix of candidate direction-of-arrivals, where m is the number of sensors and k is the number of candidate direction-of-arrivals. En: m x d matrix of noise eigenvectors, where d is the dimension of the noise subspace.
https://github.com/morriswmz/doatools.py/blob/9469db201e0418aef6b97583ef54b6fec2769502/doatools/estimation/music.py#L9-L27
import numpy as np from math import ceil from scipy.signal import find_peaks import warnings from ..model.sources import FarField1DSourcePlacement from .core import SpectrumBasedEstimatorBase, get_noise_subspace, ensure_covariance_size, ensure_n_resolvable_sources
MIT License
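A worked numpy sketch of the spectrum formula in the docstring above, checking `f_music` against a direct per-angle evaluation of 1 / ||E_n^H a(theta)||^2; the sizes (m = 4 sensors, k = 3 candidate angles, a 2-dimensional noise subspace) and the random matrices are arbitrary illustration values.

```python
import numpy as np
from doatools.estimation.music import f_music  # module path per the record

rng = np.random.default_rng(0)
m, k, d = 4, 3, 2                                  # sensors, candidate angles, noise-subspace dim
A = rng.standard_normal((m, k)) + 1j * rng.standard_normal((m, k))    # steering matrix
En = rng.standard_normal((m, d)) + 1j * rng.standard_normal((m, d))   # noise eigenvectors

P = f_music(A, En)                                 # vectorized spectrum, shape (k,)
P_ref = np.array([1.0 / np.linalg.norm(En.conj().T @ A[:, i]) ** 2 for i in range(k)])
assert np.allclose(P, P_ref)
```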
consensys/mythril
mythril/ethereum/interface/leveldb/state.py
Account.is_blank
python
def is_blank(self): return self.nonce == 0 and self.balance == 0 and self.code_hash == BLANK_HASH
Checks if the account is blank. :return:
https://github.com/consensys/mythril/blob/df1d4dd0ebbb623054f4708717664dc6e27f76b9/mythril/ethereum/interface/leveldb/state.py#L113-L118
import rlp import binascii from ethereum.utils import ( normalize_address, hash32, trie_root, big_endian_int, address, int256, encode_hex, encode_int, big_endian_to_int, int_to_addr, zpad, parse_as_bin, parse_as_int, decode_hex, sha3, is_string, is_numeric, ) from rlp.sedes import big_endian_int, Binary, binary, CountableList from ethereum import utils from ethereum import trie from ethereum.trie import Trie from ethereum.securetrie import SecureTrie BLANK_HASH = utils.sha3(b"") BLANK_ROOT = utils.sha3rlp(b"") STATE_DEFAULTS = { "txindex": 0, "gas_used": 0, "gas_limit": 3141592, "block_number": 0, "block_coinbase": "\x00" * 20, "block_difficulty": 1, "timestamp": 0, "logs": [], "receipts": [], "bloom": 0, "suicides": [], "recent_uncles": {}, "prev_headers": [], "refunds": 0, } class Account(rlp.Serializable): fields = [ ("nonce", big_endian_int), ("balance", big_endian_int), ("storage", trie_root), ("code_hash", hash32), ] def __init__(self, nonce, balance, storage, code_hash, db, addr): self.db = db self.address = addr super(Account, self).__init__(nonce, balance, storage, code_hash) self.storage_cache = {} self.storage_trie = SecureTrie(Trie(self.db)) self.storage_trie.root_hash = self.storage self.touched = False self.existent_at_start = True self._mutable = True self.deleted = False @property def code(self): return self.db.get(self.code_hash) def get_storage_data(self, key): if key not in self.storage_cache: v = self.storage_trie.get(utils.encode_int32(key)) self.storage_cache[key] = utils.big_endian_to_int( rlp.decode(v) if v else b"" ) return self.storage_cache[key] @classmethod def blank_account(cls, db, addr, initial_nonce=0): db.put(BLANK_HASH, b"") o = cls(initial_nonce, 0, trie.BLANK_ROOT, BLANK_HASH, db, addr) o.existent_at_start = False return o
MIT License
mecha-karen/cake
cake/parsing/expression.py
Expression.mapping
python
def mapping(self): return cake.copy(self.__mappings)
Returns a copy of the variable mappings for unknowns
https://github.com/mecha-karen/cake/blob/f7bd11a137616c477afbf4d0121edb60b6aeea9a/cake/parsing/expression.py#L809-L811
import re import typing import string from cake import abc, errors import cake from ..core.markers import Operator, Symbol, PlusOrMinus, FunctionMarker from ..core.types.complex import Complex from ..core.types.irrational import Irrational from ..core.unknown import Unknown from .equation import Equation from ._ast import * from cake.helpers import convert_type from tokenize import ( tokenize, ENDMARKER, NEWLINE, ENCODING, OP, NAME, NUMBER, ERRORTOKEN, ) from io import BytesIO ASCII_CHARS = list(string.ascii_lowercase) BLACKLISTED = list(abc.KEYWORDS.keys()) + list(abc.CONSTANTS.keys()) VALID_SYMBOLS = {"!", "(", ")"} IGNORE = (ENDMARKER, NEWLINE, ENCODING) FIND_UNKNOWNS = re.compile("[a-zA-Z]+", re.IGNORECASE) INVALID_OPS = re.compile("[a-zA-Z]+[0-9]+", re.IGNORECASE) subExecGlobals = {'math': __import__('math'), 'cake': __import__('cake')} class Expression(object): def __new__( cls, expression: typing.Union[str, list], *default_args, **default_kwargs ): multiple = expression.split("\n") if type(expression) == str else expression if multiple == Ellipsis: multiple = "" if len(multiple) > 1: eqs = list() for eq in multiple: eqs.append(Expression(eq, *default_args, **default_kwargs)) return eqs return super(Expression, cls).__new__( Expression, *default_args, **default_kwargs ) def __init__( self, expression: typing.Union[str, typing.BinaryIO], *default_args, **default_kwargs, ) -> None: default_args = list(default_args) if hasattr(expression, "seek"): self.__expression = expression.read().decode( encoding="ASCII", errors="ignore" ) else: if expression == Ellipsis: expression = "" self.__expression = expression.lower() self.args = default_args self.kwargs = default_kwargs self.__mappings = self._sort_values(*default_args, **default_kwargs) def _sort_values(self, *args, **kwargs) -> dict: unknowns = FIND_UNKNOWNS.findall(self.__expression) for value in unknowns.copy(): if value in BLACKLISTED: unknowns.remove(value) as_dict = {i: None for i in unknowns} keys = list(as_dict.keys()) if not keys: return {} current_key = keys[0] current_index = 0 for arg in args: if (current_index + 1) > len(keys): break as_dict[current_key] = arg current_index += 1 for key, value in kwargs.items(): if key in as_dict: as_dict[key] = value as_dict = {k: v for k, v in as_dict.items() if v != None} return as_dict def _sub( self, update_mappings: bool = False, return_tokens: bool = False, *args, **kwargs, ): if update_mappings: self.update_variables(*args, **kwargs) unknown_mapping = self.__mappings else: unknown_mapping = self.update_variables(False, *args, **kwargs) invalid = INVALID_OPS.findall(self.expression) if invalid: found_first = invalid[0] index = 0 while True: if index == len(found_first): break if found_first[index].isdigit(): break index += 1 possible_correct = found_first[:index] possible_correct += " " possible_correct += found_first[index:] raise errors.SubstitutionError( f'String `{found_first}`, followed by integer. 
Perhaps you ment "{possible_correct}"' ) as_file = BytesIO(self.expression.encode(encoding="ASCII", errors="ignore")) as_file.seek(0) tokens = list(tokenize(as_file.readline)) if not tokens: return [] if tokens[0].type == ENCODING: tokens.pop(0) presence = list() OPEN_BRACKETS = 0 ACTUAL_INDEX = 0 TOKEN_INDEX = 0 SKIP = 0 while True: if TOKEN_INDEX > (len(tokens) - 1): break if SKIP: SKIP -= 1 TOKEN_INDEX += 1 continue token = tokens[TOKEN_INDEX] string = (token.string).lower() type_ = token.type if type_ in IGNORE: pass elif type_ == OP: if string == "(": POS_TOKENS = tokens[TOKEN_INDEX:] is_plus = POS_TOKENS[0:5] to_mapping = "".join([i.string for i in is_plus]) if to_mapping in ["(+|-)", "(-|+)"]: SKIP += 4 presence.append(PlusOrMinus()) else: try: comp = Complex(raw=to_mapping) presence.append(comp) SKIP += 4 except ValueError: presence.append(Symbol("(")) OPEN_BRACKETS += 1 elif string == ")": if OPEN_BRACKETS < 1: INCORRECT_BRACK_INDEX = token.start[1] raise errors.SubstitutionError( f"Unexpected `)` at index {INCORRECT_BRACK_INDEX}" ) presence.append(Symbol(")")) OPEN_BRACKETS -= 1 else: string_ = abc.MAP_OPERATORS.get(string, string) try: op = Operator(string_) presence.append(op) except ValueError as e: raise errors.SubstitutionError( f"Unknown Operator: {string}" ) from e elif type_ in [NAME, ERRORTOKEN]: TK_INDEX = (TOKEN_INDEX + 1) TOKENS = list() while True: if TK_INDEX > len(tokens): break tk = tokens[TK_INDEX] if tk.type == NAME: TOKENS.append(tk) SKIP += 1 else: break TK_INDEX += 1 if TOKENS: string = ' '.join(map(lambda _: _.string, TOKENS)) constant = abc.CONSTANTS.get(string) function = abc.KEYWORDS.get(string) symbol_func = abc.SYMBOL_KW.get(string) map_op = abc.MAP_OPERATORS.get(string) if len([i for i in (constant, function, symbol_func, map_op) if i is not None]) > 1: raise errors.SubstitutionError( f"{string} is defined as multiple keywords" ) elif constant: presence.append(Irrational(constant)) elif function: POS_TOKENS = tokens[(TOKEN_INDEX + 1) :] if not POS_TOKENS: raise errors.SubstitutionError( f"{string} Called with no parameters" ) if POS_TOKENS[0].string == "(": WRAPPED_IN_BRACKETS = True else: WRAPPED_IN_BRACKETS = False if not WRAPPED_IN_BRACKETS: _, COL = POS_TOKENS[0].start EQ = self.expression[COL:] EVALUATE = EQ.split(" ")[0] TREE, TOKENS = Expression(EVALUATE)._sub( return_tokens=True, **self._sort_values(*args, **kwargs) ) else: FUNQ_EQ = "" BRACKS = 0 for POSFIX in POS_TOKENS: if POSFIX.string == "(": BRACKS += 1 FUNQ_EQ += " ( " elif POSFIX.string == ")": if BRACKS < 1: OPEN_BRACKETS -= 1 presence.append(Symbol(")")) break BRACKS -= 1 FUNQ_EQ += " ) " else: FUNQ_EQ += f" {POSFIX.string} " if BRACKS > 1: raise errors.SubstitutionError( f"{BRACKS} Unclosed brackets whilst evaluating {function.__qualname__}" ) TREE, TOKENS = Expression(FUNQ_EQ)._sub( return_tokens=True, **self._sort_values(*args, **kwargs) ) if not TREE: raise errors.SubstitutionError( f"{string} Called with no parameters" ) func = FunctionMarker(function, TREE) SKIP += len(TOKENS) presence.append(func) elif symbol_func: LAST_POSFIX = presence[-1] func_name = symbol_func.__qualname__.title() if isinstance(LAST_POSFIX, Operator): raise errors.SubstitutionError( f"{func_name} called on an operator ({LAST_POSFIX.value}), at index {token.start[1]}." 
) if isinstance(LAST_POSFIX, Symbol): if LAST_POSFIX.value != ")": raise errors.SubstitutionError( f"{func_name} called on an open bracket, at index {token.start[1]}" ) OPEN_BRACKS = 0 POS_INDEX = 0 POS_TOKENS = tokens[:TOKEN_INDEX][::-1] for POS_TOKEN in POS_TOKENS: string = POS_TOKEN.string if string == ")": OPEN_BRACKS += 1 elif string == "(": OPEN_BRACKS -= 1 if OPEN_BRACKS < 1: break POS_INDEX += 1 if OPEN_BRACKS: raise errors.SubstitutionError( f'{OPEN_BRACKS} Unclosed brackets whilst evalutating "{symbol_func.__qualname__}"' ) POS_TOKENS = POS_TOKENS[::-1] PS_IND = (len(POS_TOKENS) - 1) - POS_INDEX as_eq = [i.string for i in POS_TOKENS[PS_IND:]] del presence[ ((TOKEN_INDEX - POS_INDEX) - 1) : (TOKEN_INDEX + 1) ] TREE = Expression(" ".join(as_eq))._sub( **self._sort_values(*args, **kwargs) ) func = FunctionMarker(symbol_func, TREE) presence.append(func) else: new_pre = [Symbol("("), LAST_POSFIX, Symbol(")")] func = FunctionMarker(symbol_func, new_pre) presence[-1] = func elif map_op: presence.append(Operator(map_op)) else: if string in unknown_mapping: presence.append(convert_type(unknown_mapping[string])) if not (string in ASCII_CHARS): cd = '(' for st in string: if st in unknown_mapping: cd += str(unknown_mapping[st]) else: if st not in ASCII_CHARS: raise errors.SubstitutionError( f"Unknown Token ({string}) at index {token.start[1]}" ) cd += st cd += ' * ' cd = cd[:-3] + ')' presence.extend(Expression(cd, *self.args, **self.kwargs)._sub( update_mappings, *args, **kwargs)) else: presence.append(Unknown(string)) elif type_ == NUMBER: POS_TOKENS = tokens[TOKEN_INDEX:] CURRENT_NUMBER = convert_type(string) if not POS_TOKENS: presence.append(CURRENT_NUMBER) else: NEXT = POS_TOKENS[1] if NEXT.type == NAME: constant = abc.CONSTANTS.get(NEXT.string) function = abc.KEYWORDS.get(NEXT.string) value = unknown_mapping.get(NEXT.string) unk = Unknown(NEXT.string) if value: value = convert_type(value) else: value = unk if constant: SKIP += 1 presence.append(Irrational(constant)) elif not function: SKIP += 1 presence.extend( [ Symbol("("), CURRENT_NUMBER, Operator("*"), value, Symbol(")"), ] ) else: possible_correct = f"{string} * {NEXT.string}" raise errors.SubstitutionError( f'Invalid use of function "{function.__qualname__}" at index {NEXT.start[1]}. 
Perhaps you ment "{possible_correct}"' ) else: presence.append(CURRENT_NUMBER) else: if string.strip(): raise errors.SubstitutionError( f"Unknown Token ({string}) at index {token.start[1]}" ) ACTUAL_INDEX += len(string) TOKEN_INDEX += 1 if OPEN_BRACKETS > 1: raise errors.SubstitutionError(f"{OPEN_BRACKETS} Unclosed brackets") if return_tokens: return presence, tokens return presence def _glSubCode(self, update_mapping: bool = False, *args, **kwargs): if "dirty" in kwargs: dirty = True vars = kwargs.pop('vars') else: vars = list() dirty = False presence = kwargs.pop('dirty', self._sub(update_mapping, *args, **kwargs)) code = str() VARS = vars pm = 0 for posfix in presence: if isinstance(posfix, Unknown): if posfix.value not in VARS: VARS.append(f"{posfix.value} = Unknown('{posfix.value}')") code += f'{posfix.value}' elif isinstance(posfix, FunctionMarker): func, dirtyTokens = posfix.value evaluated = Expression(...)._glSubCode(*args, **{**kwargs, 'dirty': dirtyTokens, 'vars': VARS}) newVars, evaluated, _ = evaluated VARS.extend(newVars) VARS = list(set(VARS)) code += f"{func.__qualname__}({evaluated})" elif isinstance(posfix, cake.Number): code += f'{posfix.__class__.__name__}({posfix.value})' elif isinstance(posfix, PlusOrMinus): code += '(+|-)' pm += 1 elif isinstance(posfix, (Symbol, Operator)): posfix.validate code += f'{posfix.value}' if not dirty: return "{}\n{}".format('\n'.join(VARS), code), pm return VARS, code, pm def convertToCode(self, update_mapping: bool = False, imports: tuple = tuple(), *args, **kwargs): beginning = GEN_AUTO_CODE_MARKING(*imports) code, _ = self._glSubCode(update_mapping, *args, **kwargs) return f'{beginning}{code}' def substitute(self, update_mapping: bool = False, imports: tuple = tuple(), *args, **kwargs): _, pmCount = self._glSubCode(update_mapping, *args, **kwargs) code = self.convertToCode(update_mapping, imports, *args, **kwargs) combos = cake.getPlusMinusCombos(pmCount) if not combos: return execCode(code) toBeEvaluated = list() for combo in combos: codeCopy = code for symbol in combo: ind = codeCopy.find('(+|-)') cmCopy = list(codeCopy) cmCopy[ind:(ind + 5)] = symbol codeCopy = ''.join(cmCopy) toBeEvaluated.append(codeCopy) results = list() for rCode in toBeEvaluated: results.append(execCode(rCode)) return tuple(results) def solve(self, *args, **kwargs): raise NotImplementedError() def append(self, expr: typing.Union[str, "Expression"]) -> None: if isinstance(expr, Expression): expr = Expression.expression self.__expression += expr def prepend(self, expr: typing.Union[str, "Expression"]) -> None: if isinstance(expr, Expression): expr = Expression.expression self.__expression = expr + self.__expression def wrap_all(self, operator: str, ending: str, *eq_args, **eq_kwargs) -> None: op = Operator(operator) eq = f"({self.__expression})" eq += f" {op.value} {ending}" self.__expression = eq self.update_variables(*eq_args, **eq_kwargs) def update_variables( self, overwrite: bool = True, *args, **kwargs ) -> typing.Union[dict, None]: current_args = self.args default_args = list(args) + current_args[len(args) :] default_kwargs = {**self.kwargs, **kwargs} mapping = self._sort_values(*default_args, **default_kwargs) if overwrite: self.args = default_args self.kwargs = default_kwargs self.__mappings = mapping else: return mapping @property
MIT License
rgc99/irrigation_unlimited
custom_components/irrigation_unlimited/irrigation_unlimited.py
IURunQueue.find_last_index
python
def find_last_index(self, uid: int) -> int: result: int = None last_time: datetime = None for i, run in enumerate(self): if run.schedule is not None and run.schedule.uid == uid: if last_time is None or run.end_time > last_time: last_time = run.end_time result = i return result
Return the index of the run that finishes last in the queue. This routine does not require the list to be sorted.
https://github.com/rgc99/irrigation_unlimited/blob/edbac4ffe8808a2021c40f12809e6b0c2fe79245/custom_components/irrigation_unlimited/irrigation_unlimited.py#L726-L736
import typing from datetime import datetime, time, timedelta from types import MappingProxyType from typing import OrderedDict from logging import WARNING, Logger, getLogger, INFO, DEBUG, ERROR import uuid import time as tm import json from homeassistant.core import HomeAssistant, CALLBACK_TYPE, DOMAIN as HADOMAIN from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval, Event as HAEvent import homeassistant.helpers.sun as sun import homeassistant.util.dt as dt from homeassistant.const import ( CONF_AFTER, CONF_BEFORE, CONF_DELAY, CONF_ENTITY_ID, CONF_NAME, CONF_REPEAT, CONF_WEEKDAY, CONF_ID, EVENT_HOMEASSISTANT_STOP, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON, WEEKDAYS, ATTR_ENTITY_ID, ) from .const import ( CONF_ACTUAL, CONF_ALL_ZONES_CONFIG, CONF_DAY, CONF_DECREASE, CONF_FINISH, CONF_INCREASE, CONF_INDEX, CONF_LOGGING, CONF_OUTPUT_EVENTS, CONF_PERCENTAGE, CONF_REFRESH_INTERVAL, CONF_RESET, CONF_RESULTS, CONF_SEQUENCES, CONF_SEQUENCE_ID, CONF_SHOW_LOG, CONF_AUTOPLAY, CONF_ANCHOR, DEFAULT_GRANULATITY, DEFAULT_REFRESH_INTERVAL, DEFAULT_TEST_SPEED, CONF_DURATION, CONF_ENABLED, CONF_GRANULARITY, CONF_TIME, CONF_SUN, CONF_PREAMBLE, CONF_POSTAMBLE, CONF_TESTING, CONF_SPEED, CONF_TIMES, CONF_START, CONF_END, CONF_CONTROLLERS, CONF_SCHEDULES, CONF_ZONES, CONF_MINIMUM, CONF_MAXIMUM, CONF_MONTH, MONTHS, CONF_ODD, CONF_EVEN, CONF_SHOW, CONF_CONFIG, CONF_TIMELINE, CONF_ZONE_ID, CONF_FUTURE_SPAN, SERVICE_CANCEL, SERVICE_DISABLE, SERVICE_ENABLE, SERVICE_TOGGLE, SERVICE_MANUAL_RUN, SERVICE_TIME_ADJUST, STATUS_BLOCKED, STATUS_PAUSED, STATUS_DISABLED, STATUS_INITIALISING, ) _LOGGER: Logger = getLogger(__package__) def time_to_timedelta(offset: time) -> timedelta: return datetime.combine(datetime.min, offset) - datetime.min def dt2lstr(stime: datetime) -> str: return datetime.strftime(dt.as_local(stime), "%Y-%m-%d %H:%M:%S") SYSTEM_GRANULARITY: int = DEFAULT_GRANULATITY def reset_granularity() -> None: global SYSTEM_GRANULARITY SYSTEM_GRANULARITY = DEFAULT_GRANULATITY def granularity_time() -> timedelta: return timedelta(seconds=SYSTEM_GRANULARITY) def wash_td(delta: timedelta, granularity: int = None) -> timedelta: if delta is not None: if granularity is None: granularity = SYSTEM_GRANULARITY whole_seconds = int(delta.total_seconds()) rounded_seconds = int(whole_seconds / granularity) * granularity return timedelta(seconds=rounded_seconds) return None def wash_dt(date: datetime, granularity: int = None) -> datetime: if date is not None: if granularity is None: granularity = SYSTEM_GRANULARITY rounded_seconds = int(date.second / granularity) * granularity return date.replace(second=rounded_seconds, microsecond=0) return None def wash_t(stime: time, granularity: int = None) -> time: if stime is not None: if granularity is None: granularity = SYSTEM_GRANULARITY utc = dt.utcnow() full_date = utc.combine(utc.date(), stime) rounded_seconds = int(full_date.second / granularity) * granularity return full_date.replace(second=rounded_seconds, microsecond=0).timetz() return None def round_td(delta: timedelta, granularity: int = None) -> timedelta: if delta is not None: if granularity is None: granularity = SYSTEM_GRANULARITY rounded_seconds = ( int((delta.total_seconds() + granularity / 2) / granularity) * granularity ) return timedelta(seconds=rounded_seconds) return None class IUBase: def __init__(self, index: int) -> None: self._uid: int = uuid.uuid4().int self._index: int = index def __eq__(self, other) -> bool: return isinstance(other, 
IUBase) and self.uid == other.uid @property def uid(self) -> str: return self._uid @property def index(self) -> int: return self._index class IUAdjustment: def __init__(self) -> None: self._method: str = None self._time_adjustment = None self._minimum: timedelta = None self._maximum: timedelta = None def __str__(self) -> str: if self._method is None: result = "None" elif self._method == CONF_ACTUAL: result = f"={self._time_adjustment}" elif self._method == CONF_PERCENTAGE: result = f"%{self._time_adjustment}" elif self._method == CONF_INCREASE: result = f"+{self._time_adjustment}" elif self._method == CONF_DECREASE: result = f"-{self._time_adjustment}" else: result = str(self._time_adjustment) return result @property def has_adjustment(self) -> bool: return self._method is not None def clear(self) -> None: self._method = None self._time_adjustment = None self._minimum = None self._maximum = None def load(self, data: MappingProxyType) -> bool: old_method = self._method old_time_adjustment = self._time_adjustment old_minimum = self._minimum old_maximum = self._maximum if CONF_ACTUAL in data: self._method = CONF_ACTUAL self._time_adjustment = wash_td(data.get(CONF_ACTUAL)) elif CONF_PERCENTAGE in data: self._method = CONF_PERCENTAGE self._time_adjustment = data.get(CONF_PERCENTAGE) elif CONF_INCREASE in data: self._method = CONF_INCREASE self._time_adjustment = wash_td(data.get(CONF_INCREASE)) elif CONF_DECREASE in data: self._method = CONF_DECREASE self._time_adjustment = wash_td(data.get(CONF_DECREASE)) elif CONF_RESET in data: self._method = None self._time_adjustment = None self._minimum = wash_td(data.get(CONF_MINIMUM, None)) if self._minimum is not None: self._minimum = max(self._minimum, granularity_time()) self._maximum = wash_td(data.get(CONF_MAXIMUM, None)) return ( self._method != old_method or self._time_adjustment != old_time_adjustment or self._minimum != old_minimum or self._maximum != old_maximum ) def adjust(self, stime: timedelta) -> timedelta: new_time: timedelta if self._method is None: new_time = stime elif self._method == CONF_ACTUAL: new_time = self._time_adjustment elif self._method == CONF_PERCENTAGE: new_time = round_td(stime * self._time_adjustment / 100) elif self._method == CONF_INCREASE: new_time = stime + self._time_adjustment elif self._method == CONF_DECREASE: new_time = stime - self._time_adjustment else: new_time = stime if self._minimum is not None: new_time = max(new_time, self._minimum) if self._maximum is not None: new_time = min(new_time, self._maximum) return new_time class IUSchedule(IUBase): def __init__( self, hass: HomeAssistant, schedule_index: int, ) -> None: super().__init__(schedule_index) self._hass = hass self._time = None self._duration: timedelta = None self._name: str = None self._weekdays: list[int] = None self._months: list[int] = None self._days = None self._anchor: str = None self._dirty: bool = True @property def name(self) -> str: return self._name @property def is_setup(self) -> bool: return True @property def duration(self) -> timedelta: return self._duration def clear(self) -> None: self._dirty = True def load(self, config: OrderedDict) -> "IUSchedule": self.clear() self._time = config[CONF_TIME] self._anchor = config[CONF_ANCHOR] self._duration = wash_td(config.get(CONF_DURATION, None)) self._name = config.get(CONF_NAME, f"Schedule {self.index + 1}") if CONF_WEEKDAY in config: self._weekdays = [] for i in config[CONF_WEEKDAY]: self._weekdays.append(WEEKDAYS.index(i)) else: self._weekdays = None if CONF_MONTH in config: 
self._months = [] for i in config[CONF_MONTH]: self._months.append(MONTHS.index(i) + 1) else: self._months = None self._days = config.get(CONF_DAY, None) return self def as_dict(self) -> OrderedDict: result = OrderedDict() result[CONF_TIME] = self._time result[CONF_DURATION] = self._duration result[CONF_NAME] = self._name if self._weekdays is not None: result[CONF_WEEKDAY] = [] for item in self._weekdays: result[CONF_WEEKDAY].append(WEEKDAYS[item]) if self._months is not None: result[CONF_MONTH] = [] for item in self._months: result[CONF_MONTH].append(MONTHS[item - 1]) if self._days is not None: result[CONF_DAY] = self._days return result def get_next_run( self, stime: datetime, ftime: datetime, adjusted_duration: timedelta ) -> datetime: local_time = dt.as_local(stime) final_time = dt.as_local(ftime) next_run: datetime = None while True: if next_run is None: next_run = local_time else: next_run += timedelta(days=1) if next_run > final_time: return None if self._weekdays is not None and next_run.weekday() not in self._weekdays: continue if self._months is not None and next_run.month not in self._months: continue if self._days is not None: if self._days == CONF_ODD: if next_run.day % 2 == 0: continue elif self._days == CONF_EVEN: if next_run.day % 2 != 0: continue elif next_run.day not in self._days: continue if isinstance(self._time, time): next_run = datetime.combine( next_run.date(), self._time, next_run.tzinfo ) elif isinstance(self._time, dict) and CONF_SUN in self._time: sun_event = sun.get_astral_event_date( self._hass, self._time[CONF_SUN], next_run ) if sun_event is None: continue next_run = dt.as_local(sun_event) if CONF_AFTER in self._time: next_run += self._time[CONF_AFTER] if CONF_BEFORE in self._time: next_run -= self._time[CONF_BEFORE] else: return None if self._anchor == CONF_FINISH: next_run -= adjusted_duration next_run = wash_dt(next_run) if next_run >= local_time: break return dt.as_utc(next_run) class IURun(IUBase): def __init__( self, start_time: datetime, duration: timedelta, zone: "IUZone", schedule: "IUSchedule", sequence_run: "IUSequenceRun", ) -> None: super().__init__(None) self._start_time: datetime = start_time self._duration: timedelta = duration self._zone = zone self._schedule = schedule self._sequence_run = sequence_run self._end_time: datetime = self._start_time + self._duration self._remaining_time: timedelta = self._end_time - self._start_time self._percent_complete: int = 0 @property def start_time(self) -> datetime: return self._start_time @property def duration(self) -> timedelta: return self._duration @property def zone(self) -> "IUSchedule": return self._zone @property def schedule(self) -> "IUSchedule": return self._schedule @property def end_time(self) -> datetime: return self._end_time @property def time_remaining(self) -> timedelta: return self._remaining_time @property def percent_complete(self) -> float: return self._percent_complete @property def is_sequence(self) -> bool: return self._sequence_run is not None @property def sequence_run(self) -> "IUSequenceRun": return self._sequence_run @property def sequence(self) -> "IUSequence": if self.is_sequence: return self._sequence_run.sequence return None @property def sequence_zone(self) -> "IUSequenceZone": if self.is_sequence: return self._sequence_run.sequence_zone(self) return None @property def sequence_running(self) -> bool: return self.is_sequence and self._sequence_run.running @property def crumbs(self) -> str: return self._crumbs() def _crumbs(self) -> str: def get_index(obj: IUBase) -> 
int: if obj is not None: return obj.index + 1 return 0 if self.is_sequence: sidx = self.sequence_run.run_index(self) + 1 else: sidx = 0 return "{}.{}.{}.{}.{}".format( get_index(self._zone), get_index(self._schedule), get_index(self.sequence), get_index(self.sequence_zone), sidx, ) def sequence_has_adjustment(self, deep: bool) -> bool: if self.is_sequence: return self.sequence.has_adjustment(deep) return False def sequence_adjustment(self) -> str: if self.is_sequence: result = str(self._sequence_run.sequence.adjustment) sequence_zone = self._sequence_run.sequence_zone(self) if sequence_zone.has_adjustment: result = f"{result},{str(sequence_zone.adjustment)}" return result return None def is_manual(self) -> bool: return self._schedule is None def is_running(self, stime: datetime) -> bool: return self._start_time <= stime < self._end_time def is_expired(self, stime: datetime) -> bool: return stime >= self._end_time def sequence_start(self, stime: datetime) -> bool: result = ( self.is_sequence and self.is_running(stime) and not self._sequence_run.running ) if result: self._sequence_run.running = True return result def update_time_remaining(self, stime: datetime) -> bool: if self.is_running(stime): self._remaining_time = self._end_time - stime total_duration: timedelta = self._end_time - self._start_time time_elapsed: timedelta = stime - self._start_time self._percent_complete = int((time_elapsed / total_duration) * 100) return True return False def as_dict(self) -> OrderedDict: result = OrderedDict() result["start"] = self._start_time result["end"] = self._end_time return result class IURunQueue(typing.List[IURun]): DAYS_SPAN: int = 3 RQ_STATUS_CLEARED: int = 0x01 RQ_STATUS_EXTENDED: int = 0x02 RQ_STATUS_REDUCED: int = 0x04 RQ_STATUS_SORTED: int = 0x08 RQ_STATUS_UPDATED: int = 0x10 RQ_STATUS_CANCELED: int = 0x20 RQ_STATUS_CHANGED: int = 0x40 def __init__(self) -> None: super().__init__() self._current_run: IURun = None self._next_run: IURun = None self._sorted: bool = False self._cancel_request: bool = False self._future_span = wash_td(timedelta(days=self.DAYS_SPAN)) @property def current_run(self) -> IURun: return self._current_run @property def next_run(self) -> IURun: return self._next_run @property def in_sequence(self) -> bool: return self._in_sequence() def _in_sequence(self) -> bool: for run in self: if run.sequence_running: return True return False def add( self, start_time: datetime, duration: timedelta, zone: "IUZone", schedule: "IUSchedule", sequence_run: "IUSequenceRun", ) -> IURun: run = IURun(start_time, duration, zone, schedule, sequence_run) self.append(run) self._sorted = False return run def cancel(self) -> None: self._cancel_request = True def clear_all(self) -> bool: modified: bool = False if len(self) > 0: self._current_run = None super().clear() modified = True return modified def clear(self, stime: datetime) -> bool: modified: bool = False if len(self) > 0: i = len(self) - 1 while i >= 0: item = self[i] if not ( item.is_running(stime) or item.is_manual() or item.sequence_running ): self.pop(i) modified = True i -= 1 if modified: self._next_run = None self._sorted = True return modified
MIT License
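A self-contained sketch of the scan that `find_last_index` performs above; `Schedule`, `Run`, and the three-entry queue are hypothetical stand-ins for the IURun/IURunQueue objects, reduced to the two attributes the method actually reads (`schedule.uid` and `end_time`).

```python
from collections import namedtuple
from datetime import datetime

Schedule = namedtuple("Schedule", "uid")          # stand-in for IUSchedule
Run = namedtuple("Run", "schedule end_time")      # stand-in for IURun

queue = [                                         # deliberately unsorted
    Run(Schedule(uid=1), datetime(2021, 6, 1, 6, 0)),
    Run(Schedule(uid=2), datetime(2021, 6, 1, 7, 0)),
    Run(Schedule(uid=1), datetime(2021, 6, 1, 5, 0)),
]

def find_last_index(runs, uid):
    """Same scan as IURunQueue.find_last_index, over a plain list."""
    result, last_time = None, None
    for i, run in enumerate(runs):
        if run.schedule is not None and run.schedule.uid == uid:
            if last_time is None or run.end_time > last_time:
                last_time, result = run.end_time, i
    return result

print(find_last_index(queue, 1))   # -> 0: the 06:00 run of schedule 1 finishes after its 05:00 run
```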
google/capirca
tools/cgrep.py
compare_ip_token
python
def compare_ip_token(options, db): token = options.token results = [] for ip in options.ip: rval = db.GetIpParents(ip) if token in rval: results = '%s is in %s' % (ip, token) else: results = '%s is _not_ in %s' % (ip, token) return results
Looks to see if a network IP is contained in a network object. Args: options: the options sent to the script db: network and service definitions Returns: results : end-user string stating the results
https://github.com/google/capirca/blob/4d58459bd20690c09f02daf15a48c41119c13f49/tools/cgrep.py#L426-L444
import argparse import pprint import sys from absl import app from absl import logging from capirca.lib import nacaddr from capirca.lib import naming def is_valid_ip(arg): try: nacaddr.IP(arg) except: raise argparse.ArgumentTypeError('%s is an invalid ip address' % arg) return arg def cli_options(): parser = argparse.ArgumentParser( description='c[apirca]grep', formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument('-d', '--def', dest='defs', help='Network Definitions directory location. \n', default='./def') ip_group = parser.add_argument_group() ip_group.add_argument('-i', '--ip', dest='ip', nargs='+', type=is_valid_ip, help='Return list of definitions containing the ' 'IP(s).\nMultiple IPs permitted.') ip_group.add_argument('-t', '--token', dest='token', help=('See if an IP is contained within the given ' 'token.\nMust be used in conjunction with ' '-i/--ip [addr].')) exclusive_group = parser.add_mutually_exclusive_group() exclusive_group.add_argument('-c', '--cmp', dest='cmp', nargs=2, metavar=('OBJ', 'OBJ'), help=('Compare the two given network ' 'definition tokens')) exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2, type=is_valid_ip, metavar=('IP', 'IP'), help=('Diff the network objects to' ' which the given IP(s) belong')) exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+', help=('Return list of IP(s) contained within ' 'the given token(s)')) exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+', help=('Return list of port(s) contained ' 'within given token(s)')) exclusive_group.add_argument('-p', '--port', dest='port', nargs=2, metavar=('PORT', 'PROTO'), help=('Returns a list of tokens containing ' 'the given port and protocol')) return parser def main(argv): del argv parser = cli_options() options = parser.parse_args() db = naming.Naming(options.defs) p = pprint.PrettyPrinter(indent=1, depth=4, width=1).pprint if options.ip and any([options.gmp, options.cmp, options.obj, options.svc, options.port]): logging.info('You can only use -i with -t or by itself') elif options.token and options.ip: try: get_nets([options.token], db) except naming.UndefinedAddressError: logging.info("Network group '%s' is not defined!", options.token) else: results = compare_ip_token(options, db) logging.info(results) elif options.token and not options.ip: logging.info('You must specify an IP Address with -i [addr]') elif options.ip: for ip in options.ip: groups = get_ip_parents(ip, db) logging.info('Results for IP: %s', ip) for name, networks in groups: logging.info('%s %s', name, networks) elif options.gmp: common, diff1, diff2 = group_diff(options, db) print_diff(options.gmp[0], common, diff1, diff2) logging.info('') print_diff(options.gmp[1], common, diff2, diff1) elif options.cmp: meta, results = compare_tokens(options, db) first_name = meta[0] second_name = meta[1] union = meta[2] logging.info('Union of %s and %s:\n %s\n', first_name, second_name, union) logging.info('Diff of %s and %s:', first_name, second_name) for i in results: logging.info(' %s', i) logging.info('') first_obj, sec_obj = options.cmp if check_encapsulated('network', first_obj, sec_obj, db): logging.info('%s fully encapsulates %s', sec_obj, first_obj) else: logging.info('%s does _not_ fully encapsulate %s', sec_obj, first_obj) if check_encapsulated('network', sec_obj, first_obj, db): logging.info('%s fully encapsulates %s', first_obj, sec_obj) else: logging.info('%s does _not_ fully encapsulate %s', first_obj, sec_obj) elif options.obj: for obj in options.obj: try: token, ips = 
get_nets([obj], db)[0] except naming.UndefinedAddressError: logging.info('%s is an invalid object', obj) else: logging.info('%s:', token) ips.sort(key=lambda x: int(x.ip)) p([str(x) for x in ips]) elif options.svc: try: results = get_ports(options.svc, db) except naming.UndefinedServiceError: logging.info('%s contains an invalid service object', str(options.svc)) else: for result in get_ports(options.svc, db): svc, port = result logging.info('%s:', svc) p(port) elif options.port: port, protocol, result = get_services(options, db) logging.info('%s/%s:', port, protocol) p(result) elif not any((options.cmp, options.ip, options.token, options.obj, options.svc, options.port)): parser.print_help() logging.info('') def check_encapsulated(obj_type, first_obj, second_obj, db): if obj_type == 'network': first = get_nets([first_obj], db)[0][1] second = get_nets([second_obj], db)[0][1] elif obj_type == 'service': first = get_ports([first_obj], db)[0][1] second = get_ports([second_obj], db)[0][1] else: raise ValueError("check_encapsulated() currently only supports " "'network' and 'service' for the obj_type parameter") for obj in first: for sec_obj in second: if obj.version == sec_obj.version: if obj.subnet_of(sec_obj): break else: return False return True def print_diff(ip, common, diff1, diff2): logging.info('IP: %s', ip) if common: common = [' {0}'.format(elem) for elem in common] logging.info('\n'.join(common)) if diff1: diff = ['+ {0}'.format(elem) for elem in diff1] logging.info('\n'.join(diff)) if diff2: diff = ['- {0}'.format(elem) for elem in diff2] logging.info('\n'.join(diff)) def group_diff(options, db): nested_rvals = [] for ip in options.gmp: nested_rvals.append(get_ip_parents(ip, db)) group1 = [x[0] for x in nested_rvals[0]] group2 = [x[0] for x in nested_rvals[1]] common = sorted(list(set(group1) & set(group2))) diff1 = sorted(list(set(group1) - set(group2))) diff2 = sorted(list(set(group2) - set(group1))) return common, diff1, diff2 def get_ip_parents(ip, db): results = [] rval = db.GetIpParents(ip) for v in rval: nested = db.GetNetParents(v) prefix_and_nets = get_nets_and_highest_prefix(ip, v, db) if nested: for n in nested: results.append(('%s -> %s' % (n, v), prefix_and_nets)) else: results.append((v, prefix_and_nets)) results = sorted(results, key=lambda x: x[1][0], reverse=True) for index, group in enumerate(results): results[index] = (group[0], group[1][1]) return results def get_nets_and_highest_prefix(ip, net_group, db): highest_prefix_length = 0 networks = [] ip = nacaddr.IP(ip) for net in get_nets([net_group], db)[0][1]: if ip.version == net.version: if ip.subnet_of(net): networks.append(str(net)) if net.prefixlen > highest_prefix_length: highest_prefix_length = net.prefixlen return highest_prefix_length, networks def get_nets(objects, db): results = [] for obj in objects: net = db.GetNet(obj) results.append((obj, net)) return results def compare_tokens(options, db): t1, t2 = options.cmp d1 = db.GetNet(t1) d2 = db.GetNet(t2) union = list(set(d1 + d2)) meta = (t1, t2, union) results = [] for el in set(d1 + d2): el = nacaddr.IP(el) if el in d1 and el in d2: results.append(str(el)) elif el in d1: results.append(str(el)) elif el in d2: results.append(str(el)) return meta, results
Apache License 2.0
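A sketch of `compare_ip_token` (as defined in the record above) against a hypothetical stub of the naming database; only `GetIpParents` is stubbed because that is the only db call the function makes, and `FakeDb` plus the `SimpleNamespace` options object are illustration-only (the real db is `capirca.lib.naming.Naming(definitions_dir)`, as in the record's context).

```python
from types import SimpleNamespace

class FakeDb:                                     # stand-in for capirca.lib.naming.Naming(...)
    def GetIpParents(self, ip):
        return ["INTERNAL"] if ip.startswith("10.") else ["EXTERNAL"]

options = SimpleNamespace(token="INTERNAL", ip=["10.0.0.5", "192.0.2.1"])
print(compare_ip_token(options, FakeDb()))
# -> '192.0.2.1 is _not_ in INTERNAL'
# Note: as written, the function overwrites `results` on every iteration,
# so only the string for the last IP in options.ip is returned.
```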
mandiant/speakeasy
speakeasy/winenv/api/usermode/shlwapi.py
Shlwapi.PathAppend
python
def PathAppend(self, emu, argv, ctx={}): pszPath, pszMore = argv cw = self.get_char_width(ctx) path = self.read_mem_string(pszPath, cw) more = self.read_mem_string(pszMore, cw) argv[0] = path argv[1] = more out = self.join_windows_path(path, more) out += '\0' self.write_mem_string(out, pszPath, cw) return 1
BOOL PathAppendA( LPSTR pszPath, LPCSTR pszMore );
https://github.com/mandiant/speakeasy/blob/e9b68610babba287c7032a32d0df2833ad1c5d7e/speakeasy/winenv/api/usermode/shlwapi.py#L279-L295
import os import ntpath from .. import api import speakeasy.winenv.arch as e_arch class Shlwapi(api.ApiHandler): name = 'shlwapi' apihook = api.ApiHandler.apihook impdata = api.ApiHandler.impdata def __init__(self, emu): super(Shlwapi, self).__init__(emu) self.funcs = {} self.data = {} self.window_hooks = {} self.handle = 0 self.win = None super(Shlwapi, self).__get_hook_attrs__(self) def join_windows_path(self, *args, **kwargs): args = list(map(lambda x: x.replace('\\', '/'), args)) return os.path.join(*args, **kwargs).replace('/', '\\') @apihook('PathIsRelative', argc=1) def PathIsRelative(self, emu, argv, ctx={}): pszPath, = argv cw = self.get_char_width(ctx) pn = '' rv = False if pszPath: pn = self.read_mem_string(pszPath, cw) if '..' in pn: rv = True argv[0] = pn return rv @apihook('StrStr', argc=2) def StrStr(self, emu, argv, ctx={}): hay, needle = argv cw = self.get_char_width(ctx) if hay: _hay = self.read_mem_string(hay, cw) argv[0] = _hay if needle: needle = self.read_mem_string(needle, cw) argv[1] = needle ret = _hay.find(needle) if ret != -1: ret = hay + ret else: ret = 0 return ret @apihook('StrStrI', argc=2) def StrStrI(self, emu, argv, ctx={}): hay, needle = argv cw = self.get_char_width(ctx) if hay: _hay = self.read_mem_string(hay, cw) argv[0] = _hay _hay = _hay.lower() if needle: needle = self.read_mem_string(needle, cw) argv[1] = needle needle = needle.lower() ret = _hay.find(needle) if ret != -1: ret = hay + ret else: ret = 0 return ret @apihook('PathFindExtension', argc=1) def PathFindExtension(self, emu, argv, ctx={}): pszPath, = argv cw = self.get_char_width(ctx) s = self.read_mem_string(pszPath, cw) argv[0] = s idx1 = s.rfind('\\') t = s[idx1 + 1:] idx2 = t.rfind('.') if idx2 == -1: return pszPath + len(s) argv[0] = t[idx2:] return pszPath + idx1 + 1 + idx2 @apihook('StrCmpI', argc=2) def StrCmpI(self, emu, argv, ctx={}): psz1, psz2 = argv cw = self.get_char_width(ctx) s1 = self.read_mem_string(psz1, cw) s2 = self.read_mem_string(psz2, cw) rv = 1 argv[0] = s1 argv[1] = s2 if s1.lower() == s2.lower(): rv = 0 return rv @apihook('PathFindFileName', argc=1) def PathFindFileName(self, emu, argv, ctx={}): pszPath, = argv cw = self.get_char_width(ctx) s = self.read_mem_string(pszPath, cw) argv[0] = s idx = s.rfind('\\') if idx == -1: return pszPath + len(s) argv[0] = s[idx + 1:] return pszPath + idx + 1 @apihook('PathRemoveExtension', argc=1) def PathRemoveExtension(self, emu, argv, ctx={}): pszPath, = argv cw = self.get_char_width(ctx) s = self.read_mem_string(pszPath, cw) argv[0] = s idx1 = s.rfind('\\') t = s[idx1 + 1:] idx2 = t.rfind('.') if idx2 == -1: return pszPath s = s[:idx1 + 1 + idx2] argv[0] = s self.write_mem_string(s, pszPath, cw) return pszPath @apihook('PathStripPath', argc=1) def PathStripPath(self, emu, argv, ctx={}): pszPath, = argv cw = self.get_char_width(ctx) s = self.read_mem_string(pszPath, cw) argv[0] = s mod_name = ntpath.basename(s) + '\x00' enc = self.get_encoding(cw) mod_name = mod_name.encode(enc) self.mem_write(pszPath, mod_name) @apihook('wvnsprintfA', argc=4) def wvnsprintfA(self, emu, argv, ctx={}): buffer, count, _format, argptr = argv rv = 0 fmt_str = self.read_mem_string(_format, 1) fmt_cnt = self.get_va_arg_count(fmt_str) vargs = self.va_args(argptr, fmt_cnt) fin = self.do_str_format(fmt_str, vargs) fin = fin[:count] + '\x00' rv = len(fin) self.mem_write(buffer, fin.encode('utf-8')) argv[0] = fin.replace('\x00', '') argv[1] = fmt_str return rv @apihook('wnsprintf', argc=e_arch.VAR_ARGS, conv=e_arch.CALL_CONV_CDECL) def wnsprintf(self, 
emu, argv, ctx={}): argv = emu.get_func_argv(e_arch.CALL_CONV_CDECL, 3) buf, max_buf_size, fmt = argv cw = self.get_char_width(ctx) fmt_str = self.read_mem_string(fmt, cw) fmt_cnt = self.get_va_arg_count(fmt_str) if not fmt_cnt: self.write_mem_string(fmt_str, buf, cw) return len(fmt_str) _argv = emu.get_func_argv(e_arch.CALL_CONV_CDECL, 3 + fmt_cnt)[3:] fin = self.do_str_format(fmt_str, _argv) rv = len(fin) if rv <= max_buf_size: self.write_mem_string(fin, buf, cw) argv[0] = fin argv[2] = fmt_str return rv else: return -1 @apihook('PathAppend', argc=2)
MIT License
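A standalone sketch of the path-joining trick `PathAppend` relies on above (via `Shlwapi.join_windows_path`): flip backslashes to forward slashes so `os.path.join` behaves consistently on any host OS, then flip them back. The function below is an illustration, not the speakeasy API, and the example paths are made up.

```python
import os

def join_windows_path(*parts):
    # Same approach as Shlwapi.join_windows_path in the record above.
    parts = [p.replace('\\', '/') for p in parts]
    return os.path.join(*parts).replace('/', '\\')

print(join_windows_path('C:\\Users\\victim', 'AppData\\Roaming'))
# -> C:\Users\victim\AppData\Roaming
# PathAppend then NUL-terminates the joined string and writes it back over
# the pszPath buffer in emulated memory.
```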
jertel/elastalert2
elastalert/auth.py
RefeshableAWSRequestsAuth.__init__
python
def __init__(self, refreshable_credential, aws_host, aws_region, aws_service): self.refreshable_credential = refreshable_credential self.aws_host = aws_host self.aws_region = aws_region self.service = aws_service
:param refreshable_credential: A credential class that refreshes STS or IAM Instance Profile credentials :type refreshable_credential: :class:`botocore.credentials.RefreshableCredentials`
https://github.com/jertel/elastalert2/blob/8f54de112b539111622c3af6f471de4d78be802e/elastalert/auth.py#L12-L24
import os import boto3 from aws_requests_auth.aws_auth import AWSRequestsAuth class RefeshableAWSRequestsAuth(AWSRequestsAuth):
Apache License 2.0
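A hedged construction sketch for `RefeshableAWSRequestsAuth`; the endpoint host is a made-up example, and whether `boto3` actually hands back refreshable credentials depends on the runtime (instance profiles and assumed roles typically do).

```python
import boto3
from elastalert.auth import RefeshableAWSRequestsAuth  # module path per the record

# Default credential chain; on EC2/ECS this is usually a botocore
# RefreshableCredentials object, which is what this class expects.
credentials = boto3.Session().get_credentials()

auth = RefeshableAWSRequestsAuth(
    refreshable_credential=credentials,
    aws_host='search-example.us-east-1.es.amazonaws.com',   # made-up endpoint
    aws_region='us-east-1',
    aws_service='es',
)
# `auth` inherits from AWSRequestsAuth, i.e. a requests-style auth object,
# and can be passed wherever such an auth callable is accepted.
```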
stanford-mast/nn_dataflow
nn_dataflow/core/network.py
Network.input_layer
python
def input_layer(self): return self.layer_dict[self.INPUT_LAYER_KEY]
Get the input layer.
https://github.com/stanford-mast/nn_dataflow/blob/198a5274b9529125c6aa2b8b72b365d60cf83778/nn_dataflow/core/network.py#L48-L52
from collections import OrderedDict from .layer import Layer, InputLayer class Network(): INPUT_LAYER_KEY = '__INPUT__' def __init__(self, net_name): self.net_name = net_name self.layer_dict = OrderedDict() self.prevs_dict = {} self.nexts_dict = {} self.ext_dict = OrderedDict() def set_input_layer(self, input_layer): if self.INPUT_LAYER_KEY in self.layer_dict: raise KeyError('Network: only one input layer is allowed.') if not isinstance(input_layer, InputLayer): raise TypeError('Network: input_layer must be an InputLayer ' 'instance.') self.layer_dict[self.INPUT_LAYER_KEY] = input_layer
BSD 3-Clause New or Revised License
xorso/pyalarmdotcom
pyalarmdotcom/pyalarmdotcom.py
Alarmdotcom.__init__
python
def __init__(self, username, password, websession, loop): self._username = username self._password = password self._websession = websession self._loop = loop self._login_info = None self.state = None self.sensor_status = None
Use aiohttp to make a request to alarm.com :param username: Alarm.com username :param password: Alarm.com password :param websession: AIOHttp Websession :param loop: Async loop.
https://github.com/xorso/pyalarmdotcom/blob/9d2cfe1968d52bb23533aeda80ca5efbfb692304/pyalarmdotcom/pyalarmdotcom.py#L85-L100
import re import logging import aiohttp import asyncio import async_timeout from bs4 import BeautifulSoup _LOGGER = logging.getLogger(__name__) class Alarmdotcom(object): LOGIN_URL = 'https://www.alarm.com/pda/Default.aspx' LOGIN_USERNAME = ('name', 'ctl00$ContentPlaceHolder1$txtLogin') LOGIN_PASSWORD = ('name', 'ctl00$ContentPlaceHolder1$txtPassword') LOGIN_BUTTON = ('name', 'ctl00$ContentPlaceHolder1$btnLogin') STATUS_IMG = ('id', 'ctl00_phBody_lblArmingState') BTN_DISARM = ('id', 'ctl00_phBody_butDisarm') BTN_ARM_STAY = ('id', 'ctl00_phBody_butArmStay', 'ctl00_phBody_ArmingStateWidget_btnArmOptionStay') BTN_ARM_AWAY = ('id', 'ctl00_phBody_butArmAway', 'ctl00_phBody_ArmingStateWidget_btnArmOptionAway') STATUS_UPDATING = {'id': 'ctl00_phBody_ArmingStateWidget_imgArmingUpdating'} ALARMDOTCOM_URL = 'https://www.alarm.com/pda/' SESSION_KEY_RE = re.compile( '{url}(?P<sessionKey>.*)/default.aspx'.format(url=ALARMDOTCOM_URL)) USERNAME = 'ctl00$ContentPlaceHolder1$txtLogin' PASSWORD = 'ctl00$ContentPlaceHolder1$txtPassword' LOGIN_CONST = 'ctl00$ContentPlaceHolder1$btnLogin' ERROR_CONTROL = 'ctl00_ContentPlaceHolder1_ErrorControl1' MESSAGE_CONTROL = 'ctl00_ErrorControl1' VIEWSTATE = '__VIEWSTATE' VIEWSTATEGENERATOR = '__VIEWSTATEGENERATOR' VIEWSTATEENCRYPTED = '__VIEWSTATEENCRYPTED' EVENTVALIDATION = '__EVENTVALIDATION' DISARM_EVENT_VALIDATION = 'MnXvTutfO7KZZ1zZ7QR19E0sfvOVCpK7SV' 'yeJ0IkUkbXpfEqLa4fa9PzFK2ydqxNal' ARM_STAY_EVENT_VALIDATION = '/CwyHTpKH4aUp/pqo5gRwFJmKGubsvmx3RI6n' 'IFcyrtacuqXSy5dMoqBPX3aV2ruxZBTUVxenQ' '7luwjnNdcsxQW/p+YvHjN9ialbwACZfQsFt2o5' ARM_AWAY_EVENT_VALIDATION = '3ciB9sbTGyjfsnXn7J4LjfBvdGlkqiHoeh1vPjc5' DISARM_COMMAND = 'ctl00$phBody$butDisarm' ARM_STAY_COMMAND = 'ctl00$phBody$butArmStay' ARM_AWAY_COMMAND = 'ctl00$phBody$butArmAway' ARMING_PANEL = '#ctl00_phBody_pnlArming' ALARM_STATE = '#ctl00_phBody_lblArmingState' SENSOR_STATUS = '#ctl00_phBody_lblSensorStatus' COMMAND_LIST = {'Disarm': {'command': DISARM_COMMAND, 'eventvalidation': DISARM_EVENT_VALIDATION}, 'Arm+Stay': {'command': ARM_STAY_COMMAND, 'eventvalidation': ARM_STAY_EVENT_VALIDATION}, 'Arm+Away': {'command': ARM_AWAY_COMMAND, 'eventvalidation': ARM_AWAY_EVENT_VALIDATION}}
BSD 3-Clause New or Revised License
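A construction sketch for `Alarmdotcom`, assuming only the constructor signature shown above; the credentials are placeholders, and any login or status-fetch calls beyond `__init__` are outside this record.

```python
import asyncio
import aiohttp
from pyalarmdotcom.pyalarmdotcom import Alarmdotcom  # module path per the record

async def main(loop):
    async with aiohttp.ClientSession() as websession:
        alarm = Alarmdotcom('user@example.com', 'hunter2',   # placeholder credentials
                            websession, loop)
        print(alarm.state, alarm.sensor_status)              # both None until a status fetch

loop = asyncio.new_event_loop()
loop.run_until_complete(main(loop))
```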
harmon758/harmonbot
Discord/cogs/role.py
Role.role_create
python
async def role_create(self, ctx, *, name: str = ""): role = await ctx.guild.create_role(name = name) await ctx.embed_reply(role.mention + " created")
Creates a role
https://github.com/harmon758/harmonbot/blob/def3849beabdaea5e0f9c594dcf6d6d8980782bd/Discord/cogs/role.py#L44-L48
import discord from discord.ext import commands from operator import attrgetter from utilities import checks def setup(bot): bot.add_cog(Role(bot)) class Role(commands.Cog): def __init__(self, bot): self.bot = bot @commands.group(aliases = ["roles"], invoke_without_command = True, case_insensitive = True) @checks.not_forbidden() async def role(self, ctx): await ctx.send_help(ctx.command) @role.command(name = "color", aliases = ["colour"]) @commands.guild_only() @checks.not_forbidden() async def role_color(self, ctx, role: discord.Role, *, color: discord.Color = None): if color: await commands.check_any(commands.has_guild_permissions(manage_roles = True), commands.is_owner()).predicate(ctx) await commands.bot_has_guild_permissions(manage_roles = True).predicate(ctx) await role.edit(color = color) await ctx.embed_reply(role.mention + " has been recolored") else: await ctx.embed_reply(role.mention + "'s color is {}".format(role.color)) @role.command(name = "create", aliases = ["make", "new"]) @commands.bot_has_guild_permissions(manage_roles = True) @commands.check_any(commands.has_guild_permissions(manage_roles = True), commands.is_owner())
MIT License
kastnerkyle/representation_mixing
code/lib/tfbldr/datasets/music/loaders.py
fetch_josquin
python
def fetch_josquin(keys=["C major", "A minor"], equal_voice_count=4, verbose=True): data_path = check_fetch_josquin() pickle_path = os.path.join(data_path, "__processed_josquin.pkl") mu = _music_extract(data_path, pickle_path, ext=".krn", verbose=verbose) mu_res = _common_features_from_music_extract(mu, equal_voice_count=equal_voice_count, verbose=verbose) return mu_res
Josquin transposed to C major or A minor (depending on original key). Requires music21.
https://github.com/kastnerkyle/representation_mixing/blob/146ddc7a2cc34544bb4516149ccfcbe72eedd102/code/lib/tfbldr/datasets/music/loaders.py#L358-L373
from ..loaders import get_tfbldr_dataset_dir from ..loaders import pe from ..loaders import copytree from ...core import get_logger logger = get_logger() import os import shutil import time import multiprocessing import cPickle as pickle import functools try: from music21 import converter, interval, pitch, harmony, analysis, spanner, midi, meter from music21 import corpus except ImportError: logger.info("Unable to retrieve music21 related utilities") from .music import music21_to_pitch_duration from .music import music21_to_chord_duration from .music import pitch_and_duration_to_quantized from .music import chord_and_chord_duration_to_quantized from .analysis import midi_to_notes from .analysis import notes_to_midi TIMEOUT_ID = "MULTIPROCESSING_TIMEOUT" def abortable_worker(func, *args, **kwargs): timeout = kwargs['timeout'] p = multiprocessing.dummy.Pool(1) res = p.apply_async(func, args=args) for i in range(timeout + 1): if i > 0: time.sleep(1) if res.ready(): if res.successful(): try: return res.get(timeout=1) except multiprocessing.TimeoutError: logger.info("Aborting due to timeout in get") p.terminate() return (TIMEOUT_ID,) logger.info("Aborting due to timeout") p.terminate() return (TIMEOUT_ID,) def _music_single_extract(files, data_path, verbose, n): if verbose: logger.info("Starting file {} of {}".format(n, len(files))) f = files[n] file_path = os.path.join(data_path, f) start_time = time.time() p = converter.parse(file_path) k = p.analyze("key") orig_key = k.name orig_mode = k.mode if k.mode not in ["minor", "major"]: logger.info("Mode neither minor not major in {}, aborting".format(f)) return (TIMEOUT_ID,) if verbose: parse_time = time.time() r = parse_time - start_time logger.info("Parse time {}:{}".format(f, r)) time_sigs = [str(ts).split(" ")[-1].split(">")[0] for ts in p.recurse().getElementsByClass(meter.TimeSignature)] nums = [int(ts.split("/")[0]) for ts in time_sigs] num_check = all([n == nums[0] for n in nums]) denoms = [int(ts.split("/")[1]) for ts in time_sigs] denom_check = all([d == denoms[0] for d in denoms]) if not denom_check: logger.info("Time signature denominator changed in {}, aborting".format(f)) return (TIMEOUT_ID,) if len(time_sigs) < 1: time_sigs = ["4/4"] """ https://gist.github.com/aldous-rey/68c6c43450517aa47474 https://github.com/feynmanliang/bachbot/blob/557abb971b6886f831e0566956ec76ee17aa9649/scripts/datasets.py#L97 """ majors = dict([("A-", 4),("A", 3),("B-", 2),("B", 1),("C", 0),("C#",-1),("D-", -1),("D", -2),("E-", -3),("E", -4),("F", -5),("F#",6),("G-", 6),("G", 5)]) minors = dict([("A-", 1),("A", 0),("B-", -1),("B", -2),("C", -3),("C#",-4),("D-", -4),("D", -5),("E-", 6),("E", 5),("F", 4),("F#",3),("G-", 3),("G", 2)]) if k.mode == "major": half_steps = majors[k.tonic.name] elif k.mode == "minor": half_steps = minors[k.tonic.name] p = p.transpose(half_steps) for ks in p.flat.getKeySignatures(): ks.sharps = 0 k = p.analyze("key") if verbose: transpose_time = time.time() r = transpose_time - start_time logger.info("Transpose time {}:{}".format(f, r)) chords, chord_functions, chord_durations = music21_to_chord_duration(p, k) pitches, parts_times, parts_delta_times, parts_fermatas = music21_to_pitch_duration(p) if verbose: pitch_duration_time = time.time() r = pitch_duration_time - start_time logger.info("music21 to pitch_duration time {}:{}".format(f, r)) str_key = k.name if verbose: ttime = time.time() r = ttime - start_time logger.info("Overall file time {}:{}".format(f, r)) str_time_sig = time_sigs[0] return (pitches, parts_times, 
parts_delta_times, str_key, orig_key, orig_mode, str_time_sig, f, p.quarterLength, chords, chord_functions, chord_durations, parts_fermatas) def _music_extract(data_path, pickle_path, ext=".xml", parse_timeout=100, multiprocess_count=4, verbose=False): if not os.path.exists(pickle_path): logger.info("Pickled file {} not found, creating. This may take a few minutes...".format(pickle_path)) itime = time.time() all_chords = [] all_chord_functions = [] all_chord_durations = [] all_parts_fermatas = [] all_pitches = [] all_parts_times = [] all_parts_delta_times = [] all_orig_keys = [] all_orig_modes = [] all_keys = [] all_time_sigs = [] all_filenames = [] all_quarter_lengths = [] if "basestring" not in globals(): basestring = str if isinstance(data_path, basestring): files = sorted([fi for fi in os.listdir(data_path) if fi.endswith(ext)]) else: files = sorted([ap for ap in data_path if ap.endswith(ext)]) logger.info("Processing {} files".format(len(files))) if multiprocess_count is not None: pool = multiprocessing.Pool(multiprocess_count) ex = functools.partial(_music_single_extract, files, data_path, verbose) abortable_ex = functools.partial(abortable_worker, ex, timeout=parse_timeout) result = pool.map(abortable_ex, range(len(files))) pool.close() pool.join() else: result = [] for n in range(len(files)): r = _music_single_extract(files, data_path, verbose, n) result.append(r) for n, r in enumerate(result): if r[0] != TIMEOUT_ID: (pitches, parts_times, parts_delta_times, key, orig_key, orig_mode, time_signature, fname, quarter_length, chords, chord_functions, chord_durations, fermatas) = r all_chords.append(chords) all_chord_functions.append(chord_functions) all_chord_durations.append(chord_durations) all_pitches.append(pitches) all_parts_times.append(parts_times) all_parts_delta_times.append(parts_delta_times) all_parts_fermatas.append(fermatas) all_keys.append(key) all_orig_keys.append(orig_key) all_orig_modes.append(orig_mode) all_time_sigs.append(time_signature) all_filenames.append(fname) all_quarter_lengths.append(quarter_length) else: logger.info("Result {} timed out".format(n)) gtime = time.time() if verbose: r = gtime - itime logger.info("Overall time {}".format(r)) d = {"data_pitches": all_pitches, "data_parts_times": all_parts_times, "data_parts_delta_times": all_parts_delta_times, "data_parts_fermatas": all_parts_fermatas, "data_keys": all_keys, "data_orig_keys": all_orig_keys, "data_orig_modes": all_orig_modes, "data_time_sigs": all_time_sigs, "data_chords": all_chords, "data_chord_functions": all_chord_functions, "data_chord_durations": all_chord_durations, "data_quarter_lengths": all_quarter_lengths, "filenames": all_filenames} with open(pickle_path, "wb") as f: logger.info("Saving pickle file {}".format(pickle_path)) pickle.dump(d, f) logger.info("Pickle file {} saved".format(pickle_path)) else: logger.info("Loading cached data from {}".format(pickle_path)) with open(pickle_path, "rb") as f: d = pickle.load(f) all_pitches = d["data_pitches"] all_parts_times = d["data_parts_times"] all_parts_delta_times = d["data_parts_delta_times"] all_parts_fermatas = d["data_parts_fermatas"] all_keys = d["data_keys"] all_orig_keys = d["data_orig_keys"] all_orig_modes = d["data_orig_modes"] all_time_sigs = d["data_time_sigs"] all_chords = d["data_chords"] all_chord_functions = d["data_chord_functions"] all_chord_durations = d["data_chord_durations"] all_chord_quarter_lengths = d["data_quarter_lengths"] all_filenames = d["filenames"] r = {"list_of_data_pitches": all_pitches, 
"list_of_data_times": all_parts_times, "list_of_data_time_deltas": all_parts_delta_times, "list_of_data_parts_fermatas": all_parts_fermatas, "list_of_data_keys": all_keys, "list_of_data_orig_keys": all_orig_keys, "list_of_data_orig_modes": all_orig_modes, "list_of_data_time_sigs": all_time_sigs, "list_of_data_chords": all_chords, "list_of_data_chord_functions": all_chord_functions, "list_of_data_chord_durations": all_chord_durations, "list_of_data_chord_quarter_lengths": all_chord_quarter_lengths, "list_of_filenames": all_filenames} return r def _common_features_from_music_extract(mu, equal_voice_count=4, verbose=False): all_quantized_16th_pitches = [] all_quantized_16th_pitches_no_hold = [] all_quantized_16th_fermatas = [] all_quantized_16th_subbeats = [] all_quantized_16th_chords = [] all_quantized_16th_chord_functions = [] all_pitches = mu["list_of_data_pitches"] all_parts_delta_times = mu["list_of_data_time_deltas"] all_parts_fermatas = mu["list_of_data_parts_fermatas"] all_chords = mu["list_of_data_chords"] all_chord_functions = mu["list_of_data_chord_functions"] all_chord_durations = mu["list_of_data_chord_durations"] invalids = [] for i in range(len(all_pitches)): try: qq = pitch_and_duration_to_quantized(all_pitches[i], all_parts_delta_times[i], .25, list_of_metas_voices=[all_parts_fermatas[i]], verbose=verbose) qqnh = pitch_and_duration_to_quantized(all_pitches[i], all_parts_delta_times[i], .25, list_of_metas_voices=[all_parts_fermatas[i]], verbose=verbose, hold_symbol=False) cc = chord_and_chord_duration_to_quantized(all_chords[i], all_chord_durations[i], .25, list_of_chord_metas=[all_chord_functions[i]], verbose=verbose) if qq[0].shape[1] != equal_voice_count: invalids.append(i) else: subbeat_counter = [1, 2, 3, 4] * (len(qq[0]) // 4 + 1) subbeat_counter = subbeat_counter[:len(qq[0])] collapsed_fermatas = [1 if sum([qq[1][vi][ti] for vi in range(len(qq[1]))]) > 0 else 0 for ti in range(len(qq[0]))] all_quantized_16th_pitches.append(qq[0]) all_quantized_16th_pitches_no_hold.append(qqnh[0]) all_quantized_16th_fermatas.append(collapsed_fermatas) all_quantized_16th_subbeats.append(subbeat_counter) all_quantized_16th_chords.append(cc[0]) all_quantized_16th_chord_functions.append(cc[1]) except: invalids.append(i) assert len(all_quantized_16th_chords) == len(all_quantized_16th_pitches) mu_res = {} for k, v in mu.items(): mu_res[k] = [vi for n, vi in enumerate(v) if n not in invalids] assert len(mu_res[k]) == len(all_quantized_16th_pitches) mu_res["list_of_data_quantized_16th_pitches"] = all_quantized_16th_pitches mu_res["list_of_data_quantized_16th_pitches_no_hold"] = all_quantized_16th_pitches_no_hold mu_res["list_of_data_quantized_16th_fermatas"] = all_quantized_16th_fermatas mu_res["list_of_data_quantized_16th_subbeats"] = all_quantized_16th_subbeats mu_res["list_of_data_quantized_16th_chords"] = all_quantized_16th_chords mu_res["list_of_data_quantized_16th_chord_functions"] = all_quantized_16th_chord_functions return mu_res def check_fetch_jsb(): all_bach_paths = corpus.getComposer("bach") partial_path = get_tfbldr_dataset_dir("jsb") for path in all_bach_paths: if "riemenschneider" in path: continue filename = os.path.split(path)[-1] local_path = os.path.join(partial_path, filename) if not os.path.exists(local_path): shutil.copy2(path, local_path) return partial_path def fetch_jsb(keys=["C major", "A minor"], equal_voice_count=4, verbose=True): data_path = check_fetch_jsb() pickle_path = os.path.join(data_path, "__processed_jsb.pkl") mu = _music_extract(data_path, pickle_path, 
ext=".mxl", verbose=verbose) mu_res = _common_features_from_music_extract(mu, equal_voice_count=equal_voice_count, verbose=verbose) return mu_res def check_fetch_josquin(): partial_path = get_tfbldr_dataset_dir("josquin") if not os.path.exists(partial_path + os.sep + "jrp-scores"): cur = os.getcwd() os.chdir(partial_path) pe("git clone --recursive https://github.com/josquin-research-project/jrp-scores") os.chdir("jrp-scores") pe("make webreduced") os.chdir(cur) jos_sub = partial_path + os.sep + "jrp-scores" + os.sep + "Jos" + os.sep + "kern-reduced" return jos_sub
BSD 3-Clause New or Revised License
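Usage sketch for the fetch_jsb helper visible in the context above; this is an illustration only — the import path and the returned dictionary keys are assumed from the module code shown, not from separate documentation.
# Hypothetical import path; the module above belongs to the tfbldr package's dataset utilities.
from tfbldr.datasets import fetch_jsb

mu = fetch_jsb(equal_voice_count=4, verbose=True)      # parses and caches the chorales on first call
pitches = mu["list_of_data_quantized_16th_pitches"]    # one quantized (time, voice) array per chorale
chords = mu["list_of_data_quantized_16th_chords"]      # matching quantized chord sequences
print(len(pitches), "chorales kept after key/voice filtering")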
byceps/byceps
byceps/services/shop/order/service.py
mark_order_as_paid
python
def mark_order_as_paid(
    order_id: OrderID,
    payment_method: str,
    initiator_id: UserID,
    *,
    additional_event_data: Optional[Mapping[str, str]] = None,
) -> ShopOrderPaid:
    order = _get_order_entity(order_id)
    if _is_paid(order):
        raise OrderAlreadyMarkedAsPaid()
    initiator = user_service.get_user(initiator_id)
    orderer_user = user_service.get_user(order.placed_by_id)
    now = datetime.utcnow()
    updated_at = now
    payment_state_from = order.payment_state
    payment_state_to = PaymentState.paid
    order.payment_method = payment_method
    _update_payment_state(order, payment_state_to, updated_at, initiator.id)
    event_type = 'order-paid'
    event_data: OrderEventData = {}
    if additional_event_data is not None:
        event_data.update(additional_event_data)
    event_data.update(
        {
            'initiator_id': str(initiator.id),
            'former_payment_state': payment_state_from.name,
            'payment_method': payment_method,
        }
    )
    event = DbOrderEvent(now, event_type, order.id, event_data)
    db.session.add(event)
    db.session.commit()
    action_service.execute_actions(
        _order_to_transfer_object(order), payment_state_to, initiator.id
    )
    return ShopOrderPaid(
        occurred_at=updated_at,
        initiator_id=initiator.id,
        initiator_screen_name=initiator.screen_name,
        order_id=order.id,
        order_number=order.order_number,
        orderer_id=orderer_user.id,
        orderer_screen_name=orderer_user.screen_name,
        payment_method=payment_method,
    )
Mark the order as paid.
https://github.com/byceps/byceps/blob/138f928e98fd1e3d79943e1a8744ea04cef465b5/byceps/services/shop/order/service.py#L331-L388
from __future__ import annotations from datetime import datetime from typing import Iterator, Mapping, Optional, Sequence from flask import current_app from flask_babel import lazy_gettext from sqlalchemy.exc import IntegrityError from ....database import db, paginate, Pagination from ....events.shop import ShopOrderCanceled, ShopOrderPaid, ShopOrderPlaced from ....typing import UserID from ...user import service as user_service from ..article import service as article_service from ..cart.models import Cart, CartItem from ..shop.dbmodels import Shop as DbShop from ..shop import service as shop_service from ..shop.transfer.models import ShopID from ..storefront import service as storefront_service from ..storefront.transfer.models import StorefrontID from .dbmodels.line_item import LineItem as DbLineItem from .dbmodels.order import Order as DbOrder from .dbmodels.order_event import OrderEvent as DbOrderEvent, OrderEventData from . import action_service, event_service, sequence_service from .transfer.models import ( Address, Order, OrderID, LineItem, Orderer, OrderNumber, OrderState, PaymentState, ) class OrderFailed(Exception): pass def place_order( storefront_id: StorefrontID, orderer: Orderer, cart: Cart, *, created_at: Optional[datetime] = None, ) -> tuple[Order, ShopOrderPlaced]: storefront = storefront_service.get_storefront(storefront_id) shop = shop_service.get_shop(storefront.shop_id) orderer_user = user_service.get_user(orderer.user_id) order_number_sequence = sequence_service.get_order_number_sequence( storefront.order_number_sequence_id ) order_number = sequence_service.generate_order_number( order_number_sequence.id ) cart_items = cart.get_items() order = _build_order(shop.id, order_number, orderer, created_at) line_items = list(_build_line_items(cart_items, order)) order.total_amount = cart.calculate_total_amount() order.processing_required = any( line_item.processing_required for line_item in line_items ) db.session.add(order) db.session.add_all(line_items) _reduce_article_stock(cart_items) try: db.session.commit() except IntegrityError as e: current_app.logger.error('Order %s failed: %s', order_number, e) db.session.rollback() raise OrderFailed() order_dto = _order_to_transfer_object(order) event = ShopOrderPlaced( occurred_at=order.created_at, initiator_id=orderer_user.id, initiator_screen_name=orderer_user.screen_name, order_id=order.id, order_number=order.order_number, orderer_id=orderer_user.id, orderer_screen_name=orderer_user.screen_name, ) return order_dto, event def _build_order( shop_id: ShopID, order_number: OrderNumber, orderer: Orderer, created_at: Optional[datetime], ) -> DbOrder: return DbOrder( shop_id, order_number, orderer.user_id, orderer.first_names, orderer.last_name, orderer.country, orderer.zip_code, orderer.city, orderer.street, created_at=created_at, ) def _build_line_items( cart_items: list[CartItem], order: DbOrder ) -> Iterator[DbLineItem]: for cart_item in cart_items: article = cart_item.article quantity = cart_item.quantity line_amount = cart_item.line_amount yield DbLineItem( order, article.item_number, article.type_, article.description, article.price, article.tax_rate, quantity, line_amount, article.processing_required, ) def _reduce_article_stock(cart_items: list[CartItem]) -> None: for cart_item in cart_items: article = cart_item.article quantity = cart_item.quantity article_service.decrease_quantity(article.id, quantity, commit=False) def add_note(order_id: OrderID, author_id: UserID, text: str) -> None: order = get_order(order_id) author = 
user_service.get_user(author_id) event_type = 'order-note-added' data = { 'author_id': str(author.id), 'text': text, } event_service.create_event(event_type, order.id, data) def set_invoiced_flag(order_id: OrderID, initiator_id: UserID) -> None: order = _get_order_entity(order_id) initiator = user_service.get_user(initiator_id) now = datetime.utcnow() event_type = 'order-invoiced' data = { 'initiator_id': str(initiator.id), } event = DbOrderEvent(now, event_type, order.id, data) db.session.add(event) order.invoice_created_at = now db.session.commit() def unset_invoiced_flag(order_id: OrderID, initiator_id: UserID) -> None: order = _get_order_entity(order_id) initiator = user_service.get_user(initiator_id) now = datetime.utcnow() event_type = 'order-invoiced-withdrawn' data = { 'initiator_id': str(initiator.id), } event = DbOrderEvent(now, event_type, order.id, data) db.session.add(event) order.invoice_created_at = None db.session.commit() def set_shipped_flag(order_id: OrderID, initiator_id: UserID) -> None: order = _get_order_entity(order_id) initiator = user_service.get_user(initiator_id) if not order.processing_required: raise ValueError('Order contains no items that require shipping.') now = datetime.utcnow() event_type = 'order-shipped' data = { 'initiator_id': str(initiator.id), } event = DbOrderEvent(now, event_type, order.id, data) db.session.add(event) order.processed_at = now db.session.commit() def unset_shipped_flag(order_id: OrderID, initiator_id: UserID) -> None: order = _get_order_entity(order_id) initiator = user_service.get_user(initiator_id) if not order.processing_required: raise ValueError('Order contains no items that require shipping.') now = datetime.utcnow() event_type = 'order-shipped-withdrawn' data = { 'initiator_id': str(initiator.id), } event = DbOrderEvent(now, event_type, order.id, data) db.session.add(event) order.processed_at = None db.session.commit() class OrderAlreadyCanceled(Exception): pass class OrderAlreadyMarkedAsPaid(Exception): pass def cancel_order( order_id: OrderID, initiator_id: UserID, reason: str ) -> ShopOrderCanceled: order = _get_order_entity(order_id) if _is_canceled(order): raise OrderAlreadyCanceled() initiator = user_service.get_user(initiator_id) orderer_user = user_service.get_user(order.placed_by_id) has_order_been_paid = _is_paid(order) now = datetime.utcnow() updated_at = now payment_state_from = order.payment_state payment_state_to = ( PaymentState.canceled_after_paid if has_order_been_paid else PaymentState.canceled_before_paid ) _update_payment_state(order, payment_state_to, updated_at, initiator.id) order.cancelation_reason = reason event_type = ( 'order-canceled-after-paid' if has_order_been_paid else 'order-canceled-before-paid' ) data = { 'initiator_id': str(initiator.id), 'former_payment_state': payment_state_from.name, 'reason': reason, } event = DbOrderEvent(now, event_type, order.id, data) db.session.add(event) for line_item in order.line_items: article_service.increase_quantity( line_item.article.id, line_item.quantity, commit=False ) db.session.commit() action_service.execute_actions( _order_to_transfer_object(order), payment_state_to, initiator.id ) return ShopOrderCanceled( occurred_at=updated_at, initiator_id=initiator.id, initiator_screen_name=initiator.screen_name, order_id=order.id, order_number=order.order_number, orderer_id=orderer_user.id, orderer_screen_name=orderer_user.screen_name, )
BSD 3-Clause New or Revised License
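A hedged usage sketch for mark_order_as_paid; the identifiers and the payment method string below are placeholders, while the call signature, the OrderAlreadyMarkedAsPaid exception, and the returned ShopOrderPaid event come from the code in this record.
from byceps.services.shop.order import service as order_service
from byceps.services.shop.order.service import OrderAlreadyMarkedAsPaid

try:
    paid_event = order_service.mark_order_as_paid(
        order_id,                      # placeholder: an existing OrderID
        'bank_transfer',               # free-form payment method string
        admin_user_id,                 # placeholder: UserID of the acting admin
        additional_event_data={'note': 'wire received'},
    )
except OrderAlreadyMarkedAsPaid:
    pass  # the order was already settled; nothing to do
# `paid_event` can then be handled however the surrounding application sees fit.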
geostat-framework/ogs5py
ogs5py/fileclasses/base.py
BlockFile.update_block
python
def update_block(self, index=None, main_key=None, **block):
    upd_block = self.get_block(index, as_dict=True)
    if main_key is not None:
        upd_block["main_key"] = main_key
    if "" in upd_block:
        tmp_block = {upd_block["main_key"]: upd_block[""]}
        upd_block = tmp_block
    upd_block.update(block)
    self.del_main_keyword(main_index=index, del_all=False)
    self.add_block(index=index, **upd_block)
Update a Block from the actual file.

Parameters
----------
index : int or None, optional
    Positional index of the block of interest. As default, the last one
    is used. Default: None
main_key : string, optional
    Main keyword of the block that should be updated (see: ``MKEYS``).
    This shouldn't be done. Default: None
**block : keyword dict
    Here the dict-keywords are the ogs-subkeywords and the value is the
    content that should be added with this ogs-subkeyword. If a block
    should contain content directly connected to a main keyword, use this
    main keyword as input-keyword and the content as value:
    ``SUBKEY=content``
https://github.com/geostat-framework/ogs5py/blob/2bc4428c4c485d094e02c129ba5051745df58391/ogs5py/fileclasses/base.py#L502-L536
import os import shutil import time import copy from ogs5py.tools.tools import ( format_content_line, format_content, search_mkey, uncomment, get_key, is_key, is_mkey, is_skey, find_key_in_list, ) try: from ogs5py._version import __version__ as version except ImportError: version = "0.0.0.dev0" CWD = os.getcwd() TOP_COM = "|------------------ Written with ogs5py ------------------|" BOT_COM = ( "|-- Written with ogs5py (" + version + ") on: " + time.strftime("%Y-%m-%d_%H-%M-%S") + " --|" ) class File(object): def __init__(self, task_root=None, task_id="model", file_ext=".std"): self._name = None self.name_from_id = True if task_root is None: task_root = os.path.join(CWD, "ogs5model") self.task_root = task_root self.task_id = task_id self.top_com = TOP_COM self.bot_com = BOT_COM self.file_ext = file_ext self.copy_file = None self.copy_path = None self._force = False @classmethod def _get_clsname(cls): return cls.__name__ def get_file_type(self): return self._get_clsname() @property def name(self): if self.name_from_id: return self.task_id return self._name @name.setter def name(self, value=None): if value is None: self.name_from_id = True self._name = None else: self._name = str(value) self.name_from_id = False @property def file_path(self): return os.path.join(self.task_root, self.name + self.file_ext) @property def file_name(self): return os.path.basename(self.file_path) @property def is_empty(self): return False @property def force_writing(self): return self._force @force_writing.setter def force_writing(self, force): self._force = bool(force) def reset(self): pass def add_copy_link(self, path, symlink=False): if os.path.isfile(path): path = os.path.abspath(path) self.copy_file = "link" if symlink else "copy" self.copy_path = path else: print( "ogs5py " + self.get_file_type() + ": Given copy-path is not a readable file: " + path ) def del_copy_link(self): self.copy_file = None self.copy_path = None def read_file(self, path, encoding=None, verbose=False): pass def save(self, path, **kwargs): pass def write_file(self): self._update_out() if not os.path.exists(self.task_root): os.makedirs(self.task_root) f_path = self.file_path if self.copy_file is None: if self.force_writing or not self.is_empty: self.save(f_path) elif self.copy_file == "copy": shutil.copyfile(self.copy_path, f_path) else: os.symlink(self.copy_path, f_path) def check(self, verbose=True): return True def _update_in(self): pass def _update_out(self): pass def __bool__(self): return not self.is_empty def __nonzero__(self): return self.__bool__() def __str__(self): return self.__repr__() class LineFile(File): def __init__( self, lines=None, name=None, file_ext=".txt", task_root=None, task_id="model", ): super(LineFile, self).__init__(task_root, task_id, file_ext) self.lines = [] if lines is None else lines self.name = name @property def is_empty(self): if self.check(False): return not bool(self.lines) return True def reset(self): self.lines = [] def check(self, verbose=True): if verbose: print("This file is not checked!") try: iter(self.lines) except TypeError: return False return True def save(self, path): if self.lines: with open(path, "w") as fout: for line in self.lines: print(line, file=fout) def read_file(self, path, encoding=None, verbose=False): from io import open self.reset() try: with open(path, "r", encoding=encoding) as fin: self.lines = fin.read().splitlines() except IOError: if verbose: print( "ogs5py " + self.get_file_type() + ": could not read lines from: " + path ) def __repr__(self): out = "" for line in 
self.lines[:5]: out += line + "\n" if len(self.lines) > 5: out += "..." return out class BlockFile(File): MKEYS = [] SKEYS = [] STD = {} def __init__(self, task_root=None, task_id="model", file_ext=".std"): super(BlockFile, self).__init__(task_root, task_id, file_ext) self.mainkw = [] self.subkw = [] self.cont = [] @property def is_empty(self): return not bool(self.mainkw) def reset(self): self.del_main_keyword(del_all=True) self._update_in() @property def block_no(self): return self.get_block_no() def get_block_no(self): return len(self.mainkw) def get_multi_keys(self, index=None): index = len(self.mainkw) - 1 if index is None else int(index) if -len(self.mainkw) <= index < len(self.mainkw): sub_keys = self.subkw[index] else: print( "ogs5py " + self.get_file_type() + ": get_multi_keys index out of bounds - " + str(index) ) return {} result = {} for key in sub_keys: if not key: continue count = sub_keys.count(key) if count > 1 and key not in result: result[key] = count return result def is_block_unique(self, index=None): return not bool(self.get_multi_keys(index)) def get_block(self, index=None, as_dict=True): index = len(self.mainkw) - 1 if index is None else int(index) if -len(self.mainkw) <= index < len(self.mainkw): main_key = self.mainkw[index] sub_key = self.subkw[index] cont = self.cont[index] else: print( "ogs5py " + self.get_file_type() + ": get_block index out of bounds - " + str(index) ) if as_dict: return {} return None, [], [] if as_dict and self.is_block_unique(index): out = {"main_key": main_key} for sub, con in zip(sub_key, cont): out[sub] = con return out elif as_dict: raise ValueError( "ogs5py " + self.get_file_type() + ": get_block - block has no unique sub-keys and can not be " + "represented as dict." ) return main_key, sub_key, cont
MIT License
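A small sketch of update_block in use. BlockFile itself is a base class, so `mcp` below stands for any concrete OGS file object derived from it, and the sub-keyword names are purely illustrative, not taken from this record.
# `mcp` is assumed to be a concrete BlockFile subclass instance that already holds blocks.
mcp.update_block(index=0, DENSITY=[1, 1000.0])               # update one sub-keyword of the first block
mcp.update_block(PERMEABILITY_TENSOR=["ISOTROPIC", 1e-12])   # no index given: the last block is updated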
tox-dev/tox
src/tox/venv.py
VirtualEnv.path
python
def path(self):
    return self.envconfig.envdir
Path to environment base dir.
https://github.com/tox-dev/tox/blob/4cf816c97de18dca9d99dc86fec341c1d2c4a20a/src/tox/venv.py#L159-L161
import codecs import json import os import pipes import re import sys from itertools import chain import py import tox from tox import reporter from tox.action import Action from tox.config.parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE from tox.constants import INFO, PARALLEL_RESULT_JSON_PREFIX, PARALLEL_RESULT_JSON_SUFFIX from tox.package.local import resolve_package from tox.util.lock import get_unique_file from tox.util.path import ensure_empty_dir from .config import DepConfig MAXINTERP = 2048 class CreationConfig: def __init__( self, base_resolved_python_sha256, base_resolved_python_path, tox_version, sitepackages, usedevelop, deps, alwayscopy, ): self.base_resolved_python_sha256 = base_resolved_python_sha256 self.base_resolved_python_path = base_resolved_python_path self.tox_version = tox_version self.sitepackages = sitepackages self.usedevelop = usedevelop self.alwayscopy = alwayscopy self.deps = deps def writeconfig(self, path): lines = [ "{} {}".format(self.base_resolved_python_sha256, self.base_resolved_python_path), "{} {:d} {:d} {:d}".format( self.tox_version, self.sitepackages, self.usedevelop, self.alwayscopy, ), ] for dep in self.deps: lines.append("{} {}".format(*dep)) content = "\n".join(lines) path.ensure() path.write(content) return content @classmethod def readconfig(cls, path): try: lines = path.readlines(cr=0) base_resolved_python_info = lines.pop(0).split(None, 1) tox_version, sitepackages, usedevelop, alwayscopy = lines.pop(0).split(None, 4) sitepackages = bool(int(sitepackages)) usedevelop = bool(int(usedevelop)) alwayscopy = bool(int(alwayscopy)) deps = [] for line in lines: base_resolved_python_sha256, depstring = line.split(None, 1) deps.append((base_resolved_python_sha256, depstring)) base_resolved_python_sha256, base_resolved_python_path = base_resolved_python_info return CreationConfig( base_resolved_python_sha256, base_resolved_python_path, tox_version, sitepackages, usedevelop, deps, alwayscopy, ) except Exception: return None def matches_with_reason(self, other, deps_matches_subset=False): for attr in ( "base_resolved_python_sha256", "base_resolved_python_path", "tox_version", "sitepackages", "usedevelop", "alwayscopy", ): left = getattr(self, attr) right = getattr(other, attr) if left != right: return False, "attr {} {!r}!={!r}".format(attr, left, right) self_deps = set(self.deps) other_deps = set(other.deps) if self_deps != other_deps: if deps_matches_subset: diff = other_deps - self_deps if diff: return False, "missing in previous {!r}".format(diff) else: return False, "{!r}!={!r}".format(self_deps, other_deps) return True, None def matches(self, other, deps_matches_subset=False): outcome, _ = self.matches_with_reason(other, deps_matches_subset) return outcome class VirtualEnv(object): def __init__(self, envconfig=None, popen=None, env_log=None): self.envconfig = envconfig self.popen = popen self._actions = [] self.env_log = env_log self._result_json_path = None def new_action(self, msg, *args): config = self.envconfig.config command_log = self.env_log.get_commandlog( "test" if msg in ("run-test", "run-test-pre", "run-test-post") else "setup", ) return Action( self.name, msg, args, self.envconfig.envlogdir, config.option.resultjson, command_log, self.popen, self.envconfig.envpython, self.envconfig.suicide_timeout, self.envconfig.interrupt_timeout, self.envconfig.terminate_timeout, ) def get_result_json_path(self): if self._result_json_path is None: if self.envconfig.config.option.resultjson: self._result_json_path = 
get_unique_file( self.path, PARALLEL_RESULT_JSON_PREFIX, PARALLEL_RESULT_JSON_SUFFIX, ) return self._result_json_path @property def hook(self): return self.envconfig.config.pluginmanager.hook @property
MIT License
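The property simply forwards to the environment config; a minimal sketch, assuming `venv` is a VirtualEnv that tox has already built for an existing envconfig:
# `venv` is a tox VirtualEnv instance created elsewhere by the session machinery.
env_dir = venv.path                     # same object as venv.envconfig.envdir
assert env_dir == venv.envconfig.envdir
print(env_dir)                          # e.g. <project>/.tox/py39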
libtraffic/bigscity-libtraffic
libcity/data/dataset/trajectory_dataset.py
TrajectoryDataset.divide_data
python
def divide_data(self):
    train_data = []
    eval_data = []
    test_data = []
    train_rate = self.config['train_rate']
    eval_rate = self.config['eval_rate']
    user_set = self.data['encoded_data'].keys()
    for uid in tqdm(user_set, desc="dividing data"):
        encoded_trajectories = self.data['encoded_data'][uid]
        traj_len = len(encoded_trajectories)
        train_num = math.ceil(traj_len * train_rate)
        eval_num = math.ceil(traj_len * (train_rate + eval_rate))
        train_data += encoded_trajectories[:train_num]
        eval_data += encoded_trajectories[train_num:eval_num]
        test_data += encoded_trajectories[eval_num:]
    return train_data, eval_data, test_data
return:
    train_data (list)
    eval_data (list)
    test_data (list)
https://github.com/libtraffic/bigscity-libtraffic/blob/c1048510ca61f2eb1e236015ccc575e2f5e951b8/libcity/data/dataset/trajectory_dataset.py#L220-L243
import os import json import pandas as pd import math from tqdm import tqdm import importlib from logging import getLogger from libcity.data.dataset import AbstractDataset from libcity.utils import parse_time, cal_timeoff from libcity.data.utils import generate_dataloader parameter_list = ['dataset', 'min_session_len', 'min_sessions', "max_session_len", 'cut_method', 'window_size', 'min_checkins'] class TrajectoryDataset(AbstractDataset): def __init__(self, config): self.config = config self.cache_file_folder = './libcity/cache/dataset_cache/' self.cut_data_cache = './libcity/cache/dataset_cache/cut_traj' for param in parameter_list: self.cut_data_cache += '_' + str(self.config[param]) self.cut_data_cache += '.json' self.data_path = './raw_data/{}/'.format(self.config['dataset']) self.data = None self.encoder = self.get_encoder() self.pad_item = None self.logger = getLogger() def get_data(self): if self.data is None: if self.config['cache_dataset'] and os.path.exists(self.encoder.cache_file_name): f = open(self.encoder.cache_file_name, 'r') self.data = json.load(f) self.pad_item = self.data['pad_item'] f.close() else: if os.path.exists(self.cut_data_cache): f = open(self.cut_data_cache, 'r') cut_data = json.load(f) f.close() else: cut_data = self.cutter_filter() if not os.path.exists(self.cache_file_folder): os.makedirs(self.cache_file_folder) with open(self.cut_data_cache, 'w') as f: json.dump(cut_data, f) self.logger.info('finish cut data') encoded_data = self.encode_traj(cut_data) self.data = encoded_data self.pad_item = self.encoder.pad_item if self.config['cache_dataset']: if not os.path.exists(self.cache_file_folder): os.makedirs(self.cache_file_folder) with open(self.encoder.cache_file_name, 'w') as f: json.dump(encoded_data, f) train_data, eval_data, test_data = self.divide_data() return generate_dataloader(train_data, eval_data, test_data, self.encoder.feature_dict, self.config['batch_size'], self.config['num_workers'], self.pad_item, self.encoder.feature_max_len) def get_data_feature(self): res = self.data['data_feature'] res['distance_upper'] = self.config['distance_upper'] return res def cutter_filter(self): traj = pd.read_csv(os.path.join( self.data_path, '{}.dyna'.format(self.config['dataset']))) group_location = traj.groupby('location').count() filter_location = group_location[group_location['time'] >= self.config['min_checkins']] location_index = filter_location.index.tolist() traj = traj[traj['location'].isin(location_index)] user_set = pd.unique(traj['entity_id']) res = {} min_session_len = self.config['min_session_len'] max_session_len = self.config['max_session_len'] min_sessions = self.config['min_sessions'] window_size = self.config['window_size'] cut_method = self.config['cut_method'] if cut_method == 'time_interval': for uid in tqdm(user_set, desc="cut and filter trajectory"): usr_traj = traj[traj['entity_id'] == uid] sessions = [] session = [] for index, row in usr_traj.iterrows(): now_time = parse_time(row['time']) if index == 0: session.append(row.tolist()) prev_time = now_time else: time_off = cal_timeoff(now_time, prev_time) if time_off < window_size and time_off >= 0 and len(session) < max_session_len: session.append(row.tolist()) else: if len(session) >= min_session_len: sessions.append(session) session = [] session.append(row.tolist()) prev_time = now_time if len(session) >= min_session_len: sessions.append(session) if len(sessions) >= min_sessions: res[str(uid)] = sessions elif cut_method == 'same_date': for uid in tqdm(user_set, desc="cut and filter 
trajectory"): usr_traj = traj[traj['entity_id'] == uid] sessions = [] session = [] prev_date = None for index, row in usr_traj.iterrows(): now_time = parse_time(row['time']) now_date = now_time.day if index == 0: session.append(row.tolist()) else: if prev_date == now_date and len(session) < max_session_len: session.append(row.tolist()) else: if len(session) >= min_session_len: sessions.append(session) session = [] session.append(row.tolist()) prev_date = now_date if len(session) >= min_session_len: sessions.append(session) if len(sessions) >= min_sessions: res[str(uid)] = sessions else: if max_session_len != window_size: raise ValueError('the fixed length window is not equal to max_session_len') for uid in tqdm(user_set, desc="cut and filter trajectory"): usr_traj = traj[traj['entity_id'] == uid] sessions = [] session = [] for index, row in usr_traj.iterrows(): if len(session) < window_size: session.append(row.tolist()) else: sessions.append(session) session = [] session.append(row.tolist()) if len(session) >= min_session_len: sessions.append(session) if len(sessions) >= min_sessions: res[str(uid)] = sessions return res def encode_traj(self, data): encoded_data = {} for uid in tqdm(data, desc="encoding trajectory"): encoded_data[uid] = self.encoder.encode(int(uid), data[uid]) self.encoder.gen_data_feature() return { 'data_feature': self.encoder.data_feature, 'pad_item': self.encoder.pad_item, 'encoded_data': encoded_data }
Apache License 2.0
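A quick worked example of how the per-user split sizes in divide_data come out; the rates are illustrative values that would normally live in config['train_rate'] and config['eval_rate'].
import math

traj_len = 10                                              # encoded sessions for one user
train_rate, eval_rate = 0.7, 0.1                           # illustrative config values
train_num = math.ceil(traj_len * train_rate)               # 7  -> sessions [0:7] go to training
eval_num = math.ceil(traj_len * (train_rate + eval_rate))  # 8  -> session  [7:8] goes to evaluation
# the remaining sessions [8:10] form the test split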
nyu-dl/dl4mt-seqgen
src/evaluation/xnli.py
XNLI.run
python
def run(self):
    params = self.params
    self.data = self.load_data()
    if not self.data['dico'] == self._embedder.dico:
        raise Exception(("Dictionary in evaluation data (%i words) seems different than the one " +
                         "in the pretrained model (%i words). Please verify you used the same dictionary, " +
                         "and the same values for max_vocab and min_count.")
                        % (len(self.data['dico']), len(self._embedder.dico)))
    self.embedder = copy.deepcopy(self._embedder)
    self.embedder.cuda()
    self.proj = nn.Sequential(*[
        nn.Dropout(params.dropout),
        nn.Linear(self.embedder.out_dim, 3)
    ]).cuda()
    if params.fp16:
        assert torch.backends.cudnn.enabled
        self.embedder.model = network_to_half(self.embedder.model)
        self.proj = network_to_half(self.proj)
    self.optimizer = get_optimizer(
        list(self.embedder.get_parameters(params.finetune_layers)) +
        list(self.proj.parameters()),
        params.optimizer
    )
    if params.fp16:
        self.optimizer = FP16_Optimizer(self.optimizer, dynamic_loss_scale=True)
    for epoch in range(params.n_epochs):
        self.epoch = epoch
        logger.info("XNLI - Training epoch %i ..." % epoch)
        self.train()
        logger.info("XNLI - Evaluating epoch %i ..." % epoch)
        with torch.no_grad():
            scores = self.eval()
            self.scores.update(scores)
Run XNLI training / evaluation.
https://github.com/nyu-dl/dl4mt-seqgen/blob/dd5a08182f1a32386e6203cbeaa1cee5a0b83994/src/evaluation/xnli.py#L55-L107
from logging import getLogger import os import copy import time import json from collections import OrderedDict import torch from torch import nn import torch.nn.functional as F from src.fp16 import network_to_half from apex.fp16_utils import FP16_Optimizer from ..utils import get_optimizer, concat_batches, truncate, to_cuda from ..data.dataset import ParallelDataset from ..data.loader import load_binarized, set_dico_parameters XNLI_LANGS = ['ar', 'bg', 'de', 'el', 'en', 'es', 'fr', 'hi', 'ru', 'sw', 'th', 'tr', 'ur', 'vi', 'zh'] logger = getLogger() class XNLI: def __init__(self, embedder, scores, params): self._embedder = embedder self.params = params self.scores = scores def get_iterator(self, splt, lang): assert splt in ['valid', 'test'] or splt == 'train' and lang == 'en' return self.data[lang][splt]['x'].get_iterator( shuffle=(splt == 'train'), group_by_size=self.params.group_by_size, return_indices=True )
BSD 3-Clause New or Revised License
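A hedged sketch of how the task object is driven; `embedder`, `scores`, and `params` are assumed to come from the repository's usual fine-tuning script (a pretrained sentence-encoder wrapper, a shared score dict, and the parsed command-line namespace).
xnli = XNLI(embedder, scores, params)   # params must provide dropout, fp16, optimizer,
                                        # finetune_layers and n_epochs, as used in run()
xnli.run()                              # loads XNLI data, fine-tunes, evaluates each epoch
print(scores)                           # run() merges per-epoch eval scores into this dict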
asyml/texar-pytorch
texar/torch/data/data/data_iterators.py
DataIterator.dataset_names
python
def dataset_names(self) -> List[str]:
    return list(self._datasets.keys())
r"""A list of dataset names.
https://github.com/asyml/texar-pytorch/blob/5d67ae957763a3683ce8dd5e4d1208cc9fdb4d33/texar/torch/data/data/data_iterators.py#L501-L504
from typing import ( Dict, Iterable, Iterator, List, Optional, Sequence, Union, Mapping) import pkg_resources import torch from torch import __version__ as _torch_version from torch.utils.data import DataLoader from texar.torch.data.data.data_base import DatasetBase from texar.torch.data.data.dataset_utils import Batch from texar.torch.data.data.sampler import ( SamplerBase, SequentialSampler, RandomSampler, BufferShuffleSampler, BatchingStrategy, DynamicBatchSampler) from texar.torch.utils.types import MaybeSeq from texar.torch.utils.utils import ceildiv, map_structure _torch_version = pkg_resources.parse_version(_torch_version) __all__ = [ "DataIterator", "TrainTestDataIterator", ] DatasetsType = Union[Mapping[str, DatasetBase], MaybeSeq[DatasetBase]] if _torch_version >= pkg_resources.parse_version("1.2.0"): from torch.utils.data._utils.pin_memory import ( pin_memory as _pin_memory) elif _torch_version >= pkg_resources.parse_version("1.1.0"): from torch.utils.data._utils.pin_memory import ( pin_memory_batch as _pin_memory) else: from torch.utils.data.dataloader import ( pin_memory_batch as _pin_memory) def move_memory(data, device): def _move_fn(x): if isinstance(x, torch.Tensor): return x.to(device=device, non_blocking=True) return x if isinstance(data, Batch): return Batch(len(data), batch={ key: map_structure(_move_fn, value) for key, value in data.items() }) return map_structure(_move_fn, data) if _torch_version >= pkg_resources.parse_version("1.2.0"): from texar.torch.data.data.data_iterators_utils import TexarBaseDataLoaderIter as _BaseDataLoaderIter from texar.torch.data.data.data_iterators_utils import TexarSingleProcessDataLoaderIter as _SingleProcessDataLoaderIter from texar.torch.data.data.data_iterators_utils import TexarMultiProcessingDataLoaderIter as _MultiProcessingDataLoaderIter class _DataLoaderIter(_BaseDataLoaderIter): def __new__(cls, loader: 'SingleDatasetIterator'): if loader.num_workers > 0: return super().__new__(_MPDataLoaderIter) else: return super().__new__(_SPDataLoaderIter) def __init__(self, loader: 'SingleDatasetIterator'): self.device = loader.device self._batch_size = loader.batch_size super().__init__(loader) def __next__(self): batch = super().__next__() if (self._batch_size is not None and batch.batch_size < self._batch_size and not self.dataset.hparams.allow_smaller_final_batch): raise StopIteration if self.device is not None: batch = move_memory(batch, self.device) return batch class _SPDataLoaderIter(_DataLoaderIter, _SingleProcessDataLoaderIter): pass class _MPDataLoaderIter(_DataLoaderIter, _MultiProcessingDataLoaderIter): pass class _CacheDataLoaderIter(_BaseDataLoaderIter): def __new__(cls, loader: 'SingleDatasetIterator'): if loader.num_workers > 0: return super().__new__(_MPCacheDataLoaderIter) else: return super().__new__(_SPCacheDataLoaderIter) def __init__(self, loader: 'SingleDatasetIterator'): self._indices_dict: Dict[int, List[int]] = {} self._batch_size = loader.batch_size self.device = loader.device super().__init__(loader) class _SPCacheDataLoaderIter(_CacheDataLoaderIter, _SingleProcessDataLoaderIter): def __next__(self): index = self._next_index() data = self.dataset_fetcher.fetch(index) if self.dataset._should_yield_raw_example: index = [idx[0] for idx in index] examples, data = data self.dataset._add_cached_examples(index, examples) if self.pin_memory: data = move_memory(_pin_memory(data), self.device) return data class _MPCacheDataLoaderIter(_CacheDataLoaderIter, _MultiProcessingDataLoaderIter): dataset: DatasetBase 
worker_queue_idx: int def _try_put_index(self): assert self.tasks_outstanding < 2 * self.num_workers try: index = self._next_index() except StopIteration: return for _ in range(self.num_workers): worker_queue_idx = next(self.worker_queue_idx_cycle) if self.workers_status[worker_queue_idx]: break else: return self.index_queues[worker_queue_idx].put((self.send_idx, index)) if self.dataset._should_yield_raw_example: index = [idx[0] for idx in index] self._indices_dict[self.send_idx] = index self.task_info[self.send_idx] = (worker_queue_idx,) self.tasks_outstanding += 1 self.send_idx += 1 def _process_data(self, batch): batch = super()._process_data(batch) indices = self._indices_dict[self.rcvd_idx - 1] del self._indices_dict[self.rcvd_idx - 1] examples, batch = batch self.dataset._add_cached_examples(indices, examples) return batch def __next__(self): batch = super().__next__() if (self._batch_size is not None and batch.batch_size < self.dataset.batch_size and not self.dataset.hparams.allow_smaller_final_batch): raise StopIteration batch = move_memory(batch, self.device) return batch else: from torch.utils.data.dataloader import ( _DataLoaderIter as torch_DataLoaderIter) class _DataLoaderIter(torch_DataLoaderIter): def __init__(self, loader: 'SingleDatasetIterator'): self._batch_size = loader.batch_size self.device = loader.device super().__init__(loader) def __next__(self): batch = super().__next__() if (self._batch_size is not None and batch.batch_size < self._batch_size and not self.dataset.hparams.allow_smaller_final_batch): raise StopIteration batch = move_memory(batch, self.device) return batch class _CacheDataLoaderIter(torch_DataLoaderIter): dataset: DatasetBase worker_queue_idx: int def __init__(self, loader: 'SingleDatasetIterator'): self._indices_dict: Dict[int, List[int]] = {} self._batch_size = loader.batch_size self.device = loader.device super().__init__(loader) def _put_indices(self): assert self.batches_outstanding < 2 * self.num_workers indices = next(self.sample_iter, None) if indices is None: return self.index_queues[self.worker_queue_idx].put( (self.send_idx, indices)) if self.dataset._should_yield_raw_example: indices = [index[0] for index in indices] self._indices_dict[self.send_idx] = indices self.worker_queue_idx = ((self.worker_queue_idx + 1) % self.num_workers) self.batches_outstanding += 1 self.send_idx += 1 def _process_next_batch(self, batch): batch = super()._process_next_batch(batch) indices = self._indices_dict[self.rcvd_idx - 1] del self._indices_dict[self.rcvd_idx - 1] examples, batch = batch self.dataset._add_cached_examples(indices, examples) return batch def __next__(self): if self.num_workers == 0: indices = next(self.sample_iter) batch = self.collate_fn([self.dataset[i] for i in indices]) if self.dataset._should_yield_raw_example: indices = [index[0] for index in indices] examples, batch = batch self.dataset._add_cached_examples(indices, examples) if self.pin_memory: batch = _pin_memory(batch) else: batch = super().__next__() if (self._batch_size is not None and batch.batch_size < self.dataset.batch_size and not self.dataset.hparams.allow_smaller_final_batch): raise StopIteration batch = move_memory(batch, self.device) return batch class SingleDatasetIterator(DataLoader): dataset: DatasetBase def __init__(self, dataset: DatasetBase, batching_strategy: Optional[BatchingStrategy] = None, pin_memory: Optional[bool] = None): shuffle = dataset.hparams.shuffle shuffle_buffer_size = dataset.hparams.shuffle_buffer_size sampler: SamplerBase if shuffle and 
shuffle_buffer_size is not None: sampler = BufferShuffleSampler(dataset, shuffle_buffer_size) elif shuffle: sampler = RandomSampler(dataset) else: sampler = SequentialSampler(dataset) num_workers = dataset.hparams.num_parallel_calls collate_fn = dataset._collate_and_maybe_return is_cuda = dataset.device is not None and dataset.device.type == "cuda" if pin_memory is None: pin_memory = is_cuda self.device = None if pin_memory and is_cuda: self.device = dataset.device if batching_strategy is not None: batch_sampler = DynamicBatchSampler( dataset, sampler, batching_strategy) super().__init__( dataset, batch_sampler=batch_sampler, collate_fn=collate_fn, num_workers=num_workers, pin_memory=pin_memory) else: super().__init__( dataset, batch_size=dataset.batch_size, drop_last=False, sampler=sampler, collate_fn=collate_fn, num_workers=num_workers, pin_memory=pin_memory) def __iter__(self): if self.dataset._should_return_processed_examples: return _CacheDataLoaderIter(self) else: return _DataLoaderIter(self) def __len__(self): if self.batch_size is None: raise TypeError("__len__ not supported for dynamic batching") data_length = len(self.dataset) if self.dataset.hparams.allow_smaller_final_batch: return ceildiv(data_length, self.batch_size) return data_length // self.batch_size class DataIterator: def __init__(self, datasets: DatasetsType, batching_strategy: Optional[BatchingStrategy] = None, pin_memory: Optional[bool] = None): self._default_dataset_name = 'data' if isinstance(datasets, DatasetBase): datasets = {self._default_dataset_name: datasets} elif isinstance(datasets, Sequence): if any(not isinstance(d, DatasetBase) for d in datasets): raise ValueError("`datasets` must be an non-empty list of " "`texar.torch.data.DatasetBase` instances.") num_datasets = len(datasets) datasets = {d.name: d for d in datasets} if len(datasets) < num_datasets: raise ValueError("Names of datasets must be unique.") _datasets = { name: SingleDatasetIterator(dataset, batching_strategy, pin_memory) for name, dataset in datasets.items()} self._datasets = _datasets if len(self._datasets) <= 0: raise ValueError("`datasets` must not be empty.") self._current_dataset_name: Optional[str] = None @property def num_datasets(self) -> int: return len(self._datasets) @property
Apache License 2.0
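An illustrative sketch of the two introspection properties; `train_data` and `val_data` stand for any texar DatasetBase instances, and the top-level import path is assumed from the module location in this record.
from texar.torch.data import DataIterator   # assumed re-export of the module shown above

iterator = DataIterator({"train": train_data, "val": val_data})
print(iterator.num_datasets)    # 2
print(iterator.dataset_names)   # ['train', 'val']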
coa-project/pycoa
coa/geo.py
GeoManager.get_GeoRegion
python
def get_GeoRegion(self):
    return self._gr
return the GeoRegion local instance
https://github.com/coa-project/pycoa/blob/b22577670b5a1ba5156f834506a31d4ffbca31d9/coa/geo.py#L72-L75
import inspect import warnings import pycountry as pc import pycountry_convert as pcc import pandas as pd import geopandas as gpd import shapely.geometry as sg import shapely.affinity as sa import shapely.ops as so import bs4 from coa.tools import verb,kwargs_test,get_local_from_url,dotdict,tostdstring from coa.error import * class GeoManager(): _list_standard=['iso2', 'iso3', 'name', 'num'] _list_db=[None,'jhu','worldometers','owid','opencovid19national','spfnational'] _list_output=['list','dict','pandas'] _standard = None def __init__(self,standard=_list_standard[0]): verb("Init of GeoManager() from "+str(inspect.stack()[1])) self.set_standard(standard) self._gr=GeoRegion()
MIT License
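Minimal usage sketch for the accessor, using one of the standards listed in _list_standard in the context above:
from coa.geo import GeoManager

gm = GeoManager(standard='iso3')   # 'iso2', 'iso3', 'name' and 'num' are the supported standards
gr = gm.get_GeoRegion()            # the GeoRegion helper built in __init__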
torchbox/wagtail-torchbox
tbx/core/templatetags/torchbox_tags.py
work_and_blog_listing
python
def work_and_blog_listing(context, count=10):
    blog_posts = BlogPage.objects.filter(live=True)
    works = WorkPage.objects.filter(live=True)

    blog_count = (count + 1) / 2
    work_count = count / 2

    blog_posts = blog_posts.order_by("-date")[:blog_count]
    works = works.order_by("-pk")[:work_count]

    return {
        "items": list(roundrobin(blog_posts, works)),
        "request": context["request"],
    }
An interleaved list of work and blog items.
https://github.com/torchbox/wagtail-torchbox/blob/6e94036537f84a8560f37f90fd769a07d410a012/tbx/core/templatetags/torchbox_tags.py#L155-L173
from django import template from django.conf import settings from tbx.blog.models import BlogPage from tbx.core.models import Advert, JobIndexPage, MainMenu from tbx.core.utils import roundrobin from tbx.people.models import PersonPage from tbx.work.models import WorkPage register = template.Library() @register.simple_tag def get_popular_tags(model): return model.get_popular_tags() @register.simple_tag def get_googe_maps_key(): return getattr(settings, "GOOGLE_MAPS_KEY", "") @register.simple_tag def get_next_sibling_by_order(page): sibling = page.get_next_siblings().live().first() if sibling: return sibling.specific @register.simple_tag def get_prev_sibling_by_order(page): sibling = page.get_prev_siblings().live().first() if sibling: return sibling.specific @register.simple_tag def get_next_sibling_blog(page): sibling = ( BlogPage.objects.filter(date__lt=page.date).order_by("-date").live().first() ) if sibling: return sibling.specific @register.simple_tag def get_prev_sibling_blog(page): sibling = ( BlogPage.objects.filter(date__gt=page.date).order_by("-date").live().last() ) if sibling: return sibling.specific @register.simple_tag(takes_context=True) def get_site_root(context): return context["request"].site.root_page @register.filter def content_type(value): return value.__class__.__name__.lower() @register.simple_tag def main_menu(): return MainMenu.objects.first() @register.inclusion_tag( "torchbox/tags/homepage_people_listing.html", takes_context=True ) def homepage_people_listing(context, count=3): people = PersonPage.objects.filter(live=True).order_by("?")[:count] return { "people": people, "request": context["request"], } @register.inclusion_tag("torchbox/tags/homepage_blog_listing.html", takes_context=True) def homepage_blog_listing(context, count=6): blog_posts = BlogPage.objects.live().in_menu().order_by("-date")[:count] return { "blog_posts": blog_posts, "request": context["request"], } @register.inclusion_tag("torchbox/tags/homepage_work_listing.html", takes_context=True) def homepage_work_listing(context, count=3): work = WorkPage.objects.filter(live=True)[:count] return { "work": work, "request": context["request"], } @register.inclusion_tag("torchbox/tags/homepage_job_listing.html", takes_context=True) def homepage_job_listing(context, count=3, intro_text=None): jobindex = JobIndexPage.objects.filter(live=True).first() if jobindex: jobs = jobindex.job.all() if count: jobs = jobs[:count] else: jobs = [] jobintro = intro_text or jobindex and jobindex.listing_intro return { "jobintro": jobintro, "jobindex": jobindex, "jobs": jobs, "request": context["request"], } @register.inclusion_tag("torchbox/tags/adverts.html", takes_context=True) def adverts(context): return { "adverts": Advert.objects.all(), "request": context["request"], } @register.inclusion_tag("torchbox/tags/person_blog_listing.html", takes_context=True) def person_blog_post_listing(context, calling_page=None): posts = ( BlogPage.objects.filter(authors__author__person_page_id=calling_page.id) .live() .order_by("-date") ) return { "posts": posts, "calling_page": calling_page, "request": context["request"], } @register.inclusion_tag("torchbox/tags/work_and_blog_listing.html", takes_context=True)
MIT License
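Since this is a Django inclusion tag, it is normally invoked from a template ({% load torchbox_tags %} then {% work_and_blog_listing 6 %}); the split arithmetic it performs is sketched below. Note, as an observation rather than a change to the record, that on Python 3 the plain `/` yields floats, so integer division would be needed before slicing a queryset.
count = 10
blog_count = (count + 1) // 2   # 5 newest blog posts (integer division shown for Python 3)
work_count = count // 2         # 5 newest work pages
# the two lists are then interleaved with roundrobin() into a single `items` list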
yxyang/locomotion_simulation
locomotion/agents/whole_body_controller/locomotion_controller.py
LocomotionController.__init__
python
def __init__(
    self,
    robot: Any,
    gait_generator,
    state_estimator,
    swing_leg_controller,
    stance_leg_controller,
    clock,
):
    self._robot = robot
    self._clock = clock
    self._reset_time = self._clock()
    self._time_since_reset = 0
    self._gait_generator = gait_generator
    self._state_estimator = state_estimator
    self._swing_leg_controller = swing_leg_controller
    self._stance_leg_controller = stance_leg_controller
Initializes the class.

Args:
  robot: A robot instance.
  gait_generator: Generates the leg swing/stance pattern.
  state_estimator: Estimates the state of the robot (e.g. center of mass
    position or velocity that may not be observable from sensors).
  swing_leg_controller: Generates motor actions for swing legs.
  stance_leg_controller: Generates motor actions for stance legs.
  clock: A real or fake clock source.
https://github.com/yxyang/locomotion_simulation/blob/d2af9d7dbee93130541d6415b222fa672dc5913f/locomotion/agents/whole_body_controller/locomotion_controller.py#L18-L45
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import Any import numpy as np class LocomotionController(object):
MIT License
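A hedged construction sketch; the component objects are assumed to come from the sibling modules of this package (gait generator, state estimator, and the two leg controllers), and `time.time` serves as the clock callable.
import time

controller = LocomotionController(
    robot=robot,                             # placeholder: a simulated quadruped instance
    gait_generator=gait_generator,
    state_estimator=state_estimator,
    swing_leg_controller=swing_controller,
    stance_leg_controller=stance_controller,
    clock=time.time,                         # any zero-argument callable returning seconds
)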
rucio/rucio
lib/rucio/db/sqla/migrate_repo/versions/c129ccdb2d5_add_lumiblocknr_to_dids.py
downgrade
python
def downgrade():
    if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
        schema = context.get_context().version_table_schema if context.get_context().version_table_schema else ''
        drop_column('dids', 'lumiblocknr', schema=schema)
Downgrade the database to the previous revision
https://github.com/rucio/rucio/blob/6a6092798bb8220dec07328d0e3f7f42d1b931cd/lib/rucio/db/sqla/migrate_repo/versions/c129ccdb2d5_add_lumiblocknr_to_dids.py#L42-L49
import sqlalchemy as sa from alembic import context from alembic.op import add_column, drop_column revision = 'c129ccdb2d5' down_revision = '156fb5b5a14' def upgrade(): if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']: schema = context.get_context().version_table_schema if context.get_context().version_table_schema else '' add_column('dids', sa.Column('lumiblocknr', sa.Integer()), schema=schema)
Apache License 2.0
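The revision identifiers above can be exercised through Alembic's Python API; a sketch, assuming an alembic.ini that points at this migrate_repo:
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")             # assumed config path for the migration repository
command.downgrade(cfg, "156fb5b5a14")   # drops dids.lumiblocknr (this file's down_revision)
command.upgrade(cfg, "c129ccdb2d5")     # re-adds the column via upgrade()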
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/source_channel.py
SourceChannel.type
python
def type(self, type_):
    if type_ is not None:
        if not isinstance(type_, SourceChannelType):
            raise TypeError("Invalid type for `type`, type has to be `SourceChannelType`")
    self._type = type_
Sets the type of this SourceChannel.

:param type_: The type of this SourceChannel.
:type: SourceChannelType
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/source_channel.py#L91-L104
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.source_channel_type import SourceChannelType import pprint import six class SourceChannel(object): @poscheck_model def __init__(self, gain=None, type_=None, channel_number=None): self._gain = None self._type = None self._channel_number = None self.discriminator = None if gain is not None: self.gain = gain if type_ is not None: self.type = type_ if channel_number is not None: self.channel_number = channel_number @property def openapi_types(self): types = { 'gain': 'float', 'type': 'SourceChannelType', 'channel_number': 'int' } return types @property def attribute_map(self): attributes = { 'gain': 'gain', 'type': 'type', 'channel_number': 'channelNumber' } return attributes @property def gain(self): return self._gain @gain.setter def gain(self, gain): if gain is not None: if not isinstance(gain, (float, int)): raise TypeError("Invalid type for `gain`, type has to be `float`") self._gain = gain @property def type(self): return self._type @type.setter
MIT License
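A short sketch of the validation the setter enforces; constructing the model with keyword arguments matches the __init__ shown in the context, and no enum member is named because SourceChannelType's values are not part of this record.
from bitmovin_api_sdk.models.source_channel import SourceChannel

channel = SourceChannel(gain=1.0, channel_number=0)
try:
    channel.type = "front_left"          # a plain string is rejected
except TypeError as err:
    print(err)                           # Invalid type for `type`, type has to be `SourceChannelType`
channel.type = None                      # None is allowed and simply stored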
quantopian/zipline
zipline/assets/asset_writer.py
AssetDBWriter.write_direct
python
def write_direct(self,
                 equities=None,
                 equity_symbol_mappings=None,
                 equity_supplementary_mappings=None,
                 futures=None,
                 exchanges=None,
                 root_symbols=None,
                 chunk_size=DEFAULT_CHUNK_SIZE):
    if equities is not None:
        equities = _generate_output_dataframe(
            equities,
            _direct_equities_defaults,
        )
        if equity_symbol_mappings is None:
            raise ValueError(
                'equities provided with no symbol mapping data',
            )

        equity_symbol_mappings = _generate_output_dataframe(
            equity_symbol_mappings,
            _equity_symbol_mappings_defaults,
        )
        _check_symbol_mappings(
            equity_symbol_mappings,
            exchanges,
            equities['exchange'],
        )

    if equity_supplementary_mappings is not None:
        equity_supplementary_mappings = _generate_output_dataframe(
            equity_supplementary_mappings,
            _equity_supplementary_mappings_defaults,
        )

    if futures is not None:
        futures = _generate_output_dataframe(_futures_defaults, futures)

    if exchanges is not None:
        exchanges = _generate_output_dataframe(
            exchanges.set_index('exchange'),
            _exchanges_defaults,
        )

    if root_symbols is not None:
        root_symbols = _generate_output_dataframe(
            root_symbols,
            _root_symbols_defaults,
        )

    _normalize_index_columns_in_place(
        equities=equities,
        equity_supplementary_mappings=equity_supplementary_mappings,
        futures=futures,
        exchanges=exchanges,
        root_symbols=root_symbols,
    )

    self._real_write(
        equities=equities,
        equity_symbol_mappings=equity_symbol_mappings,
        equity_supplementary_mappings=equity_supplementary_mappings,
        futures=futures,
        exchanges=exchanges,
        root_symbols=root_symbols,
        chunk_size=chunk_size,
    )
Write asset metadata to a sqlite database in the format that it is
stored in the assets db.

Parameters
----------
equities : pd.DataFrame, optional
    The equity metadata. The columns for this dataframe are:

      symbol : str
          The ticker symbol for this equity.
      asset_name : str
          The full name for this asset.
      start_date : datetime
          The date when this asset was created.
      end_date : datetime, optional
          The last date we have trade data for this asset.
      first_traded : datetime, optional
          The first date we have trade data for this asset.
      auto_close_date : datetime, optional
          The date on which to close any positions in this asset.
      exchange : str
          The exchange where this asset is traded.

    The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
    The future contract metadata. The columns for this dataframe are:

      symbol : str
          The ticker symbol for this futures contract.
      root_symbol : str
          The root symbol, or the symbol with the expiration stripped out.
      asset_name : str
          The full name for this asset.
      start_date : datetime, optional
          The date when this asset was created.
      end_date : datetime, optional
          The last date we have trade data for this asset.
      first_traded : datetime, optional
          The first date we have trade data for this asset.
      exchange : str
          The exchange where this asset is traded.
      notice_date : datetime
          The date when the owner of the contract may be forced to take
          physical delivery of the contract's asset.
      expiration_date : datetime
          The date when the contract expires.
      auto_close_date : datetime
          The date when the broker will automatically close any positions
          in this contract.
      tick_size : float
          The minimum price movement of the contract.
      multiplier: float
          The amount of the underlying asset represented by this contract.
exchanges : pd.DataFrame, optional
    The exchanges where assets can be traded. The columns of this
    dataframe are:

      exchange : str
          The full name of the exchange.
      canonical_name : str
          The canonical name of the exchange.
      country_code : str
          The ISO 3166 alpha-2 country code of the exchange.
root_symbols : pd.DataFrame, optional
    The root symbols for the futures contracts. The columns for this
    dataframe are:

      root_symbol : str
          The root symbol name.
      root_symbol_id : int
          The unique id for this root symbol.
      sector : string, optional
          The sector of this root symbol.
      description : string, optional
          A short description of this root symbol.
      exchange : str
          The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
    Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
    The amount of rows to write to the SQLite table at once. This
    defaults to the default number of bind params in sqlite. If you have
    compiled sqlite3 with more bind or less params you may want to pass
    that value here.
https://github.com/quantopian/zipline/blob/014f1fc339dc8b7671d29be2d85ce57d3daec343/zipline/assets/asset_writer.py#L513-L667
from collections import namedtuple import re import numpy as np import pandas as pd import sqlalchemy as sa from toolz import first from zipline.errors import AssetDBVersionError from zipline.assets.asset_db_schema import ( ASSET_DB_VERSION, asset_db_table_names, asset_router, equities as equities_table, equity_symbol_mappings, equity_supplementary_mappings as equity_supplementary_mappings_table, futures_contracts as futures_contracts_table, exchanges as exchanges_table, futures_root_symbols, metadata, version_info, ) from zipline.utils.compat import ExitStack from zipline.utils.preprocess import preprocess from zipline.utils.range import from_tuple, intersecting_ranges from zipline.utils.sqlite_utils import coerce_string_to_eng AssetData = namedtuple( 'AssetData', ( 'equities', 'equities_mappings', 'futures', 'exchanges', 'root_symbols', 'equity_supplementary_mappings', ), ) SQLITE_MAX_VARIABLE_NUMBER = 999 symbol_columns = frozenset({ 'symbol', 'company_symbol', 'share_class_symbol', }) mapping_columns = symbol_columns | {'start_date', 'end_date'} _index_columns = { 'equities': 'sid', 'equity_supplementary_mappings': 'sid', 'futures': 'sid', 'exchanges': 'exchange', 'root_symbols': 'root_symbol', } def _normalize_index_columns_in_place(equities, equity_supplementary_mappings, futures, exchanges, root_symbols): for frame, column_name in ((equities, 'sid'), (equity_supplementary_mappings, 'sid'), (futures, 'sid'), (exchanges, 'exchange'), (root_symbols, 'root_symbol')): if frame is not None and column_name in frame: frame.set_index(column_name, inplace=True) def _default_none(df, column): return None def _no_default(df, column): if not df.empty: raise ValueError('no default value for column %r' % column) _equities_defaults = { 'symbol': _default_none, 'asset_name': _default_none, 'start_date': lambda df, col: 0, 'end_date': lambda df, col: np.iinfo(np.int64).max, 'first_traded': _default_none, 'auto_close_date': _default_none, 'exchange': _no_default, } _direct_equities_defaults = _equities_defaults.copy() del _direct_equities_defaults['symbol'] _futures_defaults = { 'symbol': _default_none, 'root_symbol': _default_none, 'asset_name': _default_none, 'start_date': lambda df, col: 0, 'end_date': lambda df, col: np.iinfo(np.int64).max, 'first_traded': _default_none, 'exchange': _default_none, 'notice_date': _default_none, 'expiration_date': _default_none, 'auto_close_date': _default_none, 'tick_size': _default_none, 'multiplier': lambda df, col: 1, } _exchanges_defaults = { 'canonical_name': lambda df, col: df.index, 'country_code': lambda df, col: '??', } _root_symbols_defaults = { 'sector': _default_none, 'description': _default_none, 'exchange': _default_none, } _equity_supplementary_mappings_defaults = { 'value': _default_none, 'field': _default_none, 'start_date': lambda df, col: 0, 'end_date': lambda df, col: np.iinfo(np.int64).max, } _equity_symbol_mappings_defaults = { 'sid': _no_default, 'company_symbol': _default_none, 'share_class_symbol': _default_none, 'symbol': _default_none, 'start_date': lambda df, col: 0, 'end_date': lambda df, col: np.iinfo(np.int64).max, } _delimited_symbol_delimiters_regex = re.compile(r'[./\-_]') _delimited_symbol_default_triggers = frozenset({np.nan, None, ''}) def split_delimited_symbol(symbol): if symbol in _delimited_symbol_default_triggers: return '', '' symbol = symbol.upper() split_list = re.split( pattern=_delimited_symbol_delimiters_regex, string=symbol, maxsplit=1, ) company_symbol = split_list[0] if len(split_list) > 1: share_class_symbol = 
split_list[1] else: share_class_symbol = '' return company_symbol, share_class_symbol def _generate_output_dataframe(data_subset, defaults): cols = set(data_subset.columns) desired_cols = set(defaults) data_subset.drop(cols - desired_cols, axis=1, inplace=True) for col in desired_cols - cols: data_subset[col] = defaults[col](data_subset, col) return data_subset def _check_asset_group(group): row = group.sort_values('end_date').iloc[-1] row.start_date = group.start_date.min() row.end_date = group.end_date.max() row.drop(list(symbol_columns), inplace=True) return row def _format_range(r): return ( str(pd.Timestamp(r.start, unit='ns')), str(pd.Timestamp(r.stop, unit='ns')), ) def _check_symbol_mappings(df, exchanges, asset_exchange): mappings = df.set_index('sid')[list(mapping_columns)].copy() mappings['country_code'] = exchanges['country_code'][ asset_exchange.loc[df['sid']] ].values ambigious = {} def check_intersections(persymbol): intersections = list(intersecting_ranges(map( from_tuple, zip(persymbol.start_date, persymbol.end_date), ))) if intersections: data = persymbol[ ['start_date', 'end_date'] ].astype('datetime64[ns]') msg_component = '\n '.join(str(data).splitlines()) ambigious[persymbol.name] = intersections, msg_component mappings.groupby(['symbol', 'country_code']).apply(check_intersections) if ambigious: raise ValueError( 'Ambiguous ownership for %d symbol%s, multiple assets held the' ' following symbols:\n%s' % ( len(ambigious), '' if len(ambigious) == 1 else 's', '\n'.join( '%s (%s):\n intersections: %s\n %s' % ( symbol, country_code, tuple(map(_format_range, intersections)), cs, ) for (symbol, country_code), (intersections, cs) in sorted( ambigious.items(), key=first, ) ), ) ) def _split_symbol_mappings(df, exchanges): mappings = df[list(mapping_columns)] with pd.option_context('mode.chained_assignment', None): mappings['sid'] = mappings.index mappings.reset_index(drop=True, inplace=True) asset_exchange = df[ ['exchange', 'end_date'] ].sort_values('end_date').groupby(level=0)['exchange'].nth(-1) _check_symbol_mappings(mappings, exchanges, asset_exchange) return ( df.groupby(level=0).apply(_check_asset_group), mappings, ) def _dt_to_epoch_ns(dt_series): index = pd.to_datetime(dt_series.values) if index.tzinfo is None: index = index.tz_localize('UTC') else: index = index.tz_convert('UTC') return index.view(np.int64) def check_version_info(conn, version_table, expected_version): version_from_table = conn.execute( sa.select((version_table.c.version,)), ).scalar() if version_from_table is None: version_from_table = 0 if (version_from_table != expected_version): raise AssetDBVersionError(db_version=version_from_table, expected_version=expected_version) def write_version_info(conn, version_table, version_value): conn.execute(sa.insert(version_table, values={'version': version_value})) class _empty(object): columns = () class AssetDBWriter(object): DEFAULT_CHUNK_SIZE = SQLITE_MAX_VARIABLE_NUMBER @preprocess(engine=coerce_string_to_eng(require_exists=False)) def __init__(self, engine): self.engine = engine def _real_write(self, equities, equity_symbol_mappings, equity_supplementary_mappings, futures, exchanges, root_symbols, chunk_size): with self.engine.begin() as conn: self.init_db(conn) if exchanges is not None: self._write_df_to_table( exchanges_table, exchanges, conn, chunk_size, ) if root_symbols is not None: self._write_df_to_table( futures_root_symbols, root_symbols, conn, chunk_size, ) if equity_supplementary_mappings is not None: self._write_df_to_table( 
equity_supplementary_mappings_table, equity_supplementary_mappings, conn, chunk_size, ) if futures is not None: self._write_assets( 'future', futures, conn, chunk_size, ) if equities is not None: self._write_assets( 'equity', equities, conn, chunk_size, mapping_data=equity_symbol_mappings, )
Apache License 2.0
rapidsai/cuml
python/cuml/thirdparty_adapters/sparsefuncs_fast.py
_csc_mean_variance_axis0
python
def _csc_mean_variance_axis0(X):
    n_samples, n_features = X.shape
    means = cp.empty(n_features)
    variances = cp.empty(n_features)
    counts_nan = cp.empty(n_features)
    start = X.indptr[0]
    for i, end in enumerate(X.indptr[1:]):
        col = X.data[start:end]
        # Entries absent from the sparse column are implicit zeros.
        _count_zeros = n_samples - col.size
        # NaN != NaN, so this counts the NaN entries of the column.
        _count_nans = (col != col).sum()
        _mean = cp.nansum(col) / (n_samples - _count_nans)
        _variance = cp.nansum((col - _mean) ** 2)
        _variance += _count_zeros * (_mean ** 2)
        _variance /= (n_samples - _count_nans)
        means[i] = _mean
        variances[i] = _variance
        counts_nan[i] = _count_nans
        start = end
    return means, variances, counts_nan
Compute mean, variance and NaN count on the axis 0 of a CSC matrix

Parameters
----------
X : sparse CSC matrix
    Input array

Returns
-------
mean, variance, nans count
https://github.com/rapidsai/cuml/blob/91abe6747ea61a5b59526f76568ea14d52814454/python/cuml/thirdparty_adapters/sparsefuncs_fast.py#L56-L91
import cupy as cp
from numba import cuda
from math import ceil


def csr_mean_variance_axis0(X):
    X = X.tocsc()
    means, variances, _ = _csc_mean_variance_axis0(X)
    return means, variances


def csc_mean_variance_axis0(X):
    means, variances, _ = _csc_mean_variance_axis0(X)
    return means, variances
Apache License 2.0
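A CPU-only sketch of the same per-column statistics, using scipy.sparse instead of cupy so it runs without a GPU; the helper name csc_col_stats and the toy matrix are illustrative and not part of cuml.

import numpy as np
from scipy import sparse

def csc_col_stats(X):
    # Walk indptr column by column; implicit zeros count toward the statistics.
    n_samples, n_features = X.shape
    means = np.empty(n_features)
    variances = np.empty(n_features)
    counts_nan = np.empty(n_features)
    start = X.indptr[0]
    for i, end in enumerate(X.indptr[1:]):
        col = X.data[start:end]
        n_zeros = n_samples - col.size      # entries not stored in the column
        n_nans = np.isnan(col).sum()
        mean = np.nansum(col) / (n_samples - n_nans)
        var = np.nansum((col - mean) ** 2) + n_zeros * mean ** 2
        means[i] = mean
        variances[i] = var / (n_samples - n_nans)
        counts_nan[i] = n_nans
        start = end
    return means, variances, counts_nan

X = sparse.random(6, 3, density=0.5, format='csc', random_state=0)
print(csc_col_stats(X))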
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1beta1_cluster_role_list.py
V1beta1ClusterRoleList.kind
python
def kind(self):
    return self._kind
Gets the kind of this V1beta1ClusterRoleList.  # noqa: E501

Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

:return: The kind of this V1beta1ClusterRoleList.  # noqa: E501
:rtype: str
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1beta1_cluster_role_list.py#L118-L126
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1beta1ClusterRoleList(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[V1beta1ClusterRole]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def items(self): return self._items @items.setter def items(self, items): if self.local_vars_configuration.client_side_validation and items is None: raise ValueError("Invalid value for `items`, must not be `None`") self._items = items @property
Apache License 2.0
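A minimal usage sketch of the generated accessor, assuming the kubernetes_asyncio package is installed; the field values are made up.

from kubernetes_asyncio.client.models.v1beta1_cluster_role_list import \
    V1beta1ClusterRoleList

# `items` is the only required constructor argument; `kind` is optional.
role_list = V1beta1ClusterRoleList(items=[], kind="ClusterRoleList")
print(role_list.kind)  # -> "ClusterRoleList"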
phuijse/p4j
P4J/generator.py
synthetic_light_curve_generator.__init__
python
def __init__(self, T, N, rseed=None):
    self.rseed = rseed
    self.t = irregular_sampling(T, N, rseed)
    self.A = 1.0
Class for simple synthetic light curve generation

Light curves are time series of stellar magnitude or flux as a function of
time. In the following we consider Earth-based surveys. Light curves are
irregularly sampled because the measurements are not taken at the same time
every night due to observation constraints. Light curves may have data gaps,
i.e. periods of time where no observations were registered. The noise present
in the light curves comes from the sky background, the Earth atmosphere,
telescope systematics, etc.

Parameters
----------
T: float
    Time span of the vector, i.e. how long it is in time
N: positive integer
    Number of samples of the resulting time vector
rseed:
    Random seed to feed the random number generator
https://github.com/phuijse/p4j/blob/0e7730d62f2d1d347f66bc6f8d0b8bbfe6d2f3c4/P4J/generator.py#L6-L31
from __future__ import division, print_function

import numpy as np
from scipy.stats import gamma, exponnorm


class synthetic_light_curve_generator:
MIT License
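A brief usage sketch, assuming the P4J package and its irregular_sampling helper are installed and that the t attribute set above is a NumPy array; the argument values are illustrative.

from P4J.generator import synthetic_light_curve_generator

gen = synthetic_light_curve_generator(T=100.0, N=200, rseed=0)
# t holds the irregularly sampled time instants spanning roughly T time units.
print(gen.t.min(), gen.t.max(), len(gen.t))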
getnikola/nikola
nikola/utils.py
copy_tree
python
def copy_tree(src, dst, link_cutoff=None, ignored_filenames=None):
    ignore = set(['.svn', '.git']) | (ignored_filenames or set())
    base_len = len(src.split(os.sep))
    for root, dirs, files in os.walk(src, followlinks=True):
        root_parts = root.split(os.sep)
        if set(root_parts) & ignore:
            continue
        dst_dir = os.path.join(dst, *root_parts[base_len:])
        makedirs(dst_dir)
        for src_name in files:
            if src_name in ('.DS_Store', 'Thumbs.db'):
                continue
            dst_file = os.path.join(dst_dir, src_name)
            src_file = os.path.join(root, src_name)
            yield {
                'name': dst_file,
                'file_dep': [src_file],
                'targets': [dst_file],
                'actions': [(copy_file, (src_file, dst_file, link_cutoff))],
                'clean': True,
            }
Copy a src tree to the dst folder.

Example:
    src = "themes/default/assets"
    dst = "output/assets"

should copy "themes/default/assets/foo/bar" to "output/assets/foo/bar"

If link_cutoff is set, then the links pointing at things *inside* that folder
will stay as links, and links pointing *outside* that folder will be copied.

ignored_filenames is a set of file names that will be ignored.
https://github.com/getnikola/nikola/blob/334000e049c42fff52170563e94592d0c886acc4/nikola/utils.py#L746-L782
import configparser import datetime import hashlib import io import lxml.html import operator import os import re import json import shutil import socket import subprocess import sys import threading import typing from collections import defaultdict, OrderedDict from collections.abc import Callable, Iterable from html import unescape as html_unescape from importlib import reload as _reload from unicodedata import normalize as unicodenormalize from urllib.parse import quote as urlquote from urllib.parse import unquote as urlunquote from urllib.parse import urlparse, urlunparse from zipfile import ZipFile as zipf import babel.dates import dateutil.parser import dateutil.tz import pygments.formatters import pygments.formatters._mapping import PyRSS2Gen as rss from blinker import signal from doit import tools from doit.cmdparse import CmdParse from pkg_resources import resource_filename from nikola.packages.pygments_better_html import BetterHtmlFormatter from unidecode import unidecode from nikola import DEBUG from .log import LOGGER, get_logger from .hierarchy_utils import TreeNode, clone_treenode, flatten_tree_structure, sort_classifications from .hierarchy_utils import join_hierarchical_category_path, parse_escaped_hierarchical_category_name try: import toml except ImportError: toml = None try: from ruamel.yaml import YAML except ImportError: YAML = None try: import hsluv except ImportError: hsluv = None __all__ = ('CustomEncoder', 'get_theme_path', 'get_theme_path_real', 'get_theme_chain', 'load_messages', 'copy_tree', 'copy_file', 'slugify', 'unslugify', 'to_datetime', 'apply_filters', 'config_changed', 'get_crumbs', 'get_tzname', 'get_asset_path', '_reload', 'Functionary', 'TranslatableSetting', 'TemplateHookRegistry', 'LocaleBorg', 'sys_encode', 'sys_decode', 'makedirs', 'get_parent_theme_name', 'demote_headers', 'get_translation_candidate', 'write_metadata', 'ask', 'ask_yesno', 'options2docstring', 'os_path_split', 'get_displayed_page_number', 'adjust_name_for_index_path_list', 'adjust_name_for_index_path', 'adjust_name_for_index_link', 'NikolaPygmentsHTML', 'create_redirect', 'clean_before_deployment', 'sort_posts', 'smartjoin', 'indent', 'load_data', 'html_unescape', 'rss_writer', 'map_metadata', 'req_missing', 'bool_from_meta', 'TreeNode', 'clone_treenode', 'flatten_tree_structure', 'sort_classifications', 'join_hierarchical_category_path', 'parse_escaped_hierarchical_category_name',) bytes_str = bytes unicode_str = str unichr = chr STDERR_HANDLER = None USE_SLUGIFY = True def req_missing(names, purpose, python=True, optional=False): if not (isinstance(names, tuple) or isinstance(names, list) or isinstance(names, set)): names = (names,) if not names: return False if python: whatarethey_s = 'Python package' whatarethey_p = 'Python packages' else: whatarethey_s = whatarethey_p = 'software' if len(names) == 1: msg = 'In order to {0}, you must install the "{1}" {2}.'.format( purpose, names[0], whatarethey_s) else: most = '", "'.join(names[:-1]) pnames = most + '" and "' + names[-1] msg = 'In order to {0}, you must install the "{1}" {2}.'.format( purpose, pnames, whatarethey_p) if optional: LOGGER.warning(msg) else: LOGGER.error(msg) LOGGER.error('Exiting due to missing dependencies.') sys.exit(5) return msg ENCODING = sys.getfilesystemencoding() or sys.stdin.encoding def sys_encode(thing): if isinstance(thing, str): return thing.encode(ENCODING) return thing def sys_decode(thing): if isinstance(thing, bytes): return thing.decode(ENCODING) return thing def makedirs(path): if not path: 
return if os.path.exists(path): if not os.path.isdir(path): raise OSError('Path {0} already exists and is not a folder.'.format(path)) else: return try: os.makedirs(path) return except Exception: if os.path.isdir(path): return raise class Functionary(defaultdict): def __init__(self, default, default_lang): super().__init__(default) self.default_lang = default_lang def __call__(self, key, lang=None): if lang is None: lang = LocaleBorg().current_lang return self[lang][key] class TranslatableSetting(object): lang = None default_lang = 'en' def __getattribute__(self, attr): try: return super().__getattribute__(attr) except AttributeError: return self().__getattribute__(attr) def __dir__(self): return list(set(self.__dict__).union(set(dir(str)))) def __init__(self, name, inp, translations): self.name = name self._inp = inp self.translations = translations self.overriden_default = False self.values = defaultdict() if isinstance(inp, dict) and inp: self.translated = True self.values.update(inp) if self.default_lang not in self.values.keys(): self.default_lang = list(self.values.keys())[0] self.overridden_default = True self.values.default_factory = lambda: self.values[self.default_lang] for k in translations.keys(): if k not in self.values.keys(): self.values[k] = inp[self.default_lang] else: self.translated = False self.values[self.default_lang] = inp self.values.default_factory = lambda: inp def get_lang(self): if self.lang: return self.lang elif not self.translated: return self.default_lang else: try: return LocaleBorg().current_lang except AttributeError: return self.default_lang def __call__(self, lang=None): if lang is None: return self.values[self.get_lang()] else: return self.values[lang] def __str__(self): return str(self.values[self.get_lang()]) def __repr__(self): return '<TranslatableSetting: {0!r} = {1!r}>'.format(self.name, self._inp) def format(self, *args, **kwargs): for l in self.values: self.values[l] = self.values[l].format(*args, **kwargs) self.values.default_factory = lambda: self.values[self.default_lang] return self def langformat(self, formats): if not formats: return self else: keys = list(formats) if self.default_lang in keys: d = formats[self.default_lang] else: d = formats[keys[0]] langkeys = [] for f in formats.values(): for a in f[0] + tuple(f[1].values()): if isinstance(a, dict): langkeys += list(a) allvalues = set(keys + langkeys + list(self.values)) self.values['__orig__'] = self.values[self.default_lang] for l in allvalues: if l in keys: oargs, okwargs = formats[l] else: oargs, okwargs = d args = [] kwargs = {} for a in oargs: if isinstance(a, dict): a = TranslatableSetting('NULL', a, self.translations) args.append(a(l)) else: args.append(a) for k, v in okwargs.items(): if isinstance(v, dict): v = TranslatableSetting('NULL', v, self.translations) kwargs.update({k: v(l)}) else: kwargs.update({k: v}) if l in self.values: self.values[l] = self.values[l].format(*args, **kwargs) else: self.values[l] = self.values['__orig__'].format(*args, **kwargs) self.values.default_factory = lambda: self.values[self.default_lang] return self def __getitem__(self, key): return self.values[key] def __setitem__(self, key, value): self.values[key] = value def __eq__(self, other): try: return self.values == other.values except AttributeError: return self(self.default_lang) == other def __ne__(self, other): try: return self.values != other.values except AttributeError: return self(self.default_lang) != other class TemplateHookRegistry(object): def __init__(self, name, site): self._items 
= [] self.name = name self.site = site self.context = None def generate(self): for c, inp, site, args, kwargs in self._items: if c: if site: kwargs['site'] = self.site kwargs['context'] = self.context yield inp(*args, **kwargs) else: yield inp def __call__(self): return '\n'.join(self.generate()) def append(self, inp, wants_site_and_context=False, *args, **kwargs): c = callable(inp) self._items.append((c, inp, wants_site_and_context, args, kwargs)) def calculate_deps(self): deps = [] for is_callable, inp, wants_site_and_context, args, kwargs in self._items: if not is_callable: name = inp elif hasattr(inp, 'template_registry_identifier'): name = inp.template_registry_identifier elif hasattr(inp, '__doc__'): name = inp.__doc__ else: name = '_undefined_callable_' deps.append((is_callable, name, wants_site_and_context, args, kwargs)) def __hash__(self): return hash(config_changed({self.name: self.calculate_deps()})._calc_digest()) def __str__(self): return '<TemplateHookRegistry: {0}>'.format(self._items) def __repr__(self): return '<TemplateHookRegistry: {0}>'.format(self.name) class CustomEncoder(json.JSONEncoder): def default(self, obj): try: return super().default(obj) except TypeError: if isinstance(obj, (set, frozenset)): return self.encode(sorted(list(obj))) elif isinstance(obj, TranslatableSetting): s = json.dumps(obj._inp, cls=CustomEncoder, sort_keys=True) else: s = repr(obj).split('0x', 1)[0] return s class config_changed(tools.config_changed): def __init__(self, config, identifier=None): super().__init__(config) self.identifier = '_config_changed' if identifier is not None: self.identifier += ':' + identifier @classmethod def _write_into_debug_db(cls, digest: str, data: str) -> None: import sqlite3 try: cls.debug_db_cursor except AttributeError: cls.debug_db_conn = sqlite3.connect("cc_debug.sqlite3") cls.debug_db_id = datetime.datetime.now().isoformat() cls.debug_db_cursor = cls.debug_db_conn.cursor() cls.debug_db_cursor.execute(""" CREATE TABLE IF NOT EXISTS hashes (hash CHARACTER(32) PRIMARY KEY, json_data TEXT); """) cls.debug_db_conn.commit() try: cls.debug_db_cursor.execute("INSERT INTO hashes (hash, json_data) VALUES (?, ?);", (digest, data)) cls.debug_db_conn.commit() except sqlite3.IntegrityError: cls.debug_db_conn.rollback() def _calc_digest(self): if isinstance(self.config, str): return self.config elif isinstance(self.config, dict): data = json.dumps(self.config, cls=CustomEncoder, sort_keys=True) if isinstance(data, str): byte_data = data.encode("utf-8") else: byte_data = data digest = hashlib.md5(byte_data).hexdigest() return digest else: raise Exception('Invalid type of config_changed parameter -- got ' '{0}, must be string or dict'.format(type( self.config))) def configure_task(self, task): task.value_savers.append(lambda: {self.identifier: self._calc_digest()}) def __call__(self, task, values): last_success = values.get(self.identifier) if last_success is None: return False return (last_success == self._calc_digest()) def __repr__(self): return "Change with config: {0}".format(json.dumps(self.config, cls=CustomEncoder, sort_keys=True)) def get_theme_path_real(theme, themes_dirs): for themes_dir in themes_dirs: dir_name = os.path.join(themes_dir, theme) if os.path.isdir(dir_name): return dir_name dir_name = resource_filename('nikola', os.path.join('data', 'themes', theme)) if os.path.isdir(dir_name): return dir_name raise Exception("Can't find theme '{0}'".format(theme)) def get_theme_path(theme): return theme def parse_theme_meta(theme_dir): cp = 
configparser.ConfigParser() theme_name = os.path.basename(theme_dir) or os.path.basename(os.path.dirname(theme_dir)) theme_meta_path = os.path.join(theme_dir, theme_name + '.theme') cp.read(theme_meta_path) return cp if cp.has_section('Theme') else None def get_template_engine(themes): for theme_name in themes: meta = parse_theme_meta(theme_name) if meta: e = meta.get('Theme', 'engine', fallback=None) if e: return e else: engine_path = os.path.join(theme_name, 'engine') if os.path.isfile(engine_path): with open(engine_path) as fd: return fd.readlines()[0].strip() return 'mako' def get_parent_theme_name(theme_name, themes_dirs=None): meta = parse_theme_meta(theme_name) if meta: parent = meta.get('Theme', 'parent', fallback=None) if themes_dirs and parent: return get_theme_path_real(parent, themes_dirs) return parent else: parent_path = os.path.join(theme_name, 'parent') if os.path.isfile(parent_path): with open(parent_path) as fd: parent = fd.readlines()[0].strip() if themes_dirs: return get_theme_path_real(parent, themes_dirs) return parent return None def get_theme_chain(theme, themes_dirs): themes = [get_theme_path_real(theme, themes_dirs)] while True: parent = get_parent_theme_name(themes[-1], themes_dirs=themes_dirs) if parent is None or parent in themes: break themes.append(parent) return themes def html_tostring_fragment(document): try: doc = lxml.html.tostring(document.body, encoding='unicode').strip() except Exception: doc = lxml.html.tostring(document, encoding='unicode').strip() start_fragments = ["<html>", "<body>"] end_fragments = ["</body>", "</html>"] for start in start_fragments: if doc.startswith(start): doc = doc[len(start):].strip() print(repr(doc)) for end in end_fragments: if doc.endswith(end): doc = doc[:-len(end)].strip() print(repr(doc)) return doc INCOMPLETE_LANGUAGES_WARNED = set() class LanguageNotFoundError(Exception): def __init__(self, lang, orig): self.lang = lang self.orig = orig def __str__(self): return 'cannot find language {0}'.format(self.lang) def load_messages(themes, translations, default_lang, themes_dirs): messages = Functionary(dict, default_lang) oldpath = list(sys.path) found = {lang: False for lang in translations.keys()} last_exception = None completion_status = {lang: False for lang in translations.keys()} for theme_name in themes[::-1]: msg_folder = os.path.join(get_theme_path(theme_name), 'messages') default_folder = os.path.join(get_theme_path_real('base', themes_dirs), 'messages') sys.path.insert(0, default_folder) sys.path.insert(0, msg_folder) english = __import__('messages_en') _reload(english) for lang in translations.keys(): try: translation = __import__('messages_' + lang) _reload(translation) found[lang] = True if sorted(translation.MESSAGES.keys()) != sorted(english.MESSAGES.keys()): completion_status[lang] = completion_status[lang] or False else: completion_status[lang] = True messages[lang].update(english.MESSAGES) for k, v in translation.MESSAGES.items(): if v: messages[lang][k] = v del(translation) except ImportError as orig: last_exception = orig del(english) sys.path = oldpath if not all(found.values()): raise LanguageNotFoundError(lang, last_exception) for lang, status in completion_status.items(): if not status and lang not in INCOMPLETE_LANGUAGES_WARNED: LOGGER.warning("Incomplete translation for language '{0}'.".format(lang)) INCOMPLETE_LANGUAGES_WARNED.add(lang) return messages
MIT License
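A sketch of consuming the doit task dicts that copy_tree yields; the paths and the ignored filename below are hypothetical and would normally come from a Nikola site checkout.

from nikola.utils import copy_tree

for task in copy_tree('themes/default/assets', 'output/assets',
                      ignored_filenames={'.swp'}):
    # Each yielded dict is a doit task description for one file copy.
    print(task['name'], '<-', task['file_dep'][0])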
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/linux_packages/sysbench.py
AptInstall
python
def AptInstall(vm):
    _Install(vm)
Installs the sysbench package on the VM.
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/linux_packages/sysbench.py#L30-L32
def _Install(vm):
    vm.InstallPackages('sysbench')


def YumInstall(vm):
    vm.InstallEpelRepo()
    _Install(vm)
Apache License 2.0
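An illustrative stand-in for the vm argument, showing the only interface AptInstall/_Install rely on (an InstallPackages method); FakeVm is hypothetical and not part of PerfKit Benchmarker, and the functions shown above are assumed to be in scope.

class FakeVm:
    def InstallPackages(self, packages):
        # A real PKB VM would run the package manager over SSH here.
        print('would run: sudo apt-get install -y', packages)

AptInstall(FakeVm())  # -> would run: sudo apt-get install -y sysbench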
deepmind/xmanager
xmanager/docker/docker_adapter.py
DockerAdapter.run_container
python
def run_container(
    self,
    name: str,
    image_id: str,
    args: Sequence[str],
    env_vars: Mapping[str, str],
    network: str,
    ports: Ports,
    volumes: Dict[str, str],
) -> containers.Container:
    make_mount = lambda guest: {'bind': guest, 'mode': 'rw'}
    return self._client.containers.run(
        image_id,
        name=name,
        hostname=name,
        network=network,
        detach=True,
        remove=True,
        command=args,
        environment=env_vars,
        ports=ports,
        volumes={host: make_mount(guest) for host, guest in volumes.items()},
    )
Runs a given container image.
https://github.com/deepmind/xmanager/blob/4963986b77228bed72afcb6ada7008a7eb3a1393/xmanager/docker/docker_adapter.py#L77-L100
import functools from typing import Dict, List, Mapping, Sequence, Tuple, Union from absl import logging import docker from docker import errors from docker.models import containers from docker.utils import utils Ports = Dict[Union[int, str], Union[None, int, Tuple[str, int], List[int]]] @functools.lru_cache() def instance() -> 'DockerAdapter': return DockerAdapter(docker.from_env()) class DockerAdapter(object): def __init__(self, client: docker.DockerClient) -> None: self._client = client def has_network(self, name: str) -> bool: return bool(self._client.networks.list([name])) def create_network(self, name: str) -> str: return self._client.networks.create(name).id def get_client(self) -> docker.DockerClient: return self._client def is_registry_label(self, label: str) -> bool: try: self._client.images.get_registry_data(label) return True except errors.NotFound: return False def split_tag(self, image_tag: str) -> Tuple[str, str]: repository, tag = utils.parse_repository_tag(image_tag) return repository, tag or 'latest' def pull_image(self, image_tag: str) -> str: repository, tag = self.split_tag(image_tag) return self._client.images.pull(repository, tag=tag).id def load_image(self, path: str) -> str: with open(path, 'rb') as data: images = self._client.images.load(data) if len(images) != 1: raise ValueError(f'{path} must contain precisely one image') return images[0].id
Apache License 2.0
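A hypothetical invocation of run_container through the module's cached adapter; it assumes a local Docker daemon with the named image already available, and every argument value is illustrative.

from xmanager.docker import docker_adapter

adapter = docker_adapter.instance()
container = adapter.run_container(
    name='worker-0',
    image_id='python:3.10-slim',
    args=['python', '-c', 'print("hello from the container")'],
    env_vars={'GREETING': 'hi'},
    network='bridge',
    ports={8080: 8080},
    volumes={'/tmp/host-data': '/data'},
)
# run_container starts the container detached and removes it on exit.
print(container.name)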
terrapower/armi
armi/scripts/migration/m0_1_0_newDbFormat.py
_collectSymmetry
python
def _collectSymmetry(oldDB):
    geomPath = "/inputs/geomFile"
    if geomPath in oldDB:
        geom = systemLayoutInput.SystemLayoutInput()
        geom.readGeomFromStream(io.StringIO(oldDB["inputs/geomFile"][()]))
        return {"symmetry": geom.symmetry, "geomType": geom.geomType}
Read symmetry and geomType off the old-style geometry input string in the DB.
https://github.com/terrapower/armi/blob/b4fceeb5c3c7f2feeaa8c9ac05aa635e5f1a15a0/armi/scripts/migration/m0_1_0_newDbFormat.py#L173-L179
import os import io import h5py from armi import __version__ as version from armi import getApp, runLog, utils from armi.reactor import geometry from armi.reactor import systemLayoutInput from armi.scripts.migration.base import DatabaseMigration class ConvertDB2toDB3(DatabaseMigration): def __init__(self, stream=None, path=None): DatabaseMigration.__init__(self, stream=stream, path=path) if stream: raise ValueError("Can only migrate database by path.") def apply(self): _migrateDatabase(self.path, _preCollector, _visit, _postApplier) def _migrateDatabase(databasePath, preCollector, visitor, postApplier): if not os.path.exists(databasePath): raise OSError("Database file {} does not exist".format(databasePath)) runLog.info("Migrating database file: {}".format(databasePath)) runLog.info("Generating SHA-1 hash for original database: {}".format(databasePath)) shaHash = utils.getFileSHA1Hash(databasePath) runLog.info(" Database: {}\n" " SHA-1: {}".format(databasePath, shaHash)) _remoteFolder, remoteDbName = os.path.split(databasePath) root, ext = os.path.splitext(remoteDbName) newDBName = root + "_migrated" + ext runLog.info("Copying database from {} to {}".format(databasePath, newDBName)) with h5py.File(newDBName, "w") as newDB, h5py.File(databasePath, "r") as oldDB: preCollection = preCollector(oldDB) def closure(name, dataset): visitor(newDB, preCollection, name, dataset) oldDB.visititems(closure) for key, val in oldDB.attrs.items(): newDB.attrs[key] = val newDB.attrs["original-armi-version"] = oldDB.attrs["version"] newDB.attrs["original-db-hash"] = shaHash newDB.attrs["original-databaseVersion"] = oldDB.attrs["databaseVersion"] newDB.attrs["version"] = version postApplier(oldDB, newDB, preCollection) runLog.info("Successfully generated migrated database file: {}".format(newDBName)) def _visit(newDB, preCollection, name, dataset): updated = False path = name.split("/") if path[0] == "inputs": pass elif len(path) > 1 and path[1] == "layout": updated = _updateLayout(newDB, preCollection, name, dataset) elif len(path) == 3: updated = _updateParams(newDB, preCollection, name, dataset) if not updated: if isinstance(dataset, h5py.Group): msg = "Skipped" else: newDB.copy(dataset, dataset.name) msg = "Copied" else: msg = "Updated" runLog.important(f"{msg} Dataset {name}") def _preCollector(oldDB): preCollection = {} preCollection.update(_collectParamRenames()) preCollection.update(_collectSymmetry(oldDB)) return preCollection def _postApplier(oldDB, newDB, preCollection): pass def _updateLayout(newDB, preCollection, name, dataset): path = name.split("/") if len(path) == 4 and path[2] == "grids" and path[3] != "type": if "symmetry" not in dataset: newDB.create_dataset(f"{name}/symmetry", data=preCollection["symmetry"]) if "geomType" not in dataset: newDB.create_dataset(f"{name}/geomType", data=preCollection["geomType"]) gridGroup = newDB[f"{name}"] for key, val in dataset.attrs.items(): gridGroup.attrs[key] = val return True return False def _updateParams(newDB, preCollection, name, dataset): renames = preCollection["paramRenames"] updated = _applyRenames(newDB, renames, name, dataset) return updated def _collectParamRenames(): return {"paramRenames": getApp().getParamRenames()}
Apache License 2.0
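The same parse step sketched outside the migration with a stand-in geometry string; in the migration the string comes from the old database's /inputs/geomFile dataset, so the geom.xml file here is hypothetical.

import io

from armi.reactor import systemLayoutInput

with open('geom.xml') as f:          # stand-in for oldDB["inputs/geomFile"][()]
    geom_str = f.read()

geom = systemLayoutInput.SystemLayoutInput()
geom.readGeomFromStream(io.StringIO(geom_str))
print(geom.symmetry, geom.geomType)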
beijbom/coralnet
project/vision_backend/tasks.py
_handle_job_result
python
def _handle_job_result(job_res: JobReturnMsg):
    task_name = job_res.original_job.task_name
    if not job_res.ok:
        # The whole spacer job failed; notify and bail out.
        logger.error("Job failed: {}".format(job_res.error_message))
        if task_name == 'train_classifier':
            train_fail(job_res)
        elif task_name == 'classify_image':
            mail_admins("Spacer job failed", repr(job_res))
            deploy_fail(job_res)
        else:
            mail_admins("Spacer job failed", repr(job_res))
        return
    # Collect each task's result and trigger the appropriate follow-up work.
    for task, res in zip(job_res.original_job.tasks, job_res.results):
        pk = th.decode_spacer_job_token(task.job_token)[0]
        if task_name == 'extract_features':
            if th.featurecollector(task, res):
                classify_image.apply_async(args=[pk],
                                           eta=now() + timedelta(seconds=10))
        elif task_name == 'train_classifier':
            if th.classifiercollector(task, res):
                # A new classifier was accepted; reclassify unconfirmed images.
                classifier = Classifier.objects.get(pk=pk)
                for image in Image.objects.filter(source=classifier.source,
                                                  features__extracted=True,
                                                  annoinfo__confirmed=False):
                    classify_image.apply_async(
                        args=[image.id],
                        eta=now() + timedelta(seconds=10))
        elif task_name == 'classify_image':
            th.deploycollector(task, res)
        else:
            logger.error('Job task type {} not recognized'.format(task_name))
        logger.info("Collected job: {} with pk: {}".format(task_name, pk))
Handles the job results found in the queue.
https://github.com/beijbom/coralnet/blob/1f47f666a783f5ed4bcb5057513a4ae76e3d2d8c/project/vision_backend/tasks.py#L285-L330
import logging from datetime import timedelta from celery.decorators import task, periodic_task from django.conf import settings from django.core.files.storage import get_storage_class from django.core.mail import mail_admins from django.db import IntegrityError from django.utils import timezone from django.utils.timezone import now from spacer.messages import ExtractFeaturesMsg, TrainClassifierMsg, ClassifyFeaturesMsg, ClassifyImageMsg, ClassifyReturnMsg, JobMsg, JobReturnMsg, DataLocation from spacer.tasks import classify_features as spacer_classify_features from accounts.utils import get_robot_user from annotations.models import Annotation from api_core.models import ApiJobUnit from images.models import Source, Image, Point from labels.models import Label from . import task_helpers as th from .models import Classifier, Score, BatchJob from .queues import get_queue_class logger = logging.getLogger(__name__) @task(name="Submit Features") def submit_features(image_id, force=False): try: img = Image.objects.get(pk=image_id) except Image.DoesNotExist: logger.info("Image {} does not exist.".format(image_id)) return log_str = "Image {} [Source: {} [{}]]".format(image_id, img.source, img.source_id) if img.features.extracted and not force: logger.info("{} already has features".format(log_str)) return storage = get_storage_class()() rowcols = [(p.row, p.column) for p in Point.objects.filter(image=img)] task = ExtractFeaturesMsg( job_token=th.encode_spacer_job_token([image_id]), feature_extractor_name=img.source.feature_extractor, rowcols=rowcols, image_loc=storage.spacer_data_loc(img.original_file.name), feature_loc=storage.spacer_data_loc( settings.FEATURE_VECTOR_FILE_PATTERN.format( full_image_path=img.original_file.name)) ) msg = JobMsg(task_name='extract_features', tasks=[task]) queue = get_queue_class()() queue.submit_job(msg) logger.info("Submitted feature extraction for {}".format(log_str)) return msg @periodic_task(run_every=timedelta(hours=24), name='Periodic Classifiers Submit', ignore_result=True) def submit_all_classifiers(): for source in Source.objects.filter(): if source.need_new_robot(): submit_classifier.delay(source.id) @task(name="Submit Classifier") def submit_classifier(source_id, nbr_images=1e5, force=False): try: source = Source.objects.get(pk=source_id) except Source.DoesNotExist: logger.info("Can't find source [{}]".format(source_id)) return if not source.need_new_robot() and not force: logger.info("Source {} [{}] don't need new classifier.".format( source.name, source.pk)) return images = Image.objects.filter(source=source, annoinfo__confirmed=True, features__extracted=True)[:nbr_images] classifier = Classifier(source=source, nbr_train_images=len(images)) classifier.save() logger.info("Preparing new classifier ({}) for {} [{}].".format( classifier.pk, source.name, source.pk)) storage = get_storage_class()() train_labels = th.make_dataset([image for image in images if image.trainset]) val_labels = th.make_dataset([image for image in images if image.valset]) prev_classifiers = source.get_accepted_robots() pc_pks = [pc.pk for pc in prev_classifiers] task = TrainClassifierMsg( job_token=th.encode_spacer_job_token([classifier.pk] + pc_pks), trainer_name='minibatch', nbr_epochs=settings.NBR_TRAINING_EPOCHS, clf_type=settings.CLASSIFIER_MAPPINGS[source.feature_extractor], train_labels=train_labels, val_labels=val_labels, features_loc=storage.spacer_data_loc(''), previous_model_locs=[storage.spacer_data_loc( settings.ROBOT_MODEL_FILE_PATTERN.format(pk=pc.pk)) for pc in 
prev_classifiers], model_loc=storage.spacer_data_loc( settings.ROBOT_MODEL_FILE_PATTERN.format(pk=classifier.pk)), valresult_loc=storage.spacer_data_loc( settings.ROBOT_MODEL_VALRESULT_PATTERN.format(pk=classifier.pk)) ) msg = JobMsg(task_name='train_classifier', tasks=[task]) queue = get_queue_class()() queue.submit_job(msg) logger.info("Submitted classifier {} for source {} [{}] with {} images.". format(classifier.pk, source.name, source.id, len(images))) return msg @task(name="Deploy") def deploy(job_unit_id): try: job_unit = ApiJobUnit.objects.get(pk=job_unit_id) except ApiJobUnit.DoesNotExist: logger.info("Job unit of id {} does not exist.".format(job_unit_id)) return job_unit.status = ApiJobUnit.IN_PROGRESS job_unit.save() classifier_id = job_unit.request_json['classifier_id'] try: classifier = Classifier.objects.get(pk=classifier_id) except Classifier.DoesNotExist: error_message = ( "Classifier of id {} does not exist. Maybe it was deleted." .format(classifier_id)) job_unit.result_json = dict( url=job_unit.request_json['url'], errors=[error_message], ) job_unit.status = ApiJobUnit.FAILURE job_unit.save() logger.error(error_message) return storage = get_storage_class()() task = ClassifyImageMsg( job_token=th.encode_spacer_job_token([job_unit_id]), image_loc=DataLocation( storage_type='url', key=job_unit.request_json['url'] ), feature_extractor_name=classifier.source.feature_extractor, rowcols=[(point['row'], point['column']) for point in job_unit.request_json['points']], classifier_loc=storage.spacer_data_loc( settings.ROBOT_MODEL_FILE_PATTERN.format(pk=classifier.pk)) ) msg = JobMsg(task_name='classify_image', tasks=[task]) queue = get_queue_class()() queue.submit_job(msg) logger.info("Submitted image at url: {} for deploy with job unit {}.". format(job_unit.request_json['url'], job_unit.pk)) return msg @task(name="Classify Image") def classify_image(image_id): try: img = Image.objects.get(pk=image_id) except Image.DoesNotExist: logger.info("Image {} does not exist.".format(image_id)) return if not img.features.extracted: return classifier = img.source.get_latest_robot() if not classifier: return storage = get_storage_class()() msg = ClassifyFeaturesMsg( job_token=th.encode_spacer_job_token([image_id]), feature_loc=storage.spacer_data_loc( settings.FEATURE_VECTOR_FILE_PATTERN.format( full_image_path=img.original_file.name)), classifier_loc=storage.spacer_data_loc( settings.ROBOT_MODEL_FILE_PATTERN.format(pk=classifier.pk) ) ) res: ClassifyReturnMsg = spacer_classify_features(msg) label_objs = [Label.objects.get(pk=pk) for pk in res.classes] if not img.annoinfo.confirmed: try: th.add_annotations(image_id, res, label_objs, classifier) except IntegrityError: logger_message = "Failed to classify Image {} [Source: {} [{}] with " "classifier {}. There might have been a race condition " "when trying to save annotations. Will try again later." logger.info(logger_message.format(img.id, img.source, img.source_id, classifier.id)) classify_image.apply_async(args=[image_id], eta=now() + timedelta(seconds=10)) return th.add_scores(image_id, res, label_objs) img.features.classified = True img.features.save() logger.info("Classified Image {} [Source: {} [{}]] with classifier {}". 
format(img.id, img.source, img.source_id, classifier.id)) @periodic_task(run_every=timedelta(seconds=60), name='Collect all jobs', ignore_result=True) def collect_all_jobs(): logger.info('Collecting all jobs in result queue.') queue = get_queue_class()() while True: job_res = queue.collect_job() if job_res: _handle_job_result(job_res) else: break logger.info('Done collecting all jobs in result queue.')
BSD 2-Clause Simplified License
richardaecn/class-balanced-loss
tpu/models/official/amoeba_net/model_builder.py
_build_loss
python
def _build_loss(loss_fn, loss_name, logits, end_points, labels,
                add_summary=False):
    losses = []
    aux_head_endpoint = None
    if 'AuxLogits' in end_points:
        aux_head_endpoint = end_points['AuxLogits']
    elif 'aux_logits' in end_points:
        aux_head_endpoint = end_points['aux_logits'],
    if aux_head_endpoint:
        aux_loss = loss_fn(
            labels,
            tf.squeeze(aux_head_endpoint, axis=[0]),
            weights=0.4,
            scope='aux_loss')
        tf.logging.info('Adding to aux loss.')
        if add_summary:
            tf.summary.scalar('losses/aux_loss', aux_loss)
        losses.append(aux_loss)
    primary_loss = loss_fn(labels, logits, weights=1.0, scope=loss_name)
    losses.append(primary_loss)
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if reg_losses:
        fp32_reg_losses = []
        for reg_loss in reg_losses:
            fp32_reg_losses.append(tf.cast(reg_loss, tf.float32))
        reg_loss = tf.add_n(fp32_reg_losses, name='regularization_loss')
        losses.append(reg_loss)
    total_loss = tf.add_n(losses, name='total_loss')
    if add_summary:
        tf.summary.scalar('losses/' + loss_name, primary_loss)
        tf.summary.scalar('losses/regularization_loss', reg_loss)
        tf.summary.scalar('losses/total_loss', total_loss)
    return total_loss
Compute total loss based on the specified loss function.
https://github.com/richardaecn/class-balanced-loss/blob/1d7857208a2abc03d84e35a9d5383af8225d4b4d/tpu/models/official/amoeba_net/model_builder.py#L34-L80
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import numpy as np
import tensorflow as tf

import network_utils

arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
MIT License
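A TF 1.x graph-mode sketch of calling _build_loss on a toy graph; tf.losses.softmax_cross_entropy is one loss_fn whose (labels, logits, weights, scope) call pattern matches how it is invoked above, and the shapes and values here are made up.

import tensorflow as tf  # TF 1.x API, as used by the repository

labels = tf.one_hot([1, 0], depth=10)
logits = tf.random_normal([2, 10])

total_loss = _build_loss(
    loss_fn=tf.losses.softmax_cross_entropy,
    loss_name='softmax_loss',
    logits=logits,
    end_points={},           # no auxiliary head in this toy graph
    labels=labels,
    add_summary=False)

with tf.Session() as sess:
    print(sess.run(total_loss))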
schemaorg/sdopythonapp
lib/pyRdfa/utils.py
dump
python
def dump(node) :
    print( node.toprettyxml(indent="", newl="") )
This is just for debug purposes: it prints the essential content of the node
in the tree starting at node.

@param node: DOM node
https://github.com/schemaorg/sdopythonapp/blob/128be97d359178b26e5211a3e758933ff3a7b3df/lib/pyRdfa/utils.py#L243-L249
import os, os.path, sys, imp, datetime, socket if sys.version_info[0] >= 3 : from urllib.request import Request from urllib.parse import urljoin, quote from http.server import BaseHTTPRequestHandler from urllib.error import HTTPError as urllib_HTTPError else : from urllib2 import Request from urllib2 import HTTPError as urllib_HTTPError from urlparse import urljoin from urllib import quote from BaseHTTPServer import BaseHTTPRequestHandler from .extras.httpheader import content_type, parse_http_datetime import rdflib if rdflib.__version__ >= "3.0.0" : from rdflib import RDF as ns_rdf else : from rdflib.RDF import RDFNS as ns_rdf from .host import HostLanguage, preferred_suffixes class URIOpener : CONTENT_LOCATION = 'Content-Location' CONTENT_TYPE = 'Content-Type' LAST_MODIFIED = 'Last-Modified' EXPIRES = 'Expires' def __init__(self, name, additional_headers = {}) : try : url = name.split('#')[0] if socket.getfqdn().endswith('.w3.org'): import checkremote checkremote.check_url_safety(url) if 'Accept' not in additional_headers: additional_headers['Accept'] = 'text/html, application/xhtml+xml' import requests r = requests.get(url, headers=additional_headers, verify=False) self.data = r.content self.headers = r.headers if URIOpener.CONTENT_TYPE in self.headers : ct = content_type(self.headers[URIOpener.CONTENT_TYPE]) self.content_type = ct.media_type if 'charset' in ct.parmdict : self.charset = ct.parmdict['charset'] else : self.charset = None else : self.charset = None self.content_type = "" for suffix in preferred_suffixes.keys() : if name.endswith(suffix) : self.content_type = preferred_suffixes[suffix] break if URIOpener.CONTENT_LOCATION in self.headers : self.location = urljoin(r.url,self.headers[URIOpener.CONTENT_LOCATION]) else : self.location = name self.expiration_date = datetime.datetime.utcnow() + datetime.timedelta(days=1) if URIOpener.EXPIRES in self.headers : try : self.expiration_date = parse_http_datetime(self.headers[URIOpener.EXPIRES]) except : pass self.last_modified_date = None if URIOpener.LAST_MODIFIED in self.headers : try : self.last_modified_date = parse_http_datetime(self.headers[URIOpener.LAST_MODIFIED]) except : pass except urllib_HTTPError : e = sys.exc_info()[1] from . import HTTPError msg = BaseHTTPRequestHandler.responses[e.code] raise HTTPError('%s' % msg[1], e.code) except Exception : e = sys.exc_info()[1] from . import RDFaError raise RDFaError('%s' % e) _unquotedChars = ':/\?=#~' _warnChars = [' ','\n','\r','\t'] def quote_URI(uri, options = None) : from . 
import err_unusual_char_in_URI suri = uri.strip() for c in _warnChars : if suri.find(c) != -1 : if options != None : options.add_warning(err_unusual_char_in_URI % suri) break return quote(suri, _unquotedChars) def create_file_name(uri) : suri = uri.strip() final_uri = quote(suri,_unquotedChars) return final_uri.replace(' ','_').replace('%','_').replace('-','_').replace('+','_').replace('/','_').replace('?','_').replace(':','_').replace('=','_').replace('#','_') def has_one_of_attributes(node,*args) : if len(args) == 0 : return None if isinstance(args[0], tuple) or isinstance(args[0], list) : rargs = args[0] else : rargs = args return True in [ node.hasAttribute(attr) for attr in rargs ] def traverse_tree(node, func) : if func(node) : return for n in node.childNodes : if n.nodeType == node.ELEMENT_NODE : traverse_tree(n, func) def return_XML(state, inode, base = True, xmlns = True) : node = inode.cloneNode(True) if base : node.setAttribute("xml:base",state.base) if xmlns : for prefix in state.term_or_curie.xmlns : if not node.hasAttribute("xmlns:%s" % prefix) : node.setAttribute("xmlns:%s" % prefix,"%s" % state.term_or_curie.xmlns[prefix]) if not node.getAttribute("xmlns") and state.defaultNS != None : node.setAttribute("xmlns", state.defaultNS) if sys.version_info[0] >= 3 : return node.toxml() else : q = node.toxml(encoding='utf-8') return unicode(q, encoding='utf-8')
Apache License 2.0
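A small usage sketch with an xml.dom.minidom node, which supports the toprettyxml call used by dump; the XML snippet is made up and the dump helper above is assumed to be in scope.

from xml.dom.minidom import parseString

doc = parseString('<root><child attr="1">text</child></root>')
# Prints the pretty-printed XML of the element subtree.
dump(doc.documentElement)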
argoproj-labs/argo-client-python
argo/workflows/client/models/v1alpha1_artifact.py
V1alpha1Artifact.path
python
def path(self):
    return self._path
Gets the path of this V1alpha1Artifact.  # noqa: E501

Path is the container path to the artifact  # noqa: E501

:return: The path of this V1alpha1Artifact.  # noqa: E501
:rtype: str
https://github.com/argoproj-labs/argo-client-python/blob/993d684cab39a834770b296e028519cec035c7b5/argo/workflows/client/models/v1alpha1_artifact.py#L427-L435
import pprint import re import six from argo.workflows.client.configuration import Configuration class V1alpha1Artifact(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'archive': 'V1alpha1ArchiveStrategy', 'archive_logs': 'bool', 'artifactory': 'V1alpha1ArtifactoryArtifact', '_from': 'str', 'gcs': 'V1alpha1GCSArtifact', 'git': 'V1alpha1GitArtifact', 'global_name': 'str', 'hdfs': 'V1alpha1HDFSArtifact', 'http': 'V1alpha1HTTPArtifact', 'mode': 'int', 'name': 'str', 'optional': 'bool', 'oss': 'V1alpha1OSSArtifact', 'path': 'str', 'raw': 'V1alpha1RawArtifact', 'recurse_mode': 'bool', 's3': 'V1alpha1S3Artifact', 'sub_path': 'str' } attribute_map = { 'archive': 'archive', 'archive_logs': 'archiveLogs', 'artifactory': 'artifactory', '_from': 'from', 'gcs': 'gcs', 'git': 'git', 'global_name': 'globalName', 'hdfs': 'hdfs', 'http': 'http', 'mode': 'mode', 'name': 'name', 'optional': 'optional', 'oss': 'oss', 'path': 'path', 'raw': 'raw', 'recurse_mode': 'recurseMode', 's3': 's3', 'sub_path': 'subPath' } def __init__(self, archive=None, archive_logs=None, artifactory=None, _from=None, gcs=None, git=None, global_name=None, hdfs=None, http=None, mode=None, name=None, optional=None, oss=None, path=None, raw=None, recurse_mode=None, s3=None, sub_path=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._archive = None self._archive_logs = None self._artifactory = None self.__from = None self._gcs = None self._git = None self._global_name = None self._hdfs = None self._http = None self._mode = None self._name = None self._optional = None self._oss = None self._path = None self._raw = None self._recurse_mode = None self._s3 = None self._sub_path = None self.discriminator = None if archive is not None: self.archive = archive if archive_logs is not None: self.archive_logs = archive_logs if artifactory is not None: self.artifactory = artifactory if _from is not None: self._from = _from if gcs is not None: self.gcs = gcs if git is not None: self.git = git if global_name is not None: self.global_name = global_name if hdfs is not None: self.hdfs = hdfs if http is not None: self.http = http if mode is not None: self.mode = mode self.name = name if optional is not None: self.optional = optional if oss is not None: self.oss = oss if path is not None: self.path = path if raw is not None: self.raw = raw if recurse_mode is not None: self.recurse_mode = recurse_mode if s3 is not None: self.s3 = s3 if sub_path is not None: self.sub_path = sub_path @property def archive(self): return self._archive @archive.setter def archive(self, archive): self._archive = archive @property def archive_logs(self): return self._archive_logs @archive_logs.setter def archive_logs(self, archive_logs): self._archive_logs = archive_logs @property def artifactory(self): return self._artifactory @artifactory.setter def artifactory(self, artifactory): self._artifactory = artifactory @property def _from(self): return self.__from @_from.setter def _from(self, _from): self.__from = _from @property def gcs(self): return self._gcs @gcs.setter def gcs(self, gcs): self._gcs = gcs @property def git(self): return self._git @git.setter def git(self, git): self._git = git @property def global_name(self): return self._global_name @global_name.setter def 
global_name(self, global_name): self._global_name = global_name @property def hdfs(self): return self._hdfs @hdfs.setter def hdfs(self, hdfs): self._hdfs = hdfs @property def http(self): return self._http @http.setter def http(self, http): self._http = http @property def mode(self): return self._mode @mode.setter def mode(self, mode): self._mode = mode @property def name(self): return self._name @name.setter def name(self, name): if self.local_vars_configuration.client_side_validation and name is None: raise ValueError("Invalid value for `name`, must not be `None`") self._name = name @property def optional(self): return self._optional @optional.setter def optional(self, optional): self._optional = optional @property def oss(self): return self._oss @oss.setter def oss(self, oss): self._oss = oss @property
Apache License 2.0
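A hypothetical round trip through the generated property; only name is required by the model, and the path value is illustrative.

from argo.workflows.client.models.v1alpha1_artifact import V1alpha1Artifact

artifact = V1alpha1Artifact(name='trained-model', path='/mnt/out/model.pkl')
print(artifact.path)  # -> /mnt/out/model.pkl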
xilinx/finn
src/finn/util/imagenet.py
get_val_images
python
def get_val_images(n_images=100, interleave_classes=False):
    try:
        val_path = os.environ["IMAGENET_VAL_PATH"]
        val_folders = sorted(os.listdir(val_path))
        assert len(val_folders) == 1000, "Expected 1000 subfolders in ILSVRC2012 val"
        assert n_images <= 50000, "ILSVRC2012 validation dataset has 50k images"
        n_current_folder = 0
        n_current_file = 0
        total = 0
        while total != n_images:
            current_folder = os.path.join(val_path, val_folders[n_current_folder])
            current_files = sorted(os.listdir(current_folder))
            current_file = os.path.join(current_folder, current_files[n_current_file])
            yield (current_file, n_current_folder)
            total += 1
            if interleave_classes:
                # Advance class first: 0, 1, ..., 999, then move to the next file.
                n_current_folder += 1
                if n_current_folder == 1000:
                    n_current_file += 1
                    n_current_folder = 0
            else:
                # Exhaust the 50 files of a class before moving to the next class.
                n_current_file += 1
                if n_current_file == 50:
                    n_current_folder += 1
                    n_current_file = 0
    except KeyError:
        return None
Returns generator over (path_to_jpeg, imagenet_class_id) for the first
n_images in the ILSVRC2012 validation dataset.

The IMAGENET_VAL_PATH environment variable must point to the validation
dataset folder, containing 1000 folders (one for each ImageNet-1K class),
in turn each containing 50 test images.

interleave_classes controls the ordering of the picked images. If False
(default), consecutive images will have the same class until that class has
no more images. Otherwise, consecutive images will be from classes 0, 1, 2...
and back to class 0 after the first 1000 images.

For more information on how to prepare the ILSVRC2012 validation dataset,
please see:
https://github.com/Xilinx/brevitas/blob/dev/brevitas_examples/imagenet_classification/README.md
https://github.com/xilinx/finn/blob/bdfbd4b79088accf92e60fc1fe790e697500dfe7/src/finn/util/imagenet.py#L36-L76
import os

import numpy as np
from PIL import Image

from finn.core.data_layout import NCHW, NHWC
from finn.util.test import resize_smaller_side, crop_center
BSD 3-Clause New or Revised License
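A usage sketch; IMAGENET_VAL_PATH must point at a prepared ILSVRC2012 validation folder as described in the docstring, and the path below is a placeholder.

import os

os.environ["IMAGENET_VAL_PATH"] = "/data/ilsvrc2012/val"  # placeholder path

from finn.util.imagenet import get_val_images

# Interleaved order: classes 0, 1, 2, ... before repeating any class.
for jpeg_path, class_id in get_val_images(n_images=5, interleave_classes=True):
    print(class_id, jpeg_path)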
armet/python-armet
armet/resources/resource/base.py
Resource.view
python
def view(cls, request, response):
    test = cls.meta.trailing_slash
    if test ^ request.path.endswith('/'):
        path = request.path + '/' if test else request.path[:-1]
        response['Location'] = '{}://{}{}{}{}'.format(
            request.protocol.lower(),
            request.host,
            request.mount_point,
            path,
            '?' + request.query if request.query else '')
        return cls.redirect(request, response)
    try:
        obj = cls(request, response)
        request.bind(obj)
        response.bind(obj)
        obj._request = request
        result = obj.dispatch(request, response)
        if not response.asynchronous:
            if (isinstance(result, collections.Iterable) and
                    not isinstance(result, six.string_types)):
                return cls.stream(response, result)
            else:
                response.end(result)
            if response.body:
                return response.body
    except http.exceptions.BaseHTTPException as e:
        response.status = e.status
        response.headers.update(e.headers)
        if e.content:
            response.send(e.content, serialize=True, format='json')
        response.close()
        if response.body:
            return response.body
    except Exception:
        logger.exception('Internal server error')
        if not response.streaming and not response.closed:
            response.status = http.client.INTERNAL_SERVER_ERROR
            response.headers.clear()
            response.close()
Entry-point of the request / response cycle; Handles resource creation and
delegation.

@param[in] request
    The HTTP request object; containing accessors for information about
    the request.

@param[in] response
    The HTTP response object; contains accessors for modifying the
    information that will be sent to the client.
https://github.com/armet/python-armet/blob/d61eca9082256cb1e7f7f3c7f2fbc4b697157de7/armet/resources/resource/base.py#L71-L157
from __future__ import absolute_import, unicode_literals, division import logging import re import six import collections import mimeparse from armet import http, utils logger = logging.getLogger(__name__) class Resource(object): meta = None _deserializer_map = None _serializer_map = None def __new__(cls, request, response, *args, **kwargs): cls, params = cls.traverse(request) obj = super(Resource, cls).__new__(cls) obj.__dict__.update(params) return obj @classmethod def redirect(cls, request, response): if cls.meta.legacy_redirect: if request.method in ('GET', 'HEAD',): response.status = http.client.MOVED_PERMANENTLY else: response.status = http.client.TEMPORARY_REDIRECT else: response.status = http.client.PERMANENT_REDIRECT response.close() @classmethod
MIT License
nuagenetworks/vspk-python
vspk/v5_0/nueventlog.py
NUEventLog.entity_type
python
def entity_type(self, value):
    self._entity_type = value
Set entity_type value.

Notes:
    The entity type of this event. It may be Domain, VirtualMachine, etc.

    This attribute is named `entityType` in VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v5_0/nueventlog.py#L335-L345
from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from bambou import NURESTObject class NUEventLog(NURESTObject): __rest_name__ = "eventlog" __resource_name__ = "eventlogs" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): super(NUEventLog, self).__init__() self._request_id = None self._diff = None self._enterprise = None self._entities = None self._entity_id = None self._entity_parent_id = None self._entity_parent_type = None self._entity_scope = None self._entity_type = None self._user = None self._event_received_time = None self._external_id = None self._type = None self.expose_attribute(local_name="request_id", remote_name="requestID", attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name="diff", remote_name="diff", attribute_type=dict, is_required=False, is_unique=False) self.expose_attribute(local_name="enterprise", remote_name="enterprise", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entities", remote_name="entities", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_id", remote_name="entityID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_parent_id", remote_name="entityParentID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_parent_type", remote_name="entityParentType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="entity_type", remote_name="entityType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="user", remote_name="user", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="event_received_time", remote_name="eventReceivedTime", attribute_type=float, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False) self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) @property def request_id(self): return self._request_id @request_id.setter def request_id(self, value): self._request_id = value @property def diff(self): return self._diff @diff.setter def diff(self, value): self._diff = value @property def enterprise(self): return self._enterprise @enterprise.setter def enterprise(self, value): self._enterprise = value @property def entities(self): return self._entities @entities.setter def entities(self, value): self._entities = value @property def entity_id(self): return self._entity_id @entity_id.setter def entity_id(self, value): self._entity_id = value @property def entity_parent_id(self): return self._entity_parent_id @entity_parent_id.setter def entity_parent_id(self, value): self._entity_parent_id = value @property def entity_parent_type(self): return self._entity_parent_type @entity_parent_type.setter def entity_parent_type(self, value): self._entity_parent_type = 
value @property def entity_scope(self): return self._entity_scope @entity_scope.setter def entity_scope(self, value): self._entity_scope = value @property def entity_type(self): return self._entity_type @entity_type.setter
BSD 3-Clause New or Revised License
ngageoint/sarpy
sarpy/annotation/rcs.py
RCSValueCollection.elements
python
def elements(self):
    return self._elements
None|List[RCSValue]: The RCSValue elements.
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/annotation/rcs.py#L272-L278
__classification__ = "UNCLASSIFIED" __author__ = "Thomas McCullough" import logging from collections import OrderedDict import os import json from typing import Union, Any, List, Dict from sarpy.geometry.geometry_elements import _Jsonable, FeatureCollection, Feature, Polygon, MultiPolygon from sarpy.compliance import string_types, int_func, integer_types logger = logging.getLogger(__name__) class RCSStatistics(_Jsonable): __slots__ = ('name', 'mean', 'std', 'max', 'min') _type = 'RCSStatistics' def __init__(self, name=None, mean=None, std=None, max=None, min=None): if mean is not None: mean = float(mean) if std is not None: std = float(std) if max is not None: max = float(max) if min is not None: min = float(min) self.name = name self.mean = mean self.std = std self.max = max self.min = min @classmethod def from_dict(cls, the_json): typ = the_json['type'] if typ != cls._type: raise ValueError('RCSStatistics cannot be constructed from {}'.format(the_json)) return cls( name=the_json.get('name', None), mean=the_json.get('mean', None), std=the_json.get('std', None), max=the_json.get('max', None), min=the_json.get('min', None)) def to_dict(self, parent_dict=None): if parent_dict is None: parent_dict = OrderedDict() parent_dict['type'] = self.type for attr in self.__slots__: parent_dict[attr] = getattr(self, attr) return parent_dict class RCSValue(_Jsonable): __slots__ = ('polarization', '_statistics', '_name_to_index') _type = 'RCSValue' def __init__(self, polarization=None, statistics=None): self._statistics = None self._name_to_index = None self.polarization = polarization if statistics is not None: self.statistics = statistics def __len__(self): if self._statistics is None: return 0 return len(self._statistics) def __getitem__(self, item): if isinstance(item, string_types): return self._statistics[self._name_to_index[item]] return self._statistics[item] @property def statistics(self): return self._statistics @statistics.setter def statistics(self, statistics): if statistics is None: self._statistics = None if not isinstance(statistics, list): raise TypeError('statistics must be a list of RCSStatistics elements') for element in statistics: self.insert_new_element(element) def insert_new_element(self, element): if isinstance(element, dict): element = RCSStatistics.from_dict(element) if not isinstance(element, RCSStatistics): raise TypeError('element must be an RCSStatistics instance') if self._statistics is None: self._statistics = [element,] self._name_to_index = {element.name : 0} else: self._statistics.append(element) self._name_to_index[element.name] = len(self._statistics) - 1 @classmethod def from_dict(cls, the_json): typ = the_json['type'] if typ != cls._type: raise ValueError('RCSValue cannot be constructed from {}'.format(the_json)) return cls(polarization=the_json.get('polarization', None), statistics=the_json.get('statistics', None)) def to_dict(self, parent_dict=None): if parent_dict is None: parent_dict = OrderedDict() parent_dict['type'] = self.type parent_dict['polarization'] = self.polarization if self._statistics is None: parent_dict['statistics'] = None else: parent_dict['statistics'] = [entry.to_dict() for entry in self._statistics] return parent_dict class RCSValueCollection(_Jsonable): __slots__ = ('_name', '_description', '_pixel_count', '_elements') _type = 'RCSValueCollection' def __init__(self, name=None, description=None, pixel_count=None, elements=None): self._name = None self._description = None self._pixel_count = None self._elements = None self.name = name 
self.description = description self.pixel_count = pixel_count self.elements = elements def __len__(self): if self._elements is None: return 0 return len(self._elements) def __getitem__(self, item): return self._elements[item] @property def name(self): return self._name @name.setter def name(self, value): if value is None: self._name = None return if not isinstance(value, string_types): raise TypeError('name is required to be of string type.') self._name = value @property def description(self): return self._description @description.setter def description(self, value): if value is None: self._description = None return if not isinstance(value, string_types): raise TypeError('description is required to be of string type.') self._description = value @property def pixel_count(self): return self._pixel_count @pixel_count.setter def pixel_count(self, value): if value is None: self._pixel_count = None return if not isinstance(value, integer_types): value = int_func(value) self._pixel_count = value @property
MIT License
nic30/hwt
hwt/simulator/simTestCase.py
SimTestCase.rmSim
python
def rmSim(self):
    self.u = None
    self.__class__.u = None
    self.rtl_simulator_cls = None
    self.__class__.rtl_simulator_cls = None
    self.rtl_simulator = None
    self.hdl_simulator = None
    self.__class__.hdl_simulator = None
Remove all built sim objects from this object. :note: Can be used to avoid unnecessary sim initialization (from prev. test) before the next test.
https://github.com/nic30/hwt/blob/db57819a4234d818d9bc00b927e5bc208195a530/hwt/simulator/simTestCase.py#L160-L172
import os from random import Random from typing import Optional import unittest from hwt.simulator.agentConnector import autoAddAgents, collect_processes_from_sim_agents from hwt.simulator.rtlSimulatorVcd import BasicRtlSimulatorVcd from hwt.simulator.utils import reconnectUnitSignalsToModel, valToInt, allValuesToInts from hwt.synthesizer.dummyPlatform import DummyPlatform from hwt.synthesizer.unit import Unit from hwtSimApi.constants import CLK_PERIOD from hwtSimApi.hdlSimulator import HdlSimulator from hwtSimApi.triggers import Timer class DummySimPlatform(DummyPlatform): _UNSPECIFIED = object() class SimTestCase(unittest.TestCase): _defaultSeed = 317 RECOMPILE = True rtl_simulator_cls = None hdl_simulator = None DEFAULT_BUILD_DIR = None DEFAULT_LOG_DIR = "tmp" DEFAULT_SIMULATOR = BasicRtlSimulatorVcd def assertValEqual(self, first, second, msg=None): try: first = first.read() except AttributeError: pass if not isinstance(first, int) and first is not None: first = valToInt(first) return unittest.TestCase.assertEqual(self, first, second, msg=msg) def assertEmpty(self, val, msg=None): return unittest.TestCase.assertEqual(self, len(val), 0, msg=msg) def assertValSequenceEqual(self, seq1, seq2, msg=None, seq_type=None): seq1 = allValuesToInts(seq1) if len(seq1) == len(seq2): _seq2 = [] for v1, v2 in zip(seq1, seq2): if v2 is None: v2 = v1 _seq2.append(v2) seq2 = _seq2 self.assertSequenceEqual(seq1, seq2, msg, seq_type) def getTestName(self): className, testName = self.id().split(".")[-2:] return f"{className:s}_{testName:s}" def runSim(self, until: float, name=None): if name is None: if self.DEFAULT_LOG_DIR is None: outputFileName = None else: outputFileName = os.path.join(self.DEFAULT_LOG_DIR, self.getTestName() + ".vcd") else: outputFileName = name if outputFileName is not None: d = os.path.dirname(outputFileName) if d: os.makedirs(d, exist_ok=True) self.rtl_simulator.set_trace_file(outputFileName, -1) procs = collect_processes_from_sim_agents(self.u) self.hdl_simulator.run(until=until, extraProcesses=self.procs + procs) self.rtl_simulator.finalize() return self.hdl_simulator def randomize(self, intf): assert intf._isExtern, intf assert intf._ag is not None, intf randomEnProc = simpleRandomizationProcess(self, intf._ag) self.procs.append(randomEnProc()) def restartSim(self): rtl_simulator = self.rtl_simulator_cls() hdl_simulator = HdlSimulator(rtl_simulator) unit = self.u reconnectUnitSignalsToModel(unit, rtl_simulator) autoAddAgents(unit, hdl_simulator) self.procs = [] self.u, self.rtl_simulator, self.hdl_simulator = unit, rtl_simulator, hdl_simulator return unit, rtl_simulator, self.procs
MIT License
mdlockyer/printtags
PrintTags/print_tags.py
success
python
def success(*args: Any, tag_text: Optional[str] = 'success', add_datetime: bool = False,
            prefix: Optional[str] = None, sep: str = ' ', end: str = '\n',
            closed_ok: bool = False, file: Optional[TextIO] = None,
            flush: bool = False) -> None:
    tag: Optional[str] = tag_text if tag_text is None else f'[{tag_text}]'
    _print_with_color(args, Colors.green, add_datetime, (prefix, tag), sep, end, closed_ok, file, flush)
Used to indicate successful execution.

Args:
    tag_text (str, optional): The text content of the tag that will be prepended to the print.
        `None` for no tag. Default `'success'`.
    add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
    prefix (str, optional): A string interpolatable value that will be prepended to the print. Default `None`.
    sep (str, optional): string inserted between values, default is a space. Default `' '`.
    end (str, optional): string appended after the last value, default is a newline. Default `'\n'`.
    closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout
        should be suppressed. Default `False`.
    file (TextIO, optional): defaults to the current sys.stdout. Default `None`.
    flush (bool, optional): whether to forcibly flush the stream. Default `False`.
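A short usage sketch for the function above (a hedged example: it imports directly from the module path shown in this record, and the message strings are invented):

from PrintTags.print_tags import success
import sys

# Prints "[success] model saved" in green, using the defaults documented above.
success('model saved')

# Timestamped, custom tag text, routed to stderr instead of stdout.
success('checkpoint written', tag_text='ckpt', add_datetime=True, file=sys.stderr)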
https://github.com/mdlockyer/printtags/blob/9a1b4bbbeaee2ac91b0f96c745c7cf0a7823e831/PrintTags/print_tags.py#L221-L241
from datetime import datetime from .colors import Colors from typing import List, Tuple, TextIO, Optional, Callable, Any def _get_datetime() -> str: return datetime.now().strftime('%d-%b-%Y %I:%M:%S%p') def _print_with_color(args: Tuple[Any, ...], color_fn: Callable[[str], str], add_datetime: bool, prefixes: Tuple[Optional[str], ...], sep: str, end: str, closed_ok: bool, file: Optional[TextIO], flush: bool) -> None: _args: List[str] = [str(arg) for arg in args] for prefix in reversed(prefixes): if prefix is None: continue _args[0] = f'{prefix}{_args[0]}' if prefix.endswith(' ') else f'{prefix} {_args[0]}' if add_datetime: _args[0] = f'{_get_datetime()} {_args[0]}' _args = [color_fn(arg) for arg in _args] try: print(*_args, sep=color_fn(sep), end=color_fn(end), file=file, flush=flush) except ValueError: if closed_ok: pass else: raise def black(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: _print_with_color(args, Colors.black, add_datetime, (prefix,), sep, end, closed_ok, file, flush) def red(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: _print_with_color(args, Colors.red, add_datetime, (prefix,), sep, end, closed_ok, file, flush) def green(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: _print_with_color(args, Colors.green, add_datetime, (prefix,), sep, end, closed_ok, file, flush) def yellow(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: _print_with_color(args, Colors.yellow, add_datetime, (prefix,), sep, end, closed_ok, file, flush) def blue(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: _print_with_color(args, Colors.blue, add_datetime, (prefix,), sep, end, closed_ok, file, flush) def magenta(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: _print_with_color(args, Colors.magenta, add_datetime, (prefix,), sep, end, closed_ok, file, flush) def cyan(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: _print_with_color(args, Colors.cyan, add_datetime, (prefix,), sep, end, closed_ok, file, flush) def white(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: _print_with_color(args, Colors.white, add_datetime, (prefix,), sep, end, closed_ok, file, flush) def info(*args: Any, tag_text: Optional[str] = 'info', add_datetime: bool = False, prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False, file: Optional[TextIO] = None, flush: bool = False) -> None: tag: Optional[str] = tag_text if tag_text is None else f'[{tag_text}]' _print_with_color(args, Colors.cyan, 
add_datetime, (prefix, tag), sep, end, closed_ok, file, flush)
MIT License
cineuse/cncgtoolkit
apps/pw_multiScriptEditor/managers/nuke/main.py
Node.maxOutputs
python
def maxOutputs(self):
    return 0
self.maximumOutputs() -> Maximum number of outputs this node can have. @return: Maximum number of outputs this node can have.
https://github.com/cineuse/cncgtoolkit/blob/7a21f358e34aa276cf209a6d5887a7964190cf0a/apps/pw_multiScriptEditor/managers/nuke/main.py#L564-L568
import math, geo class KnobType(object): pass class Knob(object): def clearAnimated(self): pass def setLabel(self,s): pass def setTooltip(self,s): pass def removeKey(self): pass def removeKeyAt(self): pass def visible(self): pass def warning(self,message): pass def getIntegral(self): pass def isKeyAt(self): pass def hasExpression(self,index=-1): pass def __new__(self,S, ): pass def getKeyTime(self): pass def tooltip(self): return '' def label(self): return '' def setFlag(self,f): pass def getNumKeys(self): pass def critical(self,message): pass def toScript(self): return '' def clearFlag(self,f): pass def Class(self): return '' def node(self): return Node() def setEnabled(self,enabled): pass def setValue(self): pass def setName(self,s): pass def isAnimated(self): pass def setAnimated(self): pass def getDerivative(self): pass def setExpression(self,expr, channel=-1, view=None): pass def setValueAt(self,val, time, chan): pass def getNthDerivative(self): pass def getValueAt(self): pass def name(self): return '' def isKey(self): pass def fromScript(self): pass def enabled(self): pass def value(self): pass def getValue(self): pass def getKeyIndex(self): pass def error(self,message): pass def debug(self,message): pass def setVisible(self,visible): pass def animation(self, i): return AnimationCurve() class Node(object): def getNumKnobs(self): return 0 def writeKnobs(self ,i): return '' def autoplace(self): pass def forceValidate(self): pass def help(self): return '' def lastFrame(self): return 0 def setSelected(self,selected): pass def treeHasError(self): pass def maximumInputs(self): pass def hasError(self): pass def deepSample(self,chan, x, y, sample): return 0.0 def height(self): return 0 def sample(self, chan, x, y, dx, dy, frame): return 0.0 def setInput(self, i, node): pass def dependencies(self,what): return [Node(),] def canSetInput(self, i, node): pass def maximumOutputs(self): return 0 def screenWidth(self): return 0 def linkableKnobs(self,knobType): return [Knob(),] def minimumInputs(self): return 0 def firstFrame(self): return 0 def setXpos(self,x): pass def shown(self): pass def numKnobs(self): return 0 def maxInputs(self): return 0 def isSelected(self): pass def opHashes(self): return (0,) def setYpos(self,y): pass def showControlPanel(self): pass def width(self): return 0 def connectInput(self, i, node): pass def allKnobs(self): return {'':Knob(),} def deepSampleCount(self,x, y): return 0 def removeKnob(self,k): pass def input(self,i): return 0 def knobs(self): return {'':Knob(),} def Class(self): return ''
MIT License
openmined/sympc
src/sympc/tensor/replicatedshare_tensor.py
ReplicatedSharedTensor.__getitem__
python
def __getitem__(self, key: int) -> torch.Tensor:
    return self.shares[key]
Allows to subset shares.

Args:
    key (int): The share to be retrieved.

Returns:
    share (torch.Tensor): Returned share.
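A minimal indexing sketch (assuming `ReplicatedSharedTensor` is importable from `sympc.tensor`, matching the module path above; the share values are made up):

import torch
from sympc.tensor import ReplicatedSharedTensor

rst = ReplicatedSharedTensor(shares=[torch.tensor([1, 2]), torch.tensor([3, 4])])
first_share = rst[0]    # __getitem__ returns the (fixed-point encoded) share tensor at index 0
second_share = rst[1]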
https://github.com/openmined/sympc/blob/634396a9d663eda514f66654a96c23ae2ab7d2de/src/sympc/tensor/replicatedshare_tensor.py#L694-L703
import copy import dataclasses from functools import reduce import operator from typing import Any from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import Set from typing import Union from uuid import UUID import torch import sympc from sympc.config import Config from sympc.encoder import FixedPointEncoder from sympc.session import Session from sympc.tensor import ShareTensor from sympc.utils import RING_SIZE_TO_TYPE from sympc.utils import get_nr_bits from sympc.utils import get_type_from_ring from sympc.utils import islocal from sympc.utils import ispointer from sympc.utils import parallel_execution from .tensor import SyMPCTensor PROPERTIES_NEW_RS_TENSOR: Set[str] = {"T"} METHODS_NEW_RS_TENSOR: Set[str] = {"unsqueeze", "view", "t", "sum", "clone", "repeat"} BINARY_MAP = {"add": "xor", "sub": "xor", "mul": "and_"} PRIME_NUMBER = 67 class ReplicatedSharedTensor(metaclass=SyMPCTensor): __slots__ = { "id", "tags", "description", "shares", "session_uuid", "config", "fp_encoder", "ring_size", } METHODS_FORWARD = {"numel", "t", "unsqueeze", "view", "sum", "clone", "repeat"} PROPERTIES_FORWARD = {"T", "shape"} def __init__( self, shares: Optional[List[Union[float, int, torch.Tensor]]] = None, config: Config = Config(encoder_base=2, encoder_precision=16), session_uuid: Optional[UUID] = None, ring_size: int = 2 ** 64, ): self.session_uuid = session_uuid self.ring_size = ring_size if ring_size in {2, PRIME_NUMBER}: self.config = Config(encoder_base=1, encoder_precision=0) else: self.config = config self.fp_encoder = FixedPointEncoder( base=self.config.encoder_base, precision=self.config.encoder_precision ) tensor_type = get_type_from_ring(ring_size) self.shares = [] if shares is not None: self.shares = [self._encode(share).to(tensor_type) for share in shares] def _encode(self, data: torch.Tensor) -> torch.Tensor: return self.fp_encoder.encode(data) def decode(self) -> List[torch.Tensor]: return self._decode() def _decode(self) -> List[torch.Tensor]: shares = [] shares = [ self.fp_encoder.decode(share.type(torch.LongTensor)) for share in self.shares ] return shares def get_shares(self) -> List[torch.Tensor]: return self.shares def get_ring_size(self) -> str: return str(self.ring_size) def get_config(self) -> Dict: return dataclasses.asdict(self.config) @staticmethod def addmodprime(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: if x.dtype != torch.uint8 or y.dtype != torch.uint8: raise ValueError( f"Both tensors x:{x.dtype} y:{y.dtype} should be of torch.uint8 dtype" ) return (x + y) % PRIME_NUMBER @staticmethod def submodprime(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: if x.dtype != torch.uint8 or y.dtype != torch.uint8: raise ValueError( f"Both tensors x:{x.dtype} y:{y.dtype} should be of torch.uint8 dtype" ) x = x.to(torch.int8) y = y.to(torch.int8) result = (x - y) % PRIME_NUMBER return result.to(torch.uint8) @staticmethod def mulmodprime(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: if x.dtype != torch.uint8 or y.dtype != torch.uint8: raise ValueError( f"Both tensors x:{x.dtype} y:{y.dtype} should be of torch.uint8 dtype" ) x = x.to(torch.int16) y = y.to(torch.int16) result = (x * y) % PRIME_NUMBER return result.to(torch.uint8) @staticmethod def get_op(ring_size: int, op_str: str) -> Callable[..., Any]: op = None if ring_size == 2: op = getattr(operator, BINARY_MAP[op_str]) elif ring_size == PRIME_NUMBER: op = getattr(ReplicatedSharedTensor, op_str + "modprime") elif ring_size in RING_SIZE_TO_TYPE.keys(): op = 
getattr(operator, op_str) else: raise ValueError(f"Invalid ring size: {ring_size}") return op @staticmethod def sanity_checks( x: "ReplicatedSharedTensor", y: Union[int, float, torch.Tensor, "ReplicatedSharedTensor"], ) -> "ReplicatedSharedTensor": if not isinstance(y, ReplicatedSharedTensor): y = y % PRIME_NUMBER if x.ring_size == PRIME_NUMBER else y y = ReplicatedSharedTensor( session_uuid=x.session_uuid, shares=[y], ring_size=x.ring_size, config=x.config, ) elif y.session_uuid and x.session_uuid and y.session_uuid != x.session_uuid: raise ValueError( f"Session UUIDs did not match {x.session_uuid} {y.session_uuid}" ) elif len(x.shares) != len(y.shares): raise ValueError( f"Both RSTensors should have equal number of shares {len(x.shares)} {len(y.shares)}" ) elif x.ring_size != y.ring_size: raise ValueError( f"Both RSTensors should have same ring_size {x.ring_size} {y.ring_size}" ) session_uuid = x.session_uuid if session_uuid is not None: session = sympc.session.get_session(str(x.session_uuid)) else: session = Session(config=x.config, ring_size=x.ring_size) session.nr_parties = 1 return y, session def __apply_public_op( self, y: Union[torch.Tensor, float, int], op_str: str ) -> "ReplicatedSharedTensor": y, session = ReplicatedSharedTensor.sanity_checks(self, y) op = ReplicatedSharedTensor.get_op(self.ring_size, op_str) shares = copy.deepcopy(self.shares) if op_str in {"add", "sub"}: if session.rank != 1: idx = (session.nr_parties - session.rank) % session.nr_parties shares[idx] = op(shares[idx], y.shares[0]) else: raise ValueError(f"{op_str} not supported") result = ReplicatedSharedTensor( ring_size=self.ring_size, session_uuid=self.session_uuid, config=self.config, ) result.shares = shares return result def __apply_private_op( self, y: "ReplicatedSharedTensor", op_str: str ) -> "ReplicatedSharedTensor": y, session = ReplicatedSharedTensor.sanity_checks(self, y) op = ReplicatedSharedTensor.get_op(self.ring_size, op_str) shares = [] if op_str in {"add", "sub"}: for x_share, y_share in zip(self.shares, y.shares): shares.append(op(x_share, y_share)) else: raise ValueError(f"{op_str} not supported") result = ReplicatedSharedTensor( ring_size=self.ring_size, session_uuid=self.session_uuid, config=self.config, ) result.shares = shares return result def __apply_op( self, y: Union["ReplicatedSharedTensor", torch.Tensor, float, int], op_str: str, ) -> "ReplicatedSharedTensor": is_private = isinstance(y, ReplicatedSharedTensor) if is_private: result = self.__apply_private_op(y, op_str) else: result = self.__apply_public_op(y, op_str) return result def add( self, y: Union[int, float, torch.Tensor, "ReplicatedSharedTensor"] ) -> "ReplicatedSharedTensor": return self.__apply_op(y, "add") def sub( self, y: Union[int, float, torch.Tensor, "ReplicatedSharedTensor"] ) -> "ReplicatedSharedTensor": return self.__apply_op(y, "sub") def rsub( self, y: Union[int, float, torch.Tensor, "ReplicatedSharedTensor"] ) -> "ReplicatedSharedTensor": return self.__apply_op(y, "sub") def mul( self, y: Union[int, float, torch.Tensor, "ReplicatedSharedTensor"] ) -> "ReplicatedSharedTensor": y_tensor, session = self.sanity_checks(self, y) is_private = isinstance(y, ReplicatedSharedTensor) op_str = "mul" op = ReplicatedSharedTensor.get_op(self.ring_size, op_str) if is_private: if session.nr_parties == 3: from sympc.protocol import Falcon result = [Falcon.multiplication_protocol(self, y_tensor, op_str)] else: raise ValueError( "Private mult between ReplicatedSharedTensors is allowed only for 3 parties" ) else: result = 
[op(share, y_tensor.shares[0]) for share in self.shares] tensor = ReplicatedSharedTensor( ring_size=self.ring_size, session_uuid=self.session_uuid, config=self.config ) tensor.shares = result return tensor def matmul( self, y: Union[int, float, torch.Tensor, "ReplicatedSharedTensor"] ) -> "ReplicatedSharedTensor": y_tensor, session = self.sanity_checks(self, y) is_private = isinstance(y, ReplicatedSharedTensor) op_str = "matmul" if is_private: if session.nr_parties == 3: from sympc.protocol import Falcon result = [Falcon.multiplication_protocol(self, y_tensor, op_str)] else: raise ValueError( "Private matmul between ReplicatedSharedTensors is allowed only for 3 parties" ) else: result = [ operator.matmul(share, y_tensor.shares[0]) for share in self.shares ] tensor = ReplicatedSharedTensor( ring_size=self.ring_size, session_uuid=self.session_uuid, config=self.config ) tensor.shares = result return tensor def truediv(self, y: Union[int, torch.Tensor]) -> "ReplicatedSharedTensor": if not isinstance(y, (int, torch.LongTensor)): raise ValueError( "Div works (for the moment) only with integers and LongTensor!" ) res = ReplicatedSharedTensor( session_uuid=self.session_uuid, config=self.config, ring_size=self.ring_size ) res.shares = [share // y for share in self.shares] return res def rshift(self, y: int) -> "ReplicatedSharedTensor": if not isinstance(y, int): raise ValueError("Right Shift works only with integers!") ring_bits = get_nr_bits(self.ring_size) if y < 0 or y > ring_bits - 1: raise ValueError( f"Invalid value for right shift: {y}, must be in range:[0,{ring_bits-1}]" ) res = ReplicatedSharedTensor( session_uuid=self.session_uuid, config=self.config, ring_size=self.ring_size ) res.shares = [share >> y for share in self.shares] return res def bit_extraction(self, pos: int = 0) -> "ReplicatedSharedTensor": ring_bits = get_nr_bits(self.ring_size) if pos < 0 or pos > ring_bits - 1: raise ValueError( f"Invalid position for bit_extraction: {pos}, must be in range:[0,{ring_bits-1}]" ) shares = [] bit_mask = torch.ones(self.shares[0].shape, dtype=self.shares[0].dtype) << pos shares = [share & bit_mask for share in self.shares] rst = ReplicatedSharedTensor( shares=shares, session_uuid=self.session_uuid, config=Config(encoder_base=1, encoder_precision=0), ring_size=2, ) return rst def rmatmul(self, y): raise NotImplementedError def xor( self, y: Union[int, torch.Tensor, "ReplicatedSharedTensor"] ) -> "ReplicatedSharedTensor": if self.ring_size == 2: return self + y elif self.ring_size in RING_SIZE_TO_TYPE: return self + y - (self * y * 2) else: raise ValueError(f"The ring_size {self.ring_size} is not supported.") def lt(self, y): raise NotImplementedError def gt(self, y): raise NotImplementedError def eq(self, y: Any) -> bool: if not (torch.cat(self.shares) == torch.cat(y.shares)).all(): return False if self.config != y.config: return False if self.session_uuid and y.session_uuid and self.session_uuid != y.session_uuid: return False if self.ring_size != y.ring_size: return False return True
MIT License
tom-ryder/viforsdes
lotka-volterra/lotka_volterra_data_augmentation.py
param_init
python
def param_init(mean, std_init):
    std = tf.log(tf.exp(std_init) - 1)
    param_mean = tf.Variable(mean)
    param_std = tf.nn.softplus(tf.Variable(std))
    param_dist = tfd.TransformedDistribution(distribution=tfd.Normal(
        loc=param_mean, scale=param_std), bijector=tfb.Exp())
    return param_dist, param_mean, param_std
Init params of SDE as log-normals.
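A usage sketch under the module's own stack (TensorFlow 1.x with `tf.contrib.distributions`, as imported in the context below); the numeric initial values are invented:

import numpy as np
import tensorflow as tf

tfd = tf.contrib.distributions
tfb = tfd.bijectors

# Log-normal variational distribution for a positive SDE parameter,
# initialised near 0.5 with a softplus-parameterised std of roughly 0.1.
theta_dist, theta_mean, theta_std = param_init(np.float32(0.5), np.float32(0.1))
theta_samples = theta_dist.sample(10)   # strictly positive samples, since the bijector is Exp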
https://github.com/tom-ryder/viforsdes/blob/ffe08f9797e6d9efc5e86f101fbacd8f9cd14425/lotka-volterra/lotka_volterra_data_augmentation.py#L29-L38
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
import numpy as np
from lotka_volterra_data import *

tfd = tf.contrib.distributions
tfb = tfd.bijectors

DTYPE = tf.float32
NP_DTYPE = np.float32


def sample_squeeze(dist, p, dt, T):
    dim2 = int(T / dt)
    sample = dist.sample([p, 1])
    return sample, tf.reshape(tf.tile(sample, [1, dim2]), [-1, 1])
MIT License
brainiak/brainiak
brainiak/isc.py
_permute_two_sample_iscs
python
def _permute_two_sample_iscs(iscs, group_parameters, i, pairwise=False,
                             summary_statistic='median',
                             exact_permutations=None, prng=None):
    # Shuffle the group assignment (or take the exact permutation for iteration i)
    if exact_permutations:
        group_shuffler = np.array(exact_permutations[i])
    elif not exact_permutations and pairwise:
        group_shuffler = prng.permutation(np.arange(
            len(np.array(group_parameters['group_assignment'])[
                group_parameters['sorter']])))
    elif not exact_permutations and not pairwise:
        group_shuffler = prng.permutation(np.arange(
            len(group_parameters['group_assignment'])))

    # Apply the shuffle to build a per-ISC group selector
    if pairwise:
        group_shuffled = group_parameters['group_matrix'][
            group_shuffler, :][:, group_shuffler]
        group_selector = squareform(group_shuffled[
            group_parameters['unsorter'], :][:, group_parameters['unsorter']],
            checks=False)
    elif not pairwise:
        group_selector = np.array(
            group_parameters['group_assignment'])[group_shuffler]

    # Permuted between-group difference of the summary statistic
    isc_sample = (compute_summary_statistic(
                      iscs[group_selector == group_parameters['group_labels'][0], :],
                      summary_statistic=summary_statistic, axis=0)
                  - compute_summary_statistic(
                      iscs[group_selector == group_parameters['group_labels'][1], :],
                      summary_statistic=summary_statistic, axis=0))

    return isc_sample
Applies two-sample permutations to ISC data

Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.

Parameters
----------
iscs : ndarray or list
    ISC values

group_parameters : dict
    Dictionary of group parameters

i : int
    Permutation iteration

pairwise : bool, default: False
    Indicator of pairwise or leave-one-out, should match ISCs variable

summary_statistic : str, default: 'median'
    Summary statistic, either 'median' (default) or 'mean'

exact_permutations : list
    List of permutations

prng = None or np.random.RandomState, default: None
    Initial random seed

Returns
-------
isc_sample : ndarray
    Array of permuted ISC values
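The leave-one-out branch boils down to shuffling group labels and differencing a group-wise summary; a standalone NumPy sketch of that idea (labels, data, and the plain mean standing in for the Fisher-z mean/median are illustrative only):

import numpy as np

prng = np.random.RandomState(0)
labels = np.array([0, 0, 0, 1, 1, 1])   # group assignment for 6 subjects
iscs = prng.rand(6, 4)                  # 6 subjects x 4 voxels of ISC values

shuffler = prng.permutation(len(labels))           # one permutation iteration
selector = labels[shuffler]                        # permuted group selector
isc_sample = (iscs[selector == 0].mean(axis=0)
              - iscs[selector == 1].mean(axis=0))  # permuted group difference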
https://github.com/brainiak/brainiak/blob/ee093597c6c11597b0a59e95b48d2118e40394a5/brainiak/isc.py#L968-L1054
import numpy as np import logging from scipy.spatial.distance import squareform from itertools import combinations, permutations, product from brainiak.fcma.util import compute_correlation from brainiak.utils.utils import (array_correlation, phase_randomize, p_from_null, _check_timeseries_input) logger = logging.getLogger(__name__) __all__ = [ "bootstrap_isc", "compute_summary_statistic", "isfc", "isc", "permutation_isc", "phaseshift_isc", "squareform_isfc", "timeshift_isc", ] MAX_RANDOM_SEED = 2**32 - 1 def isc(data, pairwise=False, summary_statistic=None, tolerate_nans=True): data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data) if n_subjects == 2: logger.info("Only two subjects! Simply computing Pearson correlation.") summary_statistic = None if tolerate_nans: mean = np.nanmean else: mean = np.mean data, mask = _threshold_nans(data, tolerate_nans) if n_subjects == 2: iscs_stack = array_correlation(data[..., 0], data[..., 1])[np.newaxis, :] elif pairwise: data = np.swapaxes(data, 2, 0) voxel_iscs = [] for v in np.arange(data.shape[1]): voxel_data = data[:, v, :] iscs = squareform(np.corrcoef(voxel_data), checks=False) voxel_iscs.append(iscs) iscs_stack = np.column_stack(voxel_iscs) elif not pairwise: iscs_stack = [] for s in np.arange(n_subjects): iscs_stack.append(array_correlation( data[..., s], mean(np.delete(data, s, axis=2), axis=2))) iscs_stack = np.array(iscs_stack) iscs = np.full((iscs_stack.shape[0], n_voxels), np.nan) iscs[:, np.where(mask)[0]] = iscs_stack if summary_statistic: iscs = compute_summary_statistic(iscs, summary_statistic=summary_statistic, axis=0)[np.newaxis, :] if iscs.shape[0] == 1: iscs = iscs[0] return iscs def isfc(data, targets=None, pairwise=False, summary_statistic=None, vectorize_isfcs=True, tolerate_nans=True): data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data) targets, t_n_TRs, t_n_voxels, t_n_subejcts, symmetric = ( _check_targets_input(targets, data)) if not symmetric: pairwise = False if tolerate_nans: mean = np.nanmean else: mean = np.mean data, mask = _threshold_nans(data, tolerate_nans) targets, targets_mask = _threshold_nans(targets, tolerate_nans) if symmetric and n_subjects == 2: isfcs = compute_correlation(np.ascontiguousarray(data[..., 0].T), np.ascontiguousarray(data[..., 1].T), return_nans=True) isfcs = (isfcs + isfcs.T) / 2 isfcs = isfcs[..., np.newaxis] summary_statistic = None logger.info("Only two subjects! 
Computing ISFC between them.") elif pairwise: isfcs = [] for pair in combinations(np.arange(n_subjects), 2): isfc_pair = compute_correlation(np.ascontiguousarray( data[..., pair[0]].T), np.ascontiguousarray( targets[..., pair[1]].T), return_nans=True) if symmetric: isfc_pair = (isfc_pair + isfc_pair.T) / 2 isfcs.append(isfc_pair) isfcs = np.dstack(isfcs) else: data = np.rollaxis(data, 2, 0) targets = np.rollaxis(targets, 2, 0) isfcs = [compute_correlation(np.ascontiguousarray(subject.T), np.ascontiguousarray(mean( np.delete(targets, s, axis=0), axis=0).T), return_nans=True) for s, subject in enumerate(data)] isfcs = np.dstack([(isfc_matrix + isfc_matrix.T) / 2 if symmetric else isfc_matrix for isfc_matrix in isfcs]) isfcs_all = np.full((n_voxels, t_n_voxels, isfcs.shape[2]), np.nan) isfcs_all[np.ix_(np.where(mask)[0], np.where(targets_mask)[0])] = isfcs isfcs = np.moveaxis(isfcs_all, 2, 0) if summary_statistic: isfcs = compute_summary_statistic(isfcs, summary_statistic=summary_statistic, axis=0) if isfcs.shape[0] == 1: isfcs = isfcs[0] if vectorize_isfcs and symmetric: isfcs, iscs = squareform_isfc(isfcs) return isfcs, iscs else: return isfcs def _check_isc_input(iscs, pairwise=False): if type(iscs) == list: iscs = np.array(iscs)[:, np.newaxis] elif isinstance(iscs, np.ndarray): if iscs.ndim == 1: iscs = iscs[:, np.newaxis] if pairwise: try: test_square = squareform(iscs[:, 0], force='tomatrix') n_subjects = test_square.shape[0] except ValueError: raise ValueError("For pairwise input, ISCs must be the " "vectorized triangle of a square matrix.") elif not pairwise: n_subjects = iscs.shape[0] n_voxels = iscs.shape[1] logger.info("Assuming {0} subjects with and {1} " "voxel(s) or ROI(s) in bootstrap ISC test.".format(n_subjects, n_voxels)) return iscs, n_subjects, n_voxels def _check_targets_input(targets, data): if isinstance(targets, np.ndarray) or isinstance(targets, list): targets, n_TRs, n_voxels, n_subjects = ( _check_timeseries_input(targets)) if data.shape[0] != n_TRs: raise ValueError("Targets array must have same number of " "TRs as input data") if data.shape[2] != n_subjects: raise ValueError("Targets array must have same number of " "subjects as input data") symmetric = False else: targets = data n_TRs, n_voxels, n_subjects = data.shape symmetric = True return targets, n_TRs, n_voxels, n_subjects, symmetric def compute_summary_statistic(iscs, summary_statistic='mean', axis=None): if summary_statistic not in ('mean', 'median'): raise ValueError("Summary statistic must be 'mean' or 'median'") if summary_statistic == 'mean': statistic = np.tanh(np.nanmean(np.arctanh(iscs), axis=axis)) elif summary_statistic == 'median': statistic = np.nanmedian(iscs, axis=axis) return statistic def squareform_isfc(isfcs, iscs=None): if not type(iscs) == np.ndarray and isfcs.shape[-2] == isfcs.shape[-1]: if isfcs.ndim == 2: isfcs = isfcs[np.newaxis, ...] 
if isfcs.ndim == 3: iscs = np.diagonal(isfcs, axis1=1, axis2=2) isfcs = np.vstack([squareform(isfc, checks=False)[np.newaxis, :] for isfc in isfcs]) else: raise ValueError("Square (redundant) ISFCs must be square " "with multiple subjects or pairs of subjects " "indexed by the first dimension") if isfcs.shape[0] == iscs.shape[0] == 1: isfcs, iscs = isfcs[0], iscs[0] return isfcs, iscs else: if isfcs.ndim == iscs.ndim == 1: isfcs, iscs = isfcs[np.newaxis, :], iscs[np.newaxis, :] isfcs_stack = [] for isfc, isc in zip(isfcs, iscs): isfc_sq = squareform(isfc, checks=False) np.fill_diagonal(isfc_sq, isc) isfcs_stack.append(isfc_sq[np.newaxis, ...]) isfcs = np.vstack(isfcs_stack) if isfcs.shape[0] == 1: isfcs = isfcs[0] return isfcs def _threshold_nans(data, tolerate_nans): nans = np.all(np.any(np.isnan(data), axis=0), axis=1) if tolerate_nans is True: logger.info("ISC computation will tolerate all NaNs when averaging") elif type(tolerate_nans) is float: if not 0.0 <= tolerate_nans <= 1.0: raise ValueError("If threshold to tolerate NaNs is a float, " "it must be between 0.0 and 1.0; got {0}".format( tolerate_nans)) nans += ~(np.sum(~np.any(np.isnan(data), axis=0), axis=1) >= data.shape[-1] * tolerate_nans) logger.info("ISC computation will tolerate voxels with at least " "{0} non-NaN values: {1} voxels do not meet " "threshold".format(tolerate_nans, np.sum(nans))) else: logger.info("ISC computation will not tolerate NaNs when averaging") mask = ~nans data = data[:, mask, :] return data, mask def bootstrap_isc(iscs, pairwise=False, summary_statistic='median', n_bootstraps=1000, ci_percentile=95, side='right', random_state=None): iscs, n_subjects, n_voxels = _check_isc_input(iscs, pairwise=pairwise) if summary_statistic not in ('mean', 'median'): raise ValueError("Summary statistic must be 'mean' or 'median'") observed = compute_summary_statistic(iscs, summary_statistic=summary_statistic, axis=0) distribution = [] for i in np.arange(n_bootstraps): if isinstance(random_state, np.random.RandomState): prng = random_state else: prng = np.random.RandomState(random_state) subject_sample = sorted(prng.choice(np.arange(n_subjects), size=n_subjects)) if pairwise: isc_sample = [] for voxel_iscs in iscs.T: voxel_iscs = squareform(voxel_iscs, force='tomatrix') np.fill_diagonal(voxel_iscs, 1) voxel_sample = voxel_iscs[subject_sample, :][:, subject_sample] voxel_sample = squareform(voxel_sample, checks=False) voxel_sample[voxel_sample == 1.] 
= np.NaN isc_sample.append(voxel_sample) isc_sample = np.column_stack(isc_sample) elif not pairwise: isc_sample = iscs[subject_sample, :] distribution.append(compute_summary_statistic( isc_sample, summary_statistic=summary_statistic, axis=0)) random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED)) distribution = np.array(distribution) ci = (np.percentile(distribution, (100 - ci_percentile)/2, axis=0), np.percentile(distribution, ci_percentile + (100 - ci_percentile)/2, axis=0)) shifted = distribution - observed p = p_from_null(observed, shifted, side=side, exact=False, axis=0) return observed, ci, p, distribution def _check_group_assignment(group_assignment, n_subjects): if type(group_assignment) == list: pass elif type(group_assignment) == np.ndarray: group_assignment = group_assignment.tolist() else: logger.info("No group assignment provided, " "performing one-sample test.") if group_assignment and len(group_assignment) != n_subjects: raise ValueError("Group assignments ({0}) " "do not match number of subjects ({1})!".format( len(group_assignment), n_subjects)) return group_assignment def _get_group_parameters(group_assignment, n_subjects, pairwise=False): group_parameters = {'group_assignment': group_assignment, 'n_subjects': n_subjects, 'group_labels': None, 'groups': None, 'sorter': None, 'unsorter': None, 'group_matrix': None, 'group_selector': None} if group_assignment and len(np.unique(group_assignment)) == 2: group_parameters['n_groups'] = 2 group_labels = np.unique(group_assignment) groups = {group_labels[0]: group_assignment.count(group_labels[0]), group_labels[1]: group_assignment.count(group_labels[1])} if pairwise: sorter = np.array(group_assignment).argsort() unsorter = np.array(group_assignment).argsort().argsort() upper_left = np.full((groups[group_labels[0]], groups[group_labels[0]]), group_labels[0]) upper_right = np.full((groups[group_labels[0]], groups[group_labels[1]]), np.nan) lower_left = np.full((groups[group_labels[1]], groups[group_labels[0]]), np.nan) lower_right = np.full((groups[group_labels[1]], groups[group_labels[1]]), group_labels[1]) group_matrix = np.vstack((np.hstack((upper_left, upper_right)), np.hstack((lower_left, lower_right)))) np.fill_diagonal(group_matrix, np.nan) group_parameters['group_matrix'] = group_matrix group_parameters['group_selector'] = squareform( group_matrix[unsorter, :][:, unsorter], checks=False) group_parameters['sorter'] = sorter group_parameters['unsorter'] = unsorter else: group_parameters['group_selector'] = group_assignment group_parameters['groups'] = groups group_parameters['group_labels'] = group_labels elif not group_assignment or len(np.unique(group_assignment)) == 1: group_parameters['n_groups'] = 1 if pairwise: group_parameters['group_matrix'] = np.ones(( group_parameters['n_subjects'], group_parameters['n_subjects'])) elif len(np.unique(group_assignment)) > 2: raise ValueError("This test is not valid for more than " "2 groups! 
(got {0})".format( len(np.unique(group_assignment)))) else: raise ValueError("Invalid group assignments!") return group_parameters def _permute_one_sample_iscs(iscs, group_parameters, i, pairwise=False, summary_statistic='median', group_matrix=None, exact_permutations=None, prng=None): if exact_permutations: sign_flipper = np.array(exact_permutations[i]) else: sign_flipper = prng.choice([-1, 1], size=group_parameters['n_subjects'], replace=True) if pairwise: matrix_flipped = (group_parameters['group_matrix'] * sign_flipper * sign_flipper[ :, np.newaxis]) sign_flipper = squareform(matrix_flipped, checks=False) isc_flipped = iscs * sign_flipper[:, np.newaxis] isc_sample = compute_summary_statistic( isc_flipped, summary_statistic=summary_statistic, axis=0) return isc_sample
Apache License 2.0
inferno-pytorch/speedrun
speedrun/core.py
BaseExperiment.update_git_revision
python
def update_git_revision(self, overwrite=False):
    try:
        gitcmd = ["git", "rev-parse", "--verify", "HEAD"]
        gitrev = subprocess.check_output(gitcmd).decode('latin1').strip()
    except subprocess.CalledProcessError:
        gitrev = "none"
    if not overwrite and self.get('git_rev', None) is not None:
        pass
    else:
        self.set("git_rev", gitrev)
    return self
Updates the configuration with a 'git_rev' field containing the current HEAD revision.

Parameters
----------
overwrite : bool
    If a 'git_rev' field already exists, whether to overwrite it.

Returns
-------
BaseExperiment
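A minimal usage sketch (`MyExperiment` is a hypothetical `BaseExperiment` subclass; nothing beyond the methods shown in this record and its context is assumed):

exp = MyExperiment()
exp.update_git_revision()
print(exp.get('git_rev'))   # the current HEAD SHA, or "none" when `git rev-parse` fails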
https://github.com/inferno-pytorch/speedrun/blob/3570bf47196fb9a1406f600d055160aec6a71e3c/speedrun/core.py#L712-L735
import os import shutil import sys import ast import subprocess import yaml from .utils.py_utils import Namespace, MacroReader try: from torch import save from torch import load except ImportError: try: import dill serializer = dill except ImportError: dill = None import pickle serializer = pickle def save(obj, file_path): with open(file_path) as f: serializer.dump(obj, f, protocol=serializer.HIGHEST_PROTOCOL) def load(file_path): with open(file_path) as f: out = serializer.load(f) return out class BaseExperiment(object): DEFAULT_DISPATCH = None def __init__(self, experiment_directory=None): self._experiment_directory = None self._step = None self._epoch = None self._config = {} self._meta_config = {'exclude_attrs_from_save': [], 'stateless_attributes': [], 'stateful_attributes': []} self._cache = {} self._argv = None self._default_dispatch = None self.experiment_directory = experiment_directory super(BaseExperiment, self).__init__() @property def step(self): if self._step is None: self._step = 0 return self._step def next_step(self): self._step = 0 if self._step is None else self._step self._step += 1 return self @property def epoch(self): if self._epoch is None: self._epoch = 0 return self._epoch def next_epoch(self): self._epoch = 0 if self._epoch is None else self._epoch self._epoch += 1 return self @property def experiment_directory(self): return self._experiment_directory @experiment_directory.setter def experiment_directory(self, value): if value is not None: os.makedirs(os.path.join(value, 'Configurations'), exist_ok=True) os.makedirs(os.path.join(value, 'Logs'), exist_ok=True) os.makedirs(os.path.join(value, 'Weights'), exist_ok=True) os.makedirs(os.path.join(value, 'Plots'), exist_ok=True) self._experiment_directory = value @property def log_directory(self): if self._experiment_directory is not None: return os.path.join(self._experiment_directory, 'Logs') else: return None @property def checkpoint_directory(self): if self._experiment_directory is not None: return os.path.join(self._experiment_directory, 'Weights') else: return None @property def checkpoint_path(self): return os.path.join(self.checkpoint_directory, f'ckpt_iter_{self.step}.pt') @property def plot_directory(self): if self._experiment_directory is not None: return os.path.join(self._experiment_directory, 'Plots') else: return None @property def configuration_directory(self): if self._experiment_directory is not None: return os.path.join(self._experiment_directory, 'Configurations') else: return None def inherit_configuration(self, from_experiment_directory, file_name='train_config.yml', read=True): source_path = os.path.join(from_experiment_directory, 'Configurations', file_name) target_path = os.path.join(self.configuration_directory, file_name) shutil.copy(source_path, target_path) if read: self.read_config_file() return self def dump_configuration(self, file_name='train_config.yml'): dump_path = os.path.join(self.configuration_directory, file_name) with open(dump_path, 'w') as f: yaml.dump(self._config, f) return self def record_args(self): self._argv = sys.argv return self def get_arg(self, tag, default=None, ensure_exists=False): assert self._argv is not None, "Args not parsed yet. Have you called `self.record_args()`?" if not isinstance(tag, str): assert isinstance(tag, int) if ensure_exists: assert tag < len(self._argv), f"Accessing arg at index {tag}, but only {len(self._argv)} args available." 
return default if tag >= len(self._argv) else self._argv[tag] if f'--{tag}' in self._argv: value = self._argv[self._argv.index(f'--{tag}') + 1] try: value = ast.literal_eval(value) except (ValueError, SyntaxError): pass return value else: if ensure_exists: raise KeyError(f"Argument --{tag} is not provided, but it should be.") return default def update_configuration_from_args(self): for arg in self._argv: if arg.startswith('--config.'): tag = arg.replace('--config.', '').replace('.', '/') value = self.get_arg(arg.lstrip('--'), ensure_exists=True) self.set(tag, value) return self def register_unpickleable(self, *attributes): self._meta_config['exclude_attrs_from_save'].extend(list(attributes)) return self def checkpoint(self, force=True): if force: do_checkpoint = True else: do_checkpoint = (self.step % self.get('training/checkpoint_every')) == 0 if do_checkpoint: self_dict = {key: val for key, val in self.__dict__.items() if key not in self._meta_config['exclude_attrs_from_save']} save(self_dict, os.path.join(self.checkpoint_directory, f'checkpoint_iter_{self.step}.pt')) return self def load_from_checkpoint(self, step=None): for filename in os.listdir(self.checkpoint_directory): if filename.startswith('checkpoint_iter_') and filename.endswith('.pt'): try: ckpt_step = int(filename.strip('checkpoint_iter_.pt')) except ValueError: continue if ckpt_step == step: self_dict = load(filename) self.__dict__.update(self_dict) break else: raise FileNotFoundError(f"No checkpoint for step {step} found in " f"{self.checkpoint_directory}.") return self def get(self, tag, default=None, ensure_exists=False): paths = tag.split("/") data = self._config for path in paths: if ensure_exists: assert path in data data = data.get(path, default if path == paths[-1] else {}) return data def set(self, tag, value): paths = tag.split('/') data = self._config for path in paths[:-1]: if path in data: data = data[path] else: data.update({path: {}}) data = data[path] data[paths[-1]] = value return self @property def cache_keys(self): return list(self._cache.keys()) def read_from_cache(self, tag, default=None, ensure_exists=False): if ensure_exists: assert tag in self._cache return self._cache.get(tag, default) def write_to_cache(self, tag, value): self._cache.update({tag: value}) return self def accumulate_in_cache(self, tag, value, accumulate_fn=None): if tag not in self._cache: self.write_to_cache(tag, value) else: if accumulate_fn is None: self._cache[tag] += value else: assert callable(accumulate_fn) self._cache[tag] = accumulate_fn(self._cache[tag], value) return self def clear_in_cache(self, tag): if tag not in self._cache: pass else: del self._cache[tag] return self def clear_cache(self): self._cache.clear() return self def bundle(self, **kwargs): return Namespace(**kwargs) def read_config_file(self, file_name='train_config.yml', path=None): path = os.path.join(self.configuration_directory, file_name) if path is None else path if not os.path.exists(path): raise FileNotFoundError with open(path, 'r') as f: self._config = yaml.load(f, Loader=yaml.FullLoader) return self def read_macro(self, path=None): if path is None: path = self.get_arg('macro') if path is None: return for _path in path.split(":"): with open(_path, 'r') as f: macro = yaml.load(f, Loader=yaml.FullLoader) MacroReader.update_dict(self._config, macro, copy=False) return self def parse_experiment_directory(self): experiment_directory = self.get_arg(1) if experiment_directory is None: raise RuntimeError("Can't find experiment directory in command line 
args.") self.experiment_directory = experiment_directory return self def purge_existing_experiment_directory(self, experiment_directory=None): experiment_directory = self.get_arg(1) if experiment_directory is None else experiment_directory if experiment_directory is None: raise RuntimeError("No experiment directory found to be purged.") if os.path.exists(experiment_directory): shutil.rmtree(experiment_directory) return self @staticmethod def register_hook(fn, key): setattr(fn, f'__is_speedrun_{key}_hook', True) return fn def execute_hooks(self, key): hook_names = [attry for attry in dir(type(self)) if getattr(getattr(type(self), attry), f'__is_speedrun_{key}_hook', False)] return {hook_name: getattr(self, hook_name)() for hook_name in hook_names} def run(self, *args, **kwargs): try: self.execute_pre_dispatch_hooks() return self.dispatch(self.get_dispatch_key(), *args, **kwargs) finally: self.clean_up() def dispatch(self, key, *args, **kwargs): assert hasattr(self, key), f"Trying to dispatch method {key}, but it doesn't exist." return getattr(self, key)(*args, **kwargs) def get_dispatch_key(self): if self._argv is not None and self.get_arg('dispatch', None) is not None: return self.get_arg('dispatch', ensure_exists=True) elif self.find_default_dispatch() is not None: return self.find_default_dispatch() elif self._default_dispatch is not None: return self._default_dispatch elif self.DEFAULT_DISPATCH is not None: return self.DEFAULT_DISPATCH else: raise RuntimeError("No default dispatch could be found. Please set it first.") @staticmethod def register_default_dispatch(fn): setattr(fn, '__is_speedrun_default_dispatch', True) return fn def set_default_dispatch(self, method_name): assert method_name in dir(type(self)), f"Method name {method_name} not found in list of attributes." assert callable(getattr(type(self), method_name)), f"Default dispatch method name {method_name} should be callable." self._default_dispatch = method_name return self def get_default_dispatch(self): return self._default_dispatch def find_default_dispatch(self): for attry in dir(type(self)): if getattr(getattr(type(self), attry), '__is_speedrun_default_dispatch', False): return attry @staticmethod def register_pre_dispatch_hook(fn): return BaseExperiment.register_hook(fn, 'pre_dispatch') def execute_pre_dispatch_hooks(self): return self.execute_hooks('pre_dispatch') def clean_up(self): pass
Apache License 2.0
fastnlp/fastnlp
reproduction/multi-criteria-cws/optm.py
NoamOpt.rate
python
def rate(self, step=None):
    if step is None:
        step = self._step
    lr = self.factor * (
        self.model_size ** (-0.5)
        * min(step ** (-0.5), step * self.warmup ** (-1.5))
    )
    return lr
Implement `lrate` above
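The method computes the Noam schedule lrate = factor * model_size^(-0.5) * min(step^(-0.5), step * warmup^(-1.5)). A quick numeric sanity check with invented settings (model_size=512, factor=1, warmup=4000, and a throwaway Adam optimizer just to satisfy the constructor of the class shown in the context below):

import torch
from torch import nn, optim

opt = NoamOpt(512, 1, 4000, optim.Adam([nn.Parameter(torch.zeros(1))], lr=0))
print(opt.rate(step=1))        # ~1.75e-7 : linear warmup region
print(opt.rate(step=4000))     # ~6.99e-4 : peak, reached at step == warmup
print(opt.rate(step=16000))    # ~3.49e-4 : inverse-square-root decay afterwards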
https://github.com/fastnlp/fastnlp/blob/3cb01d15d8bc7d10f292b12e8fa803087d37e887/reproduction/multi-criteria-cws/optm.py#L25-L34
import torch
import torch.optim as optim


class NoamOpt:
    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    def step(self):
        self._step += 1
        rate = self.rate()
        for p in self.optimizer.param_groups:
            p["lr"] = rate
        self._rate = rate
        self.optimizer.step()
Apache License 2.0
openstack/cinder
cinder/quota.py
DbQuotaDriver.get_class_quotas
python
def get_class_quotas(self, context, resources, quota_class, defaults=True):
    quotas = {}
    default_quotas = {}
    class_quotas = db.quota_class_get_all_by_name(context, quota_class)
    if defaults:
        default_quotas = db.quota_class_get_defaults(context)
    for resource in resources.values():
        if resource.name in class_quotas:
            quotas[resource.name] = class_quotas[resource.name]
            continue
        if defaults:
            quotas[resource.name] = default_quotas.get(resource.name,
                                                       resource.default)
    return quotas
Given list of resources, retrieve the quotas for given quota class.

:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param quota_class: The name of the quota class to return quotas for.
:param defaults: If True, the default value will be reported if there is
                 no specific value for the resource.
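The merge rule above is simply "per-class override wins, otherwise report the default"; a standalone sketch of that logic with invented resource names and values:

class_quotas = {'volumes': 20}                                        # overrides stored for the class
default_quotas = {'volumes': 10, 'snapshots': 10, 'gigabytes': 1000}  # class defaults

quotas = {}
for name, default in default_quotas.items():
    quotas[name] = class_quotas.get(name, default)
# quotas == {'volumes': 20, 'snapshots': 10, 'gigabytes': 1000}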
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/quota.py#L139-L166
import datetime from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import importutils from oslo_utils import timeutils from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) quota_opts = [ cfg.IntOpt('quota_volumes', default=10, help='Number of volumes allowed per project'), cfg.IntOpt('quota_snapshots', default=10, help='Number of volume snapshots allowed per project'), cfg.IntOpt('quota_consistencygroups', default=10, help='Number of consistencygroups allowed per project'), cfg.IntOpt('quota_groups', default=10, help='Number of groups allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='Total amount of storage, in gigabytes, allowed ' 'for volumes and snapshots per project'), cfg.IntOpt('quota_backups', default=10, help='Number of volume backups allowed per project'), cfg.IntOpt('quota_backup_gigabytes', default=1000, help='Total amount of storage, in gigabytes, allowed ' 'for backups per project'), cfg.IntOpt('reservation_expire', default=86400, help='Number of seconds until a reservation expires'), cfg.IntOpt('reservation_clean_interval', default='$reservation_expire', help='Interval between periodic task runs to clean expired ' 'reservations in seconds.'), cfg.IntOpt('until_refresh', default=0, help='Count of reservations until usage is refreshed'), cfg.IntOpt('max_age', default=0, help='Number of seconds between subsequent usage refreshes'), cfg.StrOpt('quota_driver', default="cinder.quota.DbQuotaDriver", help='Default driver to use for quota checks'), cfg.BoolOpt('use_default_quota_class', default=True, help='Enables or disables use of default quota class ' 'with default quota.'), cfg.IntOpt('per_volume_size_limit', default=-1, help='Max size allowed per volume, in gigabytes'), ] CONF = cfg.CONF CONF.register_opts(quota_opts) class DbQuotaDriver(object): def get_by_project(self, context, project_id, resource_name): return db.quota_get(context, project_id, resource_name) def get_by_class(self, context, quota_class, resource_name): return db.quota_class_get(context, quota_class, resource_name) def get_default(self, context, resource, project_id): default_quotas = db.quota_class_get_defaults(context) return default_quotas.get(resource.name, resource.default) def get_defaults(self, context, resources, project_id=None): quotas = {} default_quotas = {} if CONF.use_default_quota_class: default_quotas = db.quota_class_get_defaults(context) for resource in resources.values(): if default_quotas: if resource.name not in default_quotas: versionutils.report_deprecated_feature(LOG, _( "Default quota for resource: %(res)s is set " "by the default quota flag: quota_%(res)s, " "it is now deprecated. Please use the " "default quota class for default " "quota.") % {'res': resource.name}) quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas
Apache License 2.0
dpup/git-workflow
util.py
get_editor
python
def get_editor(repo):
    return (repo.git.config("core.editor")
            or os.environ.get("GIT_EDITOR")
            or os.environ.get("VISUAL")
            or os.environ.get("EDITOR", "vi"))
Returns the editor from git config (core.editor) or env vars.
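A small usage sketch (it assumes the current working directory is inside a git work tree; GitPython is the `git` import used throughout this module):

import git

repo = git.Repo('.')
editor = get_editor(repo)   # core.editor, then GIT_EDITOR, VISUAL, EDITOR, finally "vi"
print(editor)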
https://github.com/dpup/git-workflow/blob/ba7c875d973ac46ea4c377228da02ed17130cc7b/util.py#L110-L118
import json import getpass import git import os.path import os import sys import signal import subprocess import tempfile import logging logging.basicConfig(format='%(message)s', level=logging.INFO) signal.signal(signal.SIGINT, lambda x,y: os._exit(1)) BOLD = '\033[1m' ITALIC = '\033[3m' UNDERLINE = '\033[4m' RED = '\033[31m' GREEN = '\033[32m' YELLOW = '\033[33m' BLUE = '\033[34m' MAGENTA = '\033[35m' CYAN = '\033[36m' RESET = '\033[0m' def main_branch_name(repo): ref = git.refs.symbolic.SymbolicReference(repo, 'refs/remotes/origin/HEAD') name = ref.ref.name return name[len('origin/'):] def fatal_if_dirty(repo): info('Checking for pending changes') if repo.is_dirty(): warn('You have uncommitted changes, proceeding automatically would be dangerous.') info(repo.git.status('-s')) exit(1) def update_main(repo, initial_branch): main = main_branch_name(repo) info('Switching to %s branch' % main) try: repo.heads[main].checkout() except BaseException as e: fatal('Could not checkout %s: %s' % (main, e)) info('Pulling updates for %s branch' % main) try: repo.git.remote('update', '--prune') repo.remotes.origin.pull('--no-tags') except BaseException as e: warn('Failed to update %s: %s' % (main, e)) initial_branch.checkout() c = prompt_y_n('Continue anyway?') if not c: exit(1) def get_branch_name(name): username = get_github_creds()['username'] return '%s/%s' % (username, name) def get_auth_filename(): return os.path.join(os.path.expanduser('~'), '.github-auth') def get_github_creds(): fn = get_auth_filename() if not os.path.isfile(fn): fatal("Missing GitHub credentials. Did you run `git github-login`?") with open(fn) as auth_file: return json.load(auth_file) def get_script_path(): return os.path.dirname(os.path.realpath(sys.argv[0]))
Apache License 2.0
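For orientation, a minimal usage sketch of get_editor; it assumes GitPython is installed and that the script is run inside a git checkout (the "." path is an assumption).

import git
from util import get_editor   # assumes this module is importable as util

repo = git.Repo(".")                      # open the repository in the current directory
editor = get_editor(repo)                 # e.g. "vim", from core.editor or the env vars
print("Using editor:", editor)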
nii-cloud/dodai-compute
nova/scheduler/abstract_scheduler.py
AbstractScheduler._provision_resource_locally
python
def _provision_resource_locally(self, context, build_plan_item, request_spec, kwargs): host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] image = request_spec['image'] instance_type = request_spec.get('instance_type') instance = compute_api.API().create_db_entry_for_new_instance(context, instance_type, image, base_options, None, []) instance_id = instance['id'] kwargs['instance_id'] = instance_id queue = db.queue_get_for(context, "compute", host) params = {"method": "run_instance", "args": kwargs} rpc.cast(context, queue, params) LOG.debug(_("Provisioning locally via compute node %(host)s") % locals())
Create the requested resource in this Zone.
https://github.com/nii-cloud/dodai-compute/blob/d9bea632913c0ddc6f59c6120f60daea369d09cc/nova/scheduler/abstract_scheduler.py#L59-L80
import operator import json import M2Crypto from novaclient import v1_1 as novaclient from novaclient import exceptions as novaclient_exceptions from nova import crypto from nova import db from nova import exception from nova import flags from nova import log as logging from nova import rpc from nova.compute import api as compute_api from nova.scheduler import api from nova.scheduler import driver FLAGS = flags.FLAGS LOG = logging.getLogger('nova.scheduler.abstract_scheduler') class InvalidBlob(exception.NovaException): message = _("Ill-formed or incorrectly routed 'blob' data sent " "to instance create request.") class AbstractScheduler(driver.Scheduler): def _call_zone_method(self, context, method, specs, zones): return api.call_zone_method(context, method, specs=specs, zones=zones)
Apache License 2.0
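For reference, a hypothetical sketch of the argument shapes _provision_resource_locally reads; only the keys accessed in the function above are shown, and every value here is made up.

build_plan_item = {"hostname": "compute-node-01"}   # target compute host for the cast
request_spec = {
    "instance_properties": {},   # base options for create_db_entry_for_new_instance
    "image": {},                 # image metadata dict
    "instance_type": {},         # flavor dict; read with .get(), so it may be absent
}
kwargs = {}                      # run_instance args; "instance_id" is added before rpc.cast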
ddi-lab/generion-middleware
neo/Core/TX/RegisterTransaction.py
RegisterTransaction.DeserializeExclusiveData
python
def DeserializeExclusiveData(self, reader): self.Type = TransactionType.RegisterTransaction self.AssetType = reader.ReadByte() self.Name = reader.ReadVarString() self.Amount = reader.ReadFixed8() self.Precision = reader.ReadByte() self.Owner = ECDSA.Deserialize_Secp256r1(reader) self.Admin = reader.ReadUInt160()
Deserialize full object. Args: reader (neo.IO.BinaryReader): the binary reader to read the serialized transaction data from.
https://github.com/ddi-lab/generion-middleware/blob/bdcb6a01c56e987f6eb1a3b5a35257ba6f777cdd/neo/Core/TX/RegisterTransaction.py#L99-L113
from neo.Core.TX.Transaction import Transaction, TransactionType from neo.Core.AssetType import AssetType from neocore.Cryptography.Crypto import Crypto from neocore.Cryptography.ECCurve import EllipticCurve, ECDSA from neo.Settings import settings from neocore.Fixed8 import Fixed8 class RegisterTransaction(Transaction): def __init__(self, inputs=None, outputs=None, assettype=AssetType.GoverningToken, assetname='', amount=Fixed8(0), precision=0, owner=None, admin=None): super(RegisterTransaction, self).__init__(inputs, outputs) self.Type = TransactionType.RegisterTransaction self.AssetType = assettype self.Name = assetname self.Amount = amount if inputs is not None: self.inputs = inputs else: self.inputs = [] if outputs is not None: self.outputs = outputs else: self.outputs = [] if owner is not None and type(owner) is not EllipticCurve.ECPoint: raise Exception("Invalid owner, must be ECPoint instance") self.Owner = owner self.Admin = admin self.Precision = precision def SystemFee(self): if self.AssetType == AssetType.GoverningToken or self.AssetType == AssetType.UtilityToken: return Fixed8.Zero() return Fixed8(int(settings.REGISTER_TX_FEE)) def GetScriptHashesForVerifying(self): pass
MIT License
wechaty/python-wechaty
src/wechaty/user/message.py
Message.to_url_link
python
async def to_url_link(self) -> UrlLink: log.info('Message to UrlLink') if self.type() != MessageType.MESSAGE_TYPE_URL: raise WechatyOperationError( 'current message type: %s, not url type' % self.type() ) payload = await self.puppet.message_url(self.message_id) if payload is None: raise WechatyPayloadError( 'can not get url_link_payload by message: %s' % self.message_id) return self.wechaty.UrlLink(payload)
Get the UrlLink payload from an url-type message. :return: UrlLink
https://github.com/wechaty/python-wechaty/blob/f7406b0c20e5749da2cee7cf1a4666742d219aae/src/wechaty/user/message.py#L580-L596
from __future__ import annotations import dataclasses import json import re from typing import ( Optional, Union, List ) from datetime import datetime from wechaty_puppet import ( FileBox, MessagePayload, MessageQueryFilter, MessageType, get_logger ) from wechaty.exceptions import WechatyPayloadError, WechatyOperationError from wechaty.utils import timestamp_to_date from ..accessory import Accessory from .mini_program import MiniProgram from .contact import Contact from .url_link import UrlLink from .image import Image from .room import Room log = get_logger('Message') SUPPORTED_MESSAGE_FILE_TYPES: List[MessageType] = [ MessageType.MESSAGE_TYPE_ATTACHMENT, MessageType.MESSAGE_TYPE_EMOTICON, MessageType.MESSAGE_TYPE_IMAGE, MessageType.MESSAGE_TYPE_VIDEO, MessageType.MESSAGE_TYPE_AUDIO ] class Message(Accessory[MessagePayload]): Type = MessageType def __init__(self, message_id: str): super().__init__() self.message_id = message_id def message_type(self) -> MessageType: return self.payload.type def __str__(self) -> str: if not self.is_ready(): return f'Message <{self.message_id}> is not ready' message_list = [ 'Message', f'#{self.message_type().name.lower()}', f'[🗣 {self.talker()}', ] if self.room(): message_list.append(f'@👥 {self.room()}]') if self.message_type() == MessageType.MESSAGE_TYPE_TEXT: message_list.append(f'\t{self.text()[:70]}') return ''.join(message_list) async def say(self, msg: Union[str, Contact, FileBox, UrlLink, MiniProgram], mention_ids: Optional[List[str]] = None) -> Optional[Message]: log.info('say() <%s>', msg) if not msg: log.error('can"t say nothing') return None room = self.room() if room is not None: conversation_id = room.room_id else: talker = self.talker() if talker is None: raise WechatyPayloadError('Message must be from room/contact') conversation_id = talker.contact_id from .url_link import UrlLink from .mini_program import MiniProgram if isinstance(msg, str): message_id = await self.puppet.message_send_text( conversation_id=conversation_id, message=msg, mention_ids=mention_ids) elif isinstance(msg, Contact): message_id = await self.puppet.message_send_contact( conversation_id=conversation_id, contact_id=msg.contact_id, ) elif isinstance(msg, FileBox): message_id = await self.puppet.message_send_file( conversation_id=conversation_id, file=msg) elif isinstance(msg, UrlLink): message_id = await self.puppet.message_send_url( conversation_id=conversation_id, url=json.dumps(dataclasses.asdict(msg.payload))) elif isinstance(msg, MiniProgram): assert msg.payload is not None message_id = await self.puppet.message_send_mini_program( conversation_id=conversation_id, mini_program=msg.payload) else: raise WechatyPayloadError('message type should be str, ' 'Contact/FileBox/UrlLink/MiniProgram') message = self.load(message_id) await message.ready() return message @classmethod async def find(cls, talker_id: Optional[str] = None, message_id: Optional[str] = None, room_id: Optional[str] = None, text: Optional[str] = None, to_id: Optional[str] = None, message_type: Optional[MessageType] = None ) -> Optional[Message]: log.info('Message find all <%s, %s, %s, <%s, %s, %s>', talker_id, message_id, room_id, text, to_id, message_type) messages = await cls.find_all( talker_id=talker_id, message_id=message_id, room_id=room_id, text=text, to_id=to_id, message_type=message_type ) if messages is None or len(messages) < 1: return None if len(messages) > 1: log.warning( 'Message findAll() got more than one(%d) result', len(messages)) return messages[0] @classmethod async def find_all(cls, 
talker_id: Optional[str] = None, message_id: Optional[str] = None, room_id: Optional[str] = None, text: Optional[str] = None, to_id: Optional[str] = None, message_type: Optional[MessageType] = None ) -> List[Message]: log.info('Message find all <%s, %s, %s, <%s, %s, %s>', talker_id, message_id, room_id, text, to_id, message_type) query_filter = MessageQueryFilter( from_id=talker_id, id=message_id, room_id=room_id, text=text, to_id=to_id, type=message_type ) message_ids = await cls.get_puppet().message_search(query_filter) messages = [cls.load(message_id) for message_id in message_ids] return messages def talker(self) -> Contact: talker_id = self.payload.from_id if talker_id is None: raise WechatyPayloadError('message must be from Contact') return self.wechaty.Contact.load(talker_id) def to(self) -> Optional[Contact]: to_id = self.payload.to_id if to_id is None: return None return self.wechaty.Contact.load(to_id) def room(self) -> Optional[Room]: room_id = self.payload.room_id if room_id is None or room_id == '': return None return self.wechaty.Room.load(room_id) def chatter(self) -> Union[Room, Contact]: room: Optional[Room] = self.room() if room: return room talker: Contact = self.talker() return talker def text(self) -> str: if self.payload.text: return self.payload.text return '' async def to_recalled(self) -> Message: if self.message_type() != MessageType.MESSAGE_TYPE_RECALLED: raise WechatyOperationError( 'Can not call toRecalled() on message which is not' ' recalled type.') origin_message_id = self.text() if origin_message_id is None: raise WechatyPayloadError('Can not find recalled message') log.info('get recall message <%s>', origin_message_id) try: message = self.wechaty.Message.load(origin_message_id) await message.ready() return message except Exception as exception: error_info = 'can"t load or ready message payload {}'.format( str(exception.args) ) log.error(error_info) raise WechatyOperationError(error_info) async def recall(self) -> bool: log.info('Message recall') success = await self.puppet.message_recall(self.message_id) return success @classmethod def load(cls, message_id: str) -> Message: return cls(message_id) def type(self) -> MessageType: return self.payload.type def is_self(self) -> bool: user_id = self.wechaty.contact_id talker = self.talker() if talker is None: return False return talker.contact_id == user_id async def mention_list(self) -> List[Contact]: log.info('Message mention_list') room = self.room() if self.type() != MessageType.MESSAGE_TYPE_TEXT or room is None: return [] if self.payload is not None and self.payload.mention_ids is not None: async def id_to_contact(contact_id: str) -> Contact: contact = self.wechaty.Contact.load(contact_id) await contact.ready() return contact contacts = [ await id_to_contact(contact_id) for contact_id in self.payload.mention_ids] return contacts return [] async def mention_text(self) -> str: text = self.text() room = self.room() mention_list = await self.mention_list() if room is None or len(mention_list) <= 0: return text async def get_alias_or_name(member: Contact) -> str: if room is not None: alias = await room.alias(member) if alias: return alias return member.name mention_names = [ await get_alias_or_name(member) for member in mention_list] while len(mention_names) > 0: escaped_cur = mention_names.pop() pattern = re.compile(f'@{escaped_cur}(\u2005|\u0020|$)') text = re.sub(pattern, '', text) return text async def mention_self(self) -> bool: self_id = self.wechaty.contact_id await self.ready() if self.payload is None or 
self.payload.mention_ids is None: return False return self_id in self.payload.mention_ids async def ready(self) -> None: log.debug('Message ready <%s>', self) if self.is_ready(): return self.payload = await self.puppet.message_payload(self.message_id) if self.payload.from_id.strip() != '': talker = self.wechaty.Contact.load(self.payload.from_id) await talker.ready() if self.payload.room_id.strip() != '': room = self.wechaty.Room.load(self.payload.room_id) await room.ready() if self.payload.to_id.strip() != '': to_contact = self.wechaty.Contact.load(self.payload.to_id) await to_contact.ready() async def forward(self, to: Union[Room, Contact]) -> None: log.info('forward() <%s>', to) if to is None: raise WechatyPayloadError('to param not found') try: if isinstance(to, Room): to_id = to.room_id elif isinstance(to, Contact): to_id = to.contact_id else: raise WechatyPayloadError( 'expected type is <Room, Contact>, but get <%s>' % to.__class__) print(to_id) await self.puppet.message_forward(to_id, self.message_id) except Exception as exception: message = 'Message forward error <%s>' % exception.args log.error(message) raise WechatyOperationError(message) def date(self) -> datetime: if self.payload.timestamp > 2145888000: time = datetime.fromtimestamp(self.payload.timestamp / 1000) else: time = datetime.fromtimestamp(self.payload.timestamp) return timestamp_to_date(self.payload.timestamp) def age(self) -> int: return (datetime.now() - self.date()).seconds // 1000 async def to_file_box(self) -> FileBox: log.info('Message to FileBox') if self.type() not in SUPPORTED_MESSAGE_FILE_TYPES: raise WechatyOperationError( f'this type <{self.type().name}> message can"t be converted to ' f'FileBox' ) msg_type: MessageType = self.type() if msg_type == MessageType.MESSAGE_TYPE_IMAGE: file_box = await self.puppet.message_image(self.message_id) else: file_box = await self.puppet.message_file(self.message_id) return file_box def to_image(self) -> Image: log.info('Message to Image() for message %s', self.message_id) if self.type() != MessageType.MESSAGE_TYPE_IMAGE: raise WechatyOperationError( 'current message type: %s, not image type' % self.type() ) return self.wechaty.Image.create(self.message_id) async def to_contact(self) -> Contact: log.info('Message to Contact') if self.type() != MessageType.MESSAGE_TYPE_CONTACT: raise WechatyOperationError( 'current message type: %s, not contact type' % self.type() ) contact_id = await self.puppet.message_contact(self.message_id) contact = self.wechaty.Contact.load(contact_id) await contact.ready() return contact
Apache License 2.0
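A hedged usage sketch of to_url_link inside a message handler; the handler wiring is hypothetical and only shows checking the message type before converting.

from wechaty_puppet import MessageType

async def on_message(msg):                      # msg is a wechaty Message instance
    if msg.type() == MessageType.MESSAGE_TYPE_URL:
        url_link = await msg.to_url_link()      # raises WechatyOperationError for other types
        print("received url-link message:", url_link)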
zsims/dic
dic/rel.py
Relationship.resolve
python
def resolve(self, container): pass
Called when the relationship is resolved, e.g. when it is about to be injected.
https://github.com/zsims/dic/blob/bb4e615c236e6cfe804bd7286a5af081007325ce/dic/rel.py#L7-L11
import abc import threading class Relationship(metaclass=abc.ABCMeta): @abc.abstractmethod
BSD 2-Clause Simplified License
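A minimal sketch of a concrete Relationship subclass; the class below and the container.resolve() call are assumptions used only to show where resolve(container) fits.

class ComponentRelationship(Relationship):       # hypothetical concrete relationship
    def __init__(self, component_type):
        self.component_type = component_type

    def resolve(self, container):
        # called when the relationship is about to be injected
        return container.resolve(self.component_type)   # assumes the container exposes resolve()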
checkpointsw/cp_mgmt_api_python_sdk
examples_python3/clone_host.py
set_threat_rule
python
def set_threat_rule(api_client, obj, new_host_uid): command = "set-threat-rule" payload = {"uid": obj["rule"]["uid"], "layer": obj["layer"]["uid"]} if obj["rule"]["type"] == "threat-exception": command = "set-threat-exception" payload = {"uid": obj["rule"]["uid"], "exception-group-uid": obj["layer"]["uid"]} if "package" in obj: log("\t\tRule number {} in policy {} (layer: {})".format(obj["position"], obj["package"]["name"], obj["layer"]["name"])) else: log("\t\tRule number {} (layer: {})".format(obj["position"], obj["layer"]["name"])) need_to_set_rule = False for column in obj["rule-columns"]: if column == "source": payload.update({"source": {"add": new_host_uid}}) need_to_set_rule = True if column == "destination": payload.update({"destination": {"add": new_host_uid}}) need_to_set_rule = True if column == "scope": payload.update({"protected-scope": {"add": new_host_uid}}) need_to_set_rule = True if need_to_set_rule: res = api_client.api_call(command, payload) if res.success is False: discard_write_to_log_file(api_client, "Adding new host to threat rule failed." " Error:\n{}\nAborting all changes.".format(res.error_message)) return False return True
Updates the given threat rule (or threat exception), adding the new host in every column where the original host appears. :param api_client: API client of the domain :param obj: the threat rule object from the where-used data :param new_host_uid: UID of the new host :return: True on success, otherwise False
https://github.com/checkpointsw/cp_mgmt_api_python_sdk/blob/7294417a702a06ef910ca012d9b347a5908b599a/examples_python3/clone_host.py#L156-L198
from __future__ import print_function import argparse import getpass import sys, os sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from cpapi import APIClient, APIClientArgs global_domain_cloned_host_uid = None log_file = "" def create_host(api_client, orig_host_name, orig_host_uid, cloned_host_name, cloned_host_ip): log("\n\tGathering information for host {}".format(orig_host_name)) res = api_client.api_call("show-host", {"uid": orig_host_uid}) if res.success is False: discard_write_to_log_file(api_client, "Failed to open existing host:\n{}\nAborting.".format(res.error_message)) return None color = res.data["color"] comments = res.data["comments"] log("\n\tCreating a new host {}".format(cloned_host_name)) res = api_client.api_call("add-host", {"name": cloned_host_name, "ip-address": cloned_host_ip, "color": color, "comments": comments}) if res.success is False: discard_write_to_log_file(api_client, "Failed to create the new host:\n{}\nAborting.".format(res.error_message)) return None return res.data["uid"] def find_host_uid_if_exist(api_client, cloned_host_name, cloned_host_ip): res = api_client.api_call("show-host", {"name": cloned_host_name}) if res.success is False: if "code" in res.data and "generic_err_object_not_found" == res.data["code"]: return True else: discard_write_to_log_file(api_client, "Operation failed:\n{}\nAborting all changes.".format(res.error_message)) return False if res.data.get("ipv4-address") == cloned_host_ip or res.data.get("ipv6-address") == cloned_host_ip: log("\n\tThe host with the same name and IP already exists,\n\t" "going to copy it to the same places as the original host") return res.data["uid"] else: discard_write_to_log_file(api_client, "A host with the same name but a different IP address " "already exists, discarding all changes") return False def copy_reference(api_client, new_host_uid, new_host_name, where_used_data, is_global_domain): log("\n\tAdding '" + new_host_name + "' to:") if where_used_data["objects"]: for obj in where_used_data["objects"]: if obj["type"] == "group": if not is_global_domain and obj["domain"]["domain-type"] == "global domain": continue log("\t\tGroup: " + obj["name"]) res = api_client.api_call("set-group", {"name": obj["name"], "members": {"add": new_host_uid}}) if res.success is False: discard_write_to_log_file(api_client, "Adding the new host to the group failed. Error:\n" "{}\nAborting all changes.".format(res.error_message)) return False if where_used_data["access-control-rules"]: for obj in where_used_data["access-control-rules"]: if not is_global_domain and obj["rule"]["domain"]["domain-type"] == "global domain": continue if set_access_rule(api_client, obj, new_host_uid) is False: return False if where_used_data["nat-rules"]: for obj in where_used_data["nat-rules"]: if set_nat_rule(api_client, obj, new_host_uid) is False: return False if where_used_data["threat-prevention-rules"]: for obj in where_used_data["threat-prevention-rules"]: if not is_global_domain and obj["rule"]["domain"]["domain-type"] == "global domain": continue if set_threat_rule(api_client, obj, new_host_uid) is False: return False res = api_client.api_call("publish", {}) if res.success is False: discard_write_to_log_file(api_client, "Publish failed. Error:\n{}\nAborting all changes.".format(res.error_message)) return False return True
Apache License 2.0
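For reference, a hypothetical shape of the where-used entry (obj) that set_threat_rule consumes; the keys mirror those accessed in the function above and all values are invented.

obj = {
    "rule": {"uid": "11111111-2222-3333-4444-555555555555", "type": "threat-rule"},
    "layer": {"uid": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", "name": "Standard Threat Prevention"},
    "position": 3,
    "rule-columns": ["source", "protected-scope"],   # columns where the original host appears
    # "package": {"name": "Standard"},               # only present for rules inside a policy package
}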
kintyre/ksconf
ksconf/conf/parser.py
parse_conf
python
def parse_conf(stream, profile=PARSECONF_MID, encoding=None): try: if hasattr(stream, "read"): return parse_conf_stream(stream, **profile) else: if not encoding: encoding = detect_by_bom(stream) with open(stream, "r", encoding=encoding) as stream: return parse_conf_stream(stream, **profile) except UnicodeDecodeError as e: raise ConfParserException("Encoding error encountered: {}".format(e))
Parse a .conf file. This is a wrapper around :func:`parse_conf_stream` that allows filenames or streams to be passed in. :param stream: the path to a configuration file or open file-like object to be parsed :type stream: str, file :param profile: parsing configuration settings :param encoding: Defaults to the system default (often "utf-8") :return: a mapping of the stanza and attributes. The resulting output is accessible as [stanza][attribute] -> value :rtype: dict
https://github.com/kintyre/ksconf/blob/eeafa5663aeff2585a6c3d40e7b72cdbbab9f211/ksconf/conf/parser.py#L222-L246
from __future__ import absolute_import, unicode_literals import codecs import os import re from io import StringIO, open import ksconf.ext.six as six from ..consts import SMART_CREATE, SMART_NOCHANGE, SMART_UPDATE from ..util.compare import fileobj_compare default_encoding = "utf-8" class Token(object): def __deepcopy__(self, memo): memo[id(self)] = self return self def __lt__(self, other): return isinstance(other, six.text_type) def __gt__(self, other): return not isinstance(other, six.text_type) DUP_OVERWRITE = "overwrite" DUP_MERGE = "merge" GLOBAL_STANZA = Token() DUP_EXCEPTION = "exception" PARSECONF_MID = dict( keep_comments=True, dup_stanza=DUP_EXCEPTION, dup_key=DUP_OVERWRITE, strict=True) PARSECONF_MID_NC = dict( keep_comments=False, dup_stanza=DUP_EXCEPTION, dup_key=DUP_OVERWRITE, strict=True) PARSECONF_LOOSE = dict( keep_comments=False, dup_stanza=DUP_MERGE, dup_key=DUP_MERGE, strict=False) PARSECONF_STRICT = dict( keep_comments=True, dup_stanza=DUP_EXCEPTION, dup_key=DUP_EXCEPTION, strict=True) PARSECONF_STRICT_NC = dict( keep_comments=False, dup_stanza=DUP_EXCEPTION, dup_key=DUP_EXCEPTION, strict=True) class ConfParserException(Exception): pass class DuplicateKeyException(ConfParserException): pass class DuplicateStanzaException(ConfParserException): pass def section_reader(stream, section_re=re.compile(r'^[\s\t]*\[(.*)\]\s*$')): buf = [] section = None for line in stream: line = line.rstrip("\r\n") match = section_re.match(line) if match: yield section, buf section = match.group(1) buf = [] else: buf.append(line) if section or buf: yield section, buf def _detect_lite(byte_str): for (enc, boms) in ( ('utf-8-sig', (codecs.BOM_UTF8,)), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE))): if any(byte_str.startswith(bom) for bom in boms): return {"encoding": enc} return {"encoding": default_encoding} def detect_by_bom(path): with open(path, 'rb') as f: raw = f.read(4) encoding = _detect_lite(raw) return encoding["encoding"] def cont_handler(iterable, continue_re=re.compile(r"^(.*)\\$"), breaker="\n"): buf = "" for line in iterable: mo = continue_re.match(line) if mo: buf += mo.group(1) + breaker elif buf: yield buf + line buf = "" else: yield line if buf: yield buf def splitup_kvpairs(lines, comments_re=re.compile(r"^\s*[#;]"), keep_comments=False, strict=False): comment = 0 for entry in lines: if comments_re.search(entry): if keep_comments: comment += 1 yield ("#-%06d" % comment, entry) elif "=" in entry: k, v = entry.split("=", 1) yield k.rstrip(), v.lstrip() elif re.search(r'^\s*\[|\]\s*$', entry): raise ConfParserException("Dangling stanza header: {0}".format(entry)) elif strict and entry.strip(): raise ConfParserException("Unexpected entry: {0!r}".format(entry))
Apache License 2.0
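A short usage sketch; "local/props.conf" is a hypothetical path, and PARSECONF_LOOSE is one of the parsing profiles defined in the context above.

from ksconf.conf.parser import parse_conf, PARSECONF_LOOSE

conf = parse_conf("local/props.conf", profile=PARSECONF_LOOSE)   # hypothetical file path
for stanza, attributes in conf.items():
    print(stanza, "->", attributes)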
missionpinball/mpf
mpf/devices/dmd.py
Dmd._bcp_receive_dmd_frame
python
async def _bcp_receive_dmd_frame(cls, machine, client, name, rawbytes, **kwargs): del client del kwargs if name not in machine.dmds: raise TypeError("dmd {} not known".format(name)) machine.dmds[name].update(rawbytes)
Update the DMD with a frame received over BCP.
https://github.com/missionpinball/mpf/blob/1eda6ba6892b8f7cc6dedf6cb6472ff92293b8ef/mpf/devices/dmd.py#L43-L51
from functools import partial from mpf.core.machine import MachineController from mpf.core.platform import DmdPlatform from mpf.core.system_wide_device import SystemWideDevice class Dmd(SystemWideDevice): config_section = 'dmds' collection = 'dmds' class_label = 'dmd' __slots__ = ["hw_device", "platform"] @classmethod def device_class_init(cls, machine: MachineController): machine.bcp.interface.register_command_callback("dmd_frame", partial(cls._bcp_receive_dmd_frame, machine)) def __init__(self, machine, name): self.hw_device = None self.platform = None super().__init__(machine, name) async def _initialize(self): await super()._initialize() self.platform = self.machine.get_platform_sections("dmd", self.config['platform']) self.platform.assert_has_feature("dmds") self.hw_device = self.platform.configure_dmd() @classmethod
MIT License
wang502/slack-sql
PyGreSQL-5.0/pgdb.py
Cursor.execute
python
def execute(self, operation, parameters=None): if (parameters and isinstance(parameters, list) and len(parameters) > 1 and all(isinstance(p, tuple) for p in parameters) and all(len(p) == len(parameters[0]) for p in parameters[1:])): return self.executemany(operation, parameters) else: return self.executemany(operation, [parameters])
Prepare and execute a database operation (query or command).
https://github.com/wang502/slack-sql/blob/f6531e8bad65c09be779469c3bebdc89a28eccbf/PyGreSQL-5.0/pgdb.py#L893-L906
from __future__ import print_function, division from _pg import * __version__ = version from datetime import date, time, datetime, timedelta, tzinfo from time import localtime from decimal import Decimal from uuid import UUID as Uuid from math import isnan, isinf from collections import namedtuple from functools import partial from re import compile as regex from json import loads as jsondecode, dumps as jsonencode try: long except NameError: long = int try: unicode except NameError: unicode = str try: basestring except NameError: basestring = (str, bytes) from collections import Iterable apilevel = '2.0' threadsafety = 1 paramstyle = 'pyformat' shortcutmethods = 1 try: from inspect import signature except ImportError: from inspect import getargspec def get_args(func): return getargspec(func).args else: def get_args(func): return list(signature(func).parameters) try: from datetime import timezone except ImportError: class timezone(tzinfo): def __init__(self, offset, name=None): self.offset = offset if not name: minutes = self.offset.days * 1440 + self.offset.seconds // 60 if minutes < 0: hours, minutes = divmod(-minutes, 60) hours = -hours else: hours, minutes = divmod(minutes, 60) name = 'UTC%+03d:%02d' % (hours, minutes) self.name = name def utcoffset(self, dt): return self.offset def tzname(self, dt): return self.name def dst(self, dt): return None timezone.utc = timezone(timedelta(0), 'UTC') _has_timezone = False else: _has_timezone = True _timezones = dict(CET='+0100', EET='+0200', EST='-0500', GMT='+0000', HST='-1000', MET='+0100', MST='-0700', UCT='+0000', UTC='+0000', WET='+0000') def _timezone_as_offset(tz): if tz.startswith(('+', '-')): if len(tz) < 5: return tz + '00' return tz.replace(':', '') return _timezones.get(tz, '+0000') def _get_timezone(tz): tz = _timezone_as_offset(tz) minutes = 60 * int(tz[1:3]) + int(tz[3:5]) if tz[0] == '-': minutes = -minutes return timezone(timedelta(minutes=minutes), tz) def decimal_type(decimal_type=None): global Decimal if decimal_type is not None: Decimal = decimal_type set_typecast('numeric', decimal_type) return Decimal def cast_bool(value): if value: return value[0] in ('t', 'T') def cast_money(value): if value: value = value.replace('(', '-') return Decimal(''.join(c for c in value if c.isdigit() or c in '.-')) def cast_int2vector(value): return [int(v) for v in value.split()] def cast_date(value, connection): if value == '-infinity': return date.min if value == 'infinity': return date.max value = value.split() if value[-1] == 'BC': return date.min value = value[0] if len(value) > 10: return date.max fmt = connection.date_format() return datetime.strptime(value, fmt).date() def cast_time(value): fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' return datetime.strptime(value, fmt).time() _re_timezone = regex('(.*)([+-].*)') def cast_timetz(value): tz = _re_timezone.match(value) if tz: value, tz = tz.groups() else: tz = '+0000' fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' if _has_timezone: value += _timezone_as_offset(tz) fmt += '%z' return datetime.strptime(value, fmt).timetz() return datetime.strptime(value, fmt).timetz().replace( tzinfo=_get_timezone(tz)) def cast_timestamp(value, connection): if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:5] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b 
%d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] else: if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] return datetime.strptime(' '.join(value), ' '.join(fmt)) def cast_timestamptz(value, connection): if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] value, tz = value[:-1], value[-1] else: if fmt.startswith('%Y-'): tz = _re_timezone.match(value[1]) if tz: value[1], tz = tz.groups() else: tz = '+0000' else: value, tz = value[:-1], value[-1] if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] if _has_timezone: value.append(_timezone_as_offset(tz)) fmt.append('%z') return datetime.strptime(' '.join(value), ' '.join(fmt)) return datetime.strptime(' '.join(value), ' '.join(fmt)).replace( tzinfo=_get_timezone(tz)) _re_interval_sql_standard = regex( '(?:([+-])?([0-9]+)-([0-9]+) ?)?' '(?:([+-]?[0-9]+)(?!:) ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres = regex( '(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres_verbose = regex( '@ ?(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-]?[0-9]+) ?hours? ?)?' '(?:([+-]?[0-9]+) ?mins? ?)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))? ?secs?)? ?(ago)?') _re_interval_iso_8601 = regex( 'P(?:([+-]?[0-9]+)Y)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-]?[0-9]+)D)?' '(?:T(?:([+-]?[0-9]+)H)?' '(?:([+-]?[0-9]+)M)?' 
'(?:([+-])?([0-9]+)(?:\\.([0-9]+))?S)?)?') def cast_interval(value): m = _re_interval_iso_8601.match(value) if m: m = [d or '0' for d in m.groups()] secs_ago = m.pop(5) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = -secs usecs = -usecs else: m = _re_interval_postgres_verbose.match(value) if m: m, ago = [d or '0' for d in m.groups()[:8]], m.group(9) secs_ago = m.pop(5) == '-' m = [-int(d) for d in m] if ago else [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = - secs usecs = -usecs else: m = _re_interval_postgres.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: m = _re_interval_sql_standard.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] years_ago = m.pop(0) == '-' hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if years_ago: years = -years mons = -mons if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: raise ValueError('Cannot parse interval: %s' % value) days += 365 * years + 30 * mons return timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) class Typecasts(dict): defaults = {'char': str, 'bpchar': str, 'name': str, 'text': str, 'varchar': str, 'bool': cast_bool, 'bytea': unescape_bytea, 'int2': int, 'int4': int, 'serial': int, 'int8': long, 'oid': int, 'hstore': cast_hstore, 'json': jsondecode, 'jsonb': jsondecode, 'float4': float, 'float8': float, 'numeric': Decimal, 'money': cast_money, 'date': cast_date, 'interval': cast_interval, 'time': cast_time, 'timetz': cast_timetz, 'timestamp': cast_timestamp, 'timestamptz': cast_timestamptz, 'int2vector': cast_int2vector, 'uuid': Uuid, 'anyarray': cast_array, 'record': cast_record} connection = None def __missing__(self, typ): if not isinstance(typ, str): raise TypeError('Invalid type: %s' % typ) cast = self.defaults.get(typ) if cast: cast = self._add_connection(cast) self[typ] = cast elif typ.startswith('_'): base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: self[typ] = cast return cast @staticmethod def _needs_connection(func): try: args = get_args(func) except (TypeError, ValueError): return False else: return 'connection' in args[1:] def _add_connection(self, cast): if not self.connection or not self._needs_connection(cast): return cast return partial(cast, connection=self.connection) def get(self, typ, default=None): return self[typ] or default def set(self, typ, cast): if isinstance(typ, basestring): typ = [typ] if cast is None: for t in typ: self.pop(t, None) self.pop('_%s' % t, None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: self[t] = self._add_connection(cast) self.pop('_%s' % t, None) def reset(self, typ=None): defaults = self.defaults if typ is None: self.clear() self.update(defaults) else: if isinstance(typ, basestring): typ = [typ] for t in typ: cast = defaults.get(t) if cast: self[t] = self._add_connection(cast) t = '_%s' % t cast = defaults.get(t) if cast: self[t] = self._add_connection(cast) else: self.pop(t, None) else: self.pop(t, None) self.pop('_%s' % t, None) def create_array_cast(self, basecast): def cast(v): return cast_array(v, basecast) return cast def create_record_cast(self, name, fields, casts): record = 
namedtuple(name, fields) def cast(v): return record(*cast_record(v, casts)) return cast _typecasts = Typecasts() def get_typecast(typ): return _typecasts.get(typ) def set_typecast(typ, cast): _typecasts.set(typ, cast) def reset_typecast(typ=None): _typecasts.reset(typ) class LocalTypecasts(Typecasts): defaults = _typecasts connection = None def __missing__(self, typ): if typ.startswith('_'): base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: self[typ] = cast else: cast = self.defaults.get(typ) if cast: cast = self._add_connection(cast) self[typ] = cast else: fields = self.get_fields(typ) if fields: casts = [self[field.type] for field in fields] fields = [field.name for field in fields] cast = self.create_record_cast(typ, fields, casts) self[typ] = cast return cast def get_fields(self, typ): return [] class TypeCode(str): @classmethod def create(cls, oid, name, len, type, category, delim, relid): self = cls(name) self.oid = oid self.len = len self.type = type self.category = category self.delim = delim self.relid = relid return self FieldInfo = namedtuple('FieldInfo', ['name', 'type']) class TypeCache(dict): def __init__(self, cnx): super(TypeCache, self).__init__() self._escape_string = cnx.escape_string self._src = cnx.source() self._typecasts = LocalTypecasts() self._typecasts.get_fields = self.get_fields self._typecasts.connection = cnx def __missing__(self, key): if isinstance(key, int): oid = key else: if '.' not in key and '"' not in key: key = '"%s"' % key oid = "'%s'::regtype" % self._escape_string(key) try: self._src.execute("SELECT oid, typname," " typlen, typtype, typcategory, typdelim, typrelid" " FROM pg_type WHERE oid=%s" % oid) except ProgrammingError: res = None else: res = self._src.fetch(1) if not res: raise KeyError('Type %s could not be found' % key) res = res[0] type_code = TypeCode.create(int(res[0]), res[1], int(res[2]), res[3], res[4], res[5], int(res[6])) self[type_code.oid] = self[str(type_code)] = type_code return type_code def get(self, key, default=None): try: return self[key] except KeyError: return default def get_fields(self, typ): if not isinstance(typ, TypeCode): typ = self.get(typ) if not typ: return None if not typ.relid: return None self._src.execute("SELECT attname, atttypid" " FROM pg_attribute WHERE attrelid=%s AND attnum>0" " AND NOT attisdropped ORDER BY attnum" % typ.relid) return [FieldInfo(name, self.get(int(oid))) for name, oid in self._src.fetch(-1)] def get_typecast(self, typ): return self._typecasts.get(typ) def set_typecast(self, typ, cast): self._typecasts.set(typ, cast) def reset_typecast(self, typ=None): self._typecasts.reset(typ) def typecast(self, value, typ): if value is None: return None cast = self.get_typecast(typ) if not cast or cast is str: return value return cast(value) class _quotedict(dict): def __getitem__(self, key): return self.quote(super(_quotedict, self).__getitem__(key)) def _db_error(msg, cls=DatabaseError): error = cls(msg) error.sqlstate = None return error def _op_error(msg): return _db_error(msg, OperationalError) class Cursor(object): def __init__(self, dbcnx): self.connection = self._dbcnx = dbcnx self._cnx = dbcnx._cnx self.type_cache = dbcnx.type_cache self._src = self._cnx.source() self._description = None if self.row_factory is Cursor.row_factory: self.row_factory = None else: self.build_row_factory = None self.rowcount = -1 self.arraysize = 1 self.lastrowid = None def __iter__(self): return self def __enter__(self): return self def __exit__(self, et, ev, tb): self.close() def 
_quote(self, value): if value is None: return 'NULL' if isinstance(value, (Hstore, Json)): value = str(value) if isinstance(value, basestring): if isinstance(value, Binary): value = self._cnx.escape_bytea(value) if bytes is not str: value = value.decode('ascii') else: value = self._cnx.escape_string(value) return "'%s'" % value if isinstance(value, float): if isinf(value): return "'-Infinity'" if value < 0 else "'Infinity'" if isnan(value): return "'NaN'" return value if isinstance(value, (int, long, Decimal, Literal)): return value if isinstance(value, datetime): if value.tzinfo: return "'%s'::timestamptz" % value return "'%s'::timestamp" % value if isinstance(value, date): return "'%s'::date" % value if isinstance(value, time): if value.tzinfo: return "'%s'::timetz" % value return "'%s'::time" % value if isinstance(value, timedelta): return "'%s'::interval" % value if isinstance(value, Uuid): return "'%s'::uuid" % value if isinstance(value, list): if not value: return "'{}'" q = self._quote try: return 'ARRAY[%s]' % ','.join(str(q(v)) for v in value) except UnicodeEncodeError: return u'ARRAY[%s]' % ','.join(unicode(q(v)) for v in value) if isinstance(value, tuple): q = self._quote try: return '(%s)' % ','.join(str(q(v)) for v in value) except UnicodeEncodeError: return u'(%s)' % ','.join(unicode(q(v)) for v in value) try: value = value.__pg_repr__() except AttributeError: raise InterfaceError( 'Do not know how to adapt type %s' % type(value)) if isinstance(value, (tuple, list)): value = self._quote(value) return value def _quoteparams(self, string, parameters): if not parameters: try: return string % () except (TypeError, ValueError): return string if isinstance(parameters, dict): parameters = _quotedict(parameters) parameters.quote = self._quote else: parameters = tuple(map(self._quote, parameters)) return string % parameters def _make_description(self, info): name, typ, size, mod = info[1:] type_code = self.type_cache[typ] if mod > 0: mod -= 4 if type_code == 'numeric': precision, scale = mod >> 16, mod & 0xffff size = precision else: if not size: size = type_code.size if size == -1: size = mod precision = scale = None return CursorDescription(name, type_code, None, size, precision, scale, None) @property def description(self): descr = self._description if self._description is True: make = self._make_description descr = [make(info) for info in self._src.listinfo()] self._description = descr return descr @property def colnames(self): return [d[0] for d in self.description] @property def coltypes(self): return [d[1] for d in self.description] def close(self): self._src.close()
MIT License
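A DB-API usage sketch, assuming a reachable PostgreSQL database; the connection parameters and table are hypothetical. Passing a list of equally sized tuples is routed to executemany() by the execute() shown above.

import pgdb

con = pgdb.connect(database="testdb", user="test")   # hypothetical credentials
cur = con.cursor()
cur.execute("SELECT %s + %s", (2, 3))                # a single parameter tuple
print(cur.fetchone())

# a list of same-length tuples is dispatched to executemany() internally
cur.execute("INSERT INTO demo (a, b) VALUES (%s, %s)", [(1, "x"), (2, "y")])
con.commit()
con.close()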
iduta/iresnet
models/iresnet.py
iresnet18
python
def iresnet18(pretrained=False, **kwargs): model = iResNet(BasicBlock, [2, 2, 2, 2], **kwargs) if pretrained: os.makedirs(default_cache_path, exist_ok=True) model.load_state_dict(torch.load(download_from_url(model_urls['iresnet18'], root=default_cache_path))) return model
Constructs an iResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
https://github.com/iduta/iresnet/blob/babdc4f5946f64905710cd64a5bd6c164a805c9e/models/iresnet.py#L270-L281
import torch import torch.nn as nn import os from div.download_from_url import download_from_url try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join( os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))) default_cache_path = os.path.join(torch_cache_home, 'pretrained') __all__ = ['iResNet', 'iresnet18', 'iresnet34', 'iresnet50', 'iresnet101', 'iresnet152', 'iresnet200', 'iresnet302', 'iresnet404', 'iresnet1001'] model_urls = { 'iresnet18': 'Trained model not available yet!!', 'iresnet34': 'Trained model not available yet!!', 'iresnet50': 'https://drive.google.com/uc?export=download&id=1Waw3ob8KPXCY9iCLdAD6RUA0nvVguc6K', 'iresnet101': 'https://drive.google.com/uc?export=download&id=1cZ4XhwZfUOm_o0WZvenknHIqgeqkY34y', 'iresnet152': 'https://drive.google.com/uc?export=download&id=10heFLYX7VNlaSrDy4SZbdOOV9xwzwyli', 'iresnet200': 'https://drive.google.com/uc?export=download&id=1Ao-f--jNU7MYPqSW8UMonXVrq3mkLRpW', 'iresnet302': 'https://drive.google.com/uc?export=download&id=1UcyvLhLzORJZBUQDNJdsx3USCloXZT6V', 'iresnet404': 'https://drive.google.com/uc?export=download&id=1hEOHErsD6AF1b3qQi56mgxvYDneTvMIq', 'iresnet1001': 'Trained model not available yet!!', } def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None, start_block=False, end_block=False, exclude_bn0=False): super(BasicBlock, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if not start_block and not exclude_bn0: self.bn0 = norm_layer(inplanes) self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) if start_block: self.bn2 = norm_layer(planes) if end_block: self.bn2 = norm_layer(planes) self.downsample = downsample self.stride = stride self.start_block = start_block self.end_block = end_block self.exclude_bn0 = exclude_bn0 def forward(self, x): identity = x if self.start_block: out = self.conv1(x) elif self.exclude_bn0: out = self.relu(x) out = self.conv1(out) else: out = self.bn0(x) out = self.relu(out) out = self.conv1(out) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) if self.start_block: out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity if self.end_block: out = self.bn2(out) out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None, start_block=False, end_block=False, exclude_bn0=False): super(Bottleneck, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if not start_block and not exclude_bn0: self.bn0 = norm_layer(inplanes) self.conv1 = conv1x1(inplanes, planes) self.bn1 = norm_layer(planes) self.conv2 = conv3x3(planes, planes, stride) self.bn2 = norm_layer(planes) self.conv3 = conv1x1(planes, planes * self.expansion) if start_block: self.bn3 = norm_layer(planes * self.expansion) if end_block: self.bn3 = norm_layer(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.start_block = start_block self.end_block = end_block self.exclude_bn0 
= exclude_bn0 def forward(self, x): identity = x if self.start_block: out = self.conv1(x) elif self.exclude_bn0: out = self.relu(x) out = self.conv1(out) else: out = self.bn0(x) out = self.relu(out) out = self.conv1(out) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) if self.start_block: out = self.bn3(out) if self.downsample is not None: identity = self.downsample(x) out += identity if self.end_block: out = self.bn3(out) out = self.relu(out) return out class iResNet(nn.Module): def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None, dropout_prob0=0.0): super(iResNet, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d self.inplanes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(64) self.relu = nn.ReLU(inplace=True) self.layer1 = self._make_layer(block, 64, layers[0], stride=2, norm_layer=norm_layer) self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) if dropout_prob0 > 0.0: self.dp = nn.Dropout(dropout_prob0, inplace=True) print("Using Dropout with the prob to set to 0 of: ", dropout_prob0) else: self.dp = None self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None): if norm_layer is None: norm_layer = nn.BatchNorm2d downsample = None if stride != 1 and self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.MaxPool2d(kernel_size=3, stride=stride, padding=1), conv1x1(self.inplanes, planes * block.expansion), norm_layer(planes * block.expansion), ) elif self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion), norm_layer(planes * block.expansion), ) elif stride != 1: downsample = nn.MaxPool2d(kernel_size=3, stride=stride, padding=1) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, norm_layer, start_block=True)) self.inplanes = planes * block.expansion exclude_bn0 = True for _ in range(1, (blocks-1)): layers.append(block(self.inplanes, planes, norm_layer=norm_layer, exclude_bn0=exclude_bn0)) exclude_bn0 = False layers.append(block(self.inplanes, planes, norm_layer=norm_layer, end_block=True, exclude_bn0=exclude_bn0)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) if self.dp is not None: x = self.dp(x) x = self.fc(x) return x
MIT License
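A minimal sketch of building the model and running a forward pass; the import path assumes the repository layout shown above, and the input size / class count are assumptions.

import torch
from models.iresnet import iresnet18    # assumes the repo's models/iresnet.py layout

model = iresnet18(pretrained=False, num_classes=1000)
model.eval()
x = torch.randn(1, 3, 224, 224)         # one ImageNet-sized RGB image
with torch.no_grad():
    logits = model(x)
print(logits.shape)                     # torch.Size([1, 1000])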
zewenshen/uoft-profs
src/course_selection/selection_utils.py
_is_not_valid
python
def _is_not_valid(separated_schedule): schedule = {"MONDAY": [False for i in range(48)], "TUESDAY": [False for i in range(48)], "WEDNESDAY": [False for i in range(48)], "THURSDAY": [False for i in range(48)], "FRIDAY": [False for i in range(48)] } for item in separated_schedule: item_day = item[TIME_INDEX] for day in DAY: for time_tuple in item_day[day]: for i in range(time_tuple[0], time_tuple[1]): if schedule[day][i] is True: return True else: schedule[day][i] = True return False
Return True if the given combination of sections has a time conflict (overlapping half-hour slots), False otherwise.
https://github.com/zewenshen/uoft-profs/blob/424a38cf86a30122fb2fb194989d8c4bdc5db32b/src/course_selection/selection_utils.py#L82-L101
import sys sys.path.append("../util") import Database import itertools PATH = "../../database.info" DB_NAME = "uoftcourses" DAY = ("MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY") LEC_NUM_INDEX = 1 TIME_INDEX = 2 def get_and_filter_course_data(cursor, cID, term): data_processed = get_processed_course_data(cursor, cID, term) comb = get_combination_of_one_course(data_processed) _filter_combination(comb) return comb def __get_course_data(cursor, cID, term): sql = "SELECT cID, lecNum, lecTime from Course where cID like %s and term like %s" if cID[-2] == 'Y': term = 'Fall' cursor.execute(sql, (cID, "%{}%".format(term))) return list(map(list, list(cursor.fetchall()))) def _process_raw_course_data(raw_course_data_list): for item in raw_course_data_list: item[2] = process_times(item[2]) def get_processed_course_data(cursor, cID, term): result = __get_course_data(cursor, cID, term) _process_raw_course_data(result) return result def get_combination_of_one_course(course_info): lec_list = [item for item in course_info if 'Lec' in item[LEC_NUM_INDEX]] tut_list = [item for item in course_info if 'Tut' in item[LEC_NUM_INDEX]] pra_list = [item for item in course_info if 'Pra' in item[LEC_NUM_INDEX]] if lec_list != []: comb = lec_list if list(itertools.product(comb, tut_list)) != []: comb = list(itertools.product(comb, tut_list)) if list(itertools.product(comb, pra_list)) != []: comb = list(itertools.product(comb, pra_list)) elif list(itertools.product(comb, pra_list)) != []: comb = list(itertools.product(comb, pra_list)) else: comb = [[item] for item in lec_list] elif tut_list != []: comb = tut_list if list(itertools.product(comb, pra_list)) != []: comb = list(itertools.product(comb, pra_list)) else: comb = [[item] for item in tut_list] else: comb = [[item] for item in pra_list] return comb def _filter_combination(comb): for separated_schedule in comb: if len(separated_schedule) != 1 and _is_not_valid(separated_schedule): comb.remove(separated_schedule)
Apache License 2.0
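An invented example of the schedule format _is_not_valid expects: each entry is a (cID, lecNum, times) triple, where times maps every weekday to a list of half-hour slot ranges out of 48; this assumes the function above is in scope.

empty_week = {day: [] for day in ("TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY")}
section_a = ["CSC108H1", "Lec0101", dict(MONDAY=[(18, 20)], **empty_week)]   # Mon 9:00-10:00
section_b = ["MAT137Y1", "Lec0201", dict(MONDAY=[(19, 21)], **empty_week)]   # Mon 9:30-10:30

print(_is_not_valid([section_a, section_b]))   # True: the Monday slots overlap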
hersonls/django-queryset-join
src/querysetjoin/__init__.py
QuerySetJoin.all
python
def all(self): return self._all()
Return all items from the joined QuerySets.
https://github.com/hersonls/django-queryset-join/blob/d702a8337ab5e84209ce44967ec99dfbca3f2ab2/src/querysetjoin/__init__.py#L66-L69
import re from operator import attrgetter from itertools import islice, chain from django.core.exceptions import FieldError ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$') REPR_OUTPUT_SIZE= 2 class QuerySetJoin(object): def __init__(self, *querysets): self.querysets = querysets self.order = {} def __repr__(self): data = list(self[:REPR_OUTPUT_SIZE + 1]) if len(data) > REPR_OUTPUT_SIZE: data[-1] = "...(remaining elements truncated)..." return repr(data) def __getitem__(self, ndx): if type(ndx) is slice: return list(islice(self._all(), ndx.start, ndx.stop, ndx.step or 1)) else: return islice(self._all(), ndx, ndx+1).next() def _all(self): return chain(*self.querysets) def _clone(self): return self.__class__(*self.querysets) def count(self): return sum(qs.count() for qs in self.querysets) def order_by(self, field): reverse = False if ORDER_PATTERN.match(field): if field[:1] == "-": reverse = True field = field[1:] else: raise FieldError("The pattern is not correctly") self.order = {"reverse": reverse, "key": attrgetter(field)} return sorted(self._all(), **self.order)
BSD 3-Clause New or Revised License
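A hedged usage sketch with two hypothetical Django models (Article and BlogPost, each with a created field); it only exercises the methods shown above and requires a configured Django project.

from querysetjoin import QuerySetJoin
from myapp.models import Article, BlogPost      # hypothetical models

combined = QuerySetJoin(Article.objects.all(), BlogPost.objects.all())
print(combined.count())                  # total number of objects across both querysets
newest = combined.order_by("-created")   # merged list, newest first
first_page = combined[:10]               # slicing is supported via islice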
taxjar/taxjar-python
taxjar/client.py
Client.rates_for_location
python
def rates_for_location(self, postal_code, location_deets=None): request = self._get("rates/" + postal_code, location_deets) return self.responder(request)
Shows the sales tax rates for a given location.
https://github.com/taxjar/taxjar-python/blob/a2c365e9a267f8dcff9b07c291ccfb95074af5c5/taxjar/client.py#L36-L39
import platform import ssl import string import sys import requests import taxjar from taxjar.response import TaxJarResponse from taxjar.exceptions import TaxJarConnectionError class Client(object): def __init__(self, api_key, api_url="", options=None, responder=TaxJarResponse.from_request): if options is None: options = {} self.api_key = api_key self.api_url = api_url if api_url else taxjar.DEFAULT_API_URL self.api_url += "/" + taxjar.API_VERSION + "/" self.headers = options.get('headers', {}) self.timeout = options.get('timeout', 5) self.responder = responder self.session = requests.Session() def set_api_config(self, key, value): if key == 'api_url': value += "/" + taxjar.API_VERSION + "/" setattr(self, key, value) def get_api_config(self, key): return getattr(self, key) def categories(self): request = self._get('categories') return self.responder(request)
MIT License
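A usage sketch of rates_for_location; the API key is a placeholder and the location details dict is optional. Client is the class defined above, and the call issues a GET to rates/<postal_code>.

import taxjar

client = taxjar.Client(api_key="YOUR_API_KEY")     # placeholder key
rates = client.rates_for_location("90002", {
    "city": "Los Angeles",
    "state": "CA",
    "country": "US",
})
print(rates)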
chxy95/deep-mutual-learning
trainer.py
Trainer.validate
python
def validate(self, epoch): losses = [] accs = [] for i in range(self.model_num): self.models[i].eval() losses.append(AverageMeter()) accs.append(AverageMeter()) for i, (images, labels) in enumerate(self.valid_loader): if self.use_gpu: images, labels = images.cuda(), labels.cuda() images, labels = Variable(images), Variable(labels) outputs=[] for model in self.models: outputs.append(model(images)) for i in range(self.model_num): ce_loss = self.loss_ce(outputs[i], labels) kl_loss = 0 for j in range(self.model_num): if i!=j: kl_loss += self.loss_kl(F.log_softmax(outputs[i], dim = 1), F.softmax(Variable(outputs[j]), dim=1)) loss = ce_loss + kl_loss / (self.model_num - 1) prec = accuracy(outputs[i].data, labels.data, topk=(1,))[0] losses[i].update(loss.item(), images.size()[0]) accs[i].update(prec.item(), images.size()[0]) if self.use_tensorboard: for i in range(self.model_num): log_value('valid_loss_%d' % (i+1), losses[i].avg, epoch+1) log_value('valid_acc_%d' % (i+1), accs[i].avg, epoch+1) return losses, accs
Evaluate the model on the validation set.
https://github.com/chxy95/deep-mutual-learning/blob/650e2767684ec481841581b2ff74bfd88fce1d12/trainer.py#L244-L284
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.functional as F

import os
import time
import shutil

from tqdm import tqdm
from utils import accuracy, AverageMeter
from resnet import resnet32
from tensorboard_logger import configure, log_value


class Trainer(object):

    def __init__(self, config, data_loader):
        self.config = config

        if config.is_train:
            self.train_loader = data_loader[0]
            self.valid_loader = data_loader[1]
            self.num_train = len(self.train_loader.dataset)
            self.num_valid = len(self.valid_loader.dataset)
        else:
            self.test_loader = data_loader
            self.num_test = len(self.test_loader.dataset)
        self.num_classes = config.num_classes

        self.epochs = config.epochs
        self.start_epoch = 0
        self.momentum = config.momentum
        self.lr = config.init_lr
        self.weight_decay = config.weight_decay
        self.nesterov = config.nesterov
        self.gamma = config.gamma
        self.use_gpu = config.use_gpu
        self.best = config.best
        self.ckpt_dir = config.ckpt_dir
        self.logs_dir = config.logs_dir
        self.counter = 0
        self.lr_patience = config.lr_patience
        self.train_patience = config.train_patience
        self.use_tensorboard = config.use_tensorboard
        self.resume = config.resume
        self.print_freq = config.print_freq
        self.model_name = config.save_name
        self.model_num = config.model_num
        self.models = []
        self.optimizers = []
        self.schedulers = []

        self.loss_kl = nn.KLDivLoss(reduction='batchmean')
        self.loss_ce = nn.CrossEntropyLoss()

        self.best_valid_accs = [0.] * self.model_num

        if self.use_tensorboard:
            tensorboard_dir = self.logs_dir + self.model_name
            print('[*] Saving tensorboard logs to {}'.format(tensorboard_dir))
            if not os.path.exists(tensorboard_dir):
                os.makedirs(tensorboard_dir)
            configure(tensorboard_dir)

        for i in range(self.model_num):
            model = resnet32()
            if self.use_gpu:
                model.cuda()
            self.models.append(model)

            optimizer = optim.SGD(model.parameters(), lr=self.lr,
                                  momentum=self.momentum,
                                  weight_decay=self.weight_decay,
                                  nesterov=self.nesterov)
            self.optimizers.append(optimizer)

            scheduler = optim.lr_scheduler.StepLR(self.optimizers[i],
                                                  step_size=60,
                                                  gamma=self.gamma,
                                                  last_epoch=-1)
            self.schedulers.append(scheduler)

        print('[*] Number of parameters of one model: {:,}'.format(
            sum([p.data.nelement() for p in self.models[0].parameters()])))

    def train(self):
        if self.resume:
            self.load_checkpoint(best=False)

        print("\n[*] Train on {} samples, validate on {} samples".format(
            self.num_train, self.num_valid)
        )

        for epoch in range(self.start_epoch, self.epochs):
            for scheduler in self.schedulers:
                scheduler.step(epoch)

            print(
                '\nEpoch: {}/{} - LR: {:.6f}'.format(
                    epoch + 1, self.epochs,
                    self.optimizers[0].param_groups[0]['lr'],)
            )

            train_losses, train_accs = self.train_one_epoch(epoch)
            valid_losses, valid_accs = self.validate(epoch)

            for i in range(self.model_num):
                is_best = valid_accs[i].avg > self.best_valid_accs[i]
                msg1 = "model_{:d}: train loss: {:.3f} - train acc: {:.3f} "
                msg2 = "- val loss: {:.3f} - val acc: {:.3f}"
                if is_best:
                    msg2 += " [*]"
                msg = msg1 + msg2
                print(msg.format(i + 1, train_losses[i].avg, train_accs[i].avg,
                                 valid_losses[i].avg, valid_accs[i].avg))

                self.best_valid_accs[i] = max(valid_accs[i].avg,
                                              self.best_valid_accs[i])
                self.save_checkpoint(i,
                                     {'epoch': epoch + 1,
                                      'model_state': self.models[i].state_dict(),
                                      'optim_state': self.optimizers[i].state_dict(),
                                      'best_valid_acc': self.best_valid_accs[i],
                                      }, is_best
                                     )

    def train_one_epoch(self, epoch):
        batch_time = AverageMeter()
        losses = []
        accs = []

        for i in range(self.model_num):
            self.models[i].train()
            losses.append(AverageMeter())
            accs.append(AverageMeter())

        tic = time.time()
        with tqdm(total=self.num_train) as pbar:
            for i, (images, labels) in enumerate(self.train_loader):
                if self.use_gpu:
                    images, labels = images.cuda(), labels.cuda()
                images, labels = Variable(images), Variable(labels)

                outputs = []
                for model in self.models:
                    outputs.append(model(images))

                for i in range(self.model_num):
                    ce_loss = self.loss_ce(outputs[i], labels)
                    kl_loss = 0
                    for j in range(self.model_num):
                        if i != j:
                            kl_loss += self.loss_kl(F.log_softmax(outputs[i], dim=1),
                                                    F.softmax(Variable(outputs[j]), dim=1))
                    loss = ce_loss + kl_loss / (self.model_num - 1)

                    prec = accuracy(outputs[i].data, labels.data, topk=(1,))[0]
                    losses[i].update(loss.item(), images.size()[0])
                    accs[i].update(prec.item(), images.size()[0])

                    self.optimizers[i].zero_grad()
                    loss.backward()
                    self.optimizers[i].step()

                toc = time.time()
                batch_time.update(toc - tic)

                pbar.set_description(
                    (
                        "{:.1f}s - model1_loss: {:.3f} - model1_acc: {:.3f}".format(
                            (toc - tic), losses[0].avg, accs[0].avg
                        )
                    )
                )

                self.batch_size = images.shape[0]
                pbar.update(self.batch_size)

                if self.use_tensorboard:
                    iteration = epoch * len(self.train_loader) + i
                    for i in range(self.model_num):
                        log_value('train_loss_%d' % (i + 1), losses[i].avg, iteration)
                        log_value('train_acc_%d' % (i + 1), accs[i].avg, iteration)

        return losses, accs
MIT License
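The Trainer above implements deep mutual learning: each peer network minimizes its own cross-entropy loss plus the KL divergence term against every other peer's predictions, summed and divided by (self.model_num - 1). A minimal sketch of that loss combination for two peers on one batch, with random logits standing in for the repository's resnet32 outputs (all values here are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

# Made-up logits for two peer models on a batch of 4 samples with 10 classes.
logits_a = torch.randn(4, 10, requires_grad=True)
logits_b = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))

loss_ce = nn.CrossEntropyLoss()
loss_kl = nn.KLDivLoss(reduction='batchmean')

# Peer A's objective: supervised CE plus the mimicry term D_KL(p_B || p_A),
# which is what KLDivLoss(log_softmax(A), softmax(B.detach())) computes.
# Matches loss = ce_loss + kl_loss / (self.model_num - 1) with model_num = 2.
ce_loss = loss_ce(logits_a, labels)
kl_loss = loss_kl(F.log_softmax(logits_a, dim=1),
                  F.softmax(logits_b.detach(), dim=1))
loss_a = ce_loss + kl_loss / (2 - 1)
loss_a.backward()
print(float(loss_a))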
openstack/masakari
masakari/api/urlmap.py
parse_list_header
python
def parse_list_header(value):
    result = []
    for item in request.parse_http_list(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result
Parse lists as described by RFC 2068 Section 2.

In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could contain
a comma. A non-quoted string could have quotes in the middle.
Quotes are removed automatically after parsing.

The return value is a standard :class:`list`:

>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']

:param value: a string with a list header.
:return: :class:`list`
https://github.com/openstack/masakari/blob/bb1c2ba6ee1b7cdb2f7cd4b22379303cec80f439/masakari/api/urlmap.py#L49-L70
import re

from oslo_log import log as logging
import paste.urlmap
from urllib import request

from masakari.api.openstack import wsgi


LOG = logging.getLogger(__name__)

_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(
    r';\s*([^\s;=]+|%s)\s*'
    r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re))


def unquote_header_value(value):
    if value and value[0] == value[-1] == '"':
        value = value[1:-1]
    return value
Apache License 2.0
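parse_list_header above leans on urllib's request.parse_http_list for the comma splitting (which respects quoted strings) and only strips the surrounding quotes itself via unquote_header_value. A short sketch of those two steps on a made-up header value:

from urllib import request


def unquote_header_value(value):
    # Mirrors the helper shown in the context above: drop one pair of quotes.
    if value and value[0] == value[-1] == '"':
        value = value[1:-1]
    return value


raw = 'token, "quoted value, with comma"'
items = request.parse_http_list(raw)
# -> ['token', '"quoted value, with comma"']; the quoted comma is not a separator.
cleaned = [unquote_header_value(item) for item in items]
print(cleaned)  # ['token', 'quoted value, with comma']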
cloud-bulldozer/browbeat
rally/rally-plugins/dynamic-workloads/provider_network.py
DynamicProviderNetworkBase._create_provider_network
python
def _create_provider_network(self, provider_phys_net):
    project_id = self.context["tenant"]["id"]
    body = {
        "name": self.generate_random_name(),
        "tenant_id": project_id,
        "provider:network_type": "vlan",
        "provider:physical_network": provider_phys_net
    }
    return self.admin_clients("neutron").create_network({"network": body})
Create neutron provider network.

:param provider_phys_net: provider physical network
:returns: neutron network dict
https://github.com/cloud-bulldozer/browbeat/blob/96aff26d80b404f447649d9102e93fb8a376881f/rally/rally-plugins/dynamic-workloads/provider_network.py#L25-L38
import random
import os
import subprocess

from rally_openstack.scenarios.neutron import utils as neutron_utils
import dynamic_utils
from rally.task import atomic


class DynamicProviderNetworkBase(dynamic_utils.NovaUtils, neutron_utils.NeutronScenario):

    @atomic.action_timer("neutron.create_network")
Apache License 2.0
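Since _create_provider_network only assembles a Neutron network-create payload and hands it to the admin client, the shape of that payload is the main thing to know. A sketch of the body it would send, with a made-up project id and physical network name and no OpenStack client involved:

# Stand-ins for self.context["tenant"]["id"] and the provider_phys_net argument.
project_id = "f2b1c3d4e5f6478a9b0c1d2e3f405162"
provider_phys_net = "datacentre"

body = {
    "name": "s_rally_example_net",  # normally produced by generate_random_name()
    "tenant_id": project_id,
    "provider:network_type": "vlan",
    "provider:physical_network": provider_phys_net,
}

# The scenario would call self.admin_clients("neutron").create_network(...)
# with exactly this wrapper dict.
print({"network": body})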
cgatoxford/cgatpipelines
CGATPipelines/PipelineMappingQC.py
buildPicardRnaSeqMetrics
python
def buildPicardRnaSeqMetrics(infiles, strand, outfile):
    job_memory = PICARD_MEMORY
    picard_opts = '-Xmx%(job_memory)s -XX:+UseParNewGC -XX:+UseConcMarkSweepGC' % locals()
    job_threads = 3
    infile, genome = infiles

    if BamTools.getNumReads(infile) == 0:
        E.warn("no reads in %s - no metrics" % infile)
        P.touch(outfile)
        return

    statement = '''picard %(picard_opts)s CollectRnaSeqMetrics
    REF_FLAT=%(genome)s
    INPUT=%(infile)s
    ASSUME_SORTED=true
    OUTPUT=%(outfile)s
    STRAND=%(strand)s
    VALIDATION_STRINGENCY=SILENT
    '''

    P.run()
run picard:RNASeqMetrics

Arguments
---------
infiles : string
    Input filename in :term:`BAM` format.
    Genome file in refflat format
    (http://genome.ucsc.edu/goldenPath/gbdDescriptionsOld.html#RefFlat)
outfile : string
    Output filename with picard output.
https://github.com/cgatoxford/cgatpipelines/blob/a34d460b5fc64984f6da0acb18aee43c5e02d5fc/CGATPipelines/PipelineMappingQC.py#L658-L691
import CGAT.Experiment as E import os import re import pandas import CGAT.IOTools as IOTools import CGAT.BamTools as BamTools import CGATPipelines.Pipeline as P PICARD_MEMORY = "9G" def getNumReadsFromReadsFile(infile): with IOTools.openFile(infile) as inf: line = inf.readline() if not line.startswith("nreads"): raise ValueError( "parsing error in file '%s': " "expected first line to start with 'nreads'") nreads = int(line[:-1].split("\t")[1]) return nreads def buildPicardInsertSizeStats(infile, outfile, genome_file): job_memory = PICARD_MEMORY picard_opts = '-Xmx%(job_memory)s -XX:+UseParNewGC -XX:+UseConcMarkSweepGC' % locals() job_threads = 3 if BamTools.getNumReads(infile) == 0: E.warn("no reads in %s - no metrics" % infile) P.touch(outfile) return statement = '''picard %(picard_opts)s CollectInsertSizeMetrics INPUT=%(infile)s REFERENCE_SEQUENCE=%(genome_file)s ASSUME_SORTED=true OUTPUT=%(outfile)s VALIDATION_STRINGENCY=SILENT >& %(outfile)s''' P.run() def buildPicardAlignmentStats(infile, outfile, genome_file): job_memory = PICARD_MEMORY picard_opts = '-Xmx%(job_memory)s -XX:+UseParNewGC -XX:+UseConcMarkSweepGC' % locals() job_threads = 3 if BamTools.getNumReads(infile) == 0: E.warn("no reads in %s - no metrics" % infile) P.touch(outfile) return statement = '''cat %(infile)s | cgat bam2bam -v 0 --method=set-sequence --output-sam | picard %(picard_opts)s CollectMultipleMetrics INPUT=/dev/stdin REFERENCE_SEQUENCE=%(genome_file)s ASSUME_SORTED=true OUTPUT=%(outfile)s VALIDATION_STRINGENCY=SILENT >& %(outfile)s''' P.run() def buildPicardDuplicationStats(infile, outfile): job_memory = PICARD_MEMORY picard_opts = '-Xmx%(job_memory)s -XX:+UseParNewGC -XX:+UseConcMarkSweepGC' % locals() job_threads = 3 if BamTools.getNumReads(infile) == 0: E.warn("no reads in %s - no metrics" % infile) P.touch(outfile) return if ".gsnap.bam" in infile: tmpf = P.getTempFile(".") tmpfile_name = tmpf.name statement = '''samtools view -h %(infile)s | awk "!/\\tXT:/" | samtools view /dev/stdin -S -b > %(tmpfile_name)s; ''' % locals() data_source = tmpfile_name else: statement = "" data_source = infile statement += '''picard %(picard_opts)s MarkDuplicates INPUT=%(data_source)s ASSUME_SORTED=true METRICS_FILE=%(outfile)s OUTPUT=/dev/null VALIDATION_STRINGENCY=SILENT ''' P.run() if ".gsnap.bam" in infile: os.unlink(tmpfile_name) def buildPicardDuplicateStats(infile, outfile): job_memory = PICARD_MEMORY picard_opts = '-Xmx%(job_memory)s -XX:+UseParNewGC -XX:+UseConcMarkSweepGC' % locals() job_threads = 3 if BamTools.getNumReads(infile) == 0: E.warn("no reads in %s - no metrics" % infile) P.touch(outfile) return statement = '''picard %(picard_opts)s MarkDuplicates INPUT=%(infile)s ASSUME_SORTED=true METRICS_FILE=%(outfile)s.duplicate_metrics OUTPUT=%(outfile)s VALIDATION_STRINGENCY=SILENT; ''' statement += '''samtools index %(outfile)s ;''' P.run() def buildPicardCoverageStats(infile, outfile, baits, regions): job_memory = PICARD_MEMORY picard_opts = '-Xmx%(job_memory)s -XX:+UseParNewGC -XX:+UseConcMarkSweepGC' % locals() job_threads = 3 if BamTools.getNumReads(infile) == 0: E.warn("no reads in %s - no metrics" % infile) P.touch(outfile) return statement = '''picard %(picard_opts)s CollectHsMetrics BAIT_INTERVALS=%(baits)s TARGET_INTERVALS=%(regions)s INPUT=%(infile)s OUTPUT=%(outfile)s VALIDATION_STRINGENCY=LENIENT''' % locals() P.run() def buildPicardGCStats(infile, outfile, genome_file): job_memory = PICARD_MEMORY picard_opts = '-Xmx%(job_memory)s -XX:+UseParNewGC -XX:+UseConcMarkSweepGC' % locals() job_threads = 3 
if BamTools.getNumReads(infile) == 0: E.warn("no reads in %s - no metrics" % infile) P.touch(outfile) return statement = '''picard %(picard_opts) CollectGcBiasMetrics INPUT=%(infile)s REFERENCE_SEQUENCE=%(genome_file)s OUTPUT=%(outfile)s VALIDATION_STRINGENCY=SILENT CHART_OUTPUT=%(outfile)s.pdf SUMMARY_OUTPUT=%(outfile)s.summary >& %(outfile)s''' P.run() def loadPicardMetrics(infiles, outfile, suffix, pipeline_suffix=".picard_stats", tablename=None): if not tablename: tablename = "%s_%s" % (P.toTable(outfile), suffix) outf = P.getTempFile(".") filenames = ["%s.%s" % (x, suffix) for x in infiles] first = True for filename in filenames: track = P.snip(os.path.basename(filename), "%s.%s" % (pipeline_suffix, suffix)) if not os.path.exists(filename): E.warn("File %s missing" % filename) continue lines = IOTools.openFile(filename, "r").readlines() rx_start = re.compile("## METRICS CLASS") for n, line in enumerate(lines): if rx_start.search(line): lines = lines[n + 1:] break for n, line in enumerate(lines): if not line.strip(): lines = lines[:n] break if len(lines) == 0: E.warn("no lines in %s: %s" % (track, filename)) continue if first: outf.write("%s\t%s" % ("track", lines[0])) fields = lines[0][:-1].split("\t") else: f = lines[0][:-1].split("\t") if f != fields: raise ValueError( "file %s has different fields: expected %s, got %s" % (filename, fields, f)) first = False for i in range(1, len(lines)): outf.write("%s\t%s" % (track, lines[i])) outf.close() P.load(outf.name, outfile, tablename=tablename, options="--add-index=track --allow-empty-file") os.unlink(outf.name) def loadPicardHistogram(infiles, outfile, suffix, column, pipeline_suffix=".picard_stats", tablename=False): if not tablename: tablename = "%s_%s" % (P.toTable(outfile), suffix) tablename = tablename.replace("_metrics", "_histogram") xfiles = [x for x in infiles if os.path.exists("%s.%s" % (x, suffix))] if len(xfiles) == 0: E.warn("no files for %s" % tablename) return header = ",".join([P.snip(os.path.basename(x), pipeline_suffix) for x in xfiles]) filenames = " ".join(["%s.%s" % (x, suffix) for x in xfiles]) load_statement = P.build_load_statement( tablename, options="--add-index=track " " --header-names=%s,%s" " --allow-empty-file" " --replace-header" % (column, header)) statement = """cgat combine_tables --regex-start="## HISTOGRAM" --missing-value=0 --take=2 %(filenames)s | %(load_statement)s >> %(outfile)s """ to_cluster = False P.run() def loadPicardAlignmentStats(infiles, outfile): loadPicardMetrics(infiles, outfile, "alignment_summary_metrics") loadPicardMetrics(infiles, outfile, "insert_size_metrics") histograms = (("quality_by_cycle_metrics", "cycle"), ("quality_distribution_metrics", "quality"), ("insert_size_metrics", "insert_size")) for suffix, column in histograms: loadPicardHistogram(infiles, outfile, suffix, column) def loadPicardDuplicationStats(infiles, outfiles): outfile_metrics, outfile_histogram = outfiles suffix = "picard_duplication_metrics" infile_names = [x[:-len("." 
+ suffix)] for x in infiles] loadPicardMetrics(infile_names, outfile_metrics, suffix, "", tablename="picard_duplication_metrics") infiles_with_histograms = [] for infile in infile_names: with_hist = False with open(".".join([infile, suffix]), "r") as open_infile: for line in open_infile: if line.startswith("## HISTOGRAM"): infiles_with_histograms.append(infile) break if len(infiles_with_histograms) > 0: loadPicardHistogram(infiles_with_histograms, outfile_histogram, suffix, "coverage_multiple", "", tablename="picard_complexity_histogram") else: with open(outfile_histogram, "w") as ofh: ofh.write("No histograms detected, no data loaded.") def loadPicardDuplicateStats(infiles, outfile, pipeline_suffix=".bam"): loadPicardMetrics( infiles, outfile, "duplicate_metrics", pipeline_suffix=pipeline_suffix) loadPicardHistogram(infiles, outfile, "duplicate_metrics", "duplicates", pipeline_suffix=pipeline_suffix) def loadPicardCoverageStats(infiles, outfile): outf = P.getTempFile(".") first = True for f in infiles: track = P.snip(os.path.basename(f), ".cov") lines = [x for x in open(f, "r").readlines() if not x.startswith("#") and x.strip()] if first: outf.write("%s\t%s" % ("track", lines[0])) first = False outf.write("%s\t%s" % (track, lines[1])) outf.close() P.load(outf.name, outfile, options="--ignore-empty --add-index=track") os.unlink(outf.name) def buildBAMStats(infile, outfile): statement = '''cgat bam2stats --force-output --output-filename-pattern=%(outfile)s.%%s < %(infile)s > %(outfile)s''' P.run() def loadBAMStats(infiles, outfile): header = ",".join([P.snip(os.path.basename(x), ".readstats") for x in infiles]) filenames = " ".join(["<( cut -f 1,2 < %s)" % x for x in infiles]) tablename = P.toTable(outfile) load_statement = P.build_load_statement( tablename, options="--add-index=track " " --allow-empty-file") E.info("loading bam stats - summary") statement = """cgat combine_tables --header-names=%(header)s --missing-value=0 --ignore-empty %(filenames)s | perl -p -e "s/bin/track/" | cgat table2table --transpose | %(load_statement)s > %(outfile)s""" to_cluster = False P.run() for suffix in ("nm", "nh"): E.info("loading bam stats - %s" % suffix) filenames = " ".join(["%s.%s" % (x, suffix) for x in infiles]) load_statement = P.build_load_statement( "%s_%s" % (tablename, suffix), options="--allow-empty-file") statement = """cgat combine_tables --header-names=%(header)s --skip-titles --missing-value=0 --ignore-empty %(filenames)s | perl -p -e "s/bin/%(suffix)s/" | %(load_statement)s >> %(outfile)s """ to_cluster = False P.run() for suffix in ("mapq",): E.info("loading bam stats - %s" % suffix) filenames = " ".join(["%s.%s" % (x, suffix) for x in infiles]) load_statement = P.build_load_statement( "%s_%s" % (tablename, suffix), options=" --allow-empty-file") statement = """cgat combine_tables --header-names=%(header)s --skip-titles --missing-value=0 --ignore-empty --take=3 %(filenames)s | perl -p -e "s/bin/%(suffix)s/" | %(load_statement)s >> %(outfile)s """ to_cluster = False P.run()
MIT License
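buildPicardRnaSeqMetrics writes the picard invocation as a template and relies on the CGAT pipeline's P.run() to substitute the %(name)s placeholders from the caller's local variables. A rough sketch of that substitution using plain %-formatting, with made-up file names instead of a real BAM/ref_flat pair:

# Stand-ins for the locals the pipeline function would define.
picard_opts = "-Xmx9G -XX:+UseParNewGC -XX:+UseConcMarkSweepGC"
genome = "geneset.ref_flat"
infile = "sample1.bam"
outfile = "sample1.picard_rna_metrics"
strand = "NONE"

statement = '''picard %(picard_opts)s CollectRnaSeqMetrics
REF_FLAT=%(genome)s
INPUT=%(infile)s
ASSUME_SORTED=true
OUTPUT=%(outfile)s
STRAND=%(strand)s
VALIDATION_STRINGENCY=SILENT'''

# P.run() would submit the expanded command to the cluster; printing it here
# just shows the command line that results from the placeholder substitution.
print(statement % locals())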
neuralmagic/sparseml
src/sparseml/pytorch/optim/analyzer_pruning.py
ModulePruningAnalyzer.tag
python
def tag(self) -> str:
    return "{}.{}".format(self.name, self.param_name)
:return: combines the layer name and param name into a single string separated by a period
https://github.com/neuralmagic/sparseml/blob/e2dcb66bad713542158dfe54cba113a0cc02ed39/src/sparseml/pytorch/optim/analyzer_pruning.py#L93-L98
from typing import List, Tuple, Union

from torch import Tensor
from torch.nn import Module, Parameter

from sparseml.pytorch.utils import tensor_sparsity


__all__ = ["ModulePruningAnalyzer"]


class ModulePruningAnalyzer(object):

    @staticmethod
    def analyze_layers(module: Module, layers: List[str], param_name: str = "weight"):
        analyzed = []

        for layer_name in layers:
            mod = module
            lays = layer_name.split(".")

            for lay in lays:
                mod = mod.__getattr__(lay)

            analyzed.append(ModulePruningAnalyzer(mod, layer_name, param_name))

        return analyzed

    def __init__(self, module: Module, name: str, param_name: str = "weight"):
        self._module = module
        self._name = name
        self._param_name = param_name
        self._param = self._module.__getattr__(self._param_name)

    @property
    def module(self) -> Module:
        return self._module

    @property
    def name(self) -> str:
        return self._name

    @property
    def param_name(self) -> str:
        return self._param_name

    @property
Apache License 2.0
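tag is what ties a sparsity measurement back to a specific parameter, so the typical pattern is to build one analyzer per layer with analyze_layers and key results by analyzer.tag. A short usage sketch, assuming torch and sparseml are installed; the tiny two-layer module is purely illustrative:

import torch.nn as nn
from sparseml.pytorch.optim.analyzer_pruning import ModulePruningAnalyzer


class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(8, 16)
        self.fc2 = nn.Linear(16, 4)


model = TinyNet()

# One analyzer per named layer, each tracking that layer's "weight" parameter.
analyzers = ModulePruningAnalyzer.analyze_layers(model, ["fc1", "fc2"], param_name="weight")

for analyzer in analyzers:
    print(analyzer.tag)  # "fc1.weight", then "fc2.weight"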
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/providers/aws/aws_dynamodb.py
AwsDynamoDBInstance.SetThroughput
python
def SetThroughput(self,
                  rcu: Optional[int] = None,
                  wcu: Optional[int] = None) -> None:
    if not rcu:
        rcu = self.rcu
    if not wcu:
        wcu = self.wcu
    cmd = util.AWS_PREFIX + [
        'dynamodb', 'update-table',
        '--table-name', self.table_name,
        '--region', self.region,
        '--provisioned-throughput',
        f'ReadCapacityUnits={rcu},WriteCapacityUnits={wcu}',
    ]
    logging.info('Setting %s table provisioned throughput to %s rcu and %s wcu',
                 self.table_name, rcu, wcu)
    util.IssueRetryableCommand(cmd)
    while not self._IsReady():
        continue
Updates the table's rcu and wcu.
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/providers/aws/aws_dynamodb.py#L274-L293
import json import logging from typing import Any, Dict, Optional, Tuple, Sequence from absl import flags from perfkitbenchmarker import errors from perfkitbenchmarker import non_relational_db from perfkitbenchmarker import vm_util from perfkitbenchmarker.providers.aws import util FLAGS = flags.FLAGS flags.DEFINE_string('aws_dynamodb_primarykey', 'primary_key', 'The primaryKey of dynamodb table.' 'This switches to sortkey if using sort.' 'If testing GSI/LSI, use the range keyname' 'of the index you want to test') flags.DEFINE_boolean('aws_dynamodb_use_sort', False, 'determine whether to use sort key or not') flags.DEFINE_string('aws_dynamodb_sortkey', 'sort_key', 'The sortkey of dynamodb table. ' 'This switches to primarykey if using sort.' 'If testing GSI/LSI, use the primary keyname' 'of the index you want to test') flags.DEFINE_enum('aws_dynamodb_attributetype', 'S', ['S', 'N', 'B'], 'The type of attribute, default to S (String).' 'Alternates are N (Number) and B (Binary).') flags.DEFINE_integer('aws_dynamodb_read_capacity', '5', 'Set RCU for dynamodb table') flags.DEFINE_integer('aws_dynamodb_write_capacity', '5', 'Set WCU for dynamodb table') flags.DEFINE_integer('aws_dynamodb_lsi_count', 0, 'Set amount of Local Secondary Indexes. Only set 0-5') flags.register_validator('aws_dynamodb_lsi_count', lambda value: -1 < value < 6, message='--count must be from 0-5') flags.register_validator('aws_dynamodb_use_sort', lambda sort: sort or not FLAGS.aws_dynamodb_lsi_count, message='--aws_dynamodb_lsi_count requires sort key.') flags.DEFINE_integer('aws_dynamodb_gsi_count', 0, 'Set amount of Global Secondary Indexes. Only set 0-5') flags.register_validator('aws_dynamodb_gsi_count', lambda value: -1 < value < 6, message='--count must be from 0-5') flags.DEFINE_boolean('aws_dynamodb_ycsb_consistentReads', False, "Consistent reads cost 2x eventual reads. " "'false' is default which is eventual") flags.DEFINE_integer('aws_dynamodb_connectMax', 50, 'Maximum number of concurrent dynamodb connections. 
' 'Defaults to 50.') _FREE_TIER_RCU = 25 _FREE_TIER_WCU = 25 class _GetIndexes(): def __init__(self): self.lsi_count = FLAGS.aws_dynamodb_lsi_count self.gsi_count = FLAGS.aws_dynamodb_gsi_count def CreateLocalSecondaryIndex(self): lsi_items = [] lsi_entry = [] attr_list = [] for lsi in range(0, self.lsi_count): lsi_item = ('{{"IndexName": "lsiidx{0}",' '"KeySchema": [{{' '"AttributeName": "{1}",' '"KeyType": "HASH"}},{{' '"AttributeName": "lattr{2}",' '"KeyType": "RANGE"}}],' '"Projection": {{' '"ProjectionType": "KEYS_ONLY"}}}}'.format( str(lsi), FLAGS.aws_dynamodb_primarykey, str(lsi))) lsi_entry.append(lsi_item) attr_list.append('{{"AttributeName": "lattr{0}","AttributeType": "{1}"}}' .format(str(lsi), FLAGS.aws_dynamodb_attributetype)) lsi_items.append('[' + ','.join(lsi_entry) + ']') lsi_items.append(','.join(attr_list)) return lsi_items def CreateGlobalSecondaryIndex(self): gsi_items = [] gsi_entry = [] attr_list = [] for gsi in range(0, self.gsi_count): gsi_item = ('{{"IndexName": "gsiidx{0}",' '"KeySchema": [{{' '"AttributeName": "gsikey{1}",' '"KeyType": "HASH"}},{{' '"AttributeName": "gattr{2}",' '"KeyType": "RANGE"}}],' '"Projection": {{' '"ProjectionType": "KEYS_ONLY"}},' '"ProvisionedThroughput": {{' '"ReadCapacityUnits": {3},' '"WriteCapacityUnits": {4}}}}}'.format(str(gsi), str(gsi), str(gsi), 5, 5)) gsi_entry.append(gsi_item) attr_list.append('{{"AttributeName": "gattr{0}","AttributeType": "{1}"}}' .format(str(gsi), FLAGS.aws_dynamodb_attributetype)) attr_list.append('{{"AttributeName": "gsikey{0}","AttributeType": "{1}"}}' .format(str(gsi), FLAGS.aws_dynamodb_attributetype)) gsi_items.append('[' + ','.join(gsi_entry) + ']') gsi_items.append(','.join(attr_list)) return gsi_items class AwsDynamoDBInstance(non_relational_db.BaseNonRelationalDb): SERVICE_TYPE = non_relational_db.DYNAMODB def __init__(self, table_name, **kwargs): super(AwsDynamoDBInstance, self).__init__(**kwargs) self.zone = FLAGS.zones[0] if FLAGS.zones else FLAGS.zone[0] self.region = util.GetRegionFromZone(self.zone) self.primary_key = ('{{\"AttributeName\": \"{0}\",\"KeyType\": \"HASH\"}}' .format(FLAGS.aws_dynamodb_primarykey)) self.sort_key = ('{{\"AttributeName\": \"{0}\",\"KeyType\": \"RANGE\"}}' .format(FLAGS.aws_dynamodb_sortkey)) self.part_attributes = ('{{\"AttributeName\": \"{0}\",' '\"AttributeType\": \"{1}\"}}' .format(FLAGS.aws_dynamodb_primarykey, FLAGS.aws_dynamodb_attributetype)) self.sort_attributes = ('{{\"AttributeName\": \"{0}\",' '\"AttributeType\": \"{1}\"}}' .format(FLAGS.aws_dynamodb_sortkey, FLAGS.aws_dynamodb_attributetype)) self.table_name = table_name self.rcu = FLAGS.aws_dynamodb_read_capacity self.wcu = FLAGS.aws_dynamodb_write_capacity self.throughput = 'ReadCapacityUnits={read},WriteCapacityUnits={write}'.format( read=self.rcu, write=self.wcu) self.lsi_indexes = _GetIndexes().CreateLocalSecondaryIndex() self.gsi_indexes = _GetIndexes().CreateGlobalSecondaryIndex() self.resource_arn: str = None def _Create(self): cmd = util.AWS_PREFIX + [ 'dynamodb', 'create-table', '--region', self.region, '--table-name', self.table_name, '--attribute-definitions', self.part_attributes, '--key-schema', self.primary_key, '--provisioned-throughput', self.throughput, '--tags'] + util.MakeFormattedDefaultTags() if FLAGS.aws_dynamodb_lsi_count > 0 and FLAGS.aws_dynamodb_use_sort: cmd[10] = ( '[' + self.part_attributes + ', ' + self.sort_attributes + ', ' + self.lsi_indexes[1] + ']') logging.info('adding to --attribute definitions') cmd.append('--local-secondary-indexes') 
cmd.append(self.lsi_indexes[0]) cmd[12] = ('[' + self.primary_key + ', ' + self.sort_key + ']') logging.info('adding to --key-schema') elif FLAGS.aws_dynamodb_use_sort: cmd[10] = ('[' + self.part_attributes + ', ' + self.sort_attributes + ']') logging.info('adding to --attribute definitions') cmd[12] = ('[' + self.primary_key + ', ' + self.sort_key + ']') logging.info('adding to --key-schema') if FLAGS.aws_dynamodb_gsi_count > 0: cmd[10] = cmd[10][:-1] cmd[10] += (', ' + self.gsi_indexes[1] + ']') logging.info('adding to --attribute definitions') cmd.append('--global-secondary-indexes') cmd.append(self.gsi_indexes[0]) _, stderror, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False) if retcode != 0: logging.warning('Failed to create table! %s', stderror) def _Delete(self): cmd = util.AWS_PREFIX + [ 'dynamodb', 'delete-table', '--region', self.region, '--table-name', self.table_name] logging.info('Attempting deletion: ') vm_util.IssueCommand(cmd, raise_on_failure=False) def _IsReady(self): logging.info('Getting table ready status for %s', self.table_name) cmd = util.AWS_PREFIX + [ 'dynamodb', 'describe-table', '--region', self.region, '--table-name', self.table_name] stdout, _, _ = vm_util.IssueCommand(cmd) result = json.loads(stdout) return result['Table']['TableStatus'] == 'ACTIVE' def _Exists(self) -> bool: logging.info('Checking if table %s exists', self.table_name) result = self._DescribeTable() if not result: return False if not self.resource_arn: self.resource_arn = result['TableArn'] return True def _DescribeTable(self) -> Dict[Any, Any]: cmd = util.AWS_PREFIX + [ 'dynamodb', 'describe-table', '--region', self.region, '--table-name', self.table_name] stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False) if retcode != 0: logging.info('Could not find table %s, %s', self.table_name, stderr) return {} return json.loads(stdout)['Table'] def GetEndPoint(self): ddbep = 'http://dynamodb.{0}.amazonaws.com'.format(self.region) return ddbep def GetResourceMetadata(self): return { 'aws_dynamodb_primarykey': FLAGS.aws_dynamodb_primarykey, 'aws_dynamodb_use_sort': FLAGS.aws_dynamodb_use_sort, 'aws_dynamodb_sortkey': FLAGS.aws_dynamodb_sortkey, 'aws_dynamodb_attributetype': FLAGS.aws_dynamodb_attributetype, 'aws_dynamodb_read_capacity': FLAGS.aws_dynamodb_read_capacity, 'aws_dynamodb_write_capacity': FLAGS.aws_dynamodb_write_capacity, 'aws_dynamodb_lsi_count': FLAGS.aws_dynamodb_lsi_count, 'aws_dynamodb_gsi_count': FLAGS.aws_dynamodb_gsi_count, 'aws_dynamodb_consistentReads': FLAGS.aws_dynamodb_ycsb_consistentReads, 'aws_dynamodb_connectMax': FLAGS.aws_dynamodb_connectMax, }
Apache License 2.0
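SetThroughput only assembles an aws dynamodb update-table call and then polls _IsReady until the table reports ACTIVE again, so its effect is easiest to see as the command list it builds. A sketch of that list with made-up table name, region, and capacity values; aws_prefix below is a stand-in for util.AWS_PREFIX, and nothing is actually sent to AWS:

# Hypothetical stand-ins; the benchmarker derives these from its flags and zone.
aws_prefix = ["aws", "--output", "json"]
table_name = "pkb-dynamodb-example"
region = "us-east-1"
rcu, wcu = 25, 25

cmd = aws_prefix + [
    "dynamodb", "update-table",
    "--table-name", table_name,
    "--region", region,
    "--provisioned-throughput",
    f"ReadCapacityUnits={rcu},WriteCapacityUnits={wcu}",
]

# util.IssueRetryableCommand(cmd) would run this; joining it shows the CLI call.
print(" ".join(cmd))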