Dataset columns:
    repository_name      stringlengths 7 to 107
    function_path        stringlengths 4 to 190
    function_identifier  stringlengths 1 to 236
    language             stringclasses, 1 value
    function             stringlengths 9 to 647k
    docstring            stringlengths 5 to 488k
    function_url         stringlengths 71 to 285
    context              stringlengths 0 to 2.51M
    license              stringclasses, 5 values
nccgroup/depthcharge
python/depthcharge/stratagem.py
Stratagem.from_json_file
python
def from_json_file(cls, filename: str):
    with open(filename, "r") as infile:
        data = infile.read()
    return cls.from_json(data)
Create a Stratagem object from the contents of the specified JSON file.
https://github.com/nccgroup/depthcharge/blob/9b66d1c2a80b9398ac561c83173ebd748aef018d/python/depthcharge/stratagem.py#L220-L226
import json from copy import copy from datetime import datetime from .operation import Operation from .version import __version__ class StratagemRequired(TypeError): def __init__(self, name): msg = name + ' requires a Stratagem in order to perform the requested operation' super(StratagemRequired, self).__init__(msg) class StratagemNotRequired(Exception): class StratagemCreationFailed(Exception): class Stratagem: def __init__(self, op_class, capacity: int = -1, **kwargs): self._op = op_class .__name__ self._spec = op_class.get_stratagem_spec() if capacity > 0: self._list = capacity * [None] else: self._list = [] self.comment = kwargs.get('comment', '') self.timestamp = kwargs.get('timestamp', datetime.now().isoformat()) def entries(self): for entry in self._list: yield copy(entry) def __getitem__(self, index): return copy(self._list[index]) def _process_entry(self, entry: dict): for key in entry: expected_type = self._spec[key] if expected_type is int and isinstance(entry[key], str): entry[key] = int(entry[key], 0) else: entry[key] = expected_type(entry[key]) return entry def __setitem__(self, index, entry): self._process_entry(entry) self._list[index] = entry @property def operation_name(self) -> str: return self._op def append(self, entry: dict = None, **kwargs): if isinstance(entry, list): for e in entry: self.append(e) return if entry is None: entry = {} else: entry = copy(entry) for key in kwargs: entry[key] = kwargs[key] self._list.append(self._process_entry(entry)) def __len__(self): return len(self._list) @property def total_operations(self): count = 0 for entry in self._list: try: count += entry['iterations'] except KeyError: count += 1 return count def __str__(self): return self.to_json(indent=4) @classmethod def from_json(cls, json_str: str): tmp = json.loads(json_str) try: op_name = tmp['operation'] op_class = Operation.get_implementation(op_name) except KeyError: raise ValueError('Invalid Operation name encountered: ' + op_name) key_spec = op_class.get_stratagem_spec() if key_spec is None: raise StratagemNotRequired(op_name + ' does not require the use of Stratagem objects') stratagem = Stratagem(op_class, comment=tmp.get('comment', ''), timestamp=tmp.get('timestamp', '')) for entry in tmp['entries']: stratagem.append(entry) return stratagem @classmethod
BSD 3-Clause New or Revised License
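A minimal usage sketch of the classmethod above, assuming the depthcharge package is installed and that the JSON file (hypothetical name) names a registered Operation implementation:

from depthcharge.stratagem import Stratagem

# 'payload.stratagem.json' is a hypothetical file; from_json_file() reads it
# and delegates to Stratagem.from_json(), which looks up the Operation by name.
stratagem = Stratagem.from_json_file('payload.stratagem.json')
print(stratagem.operation_name, len(stratagem), stratagem.total_operations)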
jarryshaw/pypcapkit
pcapkit/protocols/protocol.py
Protocol.length
python
def length(self):
Header length of current protocol.

:rtype: int
https://github.com/jarryshaw/pypcapkit/blob/cfa778f606a111b2dc6eb57d1af054ba2689b578/pcapkit/protocols/protocol.py#L103-L107
import abc import collections import contextlib import enum import functools import importlib import io import numbers import os import re import shutil import string import struct import textwrap import urllib import aenum import chardet from pcapkit.corekit.infoclass import Info from pcapkit.corekit.protochain import ProtoChain from pcapkit.utilities.compat import cached_property from pcapkit.utilities.decorators import beholder, seekset from pcapkit.utilities.exceptions import (ProtocolNotFound, ProtocolNotImplemented, ProtocolUnbound, StructError) from pcapkit.utilities.logging import logger __all__ = ['Protocol'] readable = [ord(char) for char in filter(lambda char: not char.isspace(), string.printable)] class Protocol(metaclass=abc.ABCMeta): __layer__ = None __proto__ = collections.defaultdict(lambda: ('pcapkit.protocols.raw', 'Raw')) @property @abc.abstractmethod def name(self): @property def alias(self): return self.__class__.__name__ @property def info(self): return self._info @property def data(self): return self._data @property @abc.abstractmethod
BSD 3-Clause New or Revised License
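Because ``length`` is declared as an abstract property, a hedged sketch (hypothetical subclass for illustration, not part of pypcapkit) of how a concrete protocol could satisfy it:

class UDP(Protocol):            # assumes the Protocol base class from the context above
    @property
    def name(self):             # the other abstract property shown in the context
        return 'User Datagram Protocol'

    @property
    def length(self):
        return 8                # a UDP header is always 8 bytes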
openwisp/openwisp-users
openwisp_users/admin.py
RequiredInlineFormSet._construct_form
python
def _construct_form(self, i, **kwargs):
    form = super()._construct_form(i, **kwargs)
    form.empty_permitted = self.instance.is_superuser
    return form
Override the method to change the form attribute empty_permitted
https://github.com/openwisp/openwisp-users/blob/6c083027ed7bb467351f8f05ac201fa60d9bdc24/openwisp_users/admin.py#L74-L81
import logging from copy import deepcopy from allauth import app_settings as allauth_settings from allauth.account.models import EmailAddress from django import forms from django.apps import apps from django.conf import settings from django.contrib import admin, messages from django.contrib.admin.actions import delete_selected from django.contrib.admin.sites import NotRegistered from django.contrib.admin.utils import model_ngettext from django.contrib.auth import get_user_model from django.contrib.auth.admin import GroupAdmin as BaseGroupAdmin from django.contrib.auth.admin import UserAdmin as BaseUserAdmin from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm from django.core.exceptions import ValidationError from django.forms.models import BaseInlineFormSet from django.http import HttpResponseRedirect from django.template.response import TemplateResponse from django.urls import reverse from django.utils.translation import ngettext from django.utils.translation import ugettext_lazy as _ from openwisp_utils.admin import UUIDAdmin from organizations.base_admin import ( BaseOrganizationAdmin, BaseOrganizationOwnerAdmin, BaseOrganizationUserAdmin, ) from organizations.exceptions import OwnershipRequired from phonenumber_field.formfields import PhoneNumberField from swapper import load_model from . import settings as app_settings from .multitenancy import MultitenantAdminMixin from .utils import BaseAdmin Group = load_model('openwisp_users', 'Group') Organization = load_model('openwisp_users', 'Organization') OrganizationOwner = load_model('openwisp_users', 'OrganizationOwner') OrganizationUser = load_model('openwisp_users', 'OrganizationUser') User = get_user_model() logger = logging.getLogger(__name__) class EmailAddressInline(admin.StackedInline): model = EmailAddress extra = 0 readonly_fields = ['email'] def has_add_permission(self, *args, **kwargs): return False def has_change_permission(self, request, obj=None): if user_not_allowed_to_change_owner(request.user, obj): self.can_delete = False return False return super().has_change_permission(request, obj) class RequiredInlineFormSet(BaseInlineFormSet):
BSD 3-Clause New or Revised License
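A hedged sketch of how such a formset is typically attached to a Django admin inline (hypothetical inline class; the actual openwisp-users inlines may differ):

from django.contrib import admin

class OrganizationUserInline(admin.StackedInline):    # hypothetical example inline
    model = OrganizationUser          # loaded via swapper in the context above
    formset = RequiredInlineFormSet   # empty forms permitted only for superusers
    extra = 1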
onicagroup/runway
runway/env_mgr/tfenv.py
TFEnvManager.parse_version_string
python
def parse_version_string(cls, version: str) -> VersionTuple:
    match = re.search(cls.VERSION_REGEX, version)
    if not match:
        raise ValueError(
            f"provided version doesn't conform to regex: {cls.VERSION_REGEX}"
        )
    return VersionTuple(
        major=int(match.group("major")),
        minor=int(match.group("minor")),
        patch=int(match.group("patch")),
        prerelease=match.group("prerelease") or None,
        prerelease_number=int(match.group("prerelease_number") or 0) or None,
    )
Parse version string into a :class:`VersionTuple`.

Args:
    version: Version string to parse. Must be in the format of
        ``<major>.<minor>.<patch>`` with an optional ``-<prerelease>``.
https://github.com/onicagroup/runway/blob/6e0d543512325a92265c140f386c163f6be410b4/runway/env_mgr/tfenv.py#L448-L467
from __future__ import annotations import hashlib import json import logging import os import platform import re import shutil import subprocess import sys import tempfile import zipfile from distutils.version import LooseVersion from typing import ( TYPE_CHECKING, Any, Dict, Generator, List, NamedTuple, Optional, Union, cast, overload, ) from urllib.error import URLError from urllib.request import urlretrieve import hcl import hcl2 import requests from typing_extensions import Final from ..compat import cached_property from ..exceptions import HclParserError from ..utils import FileHash, get_hash_for_filename, merge_dicts from . import EnvManager, handle_bin_download_error if TYPE_CHECKING: from pathlib import Path from types import ModuleType from .._logging import RunwayLogger LOGGER = cast("RunwayLogger", logging.getLogger(__name__)) TF_VERSION_FILENAME = ".terraform-version" def download_tf_release( version: str, versions_dir: Path, command_suffix: str, tf_platform: Optional[str] = None, arch: Optional[str] = None, ) -> None: version_dir = versions_dir / version if arch is None: arch = os.getenv("TFENV_ARCH", "amd64") if tf_platform: tfver_os = tf_platform + "_" + arch else: if platform.system().startswith("Darwin"): tfver_os = f"darwin_{arch}" elif platform.system().startswith("Windows") or ( platform.system().startswith("MINGW64") or ( platform.system().startswith("MSYS_NT") or platform.system().startswith("CYGWIN_NT") ) ): tfver_os = f"windows_{arch}" else: tfver_os = f"linux_{arch}" download_dir = tempfile.mkdtemp() filename = f"terraform_{version}_{tfver_os}.zip" shasums_name = f"terraform_{version}_SHA256SUMS" tf_url = "https://releases.hashicorp.com/terraform/" + version try: LOGGER.verbose("downloading Terraform from %s...", tf_url) for i in [filename, shasums_name]: urlretrieve(tf_url + "/" + i, os.path.join(download_dir, i)) except URLError as exc: handle_bin_download_error(exc, "Terraform") tf_hash = get_hash_for_filename(filename, os.path.join(download_dir, shasums_name)) checksum = FileHash(hashlib.sha256()) checksum.add_file(os.path.join(download_dir, filename)) if tf_hash != checksum.hexdigest: LOGGER.error( "downloaded Terraform %s does not match sha256 %s", filename, tf_hash ) sys.exit(1) with zipfile.ZipFile(os.path.join(download_dir, filename)) as tf_zipfile: version_dir.mkdir(parents=True, exist_ok=True) tf_zipfile.extractall(str(version_dir)) shutil.rmtree(download_dir) result = version_dir / ("terraform" + command_suffix) result.chmod(result.stat().st_mode | 0o0111) def get_available_tf_versions(include_prerelease: bool = False) -> List[str]: tf_releases = json.loads( requests.get("https://releases.hashicorp.com/index.json").text )["terraform"] tf_versions = sorted( [k for k, _v in tf_releases["versions"].items()], key=LooseVersion, reverse=True, ) if include_prerelease: return [i for i in tf_versions if i] return [i for i in tf_versions if i and "-" not in i] def get_latest_tf_version(include_prerelease: bool = False) -> str: return get_available_tf_versions(include_prerelease)[0] def load_terraform_module(parser: ModuleType, path: Path) -> Dict[str, Any]: result: Dict[str, Any] = {} LOGGER.debug("using %s parser to load module: %s", parser.__name__.upper(), path) for tf_file in path.glob("*.tf"): try: tf_config = parser.loads(tf_file.read_text()) result = merge_dicts(result, cast(Dict[str, Any], tf_config)) except Exception as exc: raise HclParserError(exc, tf_file, parser) from None return result class VersionTuple(NamedTuple): major: int minor: int patch: int 
prerelease: Optional[str] = None prerelease_number: Optional[int] = None def __str__(self) -> str: result = f"{self.major}.{self.minor}.{self.patch}" if self.prerelease: result += f"-{self.prerelease}" if self.prerelease_number: result += str(self.prerelease_number) return result class TFEnvManager(EnvManager): VERSION_REGEX: Final[str] = ( r"^(?P<major>[0-9]*)\.(?P<minor>[0-9]*)\.(?P<patch>[0-9]*)" r"(\-(?P<prerelease>alpha|beta|oci|rc)(?P<prerelease_number>[0-9]*)?)?" ) VERSION_OUTPUT_REGEX: Final[ str ] = r"^Terraform v(?P<version>[0-9]*\.[0-9]*\.[0-9]*)(?P<suffix>-.*)?" def __init__(self, path: Optional[Path] = None) -> None: super().__init__("terraform", "tfenv", path) @cached_property def backend(self) -> Dict[str, Any]: return [ {"type": k, "config": v} for k, v in self.terraform_block.get( "backend", {None: cast(Dict[str, str], {})} ).items() ][0] @cached_property def terraform_block(self) -> Dict[str, Any]: @overload def _flatten_lists(data: Dict[str, Any]) -> Dict[str, Any]: ... @overload def _flatten_lists(data: List[Any]) -> List[Any]: ... @overload def _flatten_lists(data: str) -> str: ... def _flatten_lists( data: Union[Dict[str, Any], List[Any], Any] ) -> Union[Dict[str, Any], Any]: if not isinstance(data, dict): return data copy_data = cast(Dict[str, Any], data.copy()) for attr, val in copy_data.items(): if isinstance(val, list): if len(cast(List[Any], val)) == 1: data[attr] = _flatten_lists(cast(Any, val[0])) else: data[attr] = [_flatten_lists(v) for v in cast(List[Any], val)] elif isinstance(val, dict): data[attr] = _flatten_lists(cast(Dict[str, Any], val)) return data try: result: Union[Dict[str, Any], List[Dict[str, Any]]] = load_terraform_module( hcl2, self.path ).get("terraform", cast(Dict[str, Any], {})) except HclParserError as exc: LOGGER.warning(exc) LOGGER.warning("failed to parse as HCL2; trying HCL...") try: result = load_terraform_module(hcl, self.path).get( "terraform", cast(Dict[str, Any], {}) ) except HclParserError as exc: LOGGER.warning(exc) result = {} if isinstance(result, list): return _flatten_lists({k: v for i in result for k, v in i.items()}) return _flatten_lists(result) @cached_property def version(self) -> Optional[VersionTuple]: version_requested = self.current_version or self.get_version_from_file() if not version_requested: return None if re.match(r"^min-required$", version_requested): LOGGER.debug("tfenv: detecting minimal required version") version_requested = self.get_min_required() if re.match(r"^latest:.*$", version_requested): regex = re.search(r"latest:(.*)", version_requested).group( 1 ) include_prerelease_versions = False elif re.match(r"^latest$", version_requested): regex = r"^[0-9]+\.[0-9]+\.[0-9]+$" include_prerelease_versions = False else: regex = f"^{version_requested}$" include_prerelease_versions = True if (self.versions_dir / version_requested).is_dir(): self.current_version = version_requested return self.parse_version_string(self.current_version) try: version = next( i for i in get_available_tf_versions(include_prerelease_versions) if re.match(regex, i) ) except StopIteration: LOGGER.error("unable to find a Terraform version matching regex: %s", regex) sys.exit(1) self.current_version = version return self.parse_version_string(self.current_version) @cached_property def version_file(self) -> Optional[Path]: for path in [self.path, self.path.parent]: test_path = path / TF_VERSION_FILENAME if test_path.is_file(): LOGGER.debug("using version file: %s", test_path) return test_path return None def get_min_required(self) -> str: 
version = self.terraform_block.get("required_version") if version: if re.match(r"^!=.+", version): LOGGER.error( "min required Terraform version is a negation (%s) " "- unable to determine required version", version, ) sys.exit(1) else: version = re.search(r"[0-9]*\.[0-9]*(?:\.[0-9]*)?", version) if version: LOGGER.debug("detected minimum Terraform version is %s", version) return version.group(0) LOGGER.error( "Terraform version specified as min-required, but unable to " "find a specified version requirement in this module's tf files" ) sys.exit(1) def get_version_from_file(self, file_path: Optional[Path] = None) -> Optional[str]: file_path = file_path or self.version_file if file_path and file_path.is_file(): return file_path.read_text().strip() LOGGER.debug("file path not provided and version file could not be found") return None def install(self, version_requested: Optional[str] = None) -> str: if version_requested: self.set_version(version_requested) if not self.version: raise ValueError( f"version not provided and unable to find a {TF_VERSION_FILENAME} file" ) if (self.versions_dir / str(self.version)).is_dir(): LOGGER.verbose( "Terraform version %s already installed; using it...", self.version ) return str(self.bin) LOGGER.info("downloading and using Terraform version %s ...", self.version) download_tf_release(str(self.version), self.versions_dir, self.command_suffix) LOGGER.verbose("downloaded Terraform %s successfully", self.version) return str(self.bin) def list_installed(self) -> Generator[Path, None, None]: LOGGER.verbose("checking %s for Terraform versions...", self.versions_dir) return self.versions_dir.rglob("*.*.*") def set_version(self, version: str) -> None: if self.current_version == version: return self.current_version = version try: del self.version except Exception: pass @classmethod def get_version_from_executable( cls, bin_path: Union[Path, str], *, cwd: Optional[Union[Path, str]] = None, env: Optional[Dict[str, str]] = None, ) -> Optional[VersionTuple]: output = subprocess.check_output( [str(bin_path), "-version"], cwd=cwd, env=env ).decode() match = re.search(cls.VERSION_OUTPUT_REGEX, output) if not match: return None return cls.parse_version_string(match.group("version")) @classmethod
Apache License 2.0
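The method is a thin wrapper around VERSION_REGEX; a self-contained check of that same pattern (copied from the context above) on a sample prerelease string:

import re

VERSION_REGEX = (
    r"^(?P<major>[0-9]*)\.(?P<minor>[0-9]*)\.(?P<patch>[0-9]*)"
    r"(\-(?P<prerelease>alpha|beta|oci|rc)(?P<prerelease_number>[0-9]*)?)?"
)

match = re.search(VERSION_REGEX, "1.3.4-rc1")
print(match.group("major"), match.group("minor"), match.group("patch"),
      match.group("prerelease"), match.group("prerelease_number"))
# prints: 1 3 4 rc 1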
krahets/codingsolution
leetcode_python/28. Implement strStr().py
Solution.strStr
python
def strStr(self, haystack, needle):
    if not needle:
        return 0
    l_h, l_n = len(haystack), len(needle)
    for i in range(l_h + 1):
        for j in range(l_n + 1):
            if j == l_n:
                return i
            if i + j == l_h:
                return -1
            if haystack[i+j] != needle[j]:
                break
:type haystack: str
:type needle: str
:rtype: int
https://github.com/krahets/codingsolution/blob/16f57a541292631ad77368caad948eba10e24660/leetcode_python/28. Implement strStr().py#L2-L14
class Solution(object):
MIT License
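Quick sanity checks of Solution.strStr above (assumes the Solution class from this record is in scope):

sol = Solution()
print(sol.strStr("hello", "ll"))   # 2  - "ll" starts at index 2
print(sol.strStr("aaaaa", "bba"))  # -1 - needle never occurs
print(sol.strStr("abc", ""))       # 0  - empty needle matches at index 0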
app-registry/appr
appr/api/impl/builder.py
show_file
python
def show_file(package, filepath, version_query=None, endpoint=DEFAULT_ENDPOINT):
    k = Kub(package, version=version_query, endpoint=endpoint)
    return k.package.file(filepath)
Returns the content of any file inside a package.
Useful to navigate and inspect a package from a web-browser

Args:
    package (:obj:`str`): package name in the format `namespace/name` or `domain.com/name`
    filepath (:obj:`str`): filepath relative to the package, eg: `templates/svc.yaml`
    version_query (:obj:`str`): a version query, eg: ">=1.5,<2.0"
    endpoint (:obj:`str`): the appr-registry server

Returns:
    :obj:`str`: the file content

See Also:
    * :obj:`appr.api.builder.show_file`
https://github.com/app-registry/appr/blob/37ff9a487a54ad41b59855ecd76ee092fe206a84/appr/api/impl/builder.py#L51-L70
from appr.formats.appr.kpm import Kpm as Kub

DEFAULT_ENDPOINT = "http://localhost:5000"


def build(package, version_query=None, namespace="default", variables={}, shards=None,
          endpoint=DEFAULT_ENDPOINT):
    variables['namespace'] = namespace
    k = Kub(package, endpoint=endpoint, variables=variables, namespace=namespace,
            version=version_query, shards=shards)
    return k
Apache License 2.0
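A hedged usage sketch (hypothetical package name; assumes the appr package is installed and an appr-registry is reachable at the default endpoint):

from appr.api.impl.builder import show_file

content = show_file('myorg/myapp',            # hypothetical namespace/name
                    'templates/svc.yaml',
                    version_query='>=1.5,<2.0')
print(content)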
mindspore-ai/mindinsight
mindinsight/mindconverter/graph_based_converter/third_party_graph/base.py
GraphNode._get_raw_params
python
def _get_raw_params(self, node):
Get params in onnx.
https://github.com/mindspore-ai/mindinsight/blob/253a210719dbb1e55b826f2e489322f402d66676/mindinsight/mindconverter/graph_based_converter/third_party_graph/base.py#L503-L504
import abc from collections import OrderedDict from typing import List from mindinsight.mindconverter.common.log import logger as log from mindinsight.mindconverter.graph_based_converter.constant import InputType from mindinsight.mindconverter.common.exceptions import NodeInputTypeNotSupportError class GraphParser(metaclass=abc.ABCMeta): @classmethod @abc.abstractmethod def parse(cls, model_path: str, **kwargs): class BaseGraph(metaclass=abc.ABCMeta): _REQUIRED_PARAM_OF_MODEL = "model" @abc.abstractmethod def build(self): @abc.abstractmethod def sub_graph_merging(self): @staticmethod @abc.abstractmethod def load_checkpoint(ckpt_path: str) -> dict: @staticmethod @abc.abstractmethod def load_metadata(**kwargs): @staticmethod @abc.abstractmethod def load_graph(graph_path: str, **kwargs): @classmethod @abc.abstractmethod def load(cls, model_path: str, **kwargs): def __new__(cls, *args, **kwargs): model_param = args[0] if args else kwargs.get( cls._REQUIRED_PARAM_OF_MODEL) if not model_param: error = ValueError(f"`{cls._REQUIRED_PARAM_OF_MODEL}` " f"can not be None.") log.error(str(error)) log.exception(error) raise error return super(BaseGraph, cls).__new__(cls) class Graph(BaseGraph, abc.ABC): sorted = False def __init__(self, model, model_path, **kwargs): super(Graph, self).__init__() self.model = model self.model_path = model_path self._raw_input_nodes = kwargs.get("input_nodes") self._raw_output_nodes = kwargs.get("output_nodes") self._nodes_collection = OrderedDict() self._nodes_record = dict() self._shape_dict = dict() self._input_nodes = [] self._output_nodes = [] self._topological_order = [] self._input_shape = dict() self._is_multi_opt_graph = False @property def user_provided_input_nodes(self) -> List[str]: return list(self._raw_input_nodes.keys()) def get_input_shape(self, name): return self._input_shape.get(name) def get_output_shape(self, name): return self._shape_dict.get(name) def get_input_shape_from_input(self, name): return self._input_shape.get(name) @property def nodes_in_topological_order(self): if not self.sorted: self._topological_sort() return self._topological_order def _reset_topological_order(self): self._topological_order = self._input_nodes[:] self.sorted = False def get_node(self, node_name): prefix = node_name if prefix not in self._nodes_collection: return None return self._nodes_collection[prefix] def build(self): self._collect_ipt_and_opt_nodes() self._topological_sort() def _collect_ipt_and_opt_nodes(self): for name, node in self._nodes_collection.items(): if node.in_degree == 0: self._input_nodes.append(name) if node.out_degree == 0: self._output_nodes.append(name) def _topological_sort(self): self._reset_topological_order() def is_connected(src, dst): for precursor in dst.precursor_nodes: if src == precursor: return 1 return 0 idx = 0 while idx < len(self._topological_order): cur_node_name = self._topological_order[idx] cur_node = self.get_node(cur_node_name) for scsr_name in cur_node.successor_nodes: scsr_node = self.get_node(scsr_name) scsr_node.cur_in_degree -= is_connected(cur_node_name, scsr_node) if scsr_node.cur_in_degree == 0: self._topological_order.append(scsr_name) idx += 1 self.sorted = True def sub_graph_merging(self): raise NotImplementedError @staticmethod def load_checkpoint(ckpt_path: str) -> dict: raise NotImplementedError @staticmethod def load_metadata(**kwargs): raise NotImplementedError @staticmethod def load_graph(graph_path: str, **kwargs): raise NotImplementedError @classmethod def load(cls, model_path: str, **kwargs) -> BaseGraph: 
src_graph = cls.load_graph(graph_path=model_path, **kwargs) return cls(src_graph, model_path, **kwargs) class GraphNode(abc.ABC): transformed = False def __init__(self, node): self.precursor_nodes = [] self.successor_nodes = [] self._deleted_in_edge = 0 self._src_node = node if node else None self._op_name = None self._op_params = dict() self._scope_name = None self._op_shape = None self._node_type = None self._args_in_code = dict() self._hash_key = None self._ipt_shape = None self._opt_shape = None self._weight = None self._ipt_var_names = list() self._opt_var_names = list() self._is_in_multi_opt_graph = False @property def ir_node_name(self): return self._src_node.name @property def ir_node_operation(self): return self._src_node.op_type @property def ir_node_inputs(self): return list(self._src_node.input_name_list) @property def ir_node_outputs(self): return list(self._src_node.output_name_list) @property def ir_node_precursor(self): return [ v.name for _, v in self._src_node.precursor_onnx_node_dict.items() ] @property def ir_node_successor(self): return [ v.name for _, v in self._src_node.successor_onnx_node_dict.items() ] @property def weight(self): return self._weight @property def ipt_var_names(self): return self._ipt_var_names @ipt_var_names.setter def ipt_var_names(self, var_names): self._ipt_var_names = var_names @property def opt_var_names(self): return self._opt_var_names @opt_var_names.setter def opt_var_names(self, var_names): self._opt_var_names = var_names @staticmethod def get_opt_var_name(variable_name): return f"{variable_name}_opt" @property def args_in_code(self): return self._args_in_code @args_in_code.setter def args_in_code(self, args): self._args_in_code = args @property def input_shape(self): return self._ipt_shape @property def output_shape(self): return self._opt_shape def is_empty(self): return not self._src_node @property def node_type(self): return self._node_type @node_type.setter def node_type(self, m): self._node_type = m @property def scope_name(self): return self._scope_name @scope_name.setter def scope_name(self, name): self._scope_name = name @property def node_params(self): return self._op_params @property def cur_in_degree(self): return self.in_degree - self._deleted_in_edge @cur_in_degree.setter def cur_in_degree(self, e): self._deleted_in_edge += self.cur_in_degree - e @property def in_degree(self): return len(self.precursor_nodes) @property def out_degree(self): return len(self.successor_nodes) @property @abc.abstractmethod def hash_key(self): @abc.abstractmethod
Apache License 2.0
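Graph._topological_sort in the context above is a Kahn-style sweep over node in-degrees; a minimal self-contained sketch of the same idea on a toy adjacency dict (not mindinsight's node types):

from collections import defaultdict, deque

def topological_order(successors):
    # successors: {node_name: [successor names]}
    in_degree = defaultdict(int)
    for node, nexts in successors.items():
        in_degree.setdefault(node, 0)
        for nxt in nexts:
            in_degree[nxt] += 1
    queue = deque(n for n, d in in_degree.items() if d == 0)  # graph input nodes
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for nxt in successors.get(node, ()):
            in_degree[nxt] -= 1
            if in_degree[nxt] == 0:
                queue.append(nxt)
    return order

print(topological_order({'conv1': ['bn1'], 'bn1': ['relu1'], 'relu1': []}))
# prints: ['conv1', 'bn1', 'relu1']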
fzh0917/stmtrack
videoanalyst/data/sampler/sampler_base.py
SamplerBase.__getitem__
python
def __getitem__(self, item) -> Dict:
r""" An interface to sample data
https://github.com/fzh0917/stmtrack/blob/61730c19ec0eaea393fa3119b8d71023e1173d5b/videoanalyst/data/sampler/sampler_base.py#L83-L86
from abc import ABCMeta from typing import Dict, List import numpy as np from loguru import logger from videoanalyst.utils import Registry from ..dataset.dataset_base import DatasetBase TRACK_SAMPLERS = Registry('TRACK_SAMPLERS') VOS_SAMPLERS = Registry('VOS_SAMPLERS') TASK_SAMPLERS = dict( track=TRACK_SAMPLERS, vos=VOS_SAMPLERS, ) class SamplerBase: __metaclass__ = ABCMeta default_hyper_params = dict() def __init__(self, datasets: List[DatasetBase] = [], seed: int = 0) -> None: self._hyper_params = self.default_hyper_params self._state = dict() self._state["rng"] = np.random.RandomState(seed) self.datasets = datasets for d in datasets: dataset_name = type(d).__name__ logger.info("Sampler's underlying datasets: {}, length {}".format( dataset_name, len(d))) def get_hps(self) -> Dict: return self._hyper_params def set_hps(self, hps: Dict) -> None: for key in hps: if key not in self._hyper_params: raise KeyError self._hyper_params[key] = hps[key] def update_params(self) -> None:
BSD 3-Clause New or Revised License
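A hedged sketch of a concrete sampler implementing the __getitem__ interface above (toy subclass; real videoanalyst samplers are registered through TRACK_SAMPLERS/VOS_SAMPLERS):

class RandomDatasetSampler(SamplerBase):        # hypothetical subclass
    def __getitem__(self, item) -> Dict:
        rng = self._state["rng"]
        dataset = self.datasets[rng.randint(len(self.datasets))]
        # a real sampler would draw an annotated training pair from `dataset` here
        return dict(dataset=type(dataset).__name__, index=item)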
neurodata/ndstore
ndramon/batchAnnotation.py
BatchAnnotation.update
python
def update(self, cursor):
    self.updateBase(ANNO_ANNOTATION, cursor)
Set type and update base class.
https://github.com/neurodata/ndstore/blob/32ca33626ae54ee73c4309033b3172dd4da6bfea/ndramon/batchAnnotation.py#L136-L139
import numpy as np import cStringIO import MySQLdb import sys from collections import defaultdict from ndwserror import NDWSError import logging logger=logging.getLogger("neurodata") ANNO_ANNOTATION = 1 ANNO_SYNAPSE = 2 ANNO_SEED = 3 ANNO_SEGMENT = 4 ANNO_NEURON = 5 ANNO_ORGANELLE = 6 anno_dbtables = { 'annotation':'annotations', 'kvpairs':'kvpairs', 'synapse':'synapses', 'segment':'segments', 'synseg':'synseg', 'organelle':'organelles', 'seed':'seeds' } class BatchAnnotation: def __init__ ( self, annodb ): self.annodb = annodb self.annidList = [] self.statusList = [0] self.confidenceList = [0.0] self.authorList = [""] self.kvpairs = defaultdict(list) def setID ( self, db ): if len(self.annidList) == 0: self.annidList.append( db.nextID() ) else: db.setBatchID( self.annidList ) def getField ( self, field ): if field == 'status': return self.statusList elif field == 'confidence': return self.confidenceList elif field == 'author': return self.authorList elif self.kvpairs.get(field): return self.kvpairs[field] else: logger.warning ( "getField: No such field %s" % (field)) raise NDWSError ( "getField: No such field %s" % (field)) def setField ( self, field, value ): if field == 'status': self.statusList.append( value ) elif field == 'confidence': self.confidenceList.append( value ) elif field == 'author': self.authorList = ( value ) else: self.kvpairs[field]=value def store ( self, cursor, annotype=ANNO_ANNOTATION ): sql = "INSERT INTO {} VALUES ( %s, %s, %s, %s )".format( anno_dbtables['annotation'] ) data = zip( self.annidList, len(self.annidList)*[annotype] , self.confidenceList, self.statusList ) try: cursor.executemany (sql, data) except MySQLdb.Error, e: logger.warning ( "Error inserting annotation %d: %s. sql=%s" % (e.args[0], e.args[1], sql)) raise NDWSError ( "Error inserting annotation: %d: %s. sql=%s" % (e.args[0], e.args[1], sql)) if self.author != "": self.kvpairs['ann_author'] = self.author if len(self.kvpairs) != 0: try: kvclause = ','.join(['(' + str(self.annid) +',\'' + k + '\',\'' + v +'\')' for (k,v) in self.kvpairs.iteritems()]) except: raise NDWSError ( "Improperly formatted key/value csv string:" + kvclause ) sql = "INSERT INTO %s VALUES %s" % ( anno_dbtables['kvpairs'], kvclause ) try: cursor.execute(sql) except MySQLdb.Error, e: logger.warning ( "Error inserting kvpairs %d: %s. sql=%s" % (e.args[0], e.args[1], sql)) raise NDWSError ( "Error inserting kvpairs: %d: %s. sql=%s" % (e.args[0], e.args[1], sql))
Apache License 2.0
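A hedged, Python 2-style usage sketch matching the module above (placeholders: `annodb` is an open annotation-database wrapper and `conn` an open MySQLdb connection; neither is defined here):

anno = BatchAnnotation(annodb)          # annodb: placeholder annotation-DB handle
anno.setField('author', 'example-user')
cursor = conn.cursor()                  # conn: placeholder open MySQLdb connection
anno.update(cursor)                     # delegates to updateBase(ANNO_ANNOTATION, cursor)
conn.commit()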
ibm/power-up
scripts/python/lib/utilities.py
get_file_path
python
def get_file_path(filename='/home'):
    print(bold('\nFile search hints:'))
    print('/home/user1/abc.* Search for abc.* under home/user1/')
    print('/home/user1/**/abc.* Search recursively for abc.* under '
          '/home/user1/')
    print('/home/user1/myfile[56].2 Search for myfile5.2 or myfile6.2 under '
          '/home/user1/')
    print('/home/user1/*/ List directories under /home/user1')
    print()
    maxl = 10
    while True:
        print("Enter a file name to search for ('L' to leave without making a "
              "selction): ")
        filename = rlinput(bold("File: "), filename)
        print()
        if filename == 'L' or filename == "'L'":
            return None
        files = glob(filename, recursive=True)
        if files:
            print(bold(f'Found {len(files)} matching'))
            if len(files) > maxl:
                print(f'\nSearch returned more than {maxl} items. Showing '
                      f'first {maxl}')
                files = files[:40]
            choices = [str(i + 1) for i in range(len(files))]
            choices.append('S')
            choices.append('L')
            files.append('Search again')
            files.append('Leave without selecting')
            ch, item = get_selection(files, choices)
            print()
            if item is not None and os.path.isfile(item):
                print(f'\n{item}')
                if get_yesno("Confirm selection: ", default='y'):
                    return item
                else:
                    item = 'Search again'
            elif item == 'Leave without selecting':
                return None
            if item != 'Search again':
                filename = item
Interactive search and selection of a file path.

Returns:
    path to file or None
https://github.com/ibm/power-up/blob/53a1db7e86726cf6d1783afaf083c9b7dcabdef8/scripts/python/lib/utilities.py#L974-L1018
from glob import glob import os import re import sys import datetime import subprocess import fileinput import readline from shutil import copy2, copyfile from subprocess import Popen, PIPE from netaddr import IPNetwork, IPAddress, IPSet from tabulate import tabulate from textwrap import dedent import hashlib from distro import linux_distribution from lib.config import Config import lib.logger as logger from lib.exception import UserException PATTERN_DHCP = r"^\|_*\s+(.+):(.+)" PATTERN_MAC = r'([\da-fA-F]{2}:){5}[\da-fA-F]{2}' PATTERN_IP = (r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}' r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$') PATTERN_EMBEDDED_IP = (r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}' r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)') CalledProcessError = subprocess.CalledProcessError LOG = logger.getlogger() DHCP_SERVER_CMD = "sudo nmap --script broadcast-dhcp-discover -e {0}" def parse_dhcp_servers(nmap_response): matches = re.findall(PATTERN_DHCP, nmap_response, re.MULTILINE) data = {a: b.strip() for a, b in matches} return data def get_dhcp_servers(interface): cmd = DHCP_SERVER_CMD.format(interface) output = "" data = None try: output = bash_cmd(cmd) except Exception as e: LOG.error("{0}".format(e)) raise e else: data = parse_dhcp_servers(output) return data def has_dhcp_servers(interface): try: dct = get_dhcp_servers(interface) return 'DHCPOFFER' in dct['DHCP Message Type'] except: pass return False def scan_subnet(cidr): cmd = f'sudo nmap -sn {cidr}' res, err, rc = sub_proc_exec(cmd) items = [] if rc != 0: LOG.error(f'Error while scanning subnet {cidr}, rc: {rc}') for line in res.split('Nmap scan report'): match = re.search(PATTERN_EMBEDDED_IP, line) if match: ip = match.group(0) match2 = re.search(PATTERN_MAC, line) if match2: mac = match2.group(0) else: mac = '' items += [(ip, mac)] return items def scan_subnet_for_port_open(cidr, port): if isinstance(cidr, list): cidr = ' '.join(cidr) cmd = f'sudo nmap -p {port} {cidr}' res, err, rc = sub_proc_exec(cmd) items = [] if rc != 0: LOG.error(f'Error while scanning subnet {cidr}, rc: {rc}') for line in res.split('Nmap scan report'): match = re.search(PATTERN_EMBEDDED_IP, line) if match: ip = match.group(0) match2 = re.search(r'\d+/tcp\s+open.+' + rf'({PATTERN_MAC})', line, re.DOTALL) if match2: mac = match2.group(1) if match2: items += [(ip, mac)] return items def is_ipaddr(ip): if re.search(PATTERN_IP, ip): return True def is_netmask(mask): from netaddr import AddrFormatError try: if IPAddress(mask).is_netmask(): res = True else: res = False except AddrFormatError: res = False return res def get_network_addr(ipaddr, prefix): return str(IPNetwork(f'{ipaddr}/{prefix}').network) def get_netmask(prefix): return str(IPNetwork(f'0.0.0.0/{prefix}').netmask) def get_prefix(netmask): return IPAddress(netmask).netmask_bits() def get_network_size(cidr): return IPNetwork(cidr).size def add_offset_to_address(addr, offset): addr_ = IPNetwork(addr) addr_.value += offset return str(addr_.ip) def is_overlapping_addr(subnet1, subnet2): if IPSet([subnet1]).intersection(IPSet([subnet2])): return True else: return False def bash_cmd(cmd): log = logger.getlogger() _cmd = ['bash', '-c', cmd] log.debug('Run subprocess: %s' % ' '.join(_cmd)) output = subprocess.check_output(_cmd, universal_newlines=True, stderr=subprocess.STDOUT) try: output = output.decode('utf-8') except AttributeError: pass log.debug(output) return output def backup_file(path, suffix='.orig', multi=True): log = logger.getlogger() backup_path = path + suffix version = 0 
while os.path.exists(backup_path) and multi: version += 1 backup_path += "." + str(version) log.debug('Make backup copy of orignal file: \'%s\'' % backup_path) copy2(path, backup_path) os.chmod(backup_path, 0o444) def append_line(path, line, check_exists=True): log = logger.getlogger() log.debug('Add line \'%s\' to file \'%s\'' % (line, path)) if not line.endswith('\n'): line += '\n' exists = False if check_exists: with open(path, 'r') as file_in: for read_line in file_in: if read_line == line: exists = True if not exists: with open(path, 'a') as file_out: file_out.write(line) def remove_line(path, regex): log = logger.getlogger() log.debug('Remove lines containing regex \'%s\' from file \'%s\'' % (regex, path)) for line in fileinput.input(path, inplace=1): if not re.match(regex, line): print(line, end='') def line_in_file(path, regex, replace, backup=None): if os.path.isfile(path): if backup: backup_file(path, multi=False) try: with open(path, 'r') as f: data = f.read() except FileNotFoundError as exc: print(f'File not found: {path}. Err: {exc}') else: data = data.splitlines() in_file = False with open(path, 'r+') as f: for line in data: in_line = re.search(regex, line) if in_line: line = re.sub(regex, replace, line) in_file = True f.write(line + '\n') if not in_file: f.write(replace + '\n') def replace_regex(path, regex, replace): log = logger.getlogger() log.debug('Replace regex \'%s\' with \'%s\' in file \'%s\'' % (regex, replace, path)) for line in fileinput.input(path, inplace=1): print(re.sub(regex, replace, line), end='') def copy_file(source, dest, metadata=True): log = logger.getlogger() log.debug(f'Copy file, source:{source} dest:{dest} metadata:{metadata}') if metadata: copy2(source, dest) else: if os.path.isdir(dest): basename = os.path.basename(source) dest = os.path.join(dest, basename) copyfile(source, dest) def sub_proc_launch(cmd, stdout=PIPE, stderr=PIPE): log = logger.getlogger() log.debug(f"sub_proc_launch cmd='{cmd}' stdout='{stdout}' stderr='{stderr}'") proc = Popen(cmd.split(), stdout=stdout, stderr=stderr) return proc def sub_proc_exec(cmd, stdout=PIPE, stderr=PIPE, shell=False, env=None): log = logger.getlogger() log.debug(f"sub_proc_exec cmd='{cmd}' stdout='{stdout}' stderr='{stderr}' " f"shell='{shell}' env='{env}'") if not shell: cmd = cmd.split() proc = Popen(cmd, stdout=stdout, stderr=stderr, shell=shell, env=env) stdout, stderr = proc.communicate() try: stdout = stdout.decode('utf-8') except AttributeError: pass try: stderr = stderr.decode('utf-8') except AttributeError: pass log.debug(f"sub_proc_exec stdout='{stdout}' stderr='{stderr}' " f"rc='{proc.returncode}'") return stdout, stderr, proc.returncode def sub_proc_display(cmd, stdout=None, stderr=None, shell=False, env=None): log = logger.getlogger() log.debug(f"sub_proc_display cmd='{cmd}' stdout='{stdout}' " f"stderr='{stderr}' shell='{shell}' env='{env}'") if not shell: cmd = cmd.split() proc = Popen(cmd, stdout=stdout, stderr=stderr, shell=shell, env=env) proc.wait() rc = proc.returncode log.debug(f"sub_proc_display rc='{rc}'") return rc def sub_proc_wait(proc): log = logger.getlogger() cnt = 0 rc = None while rc is None: rc = proc.poll() print('\rwaiting for process to finish. Time elapsed: {:2}:{:2}:{:2}'. 
format(cnt // 3600, cnt % 3600 // 60, cnt % 60), end="") sys.stdout.flush() cnt += 1 print('\n') resp, err = proc.communicate() log.debug(f"results: resp='{resp}' err='{err}' rc='{rc}'") print(resp) return rc class Color: black = '\033[90m' red = '\033[91m' green = '\033[92m' yellow = '\033[33m' brt_yellow = '\033[93m' blue = '\033[94m' purple = '\033[95m' cyan = '\033[96m' white = '\033[37m' brt_white = '\033[97m' bold = '\033[1m' underline = '\033[4m' sol = '\033[1G' clr_to_eol = '\033[K' clr_to_bot = '\033[J' scroll_five = '\n\n\n\n\n' scroll_ten = '\n\n\n\n\n\n\n\n\n\n' up_one = '\033[1A' up_five = '\033[5A' up_ten = '\033[10A' header1 = ' ' + bold + underline endc = '\033[0m' def heading1(text='-', width=79, indent=10): ind = ''.join([' ' for i in range(indent)]) text1 = f'{ind}{Color.bold}{Color.underline}{text}{Color.endc}' print(f'\n{text1: <{width + 8}}') def bold(text): return Color.bold + text + Color.endc def rlinput(prompt, prefill=''): log = logger.getlogger() log.debug(f"prompt='{repr(prompt)}' prefill='{prefill}'") readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: user_input = input(prompt) log.debug(f"user_input='{user_input}'") return user_input finally: readline.set_startup_hook() def files_present(url, fileglobs, _all=True): log = logger.getlogger() any_present = False all_present = True fileglobsstr = ','.join(fileglobs) if fileglobs: cmd = (f'wget -r -l 10 -nd -np --spider --accept={fileglobsstr} {url}') reply, err, rc = sub_proc_exec(cmd) err = err.replace('%2B', '+') if rc == 0: for fileglob in fileglobs: regx = fileglob_to_regx(fileglob) res = re.findall(regx, err) log.debug(f"fileglob='{fileglob}' regx='{regx}' res='{res}'") if len(res) == 0: all_present = False log.warning(f"File not found in repo: {fileglob}") else: any_present = True if not fileglobs: return True if _all: return all_present else: return any_present def fileglob_to_regx(fileglob): regx = fileglob.replace('.', r'\.') regx = regx.replace('+', r'\+') regx = regx.replace(']*', '][0-9]{0,3}') regx = regx.replace('*', '.*') regx = 'http.+' + regx return regx def get_url(url='http://', fileglob='', prompt_name='', repo_chk='', contains=[], excludes=[], filelist=[]): from lib.genesis import GEN_SOFTWARE_PATH print(f'Enter {prompt_name} URL. ("sss" at end of URL to skip)') if fileglob: print('Do not include filenames in the URL. 
A search of the URL') print('will be made up to 10 levels deep') while True: url = rlinput(f'Enter URL: ', url) if url.endswith('sss'): url = None break if 'artifactory.swg' in url: fnd_creds = False while not fnd_creds: path = os.path.join(GEN_SOFTWARE_PATH, 'artifactory.credentials') if os.path.isfile(path): with open(path, 'r') as f: creds = f.read().rstrip('\n') fnd_creds = True else: print('No artifactory credentials file found') r = get_selection('Retry\nTerminate Sofware install', ('R', 'T')) if r == 'T': sys.exit('PowerUp software install terminated by user') url = f'https://{creds}{url}' break if repo_chk: url = url if url.endswith('/') else url + '/' try: cmd = f'curl --max-time 2 -I {url}' url_info, err, rc = sub_proc_exec(cmd) except: pass else: if 'http:' in url or 'https:' in url: response = re.search(r'HTTP\/\d+.\d+\s+200\s+ok', url_info, re.IGNORECASE) if response: repo_mrkr = {'yum': '/repodata/', 'ana': 'repodata.json', 'pypi': '/simple/'} print(response.group(0)) if repo_chk: ss = repo_mrkr[repo_chk] elif fileglob: ss = fileglob elif url[-1] != '/': ss = os.path.basename(url) url = os.path.dirname(url) cmd = ('wget -r -l 10 -nd -np --spider ' f'--accept={ss} {url}') reply, err, rc = sub_proc_exec(cmd) err = err.replace('%2B', '+') if rc == 0: if repo_chk: regx = 'http.+' + repo_mrkr[repo_chk] elif fileglob: regx = fileglob_to_regx(fileglob) _found = re.findall(regx, err) _found = list(set(_found)) found = [] if repo_chk: for _url in _found: if (any([item for item in contains if item in _url]) and not any([item for item in excludes if item in _url])): found.append(_url) if found: _list = found elif _found: _list = _found if repo_chk: print(bold('\nWarning. The following url(s) ' 'were found but do not match the ' 'search criteria')) else: _list = [] if _list: ch, sel = get_selection(_list, allow_none=True) if ch != 'N': if repo_chk: sel = sel.rstrip('/') url = os.path.dirname(sel) if files_present(url, filelist): break else: print('\nChosen URL does not appear ' 'to be valid. File check ' 'failed.') if get_yesno('Use selection anyway'): break else: url = sel break else: print('No match found.') else: print(f'Error reading url. {reply}') else: print('Invalid url') err = re.search('curl: .+', err) if err: print(err.group(0)) tmp = re.search(r'HTTP\/\d+.\d+\s+.+', url_info) if tmp: print(tmp.group(0)) elif 'file:///' in url: response = re.search(r'Content-Length:\s+\d+', url_info) if response: if repo_chk == 'yum': ss = '/repodata' elif repo_chk == 'ana': ss = '/repodata.json' elif repo_chk == 'pypi': ss = '/simple' if repo_chk: ss = url + ss elif fileglob: ss = url + fileglob ss = '/' + ss.lstrip('file:/') files = glob(ss, recursive=True) if files: ch, sel = get_selection(files, allow_none=True) if ch != 'N': url = 'file://' + os.path.dirname(sel) + '/' break else: print('No match found.') elif 'file:' in url: print('Proper file url format: "file:///path/to/file') response = '' else: response = '' return url def get_yesno(prompt='', yesno='[y]/n', default=''): log = logger.getlogger() log.debug(f"prompt='{repr(prompt)}' yesno='{yesno}' default='{default}'") try: def_resp = yesno[1 + yesno.index('['):yesno.index(']')] except ValueError: def_resp = '' yn = yesno.replace('[', '') yn = yn.replace(']', '') yn = yn.split('/') while True: r = rlinput(f'{prompt}({yesno})? 
', default) if def_resp and not r.strip(): ret = True if def_resp == yn[0] else False return ret elif r == yn[0]: return True elif r == yn[-1]: return False def get_dir(src_dir): rows = 10 if not src_dir: path = os.path.abspath('.') else: path = src_dir while True: path = rlinput(f'Enter an absolute directory location (S to skip): ', path) if path == 'S': return None if os.path.exists(path): rpm_filelist = [] non_rpm_filelist = [] print() top, dirs, files = next(os.walk(path)) files.sort() rpm_cnt = 0 non_rpm_cnt = 0 for f in files: if f.endswith('.rpm'): rpm_filelist.append(f) rpm_cnt += 1 else: non_rpm_filelist.append(f) non_rpm_cnt += 1 cnt = min(10, max(rpm_cnt, non_rpm_cnt)) rpm_filelist += rows * [''] list1 = rpm_filelist[:cnt] non_rpm_filelist += rows * [''] list2 = non_rpm_filelist[:cnt] print('\n' + bold(path)) print(tabulate(list(zip(list1, list2)), headers=[bold('RPM Files'), bold('Other files')], tablefmt='psql')) if rpm_cnt > 0: print(bold(f'{rpm_cnt} rpm files found')) print(f'including the {min(10, rpm_cnt)} files above.\n') else: print(bold('No rpm files found\n')) if non_rpm_cnt > 0: print(bold(f'{non_rpm_cnt} other files found')) print(f'including the {min(10, non_rpm_cnt)} files above.') else: print(bold('No non rpm files found')) print('\nSub directories of the entered directory: ') dirs.sort() print(dirs) print(f'\nThe entered path was: {top}') if get_yesno('Use the entered path '): return path def scan_ping_network(network_type='all', config_path=None): cfg = Config(config_path) type_ = cfg.get_depl_netw_client_type() if network_type == 'pxe' or network_type == 'all': net_type = 'pxe' idx = type_.index(net_type) cip = cfg.get_depl_netw_client_cont_ip()[idx] netprefix = cfg.get_depl_netw_client_prefix()[idx] cidr_cip = IPNetwork(cip + '/' + str(netprefix)) net_c = str(IPNetwork(cidr_cip).network) cmd = 'fping -a -r0 -g ' + net_c + '/' + str(netprefix) result, err, rc = sub_proc_exec(cmd) print(result) if network_type == 'ipmi' or network_type == 'all': net_type = 'ipmi' idx = type_.index(net_type) cip = cfg.get_depl_netw_client_cont_ip()[idx] netprefix = cfg.get_depl_netw_client_prefix()[idx] cidr_cip = IPNetwork(cip + '/' + str(netprefix)) net_c = str(IPNetwork(cidr_cip).network) cmd = 'fping -a -r0 -g ' + net_c + '/' + str(netprefix) result, err, rc = sub_proc_exec(cmd) print(result) def get_selection(items, choices=None, prompt='Enter a selection: ', sep='\n', allow_none=False, allow_retry=False): log = logger.getlogger() log.debug(f"items='{repr(items)}' choices='{repr(choices)}' " f"prompt='{repr(prompt)}' sep='{repr(sep)}' " f"allow_none='{allow_none}' allow_retry='{allow_retry}'") if not items: return None, None if not isinstance(items, (list, tuple)): items = items.rstrip(sep) items = items.split(sep) if not choices: choices = [str(i) for i in range(1, 1 + len(items))] if not isinstance(choices, (list, tuple)): choices = choices.rstrip(sep) choices = choices.split(sep) if allow_none: choices.append('N') items.append('Return without making a selection.') if allow_retry: choices.append('R') items.append('Retry the search.') if len(choices) == 1: return choices[0], items[0] maxw = 1 for ch in choices: maxw = max(maxw, len(ch)) print() for i in range(min(len(choices), len(items))): print(bold(f'{choices[i]: <{maxw}}') + ' - ' + items[i]) print() ch = ' ' while not (ch in choices or ch in items): ch = input(f'{Color.bold}{prompt}{Color.endc}') if not (ch in choices or ch in items): print('Not a valid selection') print(f'Choose from {choices}') ch = ' ' if ch not in 
choices: ch = choices[items.index(ch)] item = items[choices.index(ch)] if item == 'Return without making a selection.': item = None print() log.debug(f"results: ch='{ch}' item='{item}'") return ch, item def get_src_path(src_name): log = logger.getlogger() while True: cmd = (f'find /home -name {src_name}') resp1, err, rc1 = sub_proc_exec(cmd) if rc1 != 0: log.error(f'Error searching for {src_name}') cmd = (f'find /root -name {src_name}') resp2, err, rc2 = sub_proc_exec(cmd) if rc2 != 0: log.error(f'Error searching for {src_name}') if rc1 != 0 and rc2 != 0: return None resp = resp1 + resp2 if not resp: cmd = (f'find / -name {src_name}') resp, err, rc = sub_proc_exec(cmd) if rc != 0: log.error(f'Error searching for {src_name}') return None if not resp: print(f'Source file {src_name} not found') if not get_yesno('Search again', 'y/no', default='y'): log.error(f'Source file {src_name} not found.\n ' f'{src_name} is not setup in the POWER-Up ' 'software server.') return None else: ch, src_path = get_selection(resp, prompt='Select a source file: ', allow_none=True, allow_retry=True) if ch != 'R': return src_path else: ch, src_path = get_selection(resp, prompt='Select a source file: ', allow_none=True, allow_retry=True) if ch != 'R': return src_path
Apache License 2.0
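A hedged usage sketch (interactive prompt; assumes the POWER-Up scripts/python directory is on sys.path so lib.utilities is importable, and a hypothetical starting glob):

from lib.utilities import get_file_path

path = get_file_path('/home/user1/*.iso')   # hypothetical starting pattern
if path is None:
    print('No file selected')
else:
    print('Selected:', path)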
sdispater/eloquent
eloquent/query/builder.py
QueryBuilder.having_raw
python
def having_raw(self, sql, bindings=None, boolean='and'):
    type = 'raw'
    self.havings.append({
        'type': type,
        'sql': sql,
        'boolean': boolean
    })
    self.add_binding(bindings, 'having')
    return self
Add a raw having clause to the query

:param sql: The raw query
:type sql: str

:param bindings: The query bindings
:type bindings: list

:param boolean: Boolean joiner type
:type boolean: str

:return: The current QueryBuilder instance
:rtype: QueryBuilder
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/query/builder.py#L694-L720
import re from itertools import chain from collections import OrderedDict from .expression import QueryExpression from .join_clause import JoinClause from ..support.collection import Collection from ..utils import basestring, Null from ..exceptions import ArgumentError class QueryBuilder(object): _operators = [ '=', '<', '>', '<=', '>=', '<>', '!=', 'like', 'like binary', 'not like', 'between', 'ilike', '&', '|', '^', '<<', '>>', 'rlike', 'regexp', 'not regexp', '~', '~*', '!~', '!~*', 'similar to', 'not similar to', ] def __init__(self, connection, grammar, processor): self._grammar = grammar self._processor = processor self._connection = connection self._bindings = OrderedDict() for type in ['select', 'join', 'where', 'having', 'order']: self._bindings[type] = [] self.aggregate_ = None self.columns = [] self.distinct_ = False self.from__ = '' self.joins = [] self.wheres = [] self.groups = [] self.havings = [] self.orders = [] self.limit_ = None self.offset_ = None self.unions = [] self.union_limit = None self.union_offset = None self.union_orders = [] self.lock_ = None self._backups = [] self._use_write_connection = False def select(self, *columns): if not columns: columns = ['*'] self.columns = columns return self def select_raw(self, expression, bindings=None): self.add_select(QueryExpression(expression)) if bindings: self.add_binding(bindings, 'select') return self def select_sub(self, query, as_): if isinstance(query, QueryBuilder): bindings = query.get_bindings() query = query.to_sql() elif isinstance(query, basestring): bindings = [] else: raise ArgumentError('Invalid subselect') return self.select_raw('(%s) AS %s' % (query, self._grammar.wrap(as_)), bindings) def add_select(self, *column): if not column: column = [] self.columns += column return self def distinct(self): self.distinct_ = True return self def from_(self, table): self.from__ = table return self def join(self, table, one=None, operator=None, two=None, type='inner', where=False): if isinstance(table, JoinClause): self.joins.append(table) else: if one is None: raise ArgumentError('Missing "one" argument') join = JoinClause(table, type) self.joins.append(join.on( one, operator, two, 'and', where )) return self def join_where(self, table, one, operator, two, type='inner'): return self.join(table, one, operator, two, type, True) def left_join(self, table, one=None, operator=None, two=None): if isinstance(table, JoinClause): table.type = 'left' return self.join(table, one, operator, two, 'left') def left_join_where(self, table, one, operator, two): return self.join_where(table, one, operator, two, 'left') def right_join(self, table, one=None, operator=None, two=None): if isinstance(table, JoinClause): table.type = 'right' return self.join(table, one, operator, two, 'right') def right_join_where(self, table, one, operator, two): return self.join_where(table, one, operator, two, 'right') def where(self, column, operator=Null(), value=None, boolean='and'): if isinstance(column, dict): nested = self.new_query() for key, value in column.items(): nested.where(key, '=', value) return self.where_nested(nested, boolean) if isinstance(column, QueryBuilder): return self.where_nested(column, boolean) if value is None: if not isinstance(operator, Null): value = operator operator = '=' else: raise ArgumentError('Value must be provided') if operator not in self._operators: value = operator operator = '=' if isinstance(value, QueryBuilder): return self._where_sub(column, operator, value, boolean) if value is None: return 
self.where_null(column, boolean, operator != '=') type = 'basic' self.wheres.append({ 'type': type, 'column': column, 'operator': operator, 'value': value, 'boolean': boolean }) if not isinstance(value, QueryExpression): self.add_binding(value, 'where') return self def or_where(self, column, operator=None, value=None): return self.where(column, operator, value, 'or') def _invalid_operator_and_value(self, operator, value): is_operator = operator in self._operators return is_operator and operator != '=' and value is None def where_raw(self, sql, bindings=None, boolean='and'): type = 'raw' self.wheres.append({ 'type': type, 'sql': sql, 'boolean': boolean }) self.add_binding(bindings, 'where') return self def or_where_raw(self, sql, bindings=None): return self.where_raw(sql, bindings, 'or') def where_between(self, column, values, boolean='and', negate=False): type = 'between' self.wheres.append({ 'column': column, 'type': type, 'boolean': boolean, 'not': negate }) self.add_binding(values, 'where') return self def or_where_between(self, column, values): return self.where_between(column, values, 'or') def where_not_between(self, column, values, boolean='and'): return self.where_between(column, values, boolean, True) def or_where_not_between(self, column, values): return self.where_not_between(column, values, 'or') def where_nested(self, query, boolean='and'): query.from_(self.from__) return self.add_nested_where_query(query, boolean) def add_nested_where_query(self, query, boolean='and'): if len(query.wheres): type = 'nested' self.wheres.append({ 'type': type, 'query': query, 'boolean': boolean }) self.merge_bindings(query) return self def _where_sub(self, column, operator, query, boolean): type = 'sub' self.wheres.append({ 'type': type, 'column': column, 'operator': operator, 'query': query, 'boolean': boolean }) self.merge_bindings(query) return self def where_exists(self, query, boolean='and', negate=False): if negate: type = 'not_exists' else: type = 'exists' self.wheres.append({ 'type': type, 'query': query, 'boolean': boolean }) self.merge_bindings(query) return self def or_where_exists(self, callback, negate=False): return self.where_exists(callback, 'or', negate) def where_not_exists(self, callback, boolean='and'): return self.where_exists(callback, boolean, True) def or_where_not_exists(self, callback): self.or_where_exists(callback, True) def where_in(self, column, values, boolean='and', negate=False): if negate: type = 'not_in' else: type = 'in' if isinstance(values, QueryBuilder): return self._where_in_sub(column, values, boolean, negate) self.wheres.append({ 'type': type, 'column': column, 'values': values, 'boolean': boolean }) self.add_binding(values, 'where') return self def or_where_in(self, column, values): return self.where_in(column, values, 'or') def where_not_in(self, column, values, boolean='and'): return self.where_in(column, values, boolean, True) def or_where_not_in(self, column, values): return self.where_not_in(column, values, 'or') def _where_in_sub(self, column, query, boolean, negate=False): if negate: type = 'not_in_sub' else: type = 'in_sub' self.wheres.append({ 'type': type, 'column': column, 'query': query, 'boolean': boolean }) self.merge_bindings(query) return self def where_null(self, column, boolean='and', negate=False): if negate: type = 'not_null' else: type = 'null' self.wheres.append({ 'type': type, 'column': column, 'boolean': boolean }) return self def or_where_null(self, column): return self.where_null(column, 'or') def where_not_null(self, column, 
boolean='and'): return self.where_null(column, boolean, True) def or_where_not_null(self, column): return self.where_not_null(column, 'or') def where_date(self, column, operator, value, boolean='and'): return self._add_date_based_where('date', column, operator, value, boolean) def where_day(self, column, operator, value, boolean='and'): return self._add_date_based_where('day', column, operator, value, boolean) def where_month(self, column, operator, value, boolean='and'): return self._add_date_based_where('month', column, operator, value, boolean) def where_year(self, column, operator, value, boolean='and'): return self._add_date_based_where('year', column, operator, value, boolean) def _add_date_based_where(self, type, column, operator, value, boolean='and'): self.wheres.append({ 'type': type, 'column': column, 'boolean': boolean, 'operator': operator, 'value': value }) self.add_binding(value, 'where') def dynamic_where(self, method): finder = method[6:] def dynamic_where(*parameters): segments = re.split('_(and|or)_(?=[a-z])', finder, 0, re.I) connector = 'and' index = 0 for segment in segments: if segment.lower() != 'and' and segment.lower() != 'or': self._add_dynamic(segment, connector, parameters, index) index += 1 else: connector = segment return self return dynamic_where def _add_dynamic(self, segment, connector, parameters, index): self.where(segment, '=', parameters[index], connector) def group_by(self, *columns): for column in columns: self.groups.append(column) return self def having(self, column, operator=None, value=None, boolean='and'): type = 'basic' self.havings.append({ 'type': type, 'column': column, 'operator': operator, 'value': value, 'boolean': boolean }) if not isinstance(value, QueryExpression): self.add_binding(value, 'having') return self def or_having(self, column, operator=None, value=None): return self.having(column, operator, value, 'or')
MIT License
hyangwinter/flownet3d_pytorch
util.py
PointNetSetUpConv.forward
python
def forward(self, pos1, pos2, feature1, feature2):
    pos1_t = pos1.permute(0, 2, 1).contiguous()
    pos2_t = pos2.permute(0, 2, 1).contiguous()
    B, C, N = pos1.shape
    if self.knn:
        _, idx = pointutils.knn(self.nsample, pos1_t, pos2_t)
    else:
        idx, _ = query_ball_point(self.radius, self.nsample, pos2_t, pos1_t)
    pos2_grouped = pointutils.grouping_operation(pos2, idx)
    pos_diff = pos2_grouped - pos1.view(B, -1, N, 1)
    feat2_grouped = pointutils.grouping_operation(feature2, idx)
    feat_new = torch.cat([feat2_grouped, pos_diff], dim=1)
    for conv in self.mlp1_convs:
        feat_new = conv(feat_new)
    feat_new = feat_new.max(-1)[0]
    if feature1 is not None:
        feat_new = torch.cat([feat_new, feature1], dim=1)
    for conv in self.mlp2_convs:
        feat_new = conv(feat_new)
    return feat_new
Feature propagation from xyz2 (fewer points) to xyz1 (more points).

Inputs:
    xyz1: (batch_size, 3, npoint1)
    xyz2: (batch_size, 3, npoint2)
    feat1: (batch_size, channel1, npoint1) features for xyz1 points (earlier layers, more points)
    feat2: (batch_size, channel1, npoint2) features for xyz2 points
Output:
    feat1_new: (batch_size, npoint2, mlp[-1] or mlp2[-1] or channel1+3)

TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.
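A hedged usage sketch of this layer. The constructor arguments below (nsample, radius, channel sizes, the empty mlp list), the `from util import ...` path, and the availability of a CUDA device with the compiled lib.pointnet2_utils extension are all assumptions made for illustration, not values taken from this record.

import torch
from util import PointNetSetUpConv  # assumed module path

# Illustrative configuration; requires the pointnet2 CUDA ops to be built.
upconv = PointNetSetUpConv(nsample=8, radius=2.4, f1_channel=256, f2_channel=512,
                           mlp=[], mlp2=[256, 256]).cuda()

B, N1, N2 = 2, 256, 64                 # batch, dense-level points, coarse-level points
pos1 = torch.rand(B, 3, N1).cuda()     # xyz of the dense level (propagation target)
pos2 = torch.rand(B, 3, N2).cuda()     # xyz of the coarse level
feat1 = torch.rand(B, 256, N1).cuda()  # features at the dense level
feat2 = torch.rand(B, 512, N2).cuda()  # features at the coarse level

out = upconv(pos1, pos2, feat1, feat2)
print(out.shape)                       # expected: torch.Size([2, 256, 256])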
https://github.com/hyangwinter/flownet3d_pytorch/blob/ae0847d242d3582b3f6f115e64f61e637ef80355/util.py#L350-L388
import torch import torch.nn as nn import torch.nn.functional as F from time import time import numpy as np from lib import pointnet2_utils as pointutils def quat2mat(quat): x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3] B = quat.size(0) w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) wx, wy, wz = w*x, w*y, w*z xy, xz, yz = x*y, x*z, y*z rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz, 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx, 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3) return rotMat def transform_point_cloud(point_cloud, rotation, translation): if len(rotation.size()) == 2: rot_mat = quat2mat(rotation) else: rot_mat = rotation return torch.matmul(rot_mat, point_cloud) + translation.unsqueeze(2) def npmat2euler(mats, seq='zyx'): eulers = [] for i in range(mats.shape[0]): r = Rotation.from_dcm(mats[i]) eulers.append(r.as_euler(seq, degrees=True)) return np.asarray(eulers, dtype='float32') def timeit(tag, t): print("{}: {}s".format(tag, time() - t)) return time() def pc_normalize(pc): l = pc.shape[0] centroid = np.mean(pc, axis=0) pc = pc - centroid m = np.max(np.sqrt(np.sum(pc**2, axis=1))) pc = pc / m return pc def square_distance(src, dst): B, N, _ = src.shape _, M, _ = dst.shape dist = -2 * torch.matmul(src, dst.permute(0, 2, 1)) dist += torch.sum(src ** 2, -1).view(B, N, 1) dist += torch.sum(dst ** 2, -1).view(B, 1, M) return dist def index_points(points, idx): device = points.device B = points.shape[0] view_shape = list(idx.shape) view_shape[1:] = [1] * (len(view_shape) - 1) repeat_shape = list(idx.shape) repeat_shape[0] = 1 batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape) new_points = points[batch_indices, idx, :] return new_points def farthest_point_sample(xyz, npoint): device = xyz.device B, N, C = xyz.shape centroids = torch.zeros(B, npoint, dtype=torch.long).to(device) distance = torch.ones(B, N).to(device) * 1e10 farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device) batch_indices = torch.arange(B, dtype=torch.long).to(device) for i in range(npoint): centroids[:, i] = farthest centroid = xyz[batch_indices, farthest, :].view(B, 1, 3) dist = torch.sum((xyz - centroid) ** 2, -1) mask = dist < distance distance[mask] = dist[mask] farthest = torch.max(distance, -1)[1] return centroids def knn_point(k, pos1, pos2): B, N, C = pos1.shape M = pos2.shape[1] pos1 = pos1.view(B,1,N,-1).repeat(1,M,1,1) pos2 = pos2.view(B,M,1,-1).repeat(1,1,N,1) dist = torch.sum(-(pos1-pos2)**2,-1) val,idx = dist.topk(k=k,dim = -1) return torch.sqrt(-val), idx def query_ball_point(radius, nsample, xyz, new_xyz): device = xyz.device B, N, C = xyz.shape _, S, _ = new_xyz.shape group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1]) sqrdists = square_distance(new_xyz, xyz) group_idx[sqrdists > radius ** 2] = N mask = group_idx != N cnt = mask.sum(dim=-1) group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample] group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample]) mask = group_idx == N group_idx[mask] = group_first[mask] return group_idx, cnt def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False): B, N, C = xyz.shape S = npoint fps_idx = farthest_point_sample(xyz, npoint) new_xyz = index_points(xyz, fps_idx) idx, _ = query_ball_point(radius, nsample, xyz, new_xyz) grouped_xyz = index_points(xyz, idx) grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C) if points is not None: grouped_points = index_points(points, 
idx) new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1) else: new_points = grouped_xyz_norm if returnfps: return new_xyz, new_points, grouped_xyz, fps_idx else: return new_xyz, new_points def sample_and_group_all(xyz, points): device = xyz.device B, N, C = xyz.shape new_xyz = torch.zeros(B, 1, C).to(device) grouped_xyz = xyz.view(B, 1, N, C) if points is not None: new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1) else: new_points = grouped_xyz return new_xyz, new_points class PointNetSetAbstraction(nn.Module): def __init__(self, npoint, radius, nsample, in_channel, mlp, mlp2 = None, group_all = False): super(PointNetSetAbstraction, self).__init__() self.npoint = npoint self.radius = radius self.nsample = nsample self.group_all = group_all self.mlp_convs = nn.ModuleList() self.mlp_bns = nn.ModuleList() self.mlp2_convs = nn.ModuleList() last_channel = in_channel+3 for out_channel in mlp: self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias = False)) self.mlp_bns.append(nn.BatchNorm2d(out_channel)) last_channel = out_channel for out_channel in mlp2: self.mlp2_convs.append(nn.Sequential(nn.Conv1d(last_channel, out_channel, 1, bias=False), nn.BatchNorm1d(out_channel))) last_channel = out_channel if group_all: self.queryandgroup = pointutils.GroupAll() else: self.queryandgroup = pointutils.QueryAndGroup(radius, nsample) def forward(self, xyz, points): device = xyz.device B, C, N = xyz.shape xyz_t = xyz.permute(0, 2, 1).contiguous() if self.group_all == False: fps_idx = pointutils.furthest_point_sample(xyz_t, self.npoint) new_xyz = pointutils.gather_operation(xyz, fps_idx) else: new_xyz = xyz new_points = self.queryandgroup(xyz_t, new_xyz.transpose(2, 1).contiguous(), points) for i, conv in enumerate(self.mlp_convs): bn = self.mlp_bns[i] new_points = F.relu(bn(conv(new_points))) new_points = torch.max(new_points, -1)[0] for i, conv in enumerate(self.mlp2_convs): new_points = F.relu(conv(new_points)) return new_xyz, new_points class FlowEmbedding(nn.Module): def __init__(self, radius, nsample, in_channel, mlp, pooling='max', corr_func='concat', knn = True): super(FlowEmbedding, self).__init__() self.radius = radius self.nsample = nsample self.knn = knn self.pooling = pooling self.corr_func = corr_func self.mlp_convs = nn.ModuleList() self.mlp_bns = nn.ModuleList() if corr_func is 'concat': last_channel = in_channel*2+3 for out_channel in mlp: self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias=False)) self.mlp_bns.append(nn.BatchNorm2d(out_channel)) last_channel = out_channel def forward(self, pos1, pos2, feature1, feature2): pos1_t = pos1.permute(0, 2, 1).contiguous() pos2_t = pos2.permute(0, 2, 1).contiguous() B, N, C = pos1_t.shape if self.knn: _, idx = pointutils.knn(self.nsample, pos1_t, pos2_t) else: idx, cnt = query_ball_point(self.radius, self.nsample, pos2_t, pos1_t) _, idx_knn = pointutils.knn(self.nsample, pos1_t, pos2_t) cnt = cnt.view(B, -1, 1).repeat(1, 1, self.nsample) idx = idx_knn[cnt > (self.nsample-1)] pos2_grouped = pointutils.grouping_operation(pos2, idx) pos_diff = pos2_grouped - pos1.view(B, -1, N, 1) feat2_grouped = pointutils.grouping_operation(feature2, idx) if self.corr_func=='concat': feat_diff = torch.cat([feat2_grouped, feature1.view(B, -1, N, 1).repeat(1, 1, 1, self.nsample)], dim = 1) feat1_new = torch.cat([pos_diff, feat_diff], dim = 1) for i, conv in enumerate(self.mlp_convs): bn = self.mlp_bns[i] feat1_new = F.relu(bn(conv(feat1_new))) feat1_new = torch.max(feat1_new, -1)[0] return pos1, feat1_new 
class PointNetSetUpConv(nn.Module): def __init__(self, nsample, radius, f1_channel, f2_channel, mlp, mlp2, knn = True): super(PointNetSetUpConv, self).__init__() self.nsample = nsample self.radius = radius self.knn = knn self.mlp1_convs = nn.ModuleList() self.mlp2_convs = nn.ModuleList() last_channel = f2_channel+3 for out_channel in mlp: self.mlp1_convs.append(nn.Sequential(nn.Conv2d(last_channel, out_channel, 1, bias=False), nn.BatchNorm2d(out_channel), nn.ReLU(inplace=False))) last_channel = out_channel if len(mlp) is not 0: last_channel = mlp[-1] + f1_channel else: last_channel = last_channel + f1_channel for out_channel in mlp2: self.mlp2_convs.append(nn.Sequential(nn.Conv1d(last_channel, out_channel, 1, bias=False), nn.BatchNorm1d(out_channel), nn.ReLU(inplace=False))) last_channel = out_channel
MIT License
getmetamapper/metamapper
utils/postgres/querysets.py
PostgresQuerySet.on_conflict
python
def on_conflict(self, fields, action=ConflictAction, index_predicate=None):
    self.conflict_target = fields
    self.conflict_action = action
    self.index_predicate = index_predicate
    return self
Sets the action to take when conflicts arise while attempting to insert/create a new row.

Arguments:
    fields: The fields the conflicts can occur in.
    action: The action to take when the conflict occurs.
    index_predicate: The index predicate to satisfy an arbiter partial index
        (i.e. what partial index to use for checking conflicts).
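A minimal sketch of how the setter is meant to be chained. `Article` is a hypothetical model whose manager is assumed to be built from PostgresQuerySet (e.g. via PostgresQuerySet.as_manager()), and the ConflictAction.UPDATE member is an assumption about the imported utils.postgres.types module.

from utils.postgres.types import ConflictAction

# Hypothetical model whose manager returns a PostgresQuerySet.
qs = Article.objects.get_queryset()
qs = qs.on_conflict(['slug'], ConflictAction.UPDATE)

# on_conflict() only records the desired behaviour on the queryset; a later
# insert reads these attributes when the ON CONFLICT clause is built.
assert qs.conflict_target == ['slug']
assert qs.conflict_action == ConflictAction.UPDATE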
https://github.com/getmetamapper/metamapper/blob/0b2f67eec03fbf7ece35ff9f58ea9bb2dde4d85f/utils/postgres/querysets.py#L68-L84
from itertools import chain from django.core.exceptions import SuspiciousOperation from django.db import models, router from django.db.models.fields import NOT_PROVIDED from utils.postgres.sql import PostgresInsertQuery, PostgresQuery from utils.postgres.types import ConflictAction class PostgresQuerySet(models.QuerySet): def __init__(self, model=None, query=None, using=None, hints=None): super().__init__(model, query, using, hints) self.query = query or PostgresQuery(self.model) self.conflict_target = None self.conflict_action = None self.index_predicate = None def annotate(self, **annotations): fields = { field.name: field for field in self.model._meta.get_fields() } new_annotations = {} renames = {} for name, value in annotations.items(): if name in fields: new_name = "%s_new" % name new_annotations[new_name] = value renames[new_name] = name else: new_annotations[name] = value result = super().annotate(**new_annotations) result.rename_annotations(**renames) return result def rename_annotations(self, **annotations): self.query.rename_annotations(annotations) return self
BSD 2-Clause Simplified License
openstack/solum
solum/api/handlers/workflow_handler.py
WorkflowHandler.get
python
def get(self, id):
    wf = objects.registry.Workflow.get_by_uuid(self.context, id)
    assembly = objects.registry.Assembly.get_by_id(self.context, wf.assembly)
    wf.status = assembly.status
    return wf
Return a workflow.
https://github.com/openstack/solum/blob/0a744883ca00a0ee80e1b6840ac42a78fc397450/solum/api/handlers/workflow_handler.py#L94-L102
import datetime import json from oslo_config import cfg from oslo_utils import uuidutils from solum.api.handlers import handler from solum.common import exception from solum.common import repo_utils from solum import objects from solum.objects import image from solum.objects.sqlalchemy import workflow from solum.worker import api as worker_api API_SERVICE_OPTS = [ cfg.StrOpt('image_format', default='qcow2', help='The format of the image to output'), cfg.StrOpt('source_format', default='heroku', help='The format of source repository'), cfg.StrOpt('rebuild_phrase', default='solum retry tests', help='Comment phrase to trigger rebuilding'), cfg.IntOpt('max_instances_per_app', default=100, help='Application scale limit'), ] def list_opts(): yield 'api', API_SERVICE_OPTS CONF = cfg.CONF CONF.register_opts(API_SERVICE_OPTS, group='api') IMAGE_STATES = image.States class WorkflowHandler(handler.Handler): def _update_app_scale_config(self, app, data): scale_config = dict() target = data.get('scale_target', '1') try: target = int(target) except ValueError: msg = "Must provide integer value for scale target." raise exception.BadRequest(reason=msg) if target <= 0: msg = "Scale target must be greater than zero." raise exception.BadRequest(reason=msg) if target > cfg.CONF.api.max_instances_per_app: msg = "Target scale '%s' exceeds maximum scale limit '%s'." % ( target, cfg.CONF.api.max_instances_per_app) raise exception.ResourceLimitExceeded(reason=msg) current_config = app.scale_config if current_config: current_config[app.name]['target'] = str(target) scale_config['scale_config'] = current_config else: config_data = dict() config_data['target'] = str(target) app_scale_config = dict() app_scale_config[app.name] = config_data scale_config = dict() scale_config['scale_config'] = app_scale_config objects.registry.App.update_and_save(self.context, app.id, scale_config)
Apache License 2.0
napalm-automation/napalm-yang
napalm_yang/models/openconfig/interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/state/__init__.py
state._set_advertisement_interval
python
def _set_advertisement_interval(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16, ), restriction_dict={"range": ["1..4095"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 100 ), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """advertisement_interval must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4095']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(100), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint16', is_config=False)""", } ) self.__advertisement_interval = t if hasattr(self, "_set"): self._set()
Setter method for advertisement_interval, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/state/advertisement_interval (uint16).

If this variable is read-only (config: false) in the source YANG file, then _set_advertisement_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_advertisement_interval() directly.

YANG Description: Sets the interval between successive VRRP advertisements -- RFC 5798 defines this as a 12-bit value expressed as 0.1 seconds, with default 100, i.e., 1 second. Several implementations express this in units of seconds.
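Since this is a config-false leaf, backends are expected to drive the private setter directly, as the note above says. A small sketch, assuming pyangbind is installed and using only the generated `state` class shown in this record:

st = state()

st._set_advertisement_interval(200)            # accepted: within the 1..4095 restriction
print(int(st._get_advertisement_interval()))   # -> 200

try:
    st._set_advertisement_interval(70000)      # violates uint16 / 1..4095
except ValueError as exc:
    print('rejected:', exc.args[0]['error-string'])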
https://github.com/napalm-automation/napalm-yang/blob/9148e015b086ebe311c07deb92e168ea36fd7771/napalm_yang/models/openconfig/interfaces/interface/routed_vlan/ipv6/addresses/address/vrrp/vrrp_group/state/__init__.py#L777-L831
from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class state(PybindBase): __slots__ = ( "_path_helper", "_extmethods", "__virtual_router_id", "__virtual_address", "__priority", "__preempt", "__preempt_delay", "__accept_mode", "__advertisement_interval", "__current_priority", "__virtual_link_local", ) _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__virtual_router_id = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) self.__virtual_address = YANGDynClass( base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" 
}, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) self.__priority = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) self.__preempt = YANGDynClass( base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) self.__preempt_delay = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) self.__accept_mode = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) self.__advertisement_interval = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["1..4095"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 100 ), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) self.__current_priority = YANGDynClass( base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), is_leaf=True, yang_name="current-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) self.__virtual_link_local = YANGDynClass( base=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" 
}, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" }, ), ], is_leaf=True, yang_name="virtual-link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "interfaces", "interface", "routed-vlan", "ipv6", "addresses", "address", "vrrp", "vrrp-group", "state", ] def _get_virtual_router_id(self): return self.__virtual_router_id def _set_virtual_router_id(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8, ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """virtual_router_id must be of a type compatible with uint8""", "defined-type": "uint8", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""", } ) self.__virtual_router_id = t if hasattr(self, "_set"): self._set() def _unset_virtual_router_id(self): self.__virtual_router_id = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) def _get_virtual_address(self): return self.__virtual_address def _set_virtual_address(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": 
"(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" }, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """virtual_address must be of a type compatible with inet:ip-address""", "defined-type": "inet:ip-address", "generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),]), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='inet:ip-address', is_config=False)""", } ) self.__virtual_address = t if hasattr(self, "_set"): self._set() def _unset_virtual_address(self): self.__virtual_address = YANGDynClass( base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" 
}, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) def _get_priority(self): return self.__priority def _set_priority(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8, ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """priority must be of a type compatible with uint8""", "defined-type": "uint8", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""", } ) self.__priority = t if hasattr(self, "_set"): self._set() def _unset_priority(self): self.__priority = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) def _get_preempt(self): return self.__preempt def _set_preempt(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """preempt must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=False)""", } ) self.__preempt = t if hasattr(self, "_set"): self._set() def _unset_preempt(self): self.__preempt = YANGDynClass( base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) def _get_preempt_delay(self): return self.__preempt_delay def _set_preempt_delay(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16, ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """preempt_delay must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['0..3600']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(0), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint16', is_config=False)""", } ) self.__preempt_delay = t if hasattr(self, "_set"): self._set() def _unset_preempt_delay(self): self.__preempt_delay = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) def _get_accept_mode(self): return self.__accept_mode def _set_accept_mode(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """accept_mode must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=False)""", } ) self.__accept_mode = t if hasattr(self, "_set"): self._set() def _unset_accept_mode(self): self.__accept_mode = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) def _get_advertisement_interval(self): return self.__advertisement_interval
Apache License 2.0
lyft/toasted-marshmallow
toastedmarshmallow/jit.py
attr_str
python
def attr_str(attr_name):
    if keyword.iskeyword(attr_name):
        return 'getattr(obj, "{0}")'.format(attr_name)
    return 'obj.{0}'.format(attr_name)
Gets the string to use when accessing an attribute on an object.

Handles the case where the attribute name collides with a keyword and would therefore be illegal to access with dot notation.
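The two branches in practice, assuming attr_str has been imported from toastedmarshmallow.jit:

print(attr_str('name'))    # -> obj.name
print(attr_str('class'))   # -> getattr(obj, "class")   ('class' is a Python keyword)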
https://github.com/lyft/toasted-marshmallow/blob/00a8e76198e45a5731a664d3fec31e138ee4035c/toastedmarshmallow/jit.py#L38-L47
import base64 import keyword import re from abc import ABCMeta, abstractmethod from collections import Mapping import attr from six import exec_, iteritems, add_metaclass, text_type, string_types from marshmallow import missing, Schema, fields from marshmallow.base import SchemaABC from .compat import is_overridden from .utils import IndentedString _VALID_IDENTIFIER = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*') if False: from typing import Any, Callable, Dict, Optional, Tuple, Union, Set def field_symbol_name(field_name): if not _VALID_IDENTIFIER.match(field_name): field_name = str(base64.b64encode( field_name.encode('utf-8')).decode('utf-8').strip('=')) return '_field_{field_name}'.format(field_name=field_name)
Apache License 2.0
weinbe58/quspin
quspin/operators/hamiltonian_core.py
hamiltonian.nbytes
python
def nbytes(self):
    nbytes = 0
    if _sp.issparse(self._static):
        nbytes += self._static.data.nbytes
        nbytes += self._static.indices.nbytes
        nbytes += self._static.indptr.nbytes
    else:
        nbytes += self._static.nbytes
    for Hd in itervalues(self._dynamic):
        if _sp.issparse(Hd):
            nbytes += Hd.data.nbytes
            nbytes += Hd.indices.nbytes
            nbytes += Hd.indptr.nbytes
        else:
            nbytes += Hd.nbytes
    return nbytes
float: Total bytes consumed by the elements of the `hamiltonian` array.
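The property just sums numpy buffer sizes. A standalone sketch of the same arithmetic for one sparse term, using only scipy/numpy (the random matrix stands in for a real operator and is not taken from this record):

import numpy as np
import scipy.sparse as sp

H_static = sp.random(1000, 1000, density=0.01, format='csr', dtype=np.complex128)

total = (H_static.data.nbytes        # the nonzero values
         + H_static.indices.nbytes   # their column indices
         + H_static.indptr.nbytes)   # row pointers
print(total)                         # the quantity nbytes accumulates per sparse term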
https://github.com/weinbe58/quspin/blob/5bbc3204dbf5c227a87a44f0dacf39509cba580c/quspin/operators/hamiltonian_core.py#L543-L561
from __future__ import print_function, division, absolute_import from ..basis import spin_basis_1d as _default_basis from ..basis import isbasis as _isbasis from ..tools.evolution import evolve from ..tools.matvec import _matvec from ..tools.matvec import _get_matvec_function from ._make_hamiltonian import make_static from ._make_hamiltonian import make_dynamic from ._make_hamiltonian import test_function from ._make_hamiltonian import _check_almost_zero from ._functions import function import scipy import scipy.sparse.linalg as _sla import scipy.linalg as _la import scipy.sparse as _sp import numpy as _np from operator import mul import functools from six import iteritems,itervalues,viewkeys try: from itertools import izip as zip except ImportError: pass try: from functools import reduce as reduce except ImportError: pass import warnings __all__ = ["commutator","anti_commutator","hamiltonian","ishamiltonian"] def commutator(H1,H2): if ishamiltonian(H1) or ishamiltonian(H2): return H1*H2 - H2*H1 else: return H1.dot(H2) - H2.dot(H1) def anti_commutator(H1,H2): if ishamiltonian(H1) or ishamiltonian(H2): return H1*H2 + H2*H1 else: return H1.dot(H2) + H2.dot(H1) class HamiltonianEfficiencyWarning(Warning): pass supported_dtypes=tuple([_np.float32, _np.float64, _np.complex64, _np.complex128]) def _check_static(sub_list): if (type(sub_list) in [list,tuple]) and (len(sub_list) == 2): if type(sub_list[0]) is not str: raise TypeError('expecting string type for opstr') if type(sub_list[1]) in [list,tuple]: for sub_sub_list in sub_list[1]: if (type(sub_sub_list) in [list,tuple]) and (len(sub_sub_list) > 0): for element in sub_sub_list: if not _np.isscalar(element): raise TypeError('expecting scalar elements of indx') else: raise TypeError('expecting list for indx') else: raise TypeError('expecting a list of one or more indx') return True else: return False def _check_dynamic(sub_list): if (type(sub_list) in [list,tuple]): if (len(sub_list) == 4): if type(sub_list[0]) is not str: raise TypeError('expecting string type for opstr') if type(sub_list[1]) in [list,tuple]: for sub_sub_list in sub_list[1]: if (type(sub_sub_list) in [list,tuple]) and (len(sub_sub_list) > 0): for element in sub_sub_list: if not _np.isscalar(element): raise TypeError('expecting scalar elements of indx') else: raise TypeError('expecting list for indx') else: raise TypeError('expecting a list of one or more indx') if not hasattr(sub_list[2],"__call__"): raise TypeError('expecting callable object for driving function') if type(sub_list[3]) not in [list,tuple]: raise TypeError('expecting list for function arguments') return True elif (len(sub_list) == 3): if not hasattr(sub_list[1],"__call__"): raise TypeError('expecting callable object for driving function') if type(sub_list[2]) not in [list,tuple]: raise TypeError('expecting list for function arguments') return False elif (len(sub_list) == 2): if not hasattr(sub_list[1],"__call__"): raise TypeError('expecting callable object for driving function') return False else: raise TypeError('expecting list with object, driving function, and function arguments') def _hamiltonian_dot(hamiltonian,time,v): return hamiltonian.dot(v,time=time,check=False) class hamiltonian(object): def __init__(self,static_list,dynamic_list,N=None,basis=None,shape=None,dtype=_np.complex128,static_fmt=None,dynamic_fmt=None,copy=True,check_symm=True,check_herm=True,check_pcon=True,**basis_kwargs): self._is_dense = False self._ndim = 2 self._basis = basis if not (dtype in supported_dtypes): raise 
TypeError('hamiltonian does not support type: '+str(dtype)) else: self._dtype=dtype if type(static_list) in [list,tuple]: static_opstr_list=[] static_other_list=[] for ele in static_list: if _check_static(ele): static_opstr_list.append(ele) else: static_other_list.append(ele) else: raise TypeError('expecting list/tuple of lists/tuples containing opstr and list of indx') if type(dynamic_list) in [list,tuple]: dynamic_opstr_list=[] dynamic_other_list=[] for ele in dynamic_list: if _check_dynamic(ele): dynamic_opstr_list.append(ele) else: dynamic_other_list.append(ele) else: raise TypeError('expecting list/tuple of lists/tuples containing opstr and list of indx, functions, and function args') self._static_opstr_list = static_opstr_list self._dynamic_opstr_list = dynamic_opstr_list if static_opstr_list or dynamic_opstr_list: if self._basis is not None: if len(basis_kwargs) > 0: wrong_keys = set(basis_kwargs.keys()) temp = ", ".join(["{}" for key in wrong_keys]) raise ValueError(("unexpected optional argument(s): "+temp).format(*wrong_keys)) if self._basis is None: if N is None: raise Exception('if opstrs in use, argument N needed for basis class') if type(N) is not int: raise TypeError('argument N must be integer') self._basis=_default_basis(N,**basis_kwargs) elif not _isbasis(self._basis): raise TypeError('expecting instance of basis class for argument: basis') if check_herm: self._basis.check_hermitian(static_opstr_list, dynamic_opstr_list) if check_symm: self._basis.check_symm(static_opstr_list,dynamic_opstr_list) if check_pcon: self._basis.check_pcon(static_opstr_list,dynamic_opstr_list) self._static=make_static(self._basis,static_opstr_list,dtype) self._dynamic=make_dynamic(self._basis,dynamic_opstr_list,dtype) self._shape = self._static.shape if static_other_list or dynamic_other_list: if not hasattr(self,"_shape"): found = False if shape is None: for i,O in enumerate(static_other_list): try: shape = O.shape found = True break except AttributeError: continue if not found: for tup in dynamic_other_list: if len(tup) == 2: O,_ = tup else: O,_,_ = tup try: shape = O.shape found = True break except AttributeError: continue else: found = True if not found: raise ValueError('missing argument shape') if shape[0] != shape[1]: raise ValueError('hamiltonian must be square matrix') self._shape=shape self._static = _sp.dia_matrix(self._shape,dtype=self._dtype) self._dynamic = {} for O in static_other_list: if _sp.issparse(O): self._mat_checks(O) if self._static is None: self._static = O.astype(self._dtype,copy=copy) continue try: self._static += O.astype(self._dtype) except NotImplementedError: self._static = self._static + O.astype(self._dtype) else: O = _np.asarray(O,dtype=self._dtype) self._mat_checks(O) self._is_dense=True if self._static is None: self._static = O.astype(self._dtype,copy=copy) continue try: self._static += O except NotImplementedError: self._static = self._static + O.astype(self._dtype) if not _sp.issparse(self._static): self._static = _np.asarray(self._static) try: self._static.sum_duplicates() self._static.eliminate_zeros() except: pass for tup in dynamic_other_list: if len(tup) == 2: O,func = tup else: O,f,f_args = tup test_function(f,f_args,self._dtype) func = function(f,tuple(f_args)) if _sp.issparse(O): self._mat_checks(O) O = O.astype(self._dtype,copy=copy) else: O = _np.array(O,copy=copy,dtype=self._dtype) self._mat_checks(O) self._is_dense=True if func in self._dynamic: try: self._dynamic[func] += O except: self._dynamic[func] = self._dynamic[func] + O else: 
self._dynamic[func] = O else: if not hasattr(self,"_shape"): if shape is None: if self._basis is None: if N is None: raise Exception("argument N or shape needed to create empty hamiltonian") if type(N) is not int: raise TypeError('argument N must be integer') self._basis=_default_basis(N,**basis_kwargs) elif not _isbasis(self._basis): raise TypeError('expecting instance of basis class for argument: basis') shape = (self._basis.Ns,self._basis.Ns) else: self._basis=basis_kwargs.get('basis') if not basis is None: raise ValueError("empty hamiltonian only accepts basis or shape, not both") if len(shape) != 2: raise ValueError('expecting ndim = 2') if shape[0] != shape[1]: raise ValueError('hamiltonian must be square matrix') self._shape=shape self._static = _sp.dia_matrix(self._shape,dtype=self._dtype) self._dynamic = {} self.update_matrix_formats(static_fmt,dynamic_fmt) self._Ns = self._shape[0] @property def basis(self): if self._basis is not None: return self._basis else: raise AttributeError("object has no attribute 'basis'") @property def ndim(self): return self._ndim @property def Ns(self): return self._Ns @property def shape(self): return self._shape @property def get_shape(self): return self._shape @property def is_dense(self): return self._is_dense @property def dtype(self): return _np.dtype(self._dtype).name @property def static(self): return self._static @property def dynamic(self): return self._dynamic @property def T(self): return self.transpose() @property def H(self): return self.getH() @property
BSD 3-Clause New or Revised License
thetruefuss/elmer
users/api/serializers.py
ProfileRetrieveSerializer.get_has_followed
python
def get_has_followed(self, obj):
    user = None
    request = self.context.get('request')
    if request and hasattr(request, 'user'):
        user = request.user
    if user in obj.profile.followers.all():
        return True
    return False
Check whether the requester has followed the user.

:return: boolean
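A sketch of how the serializer is typically instantiated so that the request ends up in self.context (for example inside a DRF view); `profile_user` and `request` are placeholders for the profile owner and the incoming request, not names from this record.

serializer = ProfileRetrieveSerializer(
    profile_user,                    # the user whose profile is being viewed
    context={'request': request},    # request.user becomes the "requester"
)
print(serializer.data['has_followed'])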
https://github.com/thetruefuss/elmer/blob/753ec3b5a38f4f5d15bd451400b0374f7ffcdfa6/users/api/serializers.py#L240-L252
from django.contrib.auth import get_user_model from django.db.models import Q from rest_framework import serializers from rest_framework_jwt.settings import api_settings jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER User = get_user_model() class CurrentUserDetailSerializer(serializers.ModelSerializer): screen_name = serializers.SerializerMethodField() profile_picture = serializers.SerializerMethodField() class Meta: model = User fields = [ 'username', 'screen_name', 'profile_picture', ] def get_screen_name(self, obj): return obj.profile.screen_name() def get_profile_picture(self, obj): request = self.context.get('request') profile_picture_url = obj.profile.get_picture() return request.build_absolute_uri(profile_picture_url) class UserDetailSerializer(serializers.ModelSerializer): screen_name = serializers.SerializerMethodField() class Meta: model = User fields = ['screen_name', 'username'] def get_screen_name(self, obj): return obj.profile.screen_name() class UserLoginSerializer(serializers.ModelSerializer): token = serializers.CharField(allow_blank=True, read_only=True) username = serializers.CharField() class Meta: model = User fields = ['username', 'password', 'token'] extra_kwargs = {"password": {"write_only": True}} def validate(self, data): username = data['username'] password = data['password'] user_qs = User.objects.filter(Q(username__iexact=username) | Q(email__iexact=username)).distinct() if user_qs.exists() and user_qs.count() == 1: user_obj = user_qs.first() if user_obj.check_password(password): user = user_obj payload = jwt_payload_handler(user) token = jwt_encode_handler(payload) data['token'] = token else: raise serializers.ValidationError("Incorrect password.") else: raise serializers.ValidationError("The user with this username does not exists.") return data class UserSerializerWithToken(serializers.ModelSerializer): token = serializers.SerializerMethodField() password = serializers.CharField(write_only=True) def get_token(self, obj): payload = jwt_payload_handler(obj) token = jwt_encode_handler(payload) return token def create(self, validated_data): password = validated_data.pop('password', None) instance = self.Meta.model(**validated_data) if password is not None: instance.set_password(password) instance.save() return instance class Meta: model = User fields = [ 'token', 'username', 'email', 'password', ] class ProfileRetrieveSerializer(serializers.ModelSerializer): profile_picture_url = serializers.SerializerMethodField() screen_name = serializers.SerializerMethodField() requester_in_contact_list = serializers.SerializerMethodField() requester_in_pending_list = serializers.SerializerMethodField() has_followed = serializers.SerializerMethodField() is_requesters_profile = serializers.SerializerMethodField() created_boards_count = serializers.SerializerMethodField() posted_subjects_count = serializers.SerializerMethodField() boards_subsribed_count = serializers.SerializerMethodField() member_since = serializers.SerializerMethodField() class Meta: model = User fields = [ 'profile_picture_url', 'screen_name', 'requester_in_contact_list', 'requester_in_pending_list', 'has_followed', 'is_requesters_profile', 'created_boards_count', 'posted_subjects_count', 'boards_subsribed_count', 'member_since', ] def get_profile_picture_url(self, obj): request = self.context.get('request') profile_picture_url = obj.profile.get_picture() return 
request.build_absolute_uri(profile_picture_url) def get_screen_name(self, obj): return obj.profile.screen_name() def get_requester_in_contact_list(self, obj): user = None request = self.context.get('request') if request and hasattr(request, 'user'): user = request.user if user in obj.profile.contact_list.all(): return True return False def get_requester_in_pending_list(self, obj): user = None request = self.context.get('request') if request and hasattr(request, 'user'): user = request.user if user in obj.profile.pending_list.all(): return True return False def get_is_requesters_profile(self, obj): user = None request = self.context.get('request') if request and hasattr(request, 'user'): user = request.user if user == obj: return True return False
MIT License
gsi-upm/soil
soil/simulation.py
Simulation.run_gen
python
def run_gen(self, *args, parallel=False, dry_run=False,
            exporters=[default, ], stats=[defaultStats], outdir=None,
            exporter_params={}, stats_params={}, log_level=None,
            **kwargs):
    if log_level:
        logger.setLevel(log_level)
    logger.info('Using exporters: %s', exporters or [])
    logger.info('Output directory: %s', outdir)
    exporters = serialization.deserialize_all(exporters,
                                              simulation=self,
                                              known_modules=['soil.exporters',],
                                              dry_run=dry_run,
                                              outdir=outdir,
                                              **exporter_params)
    stats = serialization.deserialize_all(simulation=self,
                                          names=stats,
                                          known_modules=['soil.stats',],
                                          **stats_params)
    with utils.timer('simulation {}'.format(self.name)):
        for stat in stats:
            stat.start()
        for exporter in exporters:
            exporter.start()
        for env in self._run_sync_or_async(*args,
                                           parallel=parallel,
                                           log_level=log_level,
                                           **kwargs):
            collected = list(stat.trial(env) for stat in stats)
            saved = self.save_stats(collected, t_step=env.now,
                                    trial_id=env.name)
            for exporter in exporters:
                exporter.trial(env, saved)
            yield env
        collected = list(stat.end() for stat in stats)
        saved = self.save_stats(collected)
        for exporter in exporters:
            exporter.end(saved)
Run the simulation and yield the resulting environments.
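A hedged driving loop for the generator. The Simulation constructor arguments below (a complete-graph topology and a CounterModel agent) are guesses at a typical configuration and are not taken from this record; only the iteration over run_gen and the env.name / env.now attributes come from the code shown above.

from soil.simulation import Simulation

sim = Simulation(name='demo',
                 network_params={'generator': 'complete_graph', 'n': 10},
                 agent_type='CounterModel',
                 max_time=20,
                 num_trials=2)

for env in sim.run_gen(parallel=False, dry_run=True):
    # each trial's environment is yielded after its stats are collected
    print(env.name, env.now)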
https://github.com/gsi-upm/soil/blob/e860bdb922a22da2987fba07dffb81351c0272e5/soil/simulation.py#L161-L206
import os import time import importlib import sys import yaml import traceback import logging import networkx as nx from networkx.readwrite import json_graph from multiprocessing import Pool from functools import partial import pickle from . import serialization, utils, basestring, agents from .environment import Environment from .utils import logger from .exporters import default from .stats import defaultStats from .history import History class Simulation: def __init__(self, name=None, group=None, topology=None, network_params=None, network_agents=None, agent_type=None, states=None, default_state=None, interval=1, num_trials=1, max_time=100, load_module=None, seed=None, dir_path=None, environment_agents=None, environment_params=None, environment_class=None, **kwargs): self.load_module = load_module self.network_params = network_params self.name = name or 'Unnamed' self.seed = str(seed or name) self._id = '{}_{}'.format(self.name, time.strftime("%Y-%m-%d_%H.%M.%S")) self.group = group or '' self.num_trials = num_trials self.max_time = max_time self.default_state = default_state or {} self.dir_path = dir_path or os.getcwd() self.interval = interval sys.path += list(x for x in [os.getcwd(), self.dir_path] if x not in sys.path) if topology is None: topology = serialization.load_network(network_params, dir_path=self.dir_path) elif isinstance(topology, basestring) or isinstance(topology, dict): topology = json_graph.node_link_graph(topology) self.topology = nx.Graph(topology) self.environment_params = environment_params or {} self.environment_class = serialization.deserialize(environment_class, known_modules=['soil.environment', ]) or Environment environment_agents = environment_agents or [] self.environment_agents = agents._convert_agent_types(environment_agents, known_modules=[self.load_module]) distro = agents.calculate_distribution(network_agents, agent_type) self.network_agents = agents._convert_agent_types(distro, known_modules=[self.load_module]) self.states = agents._validate_states(states, self.topology) self._history = History(name=self.name, backup=False) def run_simulation(self, *args, **kwargs): return self.run(*args, **kwargs) def run(self, *args, **kwargs): return list(self.run_gen(*args, **kwargs)) def _run_sync_or_async(self, parallel=False, *args, **kwargs): if parallel: p = Pool() func = partial(self.run_trial_exceptions, *args, **kwargs) for i in p.imap_unordered(func, range(self.num_trials)): if isinstance(i, Exception): logger.error('Trial failed:\n\t%s', i.message) continue yield i else: for i in range(self.num_trials): yield self.run_trial(*args, **kwargs)
Apache License 2.0
jniediek/combinato
combinato/guisort/sessions.py
Sessions.save
python
def save(self):
    for group_id, group in self.groupsById.items():
        idx_type = self.type_table[:, 0] == group_id
        self.type_table[idx_type, 1] = group.group_type
        for cluster in group.clusters:
            idx_cl = self.group_table[:, 0] == cluster.name
            self.group_table[idx_cl, 1] = group_id
    self.sorting_manager.save_groups_and_types(self.group_table,
                                               self.type_table)
Save the sorting result to file.
https://github.com/jniediek/combinato/blob/12cdf7d8e8b9b5f43873f1a8b7263aa7bf25fc27/combinato/guisort/sessions.py#L84-L99
from __future__ import print_function, division, absolute_import import numpy as np from scipy.io import savemat from .cluster import Cluster from .group_list_model import GroupListModel from .. import GROUP_ART, GROUP_NOCLASS, TYPE_MU, TYPE_ART, TYPE_NO class Sessions(object): def __init__(self, parent=None): self.dirty = False self.sorting_manager = parent.sorting_manager self.group_table = self.sorting_manager.get_group_table() self.type_table = self.sorting_manager.get_type_table() self.start_time = np.inf self.stop_time = 0 self.groupsById = {} self._init_clusters() def _init_clusters(self): groups = self.sorting_manager.get_groups() if GROUP_ART not in groups: print('Adding empty artifact group') model = GroupListModel('Artifacts', GROUP_ART, [], TYPE_ART) self.groupsById[GROUP_ART] = model self.type_table = np.vstack(([GROUP_ART, TYPE_ART], self.type_table)) if GROUP_NOCLASS not in groups: print('Adding empty noclass group') model = GroupListModel('Unassigned', GROUP_NOCLASS, [], TYPE_NO) self.groupsById[GROUP_NOCLASS] = model for gid, data in groups.items(): if not len(data): continue group_type = self.sorting_manager.get_group_type(gid) if gid == GROUP_ART: name = 'Artifacts' elif gid == GROUP_NOCLASS: name = 'Unassigned' else: name = str(gid) model = GroupListModel(name, gid, [], group_type) tmp_clusters = [] for clid, clus in data.items(): times = clus['times'] spikes = clus['spikes'] fname = clus['image'] clu = Cluster(clid, fname, spikes, times) tmp_clusters.append(clu) self.start_time = min(self.start_time, times[0]) self.stop_time = max(self.stop_time, times[-1]) model.addClusters(tmp_clusters) self.groupsById[gid] = model self.updateGroupsByName() def updateGroupsByName(self): self.groupsByName = {} for group in self.groupsById.values(): self.groupsByName[group.name] = group
MIT License
personads/synvae
magenta/music/pianoroll_encoder_decoder.py
PianorollEncoderDecoder.class_index_to_event
python
def class_index_to_event(self, class_index, events):
    assert class_index < self.num_classes
    event = []
    for i in range(self.input_size):
        if class_index % 2:
            event.append(i)
        class_index >>= 1
    assert class_index == 0
    return tuple(event)
Returns the event for the given class index.

This is the reverse process of the self.events_to_label method.

Args:
    class_index: An integer in the range [0, self.num_classes).
    events: A list-like sequence of events. This object is not used in this
        implementation.

Returns:
    A PianorollSequence event value.
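A tiny round trip on a 4-key pianoroll: label 5 is binary 0101, i.e. pitches 0 and 2. Only classes and methods shown in this record are used.

enc = PianorollEncoderDecoder(input_size=4)

event = enc.class_index_to_event(5, events=None)   # events is unused here
print(event)                                       # -> (0, 2)
print(enc._event_to_label(event))                  # -> 5, the original class index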
https://github.com/personads/synvae/blob/2b75697286d775e449d505195e3aa5c693187465/magenta/music/pianoroll_encoder_decoder.py#L87-L107
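Since the decoding simply reads class_index as a bitmask over input_size pitches, a self-contained sketch of the same bit-walk may help (the function name here is illustrative and not part of the magenta API):

def index_to_event(class_index, input_size):
    # Walk the low bits of class_index; every set bit marks an active pitch.
    event = []
    for i in range(input_size):
        if class_index % 2:
            event.append(i)
        class_index >>= 1
    return tuple(event)

print(index_to_event(0b1010, 4))  # (1, 3): bits 1 and 3 are set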
from __future__ import division from magenta.music import encoder_decoder import numpy as np class PianorollEncoderDecoder(encoder_decoder.EventSequenceEncoderDecoder): def __init__(self, input_size=88): self._input_size = input_size @property def input_size(self): return self._input_size @property def num_classes(self): return 2 ** self.input_size @property def default_event_label(self): return 0 def _event_to_label(self, event): label = 0 for pitch in event: label += 2**pitch return label def _event_to_input(self, event): input_ = np.zeros(self.input_size, np.float32) input_[list(event)] = 1 return input_ def events_to_input(self, events, position): return self._event_to_input(events[position]) def events_to_label(self, events, position): return self._event_to_label(events[position])
MIT License
kriaga/health-checker
HealthChecker/venv/Lib/site-packages/nltk/translate/ibm2.py
IBMModel2.prob_alignment_point
python
def prob_alignment_point(self, i, j, src_sentence, trg_sentence):
    l = len(src_sentence) - 1
    m = len(trg_sentence) - 1
    s = src_sentence[i]
    t = trg_sentence[j]
    return self.translation_table[t][s] * self.alignment_table[i][j][l][m]
Probability that position j in ``trg_sentence`` is aligned to position i in the ``src_sentence``
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/nltk/translate/ibm2.py#L218-L227
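A hedged usage sketch, assuming NLTK's documented AlignedSent/IBMModel2 interface: after training on a toy bitext, the two tables that prob_alignment_point multiplies can be inspected directly (the corpus and the resulting probabilities are illustrative only):

from nltk.translate import AlignedSent, IBMModel2

bitext = [
    AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']),
    AlignedSent(['das', 'haus', 'ist', 'ja', 'gross'], ['the', 'house', 'is', 'big']),
]
ibm2 = IBMModel2(bitext, 5)  # 5 EM iterations

# translation_table[t][s] and alignment_table[i][j][l][m] are the two factors
# combined inside prob_alignment_point.
print(ibm2.translation_table['haus']['house'])
print(ibm2.alignment_table[2][2][4][4])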
from __future__ import division from collections import defaultdict from nltk.translate import AlignedSent from nltk.translate import Alignment from nltk.translate import IBMModel from nltk.translate import IBMModel1 from nltk.translate.ibm_model import Counts import warnings class IBMModel2(IBMModel): def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): super(IBMModel2, self).__init__(sentence_aligned_corpus) if probability_tables is None: ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations) self.translation_table = ibm1.translation_table self.set_uniform_probabilities(sentence_aligned_corpus) else: self.translation_table = probability_tables['translation_table'] self.alignment_table = probability_tables['alignment_table'] for n in range(0, iterations): self.train(sentence_aligned_corpus) self.align_all(sentence_aligned_corpus) def set_uniform_probabilities(self, sentence_aligned_corpus): l_m_combinations = set() for aligned_sentence in sentence_aligned_corpus: l = len(aligned_sentence.mots) m = len(aligned_sentence.words) if (l, m) not in l_m_combinations: l_m_combinations.add((l, m)) initial_prob = 1 / (l + 1) if initial_prob < IBMModel.MIN_PROB: warnings.warn("A source sentence is too long (" + str(l) + " words). Results may be less accurate.") for i in range(0, l + 1): for j in range(1, m + 1): self.alignment_table[i][j][l][m] = initial_prob def train(self, parallel_corpus): counts = Model2Counts() for aligned_sentence in parallel_corpus: src_sentence = [None] + aligned_sentence.mots trg_sentence = ['UNUSED'] + aligned_sentence.words l = len(aligned_sentence.mots) m = len(aligned_sentence.words) total_count = self.prob_all_alignments(src_sentence, trg_sentence) for j in range(1, m + 1): t = trg_sentence[j] for i in range(0, l + 1): s = src_sentence[i] count = self.prob_alignment_point( i, j, src_sentence, trg_sentence) normalized_count = count / total_count[t] counts.update_lexical_translation(normalized_count, s, t) counts.update_alignment(normalized_count, i, j, l, m) self.maximize_lexical_translation_probabilities(counts) self.maximize_alignment_probabilities(counts) def maximize_alignment_probabilities(self, counts): MIN_PROB = IBMModel.MIN_PROB for i, j_s in counts.alignment.items(): for j, src_sentence_lengths in j_s.items(): for l, trg_sentence_lengths in src_sentence_lengths.items(): for m in trg_sentence_lengths: estimate = (counts.alignment[i][j][l][m] / counts.alignment_for_any_i[j][l][m]) self.alignment_table[i][j][l][m] = max(estimate, MIN_PROB) def prob_all_alignments(self, src_sentence, trg_sentence): alignment_prob_for_t = defaultdict(lambda: 0.0) for j in range(1, len(trg_sentence)): t = trg_sentence[j] for i in range(0, len(src_sentence)): alignment_prob_for_t[t] += self.prob_alignment_point( i, j, src_sentence, trg_sentence) return alignment_prob_for_t
MIT License
google-research/noisystudent
efficientnet_model.py
Model.__init__
python
def __init__(self, blocks_args=None, global_params=None, use_adv_bn=False, is_teacher=False):
    super(Model, self).__init__()
    if not isinstance(blocks_args, list):
        raise ValueError('blocks_args should be a list.')
    self._global_params = global_params
    self._blocks_args = blocks_args
    self._relu_fn = global_params.relu_fn or tf.nn.swish
    self.endpoints = None
    self.use_adv_bn = use_adv_bn
    self.is_teacher = is_teacher
    self._build()
Initializes a `Model` instance.

Args:
  blocks_args: A list of BlockArgs to construct block modules.
  global_params: GlobalParams, a set of global parameters.

Raises:
  ValueError: when blocks_args is not specified as a list.
https://github.com/google-research/noisystudent/blob/aac52bf02448f6e45aee6c418f3e588803e4fa43/efficientnet_model.py#L336-L357
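A hedged construction sketch using the BlockArgs and GlobalParams namedtuples defined in this module; the coefficient values are invented for illustration, unspecified fields fall back to their None defaults, and actually building the model still requires the TensorFlow 1.x stack this repository targets:

blocks_args = [
    BlockArgs(kernel_size=3, num_repeat=1, input_filters=32, output_filters=16,
              expand_ratio=1, id_skip=True, strides=[1, 1], se_ratio=0.25),
]
global_params = GlobalParams(
    batch_norm_momentum=0.99, batch_norm_epsilon=1e-3, dropout_rate=0.2,
    data_format='channels_last', num_classes=1000, width_coefficient=1.0,
    depth_coefficient=1.0, depth_divisor=8,
)
model = Model(blocks_args=blocks_args, global_params=global_params)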
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import numpy as np import six from six.moves import xrange import tensorflow as tf from absl import flags import utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.tpu import tpu_function from tensorflow.python.ops import nn from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.ops import variables as tf_variables GlobalParams = collections.namedtuple('GlobalParams', [ 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'data_format', 'num_classes', 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'min_depth', 'stochastic_depth_rate', 'relu_fn', ]) GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields) BlockArgs = collections.namedtuple('BlockArgs', [ 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', 'expand_ratio', 'id_skip', 'strides', 'se_ratio', 'conv_type', ]) BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields) FLAGS = flags.FLAGS def batchnorm(use_adv_bn=False, is_teacher=False, **kwargs): return utils.TpuBatchNormalization(**kwargs) def conv_kernel_initializer(shape, dtype=None, partition_info=None): del partition_info kernel_height, kernel_width, _, out_filters = shape fan_out = int(kernel_height * kernel_width * out_filters) return tf.random_normal( shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) def dense_kernel_initializer(shape, dtype=None, partition_info=None): del partition_info init_range = 1.0 / np.sqrt(shape[1]) return tf.random_uniform(shape, -init_range, init_range, dtype=dtype) def round_filters(filters, global_params): orig_f = filters multiplier = global_params.width_coefficient divisor = global_params.depth_divisor min_depth = global_params.min_depth if not multiplier: return filters filters *= multiplier min_depth = min_depth or divisor new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) if new_filters < 0.9 * filters: new_filters += divisor tf.logging.info('round_filter input={} output={}'.format(orig_f, new_filters)) return int(new_filters) def round_repeats(repeats, global_params): multiplier = global_params.depth_coefficient if not multiplier: return repeats return int(math.ceil(multiplier * repeats)) class MBConvBlock(object): def __init__(self, block_args, global_params, trainable, use_adv_bn, is_teacher): self._block_args = block_args self._batch_norm_momentum = global_params.batch_norm_momentum self._batch_norm_epsilon = global_params.batch_norm_epsilon self._data_format = global_params.data_format if self._data_format == 'channels_first': self._channel_axis = 1 self._spatial_dims = [2, 3] else: self._channel_axis = -1 self._spatial_dims = [1, 2] self._relu_fn = global_params.relu_fn or tf.nn.swish self._has_se = (self._block_args.se_ratio is not None) and ( self._block_args.se_ratio > 0) and (self._block_args.se_ratio <= 1) self.use_adv_bn = use_adv_bn self.is_teacher = is_teacher self.endpoints = None self.trainable = trainable self._build() def block_args(self): return self._block_args def _build(self): filters = self._block_args.input_filters * self._block_args.expand_ratio if self._block_args.expand_ratio != 1: self._expand_conv = tf.layers.Conv2D( filters, kernel_size=[1, 
1], strides=[1, 1], kernel_initializer=conv_kernel_initializer, padding='same', data_format=self._data_format, use_bias=False, trainable=self.trainable) self._bn0 = batchnorm( axis=self._channel_axis, momentum=self._batch_norm_momentum, epsilon=self._batch_norm_epsilon, trainable=self.trainable, use_adv_bn=self.use_adv_bn, is_teacher=self.is_teacher) kernel_size = self._block_args.kernel_size self._depthwise_conv = utils.DepthwiseConv2D( [kernel_size, kernel_size], strides=self._block_args.strides, depthwise_initializer=conv_kernel_initializer, padding='same', data_format=self._data_format, use_bias=False, trainable=self.trainable) self._bn1 = batchnorm( axis=self._channel_axis, momentum=self._batch_norm_momentum, epsilon=self._batch_norm_epsilon, trainable=self.trainable, use_adv_bn=self.use_adv_bn, is_teacher=self.is_teacher) if self._has_se: num_reduced_filters = max( 1, int(self._block_args.input_filters * self._block_args.se_ratio)) self._se_reduce = tf.layers.Conv2D( num_reduced_filters, kernel_size=[1, 1], strides=[1, 1], kernel_initializer=conv_kernel_initializer, padding='same', data_format=self._data_format, use_bias=True, trainable=self.trainable) self._se_expand = tf.layers.Conv2D( filters, kernel_size=[1, 1], strides=[1, 1], kernel_initializer=conv_kernel_initializer, padding='same', data_format=self._data_format, use_bias=True, trainable=self.trainable) filters = self._block_args.output_filters self._project_conv = tf.layers.Conv2D( filters, kernel_size=[1, 1], strides=[1, 1], kernel_initializer=conv_kernel_initializer, padding='same', data_format=self._data_format, use_bias=False, trainable=self.trainable) self._bn2 = batchnorm( axis=self._channel_axis, momentum=self._batch_norm_momentum, epsilon=self._batch_norm_epsilon, trainable=self.trainable, use_adv_bn=self.use_adv_bn, is_teacher=self.is_teacher) def _call_se(self, input_tensor): se_tensor = tf.reduce_mean(input_tensor, self._spatial_dims, keepdims=True) se_tensor = self._se_expand(self._relu_fn(self._se_reduce(se_tensor))) tf.logging.info('Built Squeeze and Excitation with tensor shape: %s' % (se_tensor.shape)) return tf.sigmoid(se_tensor) * input_tensor def call(self, inputs, training=True, stochastic_depth_rate=None): tf.logging.info('Block input: %s shape: %s' % (inputs.name, inputs.shape)) if self._block_args.expand_ratio != 1: x = self._relu_fn(self._bn0(self._expand_conv(inputs), training=training and self.trainable)) else: x = inputs tf.logging.info('Expand: %s shape: %s' % (x.name, x.shape)) x = self._relu_fn(self._bn1(self._depthwise_conv(x), training=training and self.trainable)) tf.logging.info('DWConv: %s shape: %s' % (x.name, x.shape)) if self._has_se: with tf.variable_scope('se'): x = self._call_se(x) self.endpoints = {'expansion_output': x} x = self._bn2(self._project_conv(x), training=training and self.trainable) if self._block_args.id_skip: if all( s == 1 for s in self._block_args.strides ) and self._block_args.input_filters == self._block_args.output_filters: if stochastic_depth_rate: x = utils.stochastic_depth( x, training and self.trainable, stochastic_depth_rate) x = tf.add(x, inputs) tf.logging.info('Project: %s shape: %s' % (x.name, x.shape)) return x class Model(tf.keras.Model):
Apache License 2.0
lanpa/tensorboardx
tensorboardX/caffe2_graph.py
_get_blob_names
python
def _get_blob_names(ops):
    names = set()
    for op in ops:
        names.update(op.input)
        names.update(op.output)
    return {name: name for name in names}
Get all the operator input and output blobs and perform dedup on their names.

Args:
    ops: List of Caffe2 operators to extract inputs and outputs from

Returns:
    set containing distinct inputs and outputs from 'ops'
https://github.com/lanpa/tensorboardx/blob/054f1f3aa5e8313be42450f5e9ce1fc1799252a7/tensorboardX/caffe2_graph.py#L138-L152
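A standalone illustration of the dedup with stand-in operator objects; SimpleNamespace here only mimics the .input/.output attributes of real Caffe2 operator protos:

from types import SimpleNamespace

ops = [
    SimpleNamespace(input=['data', 'conv1_w'], output=['conv1']),
    SimpleNamespace(input=['conv1'], output=['relu1']),
]
names = set()
for op in ops:
    names.update(op.input)
    names.update(op.output)
print({name: name for name in names})  # each distinct blob mapped to itself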
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import copy import logging import os import re import six from builtins import bytes from caffe2.proto import caffe2_pb2 from caffe2.python import core, workspace from .proto.graph_pb2 import GraphDef from .proto.node_def_pb2 import NodeDef from .proto.tensor_shape_pb2 import TensorShapeProto def _make_unique_name(seen, name, min_version=0): assert name is not None i = min_version x = '%s_%d' % (name, i) if i else name while x in seen: i += 1 x = '%s_%d' % (name, i) seen.add(x) return x def _rename_tensorflow_style(shapes, blob_name_tracker, ops): WEIGHT = re.compile(r"(_w)$") WEIGHT_ = re.compile(r"(_w_)") BN = re.compile(r"(_bn)$") BN_ = re.compile(r"(_bn_)") BIAS = re.compile(r"(_b)$") BIAS_ = re.compile(r"(_b_)") SCALE = re.compile(r"(_s)$") SCALE_ = re.compile(r"(_s_)") SUM = re.compile(r"(_sum)$") SUM_ = re.compile(r"(_sum_)") BRANCH = re.compile(r"(_branch)") def f(name): inter_name = WEIGHT_.sub('/weight_', WEIGHT.sub('/weight', name)) inter_name = BN_.sub('/batchnorm_', BN.sub('/batchnorm', inter_name)) inter_name = BIAS_.sub('/bias_', BIAS.sub('/bias', inter_name)) inter_name = SCALE_.sub('/scale_', SCALE.sub('/scale', inter_name)) inter_name = SUM_.sub('/sum_', SUM.sub('/sum', inter_name)) new_name = BRANCH.sub('/branch', inter_name) return new_name _rename_all(shapes, blob_name_tracker, ops, f) def _convert_to_ssa(shapes, blob_name_tracker, ops): ir = core.IR(ops) seen = set() versioned = {} new_shapes = {} new_blob_name_tracker = {} def ssa_name(name, versions): assert name in versions version = versions[name] if (name, version) in versioned: return versioned[(name, version)] new_name = _make_unique_name(seen, name, min_version=version) versioned[(name, version)] = new_name if name in shapes: new_shapes[new_name] = shapes[name] if blob_name_tracker and name in blob_name_tracker: new_blob_name_tracker[new_name] = blob_name_tracker[name] return new_name for (op, ssa) in zip(ops, ir.ssa): assert op is ssa.op inputs = list(op.input) outputs = list(op.output) del op.input[:] del op.output[:] op.input.extend(ssa_name(name, ssa.in_versions) for name in inputs) op.output.extend(ssa_name(name, ssa.out_versions) for name in outputs) shapes.clear() shapes.update(new_shapes) if blob_name_tracker: blob_name_tracker.clear() blob_name_tracker.update(new_blob_name_tracker)
MIT License
jaraco/jaraco.windows
jaraco/windows/dpapi.py
CryptUnprotectData
python
def CryptUnprotectData(data, optional_entropy=None, prompt_struct=None, flags=0):
    data_in = DATA_BLOB(data)
    entropy = DATA_BLOB(optional_entropy) if optional_entropy else None
    data_out = DATA_BLOB()
    ptr_description = wintypes.LPWSTR()
    res = _CryptUnprotectData(
        data_in,
        ctypes.byref(ptr_description),
        entropy,
        None,
        prompt_struct,
        flags | CRYPTPROTECT_UI_FORBIDDEN,
        data_out,
    )
    handle_nonzero_success(res)
    description = ptr_description.value
    if ptr_description.value is not None:
        ctypes.windll.kernel32.LocalFree(ptr_description)
    res = data_out.get_data()
    data_out.free()
    return description, res
Returns a tuple of (description, data) where description is the description that was passed to the CryptProtectData call and data is the decrypted result.
https://github.com/jaraco/jaraco.windows/blob/e858172b4d5ee91233a8cc5319de99f17848f090/jaraco/windows/dpapi.py#L118-L143
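A hedged round-trip sketch (Windows only, since it goes through crypt32): protect a secret with the module's CryptProtectData, shown in the context below, then recover it; the secret and description are placeholders:

from jaraco.windows.dpapi import CryptProtectData, CryptUnprotectData

blob = CryptProtectData(b'my secret', description='example blob')
description, plaintext = CryptUnprotectData(blob)
assert plaintext == b'my secret'
print(description)  # 'example blob'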
import ctypes from ctypes import wintypes from jaraco.windows.error import handle_nonzero_success __import__('jaraco.windows.api.memory') class DATA_BLOB(ctypes.Structure): _fields_ = [('data_size', wintypes.DWORD), ('data', ctypes.c_void_p)] def __init__(self, data=None): super(DATA_BLOB, self).__init__() self.set_data(data) def set_data(self, data): if data is None: self.data_size = 0 self.data = None return self.data_size = len(data) self.data = ctypes.cast(ctypes.create_string_buffer(data), ctypes.c_void_p) def get_data(self): array = ctypes.POINTER(ctypes.c_char * len(self)) return ctypes.cast(self.data, array).contents.raw def __len__(self): return self.data_size def __str__(self): return self.get_data() def free(self): ctypes.windll.kernel32.LocalFree(self.data) p_DATA_BLOB = ctypes.POINTER(DATA_BLOB) _CryptProtectData = ctypes.windll.crypt32.CryptProtectData _CryptProtectData.argtypes = [ p_DATA_BLOB, wintypes.LPCWSTR, p_DATA_BLOB, ctypes.c_void_p, ctypes.c_void_p, wintypes.DWORD, p_DATA_BLOB, ] _CryptProtectData.restype = wintypes.BOOL _CryptUnprotectData = ctypes.windll.crypt32.CryptUnprotectData _CryptUnprotectData.argtypes = [ p_DATA_BLOB, ctypes.POINTER(wintypes.LPWSTR), p_DATA_BLOB, ctypes.c_void_p, ctypes.c_void_p, wintypes.DWORD, p_DATA_BLOB, ] _CryptUnprotectData.restype = wintypes.BOOL CRYPTPROTECT_UI_FORBIDDEN = 0x01 def CryptProtectData( data, description=None, optional_entropy=None, prompt_struct=None, flags=0 ): data_in = DATA_BLOB(data) entropy = DATA_BLOB(optional_entropy) if optional_entropy else None data_out = DATA_BLOB() res = _CryptProtectData( data_in, description, entropy, None, prompt_struct, flags, data_out ) handle_nonzero_success(res) res = data_out.get_data() data_out.free() return res
MIT License
nescau-ufla/fuzzingtool
src/fuzzingtool/conn/RequestParser.py
RequestParser.__getAjustedContentByIndexes
python
def __getAjustedContentByIndexes(self, content: dict) -> str:
    ajustedContent = content['content']
    for i in content['fuzzingIndexes']:
        head = ajustedContent[:i]
        tail = ajustedContent[(i+1):]
        ajustedContent = head + self.__payload + tail
    return ajustedContent
Put the payload into the given content

@type content: dict
@param content: The target content dictionary
@returns str: The new content
https://github.com/nescau-ufla/fuzzingtool/blob/d0dbe3ee4c17ec8ee72423bf7fabce6849e01807/src/fuzzingtool/conn/RequestParser.py#L90-L102
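A standalone sketch of the splice itself: each recorded index marks a one-character fuzzing placeholder that is replaced by the current payload (the URL, index, and '$' placeholder below are made up; in the package the marker character comes from FUZZING_MARK):

content = {'content': 'https://target/?q=$', 'fuzzingIndexes': [18]}
payload = 'admin'

adjusted = content['content']
for i in content['fuzzingIndexes']:
    adjusted = adjusted[:i] + payload + adjusted[(i + 1):]
print(adjusted)  # https://target/?q=admin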
from ..utils.consts import * def checkForSubdomainFuzz(url: str) -> bool: if ('.' in url and FUZZING_MARK in url) and url.index(FUZZING_MARK) < url.index('.'): return True return False class RequestParser: def __init__(self): self.__payload = '' def getMethod(self, method: dict) -> str: return method['content'] if not method['fuzzingIndexes'] else self.__getAjustedContentByIndexes(method) def getUrl(self, url: dict) -> str: return url['content'] if not url['fuzzingIndexes'] else self.__getAjustedContentByIndexes(url) def getHeader(self, headers: dict) -> dict: return headers['content'] if not headers['payloadKeys'] else self.__getAjustedHeader(headers) def getData(self, data: dict) -> dict: return { 'PARAM': {} if not data['PARAM'] else self.__getAjustedData(data['PARAM']), 'BODY': {} if not data['BODY'] else self.__getAjustedData(data['BODY']) } def setPayload(self, payload: str) -> None: self.__payload = payload
MIT License
jbalogh/glow
vendor/lib/python/hbase/Hbase.py
Iface.deleteTable
python
def deleteTable(self, tableName):
    pass
Deletes a table

@throws IOError if table doesn't exist on server or there was some other problem

Parameters:
 - tableName: name of table to delete
https://github.com/jbalogh/glow/blob/776671773749e8d2352d3da71916cf60824f8782/vendor/lib/python/hbase/Hbase.py#L108-L118
from thrift.Thrift import * from ttypes import * from thrift.Thrift import TProcessor from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None class Iface: def enableTable(self, tableName): pass def disableTable(self, tableName): pass def isTableEnabled(self, tableName): pass def compact(self, tableNameOrRegionName): pass def majorCompact(self, tableNameOrRegionName): pass def getTableNames(self, ): pass def getColumnDescriptors(self, tableName): pass def getTableRegions(self, tableName): pass def createTable(self, tableName, columnFamilies): pass
BSD 3-Clause New or Revised License
sametmax/django--an-app-at-a-time
ignore_this_directory/django/contrib/admin/options.py
ModelAdmin.get_changelist_form
python
def get_changelist_form(self, request, **kwargs):
    defaults = {
        'formfield_callback': partial(self.formfield_for_dbfield, request=request),
        **kwargs,
    }
    if defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form')):
        defaults['fields'] = forms.ALL_FIELDS
    return modelform_factory(self.model, **defaults)
Return a Form class for use in the Formset on the changelist page.
https://github.com/sametmax/django--an-app-at-a-time/blob/99eddf12ead76e6dfbeb09ce0bae61e282e22f8a/ignore_this_directory/django/contrib/admin/options.py#L762-L773
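A hedged usage sketch: with list_editable set, Django builds the changelist's per-row editing formset from the form class this method returns, so overriding it is one way to narrow the editable fields. The app and model names below are hypothetical:

from django.contrib import admin
from myapp.models import Article  # hypothetical model

@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    list_display = ('title', 'status')
    list_editable = ('status',)

    def get_changelist_form(self, request, **kwargs):
        # Restrict the per-row form to the editable column only.
        kwargs.setdefault('fields', ['status'])
        return super().get_changelist_form(request, **kwargs)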
import copy import json import operator import re from collections import OrderedDict from functools import partial, reduce, update_wrapper from urllib.parse import quote as urlquote from django import forms from django.conf import settings from django.contrib import messages from django.contrib.admin import helpers, widgets from django.contrib.admin.checks import ( BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks, ) from django.contrib.admin.exceptions import DisallowedModelAdminToField from django.contrib.admin.templatetags.admin_urls import add_preserved_filters from django.contrib.admin.utils import ( NestedObjects, construct_change_message, flatten_fieldsets, get_deleted_objects, lookup_needs_distinct, model_format_dict, model_ngettext, quote, unquote, ) from django.contrib.admin.views.autocomplete import AutocompleteJsonView from django.contrib.admin.widgets import ( AutocompleteSelect, AutocompleteSelectMultiple, ) from django.contrib.auth import get_permission_codename from django.core.exceptions import ( FieldDoesNotExist, FieldError, PermissionDenied, ValidationError, ) from django.core.paginator import Paginator from django.db import models, router, transaction from django.db.models.constants import LOOKUP_SEP from django.db.models.fields import BLANK_CHOICE_DASH from django.forms.formsets import DELETION_FIELD_NAME, all_valid from django.forms.models import ( BaseInlineFormSet, inlineformset_factory, modelform_defines_fields, modelform_factory, modelformset_factory, ) from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple from django.http import HttpResponseRedirect from django.http.response import HttpResponseBase from django.template.response import SimpleTemplateResponse, TemplateResponse from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.html import format_html from django.utils.http import urlencode from django.utils.inspect import get_func_args from django.utils.safestring import mark_safe from django.utils.text import capfirst, format_lazy, get_text_list from django.utils.translation import gettext as _, ngettext from django.views.decorators.csrf import csrf_protect from django.views.generic import RedirectView IS_POPUP_VAR = '_popup' TO_FIELD_VAR = '_to_field' HORIZONTAL, VERTICAL = 1, 2 def get_content_type_for_model(obj): from django.contrib.contenttypes.models import ContentType return ContentType.objects.get_for_model(obj, for_concrete_model=False) def get_ul_class(radio_style): return 'radiolist' if radio_style == VERTICAL else 'radiolist inline' class IncorrectLookupParameters(Exception): pass FORMFIELD_FOR_DBFIELD_DEFAULTS = { models.DateTimeField: { 'form_class': forms.SplitDateTimeField, 'widget': widgets.AdminSplitDateTime }, models.DateField: {'widget': widgets.AdminDateWidget}, models.TimeField: {'widget': widgets.AdminTimeWidget}, models.TextField: {'widget': widgets.AdminTextareaWidget}, models.URLField: {'widget': widgets.AdminURLFieldWidget}, models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget}, models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget}, models.CharField: {'widget': widgets.AdminTextInputWidget}, models.ImageField: {'widget': widgets.AdminFileWidget}, models.FileField: {'widget': widgets.AdminFileWidget}, models.EmailField: {'widget': widgets.AdminEmailInputWidget}, models.UUIDField: {'widget': widgets.AdminUUIDInputWidget}, } csrf_protect_m = method_decorator(csrf_protect) class BaseModelAdmin(metaclass=forms.MediaDefiningClass): 
autocomplete_fields = () raw_id_fields = () fields = None exclude = None fieldsets = None form = forms.ModelForm filter_vertical = () filter_horizontal = () radio_fields = {} prepopulated_fields = {} formfield_overrides = {} readonly_fields = () ordering = None sortable_by = None view_on_site = True show_full_result_count = True checks_class = BaseModelAdminChecks def check(self, **kwargs): return self.checks_class().check(self, **kwargs) def __init__(self): overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS) for k, v in self.formfield_overrides.items(): overrides.setdefault(k, {}).update(v) self.formfield_overrides = overrides def formfield_for_dbfield(self, db_field, request, **kwargs): if db_field.choices: return self.formfield_for_choice_field(db_field, request, **kwargs) if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): if db_field.__class__ in self.formfield_overrides: kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs} if isinstance(db_field, models.ForeignKey): formfield = self.formfield_for_foreignkey(db_field, request, **kwargs) elif isinstance(db_field, models.ManyToManyField): formfield = self.formfield_for_manytomany(db_field, request, **kwargs) if formfield and db_field.name not in self.raw_id_fields: related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model) wrapper_kwargs = {} if related_modeladmin: wrapper_kwargs.update( can_add_related=related_modeladmin.has_add_permission(request), can_change_related=related_modeladmin.has_change_permission(request), can_delete_related=related_modeladmin.has_delete_permission(request), can_view_related=related_modeladmin.has_view_permission(request), ) formfield.widget = widgets.RelatedFieldWidgetWrapper( formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs ) return formfield for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs} return db_field.formfield(**kwargs) return db_field.formfield(**kwargs) def formfield_for_choice_field(self, db_field, request, **kwargs): if db_field.name in self.radio_fields: if 'widget' not in kwargs: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) if 'choices' not in kwargs: kwargs['choices'] = db_field.get_choices( include_blank=db_field.blank, blank_choice=[('', _('None'))] ) return db_field.formfield(**kwargs) def get_field_queryset(self, db, db_field, request): related_admin = self.admin_site._registry.get(db_field.remote_field.model) if related_admin is not None: ordering = related_admin.get_ordering(request) if ordering is not None and ordering != (): return db_field.remote_field.model._default_manager.using(db).order_by(*ordering) return None def formfield_for_foreignkey(self, db_field, request, **kwargs): db = kwargs.get('using') if 'widget' not in kwargs: if db_field.name in self.get_autocomplete_fields(request): kwargs['widget'] = AutocompleteSelect(db_field.remote_field, self.admin_site, using=db) elif db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field, self.admin_site, using=db) elif db_field.name in self.radio_fields: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) kwargs['empty_label'] = _('None') if db_field.blank else None if 'queryset' not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: 
kwargs['queryset'] = queryset return db_field.formfield(**kwargs) def formfield_for_manytomany(self, db_field, request, **kwargs): if not db_field.remote_field.through._meta.auto_created: return None db = kwargs.get('using') autocomplete_fields = self.get_autocomplete_fields(request) if db_field.name in autocomplete_fields: kwargs['widget'] = AutocompleteSelectMultiple(db_field.remote_field, self.admin_site, using=db) elif db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field, self.admin_site, using=db) elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]: kwargs['widget'] = widgets.FilteredSelectMultiple( db_field.verbose_name, db_field.name in self.filter_vertical ) if 'queryset' not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs['queryset'] = queryset form_field = db_field.formfield(**kwargs) if (isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple))): msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.') help_text = form_field.help_text form_field.help_text = format_lazy('{} {}', help_text, msg) if help_text else msg return form_field def get_autocomplete_fields(self, request): return self.autocomplete_fields def get_view_on_site_url(self, obj=None): if obj is None or not self.view_on_site: return None if callable(self.view_on_site): return self.view_on_site(obj) elif self.view_on_site and hasattr(obj, 'get_absolute_url'): return reverse('admin:view_on_site', kwargs={ 'content_type_id': get_content_type_for_model(obj).pk, 'object_id': obj.pk }) def get_empty_value_display(self): try: return mark_safe(self.empty_value_display) except AttributeError: return mark_safe(self.admin_site.empty_value_display) def get_exclude(self, request, obj=None): return self.exclude def get_fields(self, request, obj=None): if self.fields: return self.fields form = self._get_form_for_get_fields(request, obj) return [*form.base_fields, *self.get_readonly_fields(request, obj)] def get_fieldsets(self, request, obj=None): if self.fieldsets: return self.fieldsets return [(None, {'fields': self.get_fields(request, obj)})] def get_ordering(self, request): return self.ordering or () def get_readonly_fields(self, request, obj=None): return self.readonly_fields def get_prepopulated_fields(self, request, obj=None): return self.prepopulated_fields def get_queryset(self, request): qs = self.model._default_manager.get_queryset() ordering = self.get_ordering(request) if ordering: qs = qs.order_by(*ordering) return qs def get_sortable_by(self, request): return self.sortable_by if self.sortable_by is not None else self.get_list_display(request) def lookup_allowed(self, lookup, value): from django.contrib.admin.filters import SimpleListFilter model = self.model for fk_lookup in model._meta.related_fkey_lookups: if callable(fk_lookup): fk_lookup = fk_lookup() if (lookup, value) in widgets.url_params_from_lookup_dict(fk_lookup).items(): return True relation_parts = [] prev_field = None for part in lookup.split(LOOKUP_SEP): try: field = model._meta.get_field(part) except FieldDoesNotExist: break if not prev_field or (prev_field.is_relation and field not in prev_field.get_path_info()[-1].target_fields): relation_parts.append(part) if not getattr(field, 'get_path_info', None): break prev_field = field model = field.get_path_info()[-1].to_opts.model if len(relation_parts) <= 1: return True 
valid_lookups = {self.date_hierarchy} for filter_item in self.list_filter: if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter): valid_lookups.add(filter_item.parameter_name) elif isinstance(filter_item, (list, tuple)): valid_lookups.add(filter_item[0]) else: valid_lookups.add(filter_item) return not { LOOKUP_SEP.join(relation_parts), LOOKUP_SEP.join(relation_parts + [part]) }.isdisjoint(valid_lookups) def to_field_allowed(self, request, to_field): opts = self.model._meta try: field = opts.get_field(to_field) except FieldDoesNotExist: return False if field.primary_key: return True for many_to_many in opts.many_to_many: if many_to_many.m2m_target_field_name() == to_field: return True registered_models = set() for model, admin in self.admin_site._registry.items(): registered_models.add(model) for inline in admin.inlines: registered_models.add(inline.model) related_objects = ( f for f in opts.get_fields(include_hidden=True) if (f.auto_created and not f.concrete) ) for related_object in related_objects: related_model = related_object.related_model remote_field = related_object.field.remote_field if (any(issubclass(model, related_model) for model in registered_models) and hasattr(remote_field, 'get_related_field') and remote_field.get_related_field() == field): return True return False def has_add_permission(self, request): opts = self.opts codename = get_permission_codename('add', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_change_permission(self, request, obj=None): opts = self.opts codename = get_permission_codename('change', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_delete_permission(self, request, obj=None): opts = self.opts codename = get_permission_codename('delete', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_view_permission(self, request, obj=None): opts = self.opts codename_view = get_permission_codename('view', opts) codename_change = get_permission_codename('change', opts) return ( request.user.has_perm('%s.%s' % (opts.app_label, codename_view)) or request.user.has_perm('%s.%s' % (opts.app_label, codename_change)) ) def has_view_or_change_permission(self, request, obj=None): return self.has_view_permission(request, obj) or self.has_change_permission(request, obj) def has_module_permission(self, request): return request.user.has_module_perms(self.opts.app_label) class ModelAdmin(BaseModelAdmin): list_display = ('__str__',) list_display_links = () list_filter = () list_select_related = False list_per_page = 100 list_max_show_all = 200 list_editable = () search_fields = () date_hierarchy = None save_as = False save_as_continue = True save_on_top = False paginator = Paginator preserve_filters = True inlines = [] add_form_template = None change_form_template = None change_list_template = None delete_confirmation_template = None delete_selected_confirmation_template = None object_history_template = None popup_response_template = None actions = [] action_form = helpers.ActionForm actions_on_top = True actions_on_bottom = False actions_selection_counter = True checks_class = ModelAdminChecks def __init__(self, model, admin_site): self.model = model self.opts = model._meta self.admin_site = admin_site super().__init__() def __str__(self): return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__) def get_inline_instances(self, request, obj=None): inline_instances = [] for inline_class in self.inlines: inline = inline_class(self.model, 
self.admin_site) if request: inline_has_add_permission = inline._has_add_permission(request, obj) if not (inline.has_view_or_change_permission(request, obj) or inline_has_add_permission or inline.has_delete_permission(request, obj)): continue if not inline_has_add_permission: inline.max_num = 0 inline_instances.append(inline) return inline_instances def get_urls(self): from django.urls import path def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) wrapper.model_admin = self return update_wrapper(wrapper, view) info = self.model._meta.app_label, self.model._meta.model_name urlpatterns = [ path('', wrap(self.changelist_view), name='%s_%s_changelist' % info), path('add/', wrap(self.add_view), name='%s_%s_add' % info), path('autocomplete/', wrap(self.autocomplete_view), name='%s_%s_autocomplete' % info), path('<path:object_id>/history/', wrap(self.history_view), name='%s_%s_history' % info), path('<path:object_id>/delete/', wrap(self.delete_view), name='%s_%s_delete' % info), path('<path:object_id>/change/', wrap(self.change_view), name='%s_%s_change' % info), path('<path:object_id>/', wrap(RedirectView.as_view( pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info) ))), ] return urlpatterns @property def urls(self): return self.get_urls() @property def media(self): extra = '' if settings.DEBUG else '.min' js = [ 'vendor/jquery/jquery%s.js' % extra, 'jquery.init.js', 'core.js', 'admin/RelatedObjectLookups.js', 'actions%s.js' % extra, 'urlify.js', 'prepopulate%s.js' % extra, 'vendor/xregexp/xregexp%s.js' % extra, ] return forms.Media(js=['admin/js/%s' % url for url in js]) def get_model_perms(self, request): return { 'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request), 'view': self.has_view_permission(request), } def _get_form_for_get_fields(self, request, obj): return self.get_form(request, obj, fields=None) def get_form(self, request, obj=None, change=False, **kwargs): if 'fields' in kwargs: fields = kwargs.pop('fields') else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) excluded = self.get_exclude(request, obj) exclude = [] if excluded is None else list(excluded) readonly_fields = self.get_readonly_fields(request, obj) exclude.extend(readonly_fields) if change and hasattr(request, 'user') and not self.has_change_permission(request, obj): exclude.extend(fields) if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude: exclude.extend(self.form._meta.exclude) exclude = exclude or None new_attrs = OrderedDict.fromkeys( f for f in readonly_fields if f in self.form.declared_fields ) form = type(self.form.__name__, (self.form,), new_attrs) defaults = { 'form': form, 'fields': fields, 'exclude': exclude, 'formfield_callback': partial(self.formfield_for_dbfield, request=request), **kwargs, } if defaults['fields'] is None and not modelform_defines_fields(defaults['form']): defaults['fields'] = forms.ALL_FIELDS try: return modelform_factory(self.model, **defaults) except FieldError as e: raise FieldError( '%s. Check fields/fieldsets/exclude attributes of class %s.' 
% (e, self.__class__.__name__) ) def get_changelist(self, request, **kwargs): from django.contrib.admin.views.main import ChangeList return ChangeList def get_changelist_instance(self, request): list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) if self.get_actions(request): list_display = ['action_checkbox', *list_display] sortable_by = self.get_sortable_by(request) ChangeList = self.get_changelist(request) return ChangeList( request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request), self.get_list_select_related(request), self.list_per_page, self.list_max_show_all, self.list_editable, self, sortable_by, ) def get_object(self, request, object_id, from_field=None): queryset = self.get_queryset(request) model = queryset.model field = model._meta.pk if from_field is None else model._meta.get_field(from_field) try: object_id = field.to_python(object_id) return queryset.get(**{field.name: object_id}) except (model.DoesNotExist, ValidationError, ValueError): return None
MIT License
open-telemetry/opentelemetry-python
opentelemetry-api/src/opentelemetry/trace/__init__.py
Tracer.start_as_current_span
python
def start_as_current_span(
    self,
    name: str,
    context: Optional[Context] = None,
    kind: SpanKind = SpanKind.INTERNAL,
    attributes: types.Attributes = None,
    links: _Links = None,
    start_time: Optional[int] = None,
    record_exception: bool = True,
    set_status_on_exception: bool = True,
    end_on_exit: bool = True,
) -> Iterator["Span"]:
Context manager for creating a new span and setting it as the current span
in this tracer's context.

Exiting the context manager will call the span's end method, as well as
return the current span to its previous value by returning to the
previous context.

Example::

    with tracer.start_as_current_span("one") as parent:
        parent.add_event("parent's event")
        with trace.start_as_current_span("two") as child:
            child.add_event("child's event")
            trace.get_current_span()  # returns child
        trace.get_current_span()  # returns parent
    trace.get_current_span()  # returns previously active span

This is a convenience method for creating spans attached to the tracer's
context. Applications that need more control over the span lifetime should
use :meth:`start_span` instead. For example::

    with tracer.start_as_current_span(name) as span:
        do_work()

is equivalent to::

    span = tracer.start_span(name)
    with opentelemetry.trace.use_span(span, end_on_exit=True):
        do_work()

Args:
    name: The name of the span to be created.
    context: An optional Context containing the span's parent. Defaults to the
        global context.
    kind: The span's kind (relationship to parent). Note that this is
        meaningful even if there is no parent.
    attributes: The span's attributes.
    links: Links span to other spans
    start_time: Sets the start time of a span
    record_exception: Whether to record any exceptions raised within the
        context as error event on the span.
    set_status_on_exception: Only relevant if the returned span is used
        in a with/context manager. Defines whether the span status will
        be automatically set to ERROR when an uncaught exception is
        raised in the span with block. The span status won't be set by
        this mechanism if it was previously set manually.
    end_on_exit: Whether to end the span automatically when leaving the
        context manager.

Yields:
    The newly-created span.
https://github.com/open-telemetry/opentelemetry-python/blob/65528f7534f1f5f2e8adc7520b6e696a84569c7d/opentelemetry-api/src/opentelemetry/trace/__init__.py#L318-L381
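A hedged usage sketch of the context-manager form this docstring describes, written against the public opentelemetry API; the span names, attribute key, and event are placeholders:

from opentelemetry import trace

tracer = trace.get_tracer(__name__)

with tracer.start_as_current_span("parent") as parent:
    parent.set_attribute("example.key", "value")
    with tracer.start_as_current_span("child") as child:
        child.add_event("doing work")  # placeholder workload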
import os import typing from abc import ABC, abstractmethod from contextlib import contextmanager from enum import Enum from logging import getLogger from typing import Iterator, Optional, Sequence, cast from opentelemetry import context as context_api from opentelemetry.attributes import BoundedAttributes from opentelemetry.context.context import Context from opentelemetry.environment_variables import OTEL_PYTHON_TRACER_PROVIDER from opentelemetry.trace.propagation import ( _SPAN_KEY, get_current_span, set_span_in_context, ) from opentelemetry.trace.span import ( DEFAULT_TRACE_OPTIONS, DEFAULT_TRACE_STATE, INVALID_SPAN, INVALID_SPAN_CONTEXT, INVALID_SPAN_ID, INVALID_TRACE_ID, NonRecordingSpan, Span, SpanContext, TraceFlags, TraceState, format_span_id, format_trace_id, ) from opentelemetry.trace.status import Status, StatusCode from opentelemetry.util import types from opentelemetry.util._once import Once from opentelemetry.util._providers import _load_provider logger = getLogger(__name__) class _LinkBase(ABC): def __init__(self, context: "SpanContext") -> None: self._context = context @property def context(self) -> "SpanContext": return self._context @property @abstractmethod def attributes(self) -> types.Attributes: pass class Link(_LinkBase): def __init__( self, context: "SpanContext", attributes: types.Attributes = None, ) -> None: super().__init__(context) self._attributes = BoundedAttributes( attributes=attributes ) @property def attributes(self) -> types.Attributes: return self._attributes _Links = Optional[Sequence[Link]] class SpanKind(Enum): INTERNAL = 0 SERVER = 1 CLIENT = 2 PRODUCER = 3 CONSUMER = 4 class TracerProvider(ABC): @abstractmethod def get_tracer( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, ) -> "Tracer": class _DefaultTracerProvider(TracerProvider): def get_tracer( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, ) -> "Tracer": return _DefaultTracer() class ProxyTracerProvider(TracerProvider): def get_tracer( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, ) -> "Tracer": if _TRACER_PROVIDER: return _TRACER_PROVIDER.get_tracer( instrumenting_module_name, instrumenting_library_version, schema_url, ) return ProxyTracer( instrumenting_module_name, instrumenting_library_version, schema_url, ) class Tracer(ABC): @abstractmethod def start_span( self, name: str, context: Optional[Context] = None, kind: SpanKind = SpanKind.INTERNAL, attributes: types.Attributes = None, links: _Links = None, start_time: Optional[int] = None, record_exception: bool = True, set_status_on_exception: bool = True, ) -> "Span": @contextmanager @abstractmethod
Apache License 2.0
xuefeng-xu/cs229-fall-2018-problem-solutions
PS3/code/src/p04_gmm.py
run_em
python
def run_em(x, w, phi, mu, sigma):
    eps = 1e-3
    max_iter = 1000
    it = 0
    ll = prev_ll = None
    while it < max_iter and (prev_ll is None or np.abs(ll - prev_ll) >= eps):
        for i in range(K):
            w[:, i] = np.exp(-0.5 * ((x-mu[i]).dot(np.linalg.inv(sigma[i])) * (x-mu[i])).sum(axis=1)) / (np.linalg.det(sigma[i])**0.5) * phi[i]
        w /= w.sum(axis=1)[:, None]
        phi = np.mean(w, axis=0)
        for i in range(K):
            mu[i] = x.T.dot(w[:, i]) / sum(w[:, i])
            sigma[i] = (w[:, i][:, None] * (x-mu[i])).T.dot(x-mu[i]) / sum(w[:, i])
        it += 1
        prev_ll = ll
        p_xz = np.zeros(w.shape)
        for i in range(K):
            p_xz[:, i] = np.exp(-0.5 * ((x-mu[i]).dot(np.linalg.inv(sigma[i])) * (x-mu[i])).sum(axis=1)) / (np.linalg.det(sigma[i])**0.5) * phi[i]
        ll = np.sum(np.log(p_xz))
    print(f'Number of iterations:{it}')
    return w
Problem 3(d): EM Algorithm (unsupervised).

See inline comments for instructions.

Args:
    x: Design matrix of shape (m, n).
    w: Initial weight matrix of shape (m, k).
    phi: Initial mixture prior, of shape (k,).
    mu: Initial cluster means, list of k arrays of shape (n,).
    sigma: Initial cluster covariances, list of k arrays of shape (n, n).

Returns:
    Updated weight matrix of shape (m, k) resulting from EM algorithm.
    More specifically, w[i, j] should contain the probability of
    example x^(i) belonging to the j-th Gaussian in the mixture.
https://github.com/xuefeng-xu/cs229-fall-2018-problem-solutions/blob/cc7a04ecf20c2e8b171a51b0cfd8520af3097695/PS3/code/src/p04_gmm.py#L68-L122
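A hedged driver sketch for run_em on synthetic 2-D data, mirroring how main() seeds the inputs (uniform responsibilities and prior, random grouping for the means and covariances); K is the module-level constant set to 4, and the data here is synthetic:

import numpy as np

m, n = 500, 2
x = np.random.randn(m, n)

mu, sigma = [], []
for chunk in np.array_split(np.random.permutation(m), K):
    xc = x[chunk]
    mu.append(xc.mean(axis=0))
    sigma.append(np.cov(xc, rowvar=False))
phi = np.ones(K) / K
w = np.ones((m, K)) / K

w = run_em(x, w, phi, mu, sigma)
print(w[:3])  # soft cluster assignments for the first three examples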
import matplotlib.pyplot as plt import numpy as np import os PLOT_COLORS = ['red', 'green', 'blue', 'orange'] K = 4 NUM_TRIALS = 3 UNLABELED = -1 def main(is_semi_supervised, trial_num): print('Running {} EM algorithm...' .format('semi-supervised' if is_semi_supervised else 'unsupervised')) train_path = os.path.join('..', 'data', 'ds4_train.csv') x, z = load_gmm_dataset(train_path) x_tilde = None if is_semi_supervised: labeled_idxs = (z != UNLABELED).squeeze() x_tilde = x[labeled_idxs, :] z = z[labeled_idxs, :] x = x[~labeled_idxs, :] m = x.shape[0] idx = np.random.permutation(m) group_member= int(m / K) mu = [] sigma = [] for i in range(K): if i!=K-1: x_temp = x[idx[i*group_member: (i+1)*group_member], :] else: x_temp = x[idx[i*group_member: m], :] mu_temp = np.mean(x_temp, axis=0) mu.append(mu_temp) sigma.append((x_temp-mu_temp).T.dot(x_temp-mu_temp) / x_temp.shape[0]) phi = np.ones(K) / K w = np.ones((m, K)) / K if is_semi_supervised: w = run_semi_supervised_em(x, x_tilde, z, w, phi, mu, sigma) else: w = run_em(x, w, phi, mu, sigma) z_pred = np.zeros(m) if w is not None: for i in range(m): z_pred[i] = np.argmax(w[i]) plot_gmm_preds(x, z_pred, is_semi_supervised, plot_id=trial_num)
MIT License
marselester/json-log-formatter
json_log_formatter/__init__.py
JSONFormatter.to_json
python
def to_json(self, record):
    try:
        return self.json_lib.dumps(record, default=_json_serializable)
    except TypeError:
        try:
            return self.json_lib.dumps(record)
        except TypeError:
            return '{}'
Converts record dict to a JSON string.

It makes a best effort to serialize a record (representing an object as a
string) instead of raising TypeError if the json library supports the
default argument. Note, ujson doesn't support it.

Override this method to change the way dict is converted to JSON.
https://github.com/marselester/json-log-formatter/blob/0b3eb4d5b623882ae5a00609286279a93daee829/json_log_formatter/__init__.py#L73-L90
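A hedged usage sketch following the library's typical setup: attach JSONFormatter to a handler and pass structured fields via extra; to_json() is what finally renders the merged record dict (logger name and extra values are placeholders):

import logging
from json_log_formatter import JSONFormatter

handler = logging.StreamHandler()
handler.setFormatter(JSONFormatter())

logger = logging.getLogger('signup')
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.info('Sign up', extra={'referral_code': '52d6ce'})
# -> {"referral_code": "52d6ce", "message": "Sign up", "time": "..."}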
import logging from datetime import datetime import json BUILTIN_ATTRS = { 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'levelname', 'levelno', 'lineno', 'module', 'msecs', 'message', 'msg', 'name', 'pathname', 'process', 'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName', } class JSONFormatter(logging.Formatter): json_lib = json def format(self, record): message = record.getMessage() extra = self.extra_from_record(record) json_record = self.json_record(message, extra, record) mutated_record = self.mutate_json_record(json_record) if mutated_record is None: mutated_record = json_record return self.to_json(mutated_record)
MIT License
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_5/models/protection_group_snapshot_transfer.py
ProtectionGroupSnapshotTransfer.__eq__
python
def __eq__(self, other):
    if not isinstance(other, ProtectionGroupSnapshotTransfer):
        return False
    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_5/models/protection_group_snapshot_transfer.py#L138-L143
import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flasharray.FA_2_5 import models class ProtectionGroupSnapshotTransfer(object): swagger_types = { 'name': 'str', 'destroyed': 'bool', 'started': 'int', 'progress': 'float', 'completed': 'int', 'data_transferred': 'int', 'physical_bytes_written': 'int' } attribute_map = { 'name': 'name', 'destroyed': 'destroyed', 'started': 'started', 'progress': 'progress', 'completed': 'completed', 'data_transferred': 'data_transferred', 'physical_bytes_written': 'physical_bytes_written' } required_args = { } def __init__( self, name=None, destroyed=None, started=None, progress=None, completed=None, data_transferred=None, physical_bytes_written=None, ): if name is not None: self.name = name if destroyed is not None: self.destroyed = destroyed if started is not None: self.started = started if progress is not None: self.progress = progress if completed is not None: self.completed = completed if data_transferred is not None: self.data_transferred = data_transferred if physical_bytes_written is not None: self.physical_bytes_written = physical_bytes_written def __setattr__(self, key, value): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `ProtectionGroupSnapshotTransfer`".format(key)) self.__dict__[key] = value def __getattribute__(self, item): value = object.__getattribute__(self, item) if isinstance(value, Property): raise AttributeError else: return value def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): if hasattr(self, attr): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ProtectionGroupSnapshotTransfer, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
BSD 2-Clause Simplified License
cbrnr/sleepecg
examples/benchmark/utils.py
reader_dispatch
python
def reader_dispatch(data_dir: str, db_slug: str) -> Iterator[ECGRecord]:
    readers = {
        'gudb': sleepecg.io.read_gudb,
        'ltdb': sleepecg.io.read_ltdb,
        'mitdb': sleepecg.io.read_mitdb,
    }
    if db_slug not in readers:
        raise ValueError(f'Invalid db_slug: {db_slug}')
    yield from readers[db_slug](data_dir)
Read ECG records from mitdb, ltdb or gudb.

Parameters
----------
data_dir : str
    Directory where all datasets are stored.
db_slug : str
    Short identifier of a dataset, e.g. `'mitdb'`.

Yields
------
ECGRecord
    Each element in the generator is of type `ECGRecord` and contains
    the ECG signal (`.ecg`), sampling frequency (`.fs`), annotated beat
    indices (`.annotations`), `.lead`, and `.id`.
https://github.com/cbrnr/sleepecg/blob/93e8181b2562ffa15ea57bfbb9cbfe2ddf72afd5/examples/benchmark/utils.py#L23-L48
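A hedged usage sketch: iterate a few MITDB records through the dispatcher and read the fields named in the docstring. The data directory is a placeholder, and the underlying sleepecg readers are assumed to fetch the records into it:

from itertools import islice

for record in islice(reader_dispatch('./datasets', 'mitdb'), 3):
    print(record.id, record.fs, len(record.annotations))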
import time from typing import Any, Dict, Iterator import biosppy import ecgdetectors import heartpy import heartpy.exceptions import mne import neurokit2 import numpy as np import wfdb.processing import sleepecg from sleepecg.io.ecg_readers import ECGRecord
BSD 3-Clause New or Revised License
mpicard/pyvas
src/pyvas/client.py
Client.get_report_format
python
def get_report_format(self, uuid):
    return self._get("report_format", uuid=uuid)
Get report format with uuid.
https://github.com/mpicard/pyvas/blob/a0563a29bb39d4481facd9a4f18bf52c75abf631/src/pyvas/client.py#L185-L187
from __future__ import unicode_literals, print_function import os import socket import ssl import six from lxml import etree from .response import Response from .utils import dict_to_lxml from .utils import lxml_to_dict from .exceptions import AuthenticationError from .exceptions import HTTPError from .exceptions import ElementNotFound DEFAULT_PORT = os.environ.get("OPENVASMD_PORT", 9390) DEFAULT_SCANNER_NAME = "OpenVAS Default" def print_xml(element): print(etree.tostring(element, pretty_print=True)) class Client(object): def __init__(self, host, username=None, password=None, port=DEFAULT_PORT): self.host = host self.port = port self.username = username self.password = password self.socket = None self.session = None def open(self, username=None, password=None): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket = sock = ssl.wrap_socket(sock) sock.connect((self.host, self.port)) self.authenticate(username, password) def close(self): self.socket.close() self.socket = None def authenticate(self, username=None, password=None): if self.socket is None: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket = sock = ssl.wrap_socket(sock) sock.connect((self.host, self.port)) if username is None: username = self.username if password is None: password = self.password request = dict_to_lxml( "authenticate", {"credentials": { "username": username, "password": password }} ) try: return self._command(request) except HTTPError: raise AuthenticationError(username) def list_port_lists(self, **kwargs): return self._list("port_list", **kwargs) def get_port_list(self, uuid): return self._get("port_list", uuid=uuid) def create_port_list(self, name, port_range, comment=None): if comment is None: comment = "" data = {"name": name, "port_range": port_range, "comment": comment} request = dict_to_lxml( "create_port_list", data ) return self._create(request) def delete_port_list(self, uuid): return self._delete("port_list", uuid=uuid) def list_targets(self, **kwargs): return self._list("target", **kwargs) def get_target(self, uuid): return self._get("target", uuid=uuid) def create_target(self, name, hosts, port_list=None, ssh_credential=None, alive_tests=None, comment=None): if comment is None: comment = "" data = {"name": name, "hosts": hosts, "comment": comment} if port_list: data.update({"port_list": {'@id': port_list}}) if alive_tests: data.update({"alive_tests": alive_tests}) if ssh_credential: data.update({"ssh_credential": {'@id': ssh_credential}}) request = dict_to_lxml( "create_target", data ) return self._create(request) def modify_target(self, uuid, **kwargs): return self._modify('target', uuid=uuid, exclude_hosts=None, **kwargs) def delete_target(self, uuid): return self._delete("target", uuid=uuid) def list_configs(self, **kwargs): return self._list("config", **kwargs) def get_config(self, uuid): return self._get("config", uuid=uuid) def create_config(self, name, copy_uuid=None, **kwargs): data = {"name": name} if copy_uuid is not None: data["copy"] = copy_uuid data.update(kwargs) request = dict_to_lxml("create_config", data) return self._create(request) def delete_config(self, uuid): return self._delete("config", uuid=uuid) def list_scanners(self, **kwargs): return self._list("scanner", **kwargs) def get_scanner(self, uuid): return self._get("scanner", uuid=uuid) def list_report_formats(self, **kwargs): return self._list("report_format", **kwargs)
MIT License
materials-consortia/optimade-python-tools
optimade/filtertransformers/mongo.py
MongoTransformer._apply_mongo_date_filter
python
def _apply_mongo_date_filter(self, filter_: dict) -> dict: def check_for_timestamp_field(prop, _): if self.mapper is not None: prop = self.mapper.get_optimade_field(prop) return prop == "last_modified" def replace_str_date_with_datetime(subdict, prop, expr): import bson.json_util for operator in subdict[prop]: query_datetime = bson.json_util.loads( bson.json_util.dumps({"$date": subdict[prop][operator]}), json_options=bson.json_util.DEFAULT_JSON_OPTIONS.with_options( tz_aware=True, tzinfo=bson.tz_util.utc ), ) if query_datetime.microsecond != 0: warnings.warn( f"Query for timestamp {subdict[prop][operator]!r} for field {prop!r} contained microseconds, which is not RFC3339 compliant. " "This may cause undefined behaviour for the underlying database.", TimestampNotRFCCompliant, ) subdict[prop][operator] = query_datetime return subdict return recursive_postprocessing( filter_, check_for_timestamp_field, replace_str_date_with_datetime )
This method loops through the query and replaces any operations on suspected timestamp properties with the corresponding operation on a BSON `DateTime` type.
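For illustration, a minimal sketch of the substitution this postprocessor performs, assuming pymongo/bson is installed; the field value and operator below are made up:
import bson.json_util
import bson.tz_util
query = {"last_modified": {"$gte": "2021-06-01T00:00:00Z"}}
# Convert the RFC3339 string into a timezone-aware datetime, mirroring what
# replace_str_date_with_datetime does for each operator on a timestamp field.
query["last_modified"]["$gte"] = bson.json_util.loads(
    bson.json_util.dumps({"$date": query["last_modified"]["$gte"]}),
    json_options=bson.json_util.DEFAULT_JSON_OPTIONS.with_options(
        tz_aware=True, tzinfo=bson.tz_util.utc
    ),
)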
https://github.com/materials-consortia/optimade-python-tools/blob/bb1baa87e6eb7a23e45eb308853665f51c56c4d2/optimade/filtertransformers/mongo.py#L530-L566
import copy import warnings import itertools from typing import Dict, List, Any from lark import v_args, Token from optimade.filtertransformers.base_transformer import BaseTransformer, Quantity from optimade.server.exceptions import BadRequest from optimade.server.warnings import TimestampNotRFCCompliant __all__ = ("MongoTransformer",) class MongoTransformer(BaseTransformer): operator_map = { "<": "$lt", "<=": "$lte", ">": "$gt", ">=": "$gte", "!=": "$ne", "=": "$eq", } inverse_operator_map = { "$lt": "$gte", "$lte": "$gt", "$gt": "$lte", "$gte": "$lt", "$ne": "$eq", "$eq": "$ne", "$in": "$nin", "$nin": "$in", } def postprocess(self, query: Dict[str, Any]): query = self._apply_relationship_filtering(query) query = self._apply_length_operators(query) query = self._apply_unknown_or_null_filter(query) query = self._apply_has_only_filter(query) query = self._apply_mongo_id_filter(query) query = self._apply_mongo_date_filter(query) return query def value_list(self, arg): for value in arg: if str(value) in self.operator_map.keys(): raise NotImplementedError( f"OPERATOR {value} inside value_list {arg} not implemented." ) return arg def value_zip(self, arg): raise NotImplementedError("Correlated list queries are not supported.") def value_zip_list(self, arg): raise NotImplementedError("Correlated list queries are not supported.") def expression(self, arg): return {"$or": arg} if len(arg) > 1 else arg[0] def expression_clause(self, arg): return {"$and": arg} if len(arg) > 1 else arg[0] def expression_phrase(self, arg): return self._recursive_expression_phrase(arg) @v_args(inline=True) def property_first_comparison(self, quantity, query): if "$ne" in query: return {"$and": [{quantity: query}, {quantity: {"$ne": None}}]} if "$size" in query: if ( getattr(self.backend_mapping.get(quantity), "length_quantity", None) is not None ): size_query = { self.backend_mapping[ quantity ].length_quantity.backend_field: query.pop("$size") } final_query = {} if query: final_query = {quantity: query} for q in size_query: if q in final_query: final_query[q].update(size_query[q]) else: final_query[q] = size_query[q] return final_query return {quantity: query} def constant_first_comparison(self, arg): return { arg[2]: {self.operator_map[self._reversed_operator_map[arg[1]]]: arg[0]} } @v_args(inline=True) def value_op_rhs(self, operator, value): return {self.operator_map[operator]: value} def known_op_rhs(self, arg): return {"#known": arg[1] == "KNOWN"} def fuzzy_string_op_rhs(self, arg): if isinstance(arg[1], Token) and arg[1].type == "WITH": pattern = arg[2] else: pattern = arg[1] if arg[0] == "CONTAINS": regex = f"{pattern}" elif arg[0] == "STARTS": regex = f"^{pattern}" elif arg[0] == "ENDS": regex = f"{pattern}$" return {"$regex": regex} def set_op_rhs(self, arg): if len(arg) == 2: return {"$in": arg[1:]} if arg[1] == "ALL": return {"$all": arg[2]} if arg[1] == "ANY": return {"$in": arg[2]} if arg[1] == "ONLY": return {"#only": arg[2]} raise NotImplementedError( f"set_op_rhs not implemented for use with OPERATOR. Given: {arg}" ) def property(self, args): quantity = super().property(args) if isinstance(quantity, Quantity): quantity = quantity.backend_field return ".".join([quantity] + args[1:]) def length_op_rhs(self, arg): if len(arg) == 2 or (len(arg) == 3 and arg[1] == "="): return {"$size": arg[-1]} if arg[1] in self.operator_map and arg[1] != "!=": return {"$size": {self.operator_map[arg[1]]: arg[-1]}} raise NotImplementedError( f"Operator {arg[1]} not implemented for LENGTH filter." 
) def set_zip_op_rhs(self, arg): raise NotImplementedError("Correlated list queries are not supported.") def property_zip_addon(self, arg): raise NotImplementedError("Correlated list queries are not supported.") def _recursive_expression_phrase(self, arg: List) -> Dict[str, Any]: def handle_not_and(arg: Dict[str, List]) -> Dict[str, List]: expr1 = arg["$and"][0] expr2 = arg["$and"][1] if expr1.keys() == expr2.keys(): key = list(expr1.keys())[0] for e, f in itertools.permutations((expr1, expr2)): if e.get(key) == {"$ne": None}: return self._recursive_expression_phrase(["NOT", f]) return { "$or": [ self._recursive_expression_phrase(["NOT", subdict]) for subdict in arg["$and"] ] } def handle_not_or(arg: Dict[str, List]) -> Dict[str, List]: return { "$and": [ self._recursive_expression_phrase(["NOT", subdict]) for subdict in arg["$or"] ] } if len(arg) == 1: return arg[0] if "$or" in arg[1]: return handle_not_or(arg[1]) if "$and" in arg[1]: return handle_not_and(arg[1]) prop, expr = next(iter(arg[1].items())) operator, value = next(iter(expr.items())) if operator == "$not": return {prop: value} if operator in self.inverse_operator_map: filter_ = {prop: {self.inverse_operator_map[operator]: value}} if operator in ("$in", "$eq"): filter_ = {"$and": [filter_, {prop: {"$ne": None}}]} return filter_ filter_ = {prop: {"$not": expr}} if "#known" in expr: return filter_ return {"$and": [filter_, {prop: {"$ne": None}}]} def _apply_length_operators(self, filter_: dict) -> dict: def check_for_length_op_filter(_, expr): return ( isinstance(expr, dict) and "$size" in expr and isinstance(expr["$size"], dict) ) def apply_length_op(subdict, prop, expr): operator, value = list(expr["$size"].items())[0] if operator in self.operator_map.values() and operator != "$ne": _prop = None existence = None if operator == "$gt": _prop = f"{prop}.{value + 1}" existence = True elif operator == "$gte": _prop = f"{prop}.{value}" existence = True elif operator == "$lt": _prop = f"{prop}.{value}" existence = False elif operator == "$lte": _prop = f"{prop}.{value + 1}" existence = False if _prop is not None: subdict.pop(prop) subdict[_prop] = {"$exists": existence} return subdict return recursive_postprocessing( filter_, check_for_length_op_filter, apply_length_op, ) def _apply_relationship_filtering(self, filter_: dict) -> dict: def check_for_entry_type(prop, _): return str(prop).count(".") == 1 and str(prop).split(".")[0] in ( "structures", "references", ) def replace_with_relationship(subdict, prop, expr): _prop, _field = str(prop).split(".") if _field != "id": raise NotImplementedError( f'Cannot filter relationships by field "{_field}", only "id" is supported.' 
) subdict[f"relationships.{_prop}.data.{_field}"] = expr subdict.pop(prop) return subdict return recursive_postprocessing( filter_, check_for_entry_type, replace_with_relationship ) def _apply_has_only_filter(self, filter_: dict) -> dict: def check_for_only_filter(_, expr): return isinstance(expr, dict) and ("#only" in expr) def replace_only_filter(subdict: dict, prop: str, expr: dict): if "$and" not in subdict: subdict["$and"] = [] if prop.startswith("relationships."): if prop not in ( "relationships.references.data.id", "relationships.structures.data.id", ): raise BadRequest(f"Unable to query on unrecognised field {prop}.") first_part_prop = ".".join(prop.split(".")[:-1]) subdict["$and"].append( { first_part_prop: { "$not": {"$elemMatch": {"id": {"$nin": expr["#only"]}}} } } ) subdict["$and"].append({first_part_prop + ".0": {"$exists": True}}) else: subdict["$and"].append( {prop: {"$not": {"$elemMatch": {"$nin": expr["#only"]}}}} ) subdict["$and"].append({prop + ".0": {"$exists": True}}) subdict.pop(prop) return subdict return recursive_postprocessing( filter_, check_for_only_filter, replace_only_filter ) def _apply_unknown_or_null_filter(self, filter_: dict) -> dict: def check_for_known_filter(_, expr): return isinstance(expr, dict) and ( "#known" in expr or "#known" in expr.get("$not", {}) ) def replace_known_filter_with_or(subdict, prop, expr): not_ = set(expr.keys()) == {"$not"} if not_: expr = expr["$not"] exists = expr["#known"] ^ not_ top_level_key = "$or" comparison_operator = "$eq" if exists: top_level_key = "$and" comparison_operator = "$ne" if top_level_key not in subdict: subdict[top_level_key] = [] subdict[top_level_key].append({prop: {"$exists": exists}}) subdict[top_level_key].append({prop: {comparison_operator: None}}) subdict.pop(prop) return subdict return recursive_postprocessing( filter_, check_for_known_filter, replace_known_filter_with_or ) def _apply_mongo_id_filter(self, filter_: dict) -> dict: def check_for_id_key(prop, _): return prop == "_id" def replace_str_id_with_objectid(subdict, prop, expr): from bson import ObjectId for operator in subdict[prop]: val = subdict[prop][operator] if operator not in ("$eq", "$ne"): if self.mapper is not None: prop = self.mapper.get_optimade_field(prop) raise NotImplementedError( f"Operator {operator} not supported for query on field {prop!r}, can only test for equality" ) if isinstance(val, str): subdict[prop][operator] = ObjectId(val) return subdict return recursive_postprocessing( filter_, check_for_id_key, replace_str_id_with_objectid )
MIT License
packtpublishing/generative-adversarial-networks-projects
Chapter02/run.py
plotAndSaveVoxel
python
def plotAndSaveVoxel(file_path, voxel): fig = plt.figure() ax = fig.gca(projection='3d') ax.set_aspect('equal') ax.voxels(voxel, edgecolor="red") plt.savefig(file_path)
Plot a voxel
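A minimal usage sketch; the grid size and output path are illustrative:
import numpy as np
voxel = np.random.rand(8, 8, 8) > 0.5      # boolean occupancy grid
plotAndSaveVoxel('voxel_plot.png', voxel)  # renders the grid in 3D and saves the figure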
https://github.com/packtpublishing/generative-adversarial-networks-projects/blob/ccfbf5ea809b8b147c92a7646ea1e3a716744c94/Chapter02/run.py#L134-L143
import glob import os import time import numpy as np import scipy.io as io import scipy.ndimage as nd import tensorflow as tf from keras import Sequential from keras.callbacks import TensorBoard from keras.layers import Input from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import Conv3D, Deconv3D from keras.layers.core import Activation from keras.layers.normalization import BatchNormalization from keras.models import Model from keras.optimizers import Adam from mpl_toolkits.mplot3d import Axes3D import matplotlib import matplotlib.pyplot as plt def build_generator(): z_size = 200 gen_filters = [512, 256, 128, 64, 1] gen_kernel_sizes = [4, 4, 4, 4, 4] gen_strides = [1, 2, 2, 2, 2] gen_input_shape = (1, 1, 1, z_size) gen_activations = ['relu', 'relu', 'relu', 'relu', 'sigmoid'] gen_convolutional_blocks = 5 input_layer = Input(shape=gen_input_shape) a = Deconv3D(filters=gen_filters[0], kernel_size=gen_kernel_sizes[0], strides=gen_strides[0])(input_layer) a = BatchNormalization()(a, training=True) a = Activation(activation='relu')(a) for i in range(gen_convolutional_blocks - 1): a = Deconv3D(filters=gen_filters[i + 1], kernel_size=gen_kernel_sizes[i + 1], strides=gen_strides[i + 1], padding='same')(a) a = BatchNormalization()(a, training=True) a = Activation(activation=gen_activations[i + 1])(a) gen_model = Model(inputs=[input_layer], outputs=[a]) return gen_model def build_discriminator(): dis_input_shape = (64, 64, 64, 1) dis_filters = [64, 128, 256, 512, 1] dis_kernel_sizes = [4, 4, 4, 4, 4] dis_strides = [2, 2, 2, 2, 1] dis_paddings = ['same', 'same', 'same', 'same', 'valid'] dis_alphas = [0.2, 0.2, 0.2, 0.2, 0.2] dis_activations = ['leaky_relu', 'leaky_relu', 'leaky_relu', 'leaky_relu', 'sigmoid'] dis_convolutional_blocks = 5 dis_input_layer = Input(shape=dis_input_shape) a = Conv3D(filters=dis_filters[0], kernel_size=dis_kernel_sizes[0], strides=dis_strides[0], padding=dis_paddings[0])(dis_input_layer) a = LeakyReLU(dis_alphas[0])(a) for i in range(dis_convolutional_blocks - 1): a = Conv3D(filters=dis_filters[i + 1], kernel_size=dis_kernel_sizes[i + 1], strides=dis_strides[i + 1], padding=dis_paddings[i + 1])(a) a = BatchNormalization()(a, training=True) if dis_activations[i + 1] == 'leaky_relu': a = LeakyReLU(dis_alphas[i + 1])(a) elif dis_activations[i + 1] == 'sigmoid': a = Activation(activation='sigmoid')(a) dis_model = Model(inputs=[dis_input_layer], outputs=[a]) return dis_model def write_log(callback, name, value, batch_no): summary = tf.Summary() summary_value = summary.value.add() summary_value.simple_value = value summary_value.tag = name callback.writer.add_summary(summary, batch_no) callback.writer.flush() def get3DImages(data_dir): all_files = np.random.choice(glob.glob(data_dir), size=10) all_volumes = np.asarray([getVoxelsFromMat(f) for f in all_files], dtype=np.bool) return all_volumes def getVoxelsFromMat(path, cube_len=64): voxels = io.loadmat(path)['instance'] voxels = np.pad(voxels, (1, 1), 'constant', constant_values=(0, 0)) if cube_len != 32 and cube_len == 64: voxels = nd.zoom(voxels, (2, 2, 2), mode='constant', order=0) return voxels def saveFromVoxels(voxels, path): z, x, y = voxels.nonzero() fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(x, y, -z, zdir='z', c='red') plt.savefig(path)
MIT License
juju/charm-helpers
charmhelpers/contrib/charmsupport/nrpe.py
remove_deprecated_check
python
def remove_deprecated_check(nrpe, deprecated_services): for dep_svc in deprecated_services: log('Deprecated service: {}'.format(dep_svc)) nrpe.remove_check(shortname=dep_svc)
Remove checks for deprecated services in list :param nrpe: NRPE object to remove check from :type nrpe: NRPE :param deprecated_services: List of deprecated services that are removed :type deprecated_services: list
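A hedged usage sketch from inside a charm hook; the check names are hypothetical:
hostname = get_nagios_hostname()
nrpe_setup = NRPE(hostname=hostname)
remove_deprecated_check(nrpe_setup, ['old_service_check', 'legacy_status'])
nrpe_setup.write()   # push the updated monitor definitions to nagios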
https://github.com/juju/charm-helpers/blob/25b740578385d15b38f11bed8e4b6e732bdfb7c6/charmhelpers/contrib/charmsupport/nrpe.py#L511-L522
import glob import grp import os import pwd import re import shlex import shutil import subprocess import yaml from charmhelpers.core.hookenv import ( config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type, ) from charmhelpers.core.host import service from charmhelpers.core import host class CheckException(Exception): pass class Check(object): shortname_re = '[A-Za-z0-9-_.@]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed #--------------------------------------------------- define service {{ use active-service host_name {nagios_hostname} service_description {nagios_hostname}[{shortname}] """ """{description} check_command check_nrpe!{command} servicegroups {nagios_servicegroup} {service_config_overrides} }} """) def __init__(self, shortname, description, check_cmd, max_check_attempts=None): super(Check, self).__init__() if not re.match(self.shortname_re, shortname): raise CheckException("shortname must match {}".format( Check.shortname_re)) self.shortname = shortname self.command = "check_{}".format(shortname) self.description = description self.check_cmd = self._locate_cmd(check_cmd) self.max_check_attempts = max_check_attempts def _get_check_filename(self): return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) def _get_service_filename(self, hostname): return os.path.join(NRPE.nagios_exportdir, 'service__{}_{}.cfg'.format(hostname, self.command)) def _locate_cmd(self, check_cmd): search_path = ( '/usr/lib/nagios/plugins', '/usr/local/lib/nagios/plugins', ) parts = shlex.split(check_cmd) for path in search_path: if os.path.exists(os.path.join(path, parts[0])): command = os.path.join(path, parts[0]) if len(parts) > 1: command += " " + " ".join(parts[1:]) return command log('Check command not found: {}'.format(parts[0])) return '' def _remove_service_files(self): if not os.path.exists(NRPE.nagios_exportdir): return for f in os.listdir(NRPE.nagios_exportdir): if f.endswith('_{}.cfg'.format(self.command)): os.remove(os.path.join(NRPE.nagios_exportdir, f)) def remove(self, hostname): nrpe_check_file = self._get_check_filename() if os.path.exists(nrpe_check_file): os.remove(nrpe_check_file) self._remove_service_files() def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = self._get_check_filename() with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) if nagios_servicegroups: nrpe_check_config.write( "# The following header was added automatically by juju\n") nrpe_check_config.write( "# Modifying it will affect nagios monitoring and alerting\n") nrpe_check_config.write( "# servicegroups: {}\n".format(nagios_servicegroups)) nrpe_check_config.write("command[{}]={}\n".format( self.command, self.check_cmd)) if not os.path.exists(NRPE.nagios_exportdir): log('Not writing service config as {} is not accessible'.format( NRPE.nagios_exportdir)) else: self.write_service_config(nagios_context, hostname, nagios_servicegroups) def write_service_config(self, nagios_context, hostname, nagios_servicegroups): self._remove_service_files() if self.max_check_attempts: service_config_overrides = ' max_check_attempts {}'.format( self.max_check_attempts ) else: service_config_overrides = '' templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, 'service_config_overrides': 
service_config_overrides, } nrpe_service_text = Check.service_template.format(**templ_vars) nrpe_service_file = self._get_service_filename(hostname) with open(nrpe_service_file, 'w') as nrpe_service_config: nrpe_service_config.write(str(nrpe_service_text)) def run(self): subprocess.call(self.check_cmd) class NRPE(object): nagios_logdir = '/var/log/nagios' nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' homedir = '/var/lib/nagios' def __init__(self, hostname=None, primary=True): super(NRPE, self).__init__() self.config = config() self.primary = primary self.nagios_context = self.config['nagios_context'] if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: self.nagios_servicegroups = self.nagios_context self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname else: nagios_hostname = get_nagios_hostname() if nagios_hostname: self.hostname = nagios_hostname else: self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] relation = relation_ids('nrpe-external-master') if relation: log("Setting charm primary status {}".format(primary)) for rid in relation: relation_set(relation_id=rid, relation_settings={'primary': self.primary}) self.remove_check_queue = set() @classmethod def does_nrpe_conf_dir_exist(cls): return os.path.isdir(cls.nrpe_confdir) def add_check(self, *args, **kwargs): shortname = None if kwargs.get('shortname') is None: if len(args) > 0: shortname = args[0] else: shortname = kwargs['shortname'] self.checks.append(Check(*args, **kwargs)) try: self.remove_check_queue.remove(shortname) except KeyError: pass def remove_check(self, *args, **kwargs): if kwargs.get('shortname') is None: raise ValueError('shortname of check must be specified') if kwargs.get('check_cmd') is None: kwargs['check_cmd'] = 'check_disk' if kwargs.get('description') is None: kwargs['description'] = '' check = Check(*args, **kwargs) check.remove(self.hostname) self.remove_check_queue.add(kwargs['shortname']) def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid nagios_gid = grp.getgrnam('nagios').gr_gid except Exception: log("Nagios user not set up, nrpe checks not updated") return if not os.path.exists(NRPE.nagios_logdir): os.mkdir(NRPE.nagios_logdir) os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} if not self.does_nrpe_conf_dir_exist(): return for nrpecheck in self.checks: nrpecheck.write(self.nagios_context, self.hostname, self.nagios_servicegroups) nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } if nrpecheck.max_check_attempts is not None: nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts if not hook_name() == 'update-status': service('restart', 'nagios-nrpe-server') monitor_ids = relation_ids("local-monitors") + relation_ids("nrpe-external-master") for rid in monitor_ids: reldata = relation_get(unit=local_unit(), rid=rid) if 'monitors' in reldata: old_monitors = yaml.safe_load(reldata['monitors']) old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe'] old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items() if k not in self.remove_check_queue} old_nrpe_monitors.update(nrpe_monitors) old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors relation_set(relation_id=rid, monitors=yaml.dump(old_monitors)) else: relation_set(relation_id=rid, 
monitors=yaml.dump(monitors)) self.remove_check_queue.clear() def get_nagios_hostcontext(relation_name='nrpe-external-master'): for rel in relations_of_type(relation_name): if 'nagios_host_context' in rel: return rel['nagios_host_context'] def get_nagios_hostname(relation_name='nrpe-external-master'): for rel in relations_of_type(relation_name): if 'nagios_hostname' in rel: return rel['nagios_hostname'] def get_nagios_unit_name(relation_name='nrpe-external-master'): host_context = get_nagios_hostcontext(relation_name) if host_context: unit = "%s:%s" % (host_context, local_unit()) else: unit = local_unit() return unit def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): for svc in services: if svc in ['ext-port', 'os-charm-phy-nic-mtu']: next upstart_init = '/etc/init/%s.conf' % svc sysv_init = '/etc/init.d/%s' % svc if host.init_is_systemd(service_name=svc): nrpe.add_check( shortname=svc, description='process check {%s}' % unit_name, check_cmd='check_systemd.py %s' % svc ) elif os.path.exists(upstart_init): nrpe.add_check( shortname=svc, description='process check {%s}' % unit_name, check_cmd='check_upstart_job %s' % svc ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % svc checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc) croncmd = ( '/usr/local/lib/nagios/plugins/check_exit_status.pl ' '-e -s /etc/init.d/%s status' % svc ) cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath) f = open(cronpath, 'w') f.write(cron_file) f.close() nrpe.add_check( shortname=svc, description='service check {%s}' % unit_name, check_cmd='check_status_file.py -f %s' % checkpath, ) if immediate_check and os.path.isdir(nrpe.homedir): f = open(checkpath, 'w') subprocess.call( croncmd.split(), stdout=f, stderr=subprocess.STDOUT ) f.close() os.chmod(checkpath, 0o644) def copy_nrpe_checks(nrpe_files_dir=None): NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' if nrpe_files_dir is None: for segment in ['.', 'hooks']: nrpe_files_dir = os.path.abspath(os.path.join( os.getenv('CHARM_DIR'), segment, 'charmhelpers', 'contrib', 'openstack', 'files')) if os.path.isdir(nrpe_files_dir): break else: raise RuntimeError("Couldn't find charmhelpers directory") if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): if os.path.isfile(fname): shutil.copy2(fname, os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) def add_haproxy_checks(nrpe, unit_name): nrpe.add_check( shortname='haproxy_servers', description='Check HAProxy {%s}' % unit_name, check_cmd='check_haproxy.sh') nrpe.add_check( shortname='haproxy_queue', description='Check HAProxy queue depth {%s}' % unit_name, check_cmd='check_haproxy_queue_depth.sh')
Apache License 2.0
sunblaze-ucb/rl-generalization
examples/ppo2_baselines/train.py
train
python
def train( env_id, total_episodes, seed, ncpu, policy, lr, nsteps, nminibatches, ): from baselines.common import set_global_seeds from baselines.common.vec_env.vec_normalize import VecNormalize import gym import tensorflow as tf from baselines.common.vec_env.dummy_vec_env import DummyVecEnv config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=ncpu, inter_op_parallelism_threads=ncpu) tf.Session(config=config).__enter__() if ncpu == 1: def make_env(): env = base.make_env(env_id, outdir=logger.get_dir()) env.seed(seed) env = bench.Monitor(env, logger.get_dir()) return env env = DummyVecEnv([make_env]) else: def make_env(rank): def _thunk(): env = base.make_env(env_id, process_idx=rank, outdir=logger.get_dir()) env.seed(seed + rank) if logger.get_dir(): env = bench.Monitor(env, os.path.join(logger.get_dir(), 'train-{}.monitor.json'.format(rank))) return env return _thunk env = SubprocVecEnv([make_env(i) for i in range(ncpu)]) env = VecNormalize(env) ''' # Set inside set_global_seeds: import numpy as np; import random np.random.seed(seed) random.seed(seed) tf.set_random_seed(seed) # Other possible RNGs (don't seem to matter): gym.utils.seeding.np_random(seed) from gym import spaces spaces.prng.seed(seed) ''' set_global_seeds(seed) " Note that currently the architecture is fixed for each type of policy regardless of environment " if policy=='mlp': policy_fn = base.mlp_policy elif policy=='lstm': policy_fn = base.lstm_policy else: raise NotImplementedError if 'Breakout' in env_id or 'SpaceInvaders' in env_id: raise NotImplementedError else: ppo2_episodes.learn( save_interval=10, policy=policy_fn, env=env, nsteps=nsteps, nminibatches=nminibatches, lam=0.95, gamma=0.99, noptepochs=10, log_interval=1, ent_coef=0.0, lr=lr, cliprange=0.2, total_episodes=total_episodes, )
# Set inside set_global_seeds:
import numpy as np; import random
np.random.seed(seed)
random.seed(seed)
tf.set_random_seed(seed)
# Other possible RNGs (don't seem to matter):
gym.utils.seeding.np_random(seed)
from gym import spaces
spaces.prng.seed(seed)
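For context, a hedged sketch of invoking this train entry point; the environment id and hyperparameters are illustrative only:
logger.configure(dir='logs/ppo2_cartpole')
train(
    env_id='SunblazeCartPole-v0',   # illustrative env id
    total_episodes=10000,
    seed=0,
    ncpu=4,
    policy='mlp',
    lr=3e-4,
    nsteps=2048,
    nminibatches=32,
)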
https://github.com/sunblaze-ucb/rl-generalization/blob/5e05aa108aeadd4cf21f45d56128e9da3bddc14c/examples/ppo2_baselines/train.py#L15-L116
import argparse
import os
import random
from baselines.common.cmd_util import mujoco_arg_parser
from baselines import bench, logger
from gym.utils.seeding import create_seed
from . import base
from . import ppo2_episodes
import sunblaze_envs
MIT License
wildmeorg/wildbook-ia
wbia/init/sysres.py
get_args_dbdir
python
def get_args_dbdir(defaultdb=None, allow_newdir=False, db=None, dbdir=None): if not ut.QUIET and ut.VERBOSE: logger.info('[sysres] get_args_dbdir: parsing commandline for dbdir') logger.info('[sysres] defaultdb=%r, allow_newdir=%r' % (defaultdb, allow_newdir)) logger.info('[sysres] db=%r, dbdir=%r' % (db, dbdir)) def prioritize(dbdir_, db_): invalid = ['', ' ', '.', 'None'] if dbdir_ in invalid: dbdir_ = None if db_ in invalid: db_ = None if dbdir_ is not None: return realpath(dbdir_) if db_ is not None: return db_to_dbdir(db_, allow_newdir=allow_newdir) return None dbdir = prioritize(dbdir, db) if dbdir is not None: return dbdir dbdir_arg = ut.get_argval('--dbdir', default=None) db_arg = ut.get_argval('--db', default=None) dbdir = prioritize(dbdir_arg, db_arg) if dbdir is not None: return dbdir if defaultdb is None: raise ValueError('Must specify at least db, dbdir, or defaultdb') elif defaultdb == 'cache': return get_default_dbdir() else: return db_to_dbdir(defaultdb, allow_newdir=allow_newdir)
r""" Machinery for finding a database directory using the following priorities. The function first defaults to the specified function arguments. If those are not specified, then command line arguments are used. In all other circumstances the defaultdb is used. If defaultdb='cache' then the most recently used database directory is returned. Args: defaultdb (None): database return if none other is specified allow_newdir (bool): raises error if True and directory not found db (None): specification using workdir priority dbdir (None): specification using normal directory priority cache_priority (bool): (default = False) Returns: str: dbdir CommandLine: python -m wbia.init.sysres get_args_dbdir Example: >>> # ENABLE_DOCTEST >>> from wbia.init.sysres import * # NOQA >>> dir1 = get_args_dbdir(None, False, 'testdb1', None) >>> print('dir1 = %r' % (dir1,)) >>> dir2 = get_args_dbdir(None, False, dir1, None) >>> print('dir2 = %r' % (dir2,)) >>> ut.assert_raises(ValueError, get_args_dbdir) >>> print('dir3 = %r' % (dir2,))
https://github.com/wildmeorg/wildbook-ia/blob/017057cfd3a2a7ea22f575842c9473e121c66ea4/wbia/init/sysres.py#L303-L372
import logging import os from functools import lru_cache from os.path import exists, join, realpath from pathlib import Path import utool as ut import ubelt as ub from wbia import constants as const from wbia.dtool.copy_sqlite_to_postgres import copy_sqlite_to_postgres (print, rrr, profile) = ut.inject2(__name__) logger = logging.getLogger('wbia') WORKDIR_CACHEID = 'work_directory_cache_id' DEFAULTDB_CAHCEID = 'cached_dbdir' LOGDIR_CACHEID = ut.logdir_cacheid __APPNAME__ = 'wbia' ALLOW_GUI = ut.WIN32 or os.environ.get('DISPLAY', None) is not None def get_wbia_resource_dir(): return ub.ensure_app_cache_dir('wbia') def _wbia_cache_dump(): ut.global_cache_dump(appname=__APPNAME__) def _wbia_cache_write(key, val): logger.info('[sysres] set %s=%r' % (key, val)) ut.global_cache_write(key, val, appname=__APPNAME__) def _wbia_cache_read(key, **kwargs): return ut.global_cache_read(key, appname=__APPNAME__, **kwargs) def get_wbia_db_uri(db_dir: str = None): return ut.get_argval('--db-uri', default=None) def set_default_dbdir(dbdir): if ut.DEBUG2: logger.info('[sysres] SETTING DEFAULT DBDIR: %r' % dbdir) _wbia_cache_write(DEFAULTDB_CAHCEID, dbdir) def get_default_dbdir(): dbdir = _wbia_cache_read(DEFAULTDB_CAHCEID, default=None) if ut.DEBUG2: logger.info('[sysres] READING DEFAULT DBDIR: %r' % dbdir) return dbdir def get_workdir(allow_gui=True): work_dir = _wbia_cache_read(WORKDIR_CACHEID, default='.') logger.info('[wbia.sysres.get_workdir] work_dir = {!r}'.format(work_dir)) if work_dir != '.' and exists(work_dir): return work_dir if allow_gui: work_dir = set_workdir() return get_workdir(allow_gui=False) return None def set_workdir(work_dir=None, allow_gui=ALLOW_GUI): if work_dir is None: if allow_gui: try: work_dir = guiselect_workdir() except ImportError: allow_gui = False if not allow_gui: work_dir = ut.truepath(input('specify a workdir: ')) if work_dir is None or not exists(work_dir): raise AssertionError('invalid workdir=%r' % work_dir) _wbia_cache_write(WORKDIR_CACHEID, work_dir) def set_logdir(log_dir): from os.path import realpath, expanduser log_dir = realpath(expanduser(log_dir)) ut.ensuredir(log_dir, verbose=True) ut.stop_logging() _wbia_cache_write(LOGDIR_CACHEID, log_dir) ut.start_logging(appname=__APPNAME__) def get_logdir_global(): return _wbia_cache_read(LOGDIR_CACHEID, default=ut.get_logging_dir(appname='wbia')) def get_rawdir(): workdir = get_workdir() rawdir = ut.truepath(join(workdir, '../raw')) return rawdir def guiselect_workdir(): from wbia import guitool guitool.ensure_qtapp() work_dir = guitool.select_directory('Select a work directory') if not exists(work_dir): try_again = guitool.user_option( paremt=None, msg='Directory %r does not exist.' 
% work_dir, title='get work dir failed', options=['Try Again'], use_cache=False, ) if try_again == 'Try Again': return guiselect_workdir() return work_dir def get_dbalias_dict(): dbalias_dict = {} if ut.is_developer(): dbalias_dict.update( { 'NAUTS': 'NAUT_Dan', 'WD': 'WD_Siva', 'LF': 'LF_all', 'GZ': 'GZ_ALL', 'MOTHERS': 'PZ_MOTHERS', 'FROGS': 'Frogs', 'TOADS': 'WY_Toads', 'SEALS_SPOTTED': 'Seals', 'OXFORD': 'Oxford_Buildings', 'PARIS': 'Paris_Buildings', 'JAG_KELLY': 'JAG_Kelly', 'JAG_KIERYN': 'JAG_Kieryn', 'WILDEBEAST': 'Wildebeast', 'WDOGS': 'WD_Siva', 'PZ': 'PZ_FlankHack', 'PZ2': 'PZ-Sweatwater', 'PZ_MARIANNE': 'PZ_Marianne', 'PZ_DANEXT_TEST': 'PZ_DanExt_Test', 'PZ_DANEXT_ALL': 'PZ_DanExt_All', 'LF_ALL': 'LF_all', 'WS_HARD': 'WS_hard', 'SONOGRAMS': 'sonograms', } ) dbalias_dict['JAG'] = dbalias_dict['JAG_KELLY'] return dbalias_dict def delete_dbdir(dbname): ut.delete(join(get_workdir(), dbname), ignore_errors=False) def db_to_dbdir(db, allow_newdir=False, extra_workdirs=[]): if ut.VERBOSE: logger.info('[sysres] db_to_dbdir: db=%r, allow_newdir=%r' % (db, allow_newdir)) if db is None: raise ValueError('db is None') work_dir = get_workdir() dbalias_dict = get_dbalias_dict() workdir_list = [] for extra_dir in extra_workdirs: if exists(extra_dir): workdir_list.append(extra_dir) workdir_list.append(work_dir) for _dir in workdir_list: dbdir = realpath(join(_dir, db)) if not exists(dbdir) and db.upper() in dbalias_dict: dbdir = join(_dir, dbalias_dict[db.upper()]) if exists(dbdir): break if allow_newdir: ut.ensuredir(dbdir, verbose=True) if not exists(dbdir): logger.info('!!!') logger.info('[sysres] WARNING: db=%r not found in work_dir=%r' % (db, work_dir)) fname_list = os.listdir(work_dir) lower_list = [fname.lower() for fname in fname_list] index = ut.listfind(lower_list, db.lower()) if index is not None: logger.info('[sysres] WARNING: db capitalization seems to be off') if not ut.STRICT: logger.info('[sysres] attempting to fix it') db = fname_list[index] dbdir = join(work_dir, db) logger.info('[sysres] dbdir=%r' % dbdir) logger.info('[sysres] db=%r' % db) if not exists(dbdir): msg = '[sysres!] ERROR: Database does not exist and allow_newdir=False' logger.info('<!!!>') logger.info(msg) logger.info( '[sysres!] Here is a list of valid dbs: ' + ut.indentjoin(sorted(fname_list), '\n * ') ) logger.info('[sysres!] dbdir=%r' % dbdir) logger.info('[sysres!] db=%r' % db) logger.info('[sysres!] work_dir=%r' % work_dir) logger.info('</!!!>') raise AssertionError(msg) logger.info('!!!') return dbdir
Apache License 2.0
hrnet/hrformer
pose/mmpose/utils/logger.py
get_root_logger
python
def get_root_logger(log_file=None, log_level=logging.INFO): return get_logger(__name__.split('.')[0], log_file, log_level)
Use `get_logger` method in mmcv to get the root logger. The logger will be initialized if it has not been initialized. By default a StreamHandler will be added. If `log_file` is specified, a FileHandler will also be added. The name of the root logger is the top-level package name, e.g., "mmpose". Args: log_file (str | None): The log filename. If specified, a FileHandler will be added to the root logger. log_level (int): The root logger level. Note that only the process of rank 0 is affected, while other processes will set the level to "Error" and be silent most of the time. Returns: logging.Logger: The root logger.
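A usage sketch; the log file path is illustrative:
import logging
logger = get_root_logger(log_file='work_dirs/exp1/run.log', log_level=logging.INFO)
logger.info('training started')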
https://github.com/hrnet/hrformer/blob/94aaf06d02ab0bd05575c85803439dc13941de5a/pose/mmpose/utils/logger.py#L6-L24
import logging
from mmcv.utils import get_logger
MIT License
apache/allura
Allura/allura/model/artifact.py
Artifact.type_name
python
def type_name(self): return self.type_s.lower()
:return: a presentation name for this type of artifact :rtype: str
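A one-line illustration for some Artifact instance `artifact` (hypothetical), assuming the default type_s of 'Generic Artifact':
artifact.type_name   # -> 'generic artifact'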
https://github.com/apache/allura/blob/04f14f15a9a9364e18c61f68acdaa241a470186b/Allura/allura/model/artifact.py#L347-L352
from __future__ import unicode_literals from __future__ import absolute_import import logging from collections import defaultdict from datetime import datetime import typing import pymongo from tg import tmpl_context as c, app_globals as g from tg import request from ming import schema as S from ming.orm import state, session from ming.orm import FieldProperty, ForeignIdProperty, RelationProperty from ming.orm.declarative import MappedClass from ming.utils import LazyProperty import feedgenerator as FG from allura.lib import helpers as h from allura.lib import security from allura.lib import utils from allura.lib import plugin from allura.lib import exceptions as forge_exc from allura.lib.decorators import memoize from allura.lib.search import SearchIndexable from .session import main_orm_session from .session import project_orm_session from .session import artifact_orm_session from .index import ArtifactReference from .types import ACL, MarkdownCache from .project import AppConfig from .notification import MailFooter from .filesystem import File import six if typing.TYPE_CHECKING: from ming.odm.mapper import Query log = logging.getLogger(__name__) class Artifact(MappedClass, SearchIndexable): class __mongometa__: session = artifact_orm_session name = str('artifact') indexes = [ ('app_config_id', 'labels'), ] def before_save(data): _session = artifact_orm_session._get() skip_mod_date = getattr(_session, 'skip_mod_date', False) skip_last_updated = getattr(_session, 'skip_last_updated', False) if not skip_mod_date: data['mod_date'] = datetime.utcnow() else: log.debug('Not updating mod_date') if c.project and not skip_last_updated: c.project.last_updated = datetime.utcnow() query: 'Query[Artifact]' type_s = 'Generic Artifact' _id = FieldProperty(S.ObjectId) mod_date = FieldProperty(datetime, if_missing=datetime.utcnow) app_config_id = ForeignIdProperty( 'AppConfig', if_missing=lambda: c.app.config._id) plugin_verson = FieldProperty(S.Deprecated) tool_version = FieldProperty(S.Deprecated) acl = FieldProperty(ACL) tags = FieldProperty(S.Deprecated) labels = FieldProperty([str]) references = FieldProperty(S.Deprecated) backreferences = FieldProperty(S.Deprecated) app_config = RelationProperty('AppConfig') import_id = FieldProperty(None, if_missing=None) deleted = FieldProperty(bool, if_missing=False) def __json__(self, posts_limit=None, is_export=False, user=None): return dict( _id=str(self._id), mod_date=self.mod_date, labels=list(self.labels), related_artifacts=[a.url() for a in self.related_artifacts(user=user or c.user)], discussion_thread=self.discussion_thread.__json__(limit=posts_limit, is_export=is_export), discussion_thread_url=h.absurl('/rest%s' % self.discussion_thread.url()), ) def parent_security_context(self): return self.app_config @classmethod def attachment_class(cls): raise NotImplementedError('attachment_class') @LazyProperty def ref(self): return ArtifactReference.from_artifact(self) @LazyProperty def refs(self): return self.ref.references @LazyProperty def backrefs(self): q = ArtifactReference.query.find(dict(references=self.index_id())) return [aref._id for aref in q] def related_artifacts(self, user=None): related_artifacts = [] for ref_id in self.refs + self.backrefs: ref = ArtifactReference.query.get(_id=ref_id) if ref is None: continue artifact = ref.artifact if artifact is None: continue artifact = artifact.primary() if artifact is None: continue if hasattr(artifact, 'app_config') and artifact.app_config is None: continue try: if user and not h.has_access(artifact, 
'read', user): continue except Exception: log.debug('Error doing permission check on related artifacts of {}, ' 'probably because the "artifact" is a Commit not a real artifact'.format(self.index_id()), exc_info=True) if artifact.type_s == 'Commit' and not artifact.repo: ac = AppConfig.query.get(_id=ref.artifact_reference['app_config_id']) app = ac.project.app_instance(ac) if ac else None if app: artifact.set_context(app.repo) if artifact not in related_artifacts and (getattr(artifact, 'deleted', False) is False): related_artifacts.append(artifact) return sorted(related_artifacts, key=lambda a: a.url()) def subscribe(self, user=None, topic=None, type='direct', n=1, unit='day'): from allura.model import Mailbox if user is None: user = c.user return Mailbox.subscribe( user_id=user._id, project_id=self.app_config.project_id, app_config_id=self.app_config._id, artifact=self, topic=topic, type=type, n=n, unit=unit) def unsubscribe(self, user=None): from allura.model import Mailbox if user is None: user = c.user Mailbox.unsubscribe( user_id=user._id, project_id=self.app_config.project_id, app_config_id=self.app_config._id, artifact_index_id=self.index_id()) @memoize def subscribed(self, user=None, include_parents=True): from allura.model import Mailbox if user is None: user = c.user user_proj_app_q = dict(user_id=user._id, project_id=self.app_config.project_id, app_config_id=self.app_config._id) art_subscribed = Mailbox.subscribed(artifact=self, **user_proj_app_q) if art_subscribed: return True if include_parents: tool_subscribed = Mailbox.subscribed(**user_proj_app_q) if tool_subscribed: return True return False def primary(self): return self @classmethod def artifacts_labeled_with(cls, label, app_config): return cls.query.find({'labels': label, 'app_config_id': app_config._id}) def email_link(self, subject='artifact'): if subject: return 'mailto:%s?subject=[%s:%s:%s] Re: %s' % ( self.email_address, self.app_config.project.shortname, self.app_config.options.mount_point, self.shorthand_id(), subject) else: return 'mailto:%s' % self.email_address @property def email_domain(self): url = self.app.url[1:-1].split('/') return '.'.join(reversed(url)).replace('_', '-') @property def project(self): return getattr(self.app_config, 'project', None) @property def project_id(self): return self.app_config.project_id @LazyProperty def app(self): if not self.app_config: return None if getattr(c, 'app', None) and c.app.config._id == self.app_config._id: return c.app else: return self.app_config.load()(self.project, self.app_config) def index(self): project = self.project return dict( id=self.index_id(), mod_date_dt=self.mod_date, title='Artifact %s' % self._id, project_id_s=str(project._id), project_name_t=project.name, project_shortname_t=project.shortname, tool_name_s=self.app_config.tool_name, mount_point_s=self.app_config.options.mount_point, is_history_b=False, url_s=self.url(), type_s=self.type_s, labels_t=' '.join(l for l in self.labels), snippet_s='', deleted_b=self.deleted) @property
Apache License 2.0
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_5/models/directory_performance.py
DirectoryPerformance.__eq__
python
def __eq__(self, other): if not isinstance(other, DirectoryPerformance): return False return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_5/models/directory_performance.py#L213-L218
import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flasharray.FA_2_5 import models class DirectoryPerformance(object): swagger_types = { 'id': 'str', 'name': 'str', 'bytes_per_op': 'int', 'bytes_per_read': 'int', 'bytes_per_write': 'int', 'others_per_sec': 'int', 'read_bytes_per_sec': 'int', 'reads_per_sec': 'int', 'time': 'int', 'usec_per_other_op': 'int', 'usec_per_read_op': 'int', 'usec_per_write_op': 'int', 'write_bytes_per_sec': 'int', 'writes_per_sec': 'int' } attribute_map = { 'id': 'id', 'name': 'name', 'bytes_per_op': 'bytes_per_op', 'bytes_per_read': 'bytes_per_read', 'bytes_per_write': 'bytes_per_write', 'others_per_sec': 'others_per_sec', 'read_bytes_per_sec': 'read_bytes_per_sec', 'reads_per_sec': 'reads_per_sec', 'time': 'time', 'usec_per_other_op': 'usec_per_other_op', 'usec_per_read_op': 'usec_per_read_op', 'usec_per_write_op': 'usec_per_write_op', 'write_bytes_per_sec': 'write_bytes_per_sec', 'writes_per_sec': 'writes_per_sec' } required_args = { } def __init__( self, id=None, name=None, bytes_per_op=None, bytes_per_read=None, bytes_per_write=None, others_per_sec=None, read_bytes_per_sec=None, reads_per_sec=None, time=None, usec_per_other_op=None, usec_per_read_op=None, usec_per_write_op=None, write_bytes_per_sec=None, writes_per_sec=None, ): if id is not None: self.id = id if name is not None: self.name = name if bytes_per_op is not None: self.bytes_per_op = bytes_per_op if bytes_per_read is not None: self.bytes_per_read = bytes_per_read if bytes_per_write is not None: self.bytes_per_write = bytes_per_write if others_per_sec is not None: self.others_per_sec = others_per_sec if read_bytes_per_sec is not None: self.read_bytes_per_sec = read_bytes_per_sec if reads_per_sec is not None: self.reads_per_sec = reads_per_sec if time is not None: self.time = time if usec_per_other_op is not None: self.usec_per_other_op = usec_per_other_op if usec_per_read_op is not None: self.usec_per_read_op = usec_per_read_op if usec_per_write_op is not None: self.usec_per_write_op = usec_per_write_op if write_bytes_per_sec is not None: self.write_bytes_per_sec = write_bytes_per_sec if writes_per_sec is not None: self.writes_per_sec = writes_per_sec def __setattr__(self, key, value): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `DirectoryPerformance`".format(key)) if key == "bytes_per_op" and value is not None: if value < 0: raise ValueError("Invalid value for `bytes_per_op`, must be a value greater than or equal to `0`") if key == "bytes_per_read" and value is not None: if value < 0: raise ValueError("Invalid value for `bytes_per_read`, must be a value greater than or equal to `0`") if key == "bytes_per_write" and value is not None: if value < 0: raise ValueError("Invalid value for `bytes_per_write`, must be a value greater than or equal to `0`") if key == "others_per_sec" and value is not None: if value < 0: raise ValueError("Invalid value for `others_per_sec`, must be a value greater than or equal to `0`") if key == "read_bytes_per_sec" and value is not None: if value < 0: raise ValueError("Invalid value for `read_bytes_per_sec`, must be a value greater than or equal to `0`") if key == "reads_per_sec" and value is not None: if value < 0: raise ValueError("Invalid value for `reads_per_sec`, must be a value greater than or equal to `0`") if key == "usec_per_other_op" and value is not None: if value < 0: raise ValueError("Invalid value for `usec_per_other_op`, must be a value greater than or 
equal to `0`") if key == "usec_per_read_op" and value is not None: if value < 0: raise ValueError("Invalid value for `usec_per_read_op`, must be a value greater than or equal to `0`") if key == "usec_per_write_op" and value is not None: if value < 0: raise ValueError("Invalid value for `usec_per_write_op`, must be a value greater than or equal to `0`") if key == "write_bytes_per_sec" and value is not None: if value < 0: raise ValueError("Invalid value for `write_bytes_per_sec`, must be a value greater than or equal to `0`") if key == "writes_per_sec" and value is not None: if value < 0: raise ValueError("Invalid value for `writes_per_sec`, must be a value greater than or equal to `0`") self.__dict__[key] = value def __getattribute__(self, item): value = object.__getattribute__(self, item) if isinstance(value, Property): raise AttributeError else: return value def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): if hasattr(self, attr): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DirectoryPerformance, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
BSD 2-Clause Simplified License
sectorlabs/django-localized-fields
localized_fields/widgets.py
LocalizedFieldWidget.decompress
python
def decompress(self, value: LocalizedValue) -> List[str]: result = [] for lang_code, _ in settings.LANGUAGES: if value: result.append(value.get(lang_code)) else: result.append(None) return result
Decompresses the specified value so it can be spread over the internal widgets. Arguments: value: The :see:LocalizedValue to display in this widget. Returns: All values to display in the inner widgets.
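A rough sketch, assuming a configured Django project with settings.LANGUAGES = [('en', 'English'), ('ro', 'Romanian')]; the values are made up:
value = LocalizedValue({'en': 'hello', 'ro': 'salut'})
widget = LocalizedFieldWidget()
widget.decompress(value)   # -> ['hello', 'salut'], one entry per configured language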
https://github.com/sectorlabs/django-localized-fields/blob/f024e4feb50a3aac2cdfd965243f48dcf1628aa5/localized_fields/widgets.py#L32-L52
import copy from typing import List from django import forms from django.conf import settings from django.contrib.admin import widgets from .value import LocalizedValue class LocalizedFieldWidget(forms.MultiWidget): template_name = "localized_fields/multiwidget.html" widget = forms.Textarea def __init__(self, *args, **kwargs): initial_widgets = [copy.copy(self.widget) for _ in settings.LANGUAGES] super().__init__(initial_widgets, *args, **kwargs) for ((lang_code, lang_name), widget) in zip( settings.LANGUAGES, self.widgets ): widget.attrs["lang"] = lang_code widget.lang_code = lang_code widget.lang_name = lang_name
MIT License
martlgap/faceidlight
FaceIDLight/tools.py
FaceDetection.__stage1
python
def __stage1(self, image, scales: list, stage_status: StageStatus): total_boxes = np.empty((0, 9)) status = stage_status for scale in scales: scaled_image = self.__scale_image(image, scale) img_x = np.expand_dims(scaled_image, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = tflite_inference(self.p_net, img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = self.__generate_bounding_box( out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self._steps_threshold[0], ) pick = self.__nms(boxes.copy(), 0.5, "Union") if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numboxes = total_boxes.shape[0] if numboxes > 0: pick = self.__nms(total_boxes.copy(), 0.7, "Union") total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = self.__rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) status = StageStatus( self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height, ) return total_boxes, status
First stage of the MTCNN. :param image: :param scales: :param stage_status: :return:
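__stage1 is an internal step of the cascade and is normally reached through the public detector API. A hedged usage sketch (the image path is illustrative, and the model files are fetched on first use):
import cv2
detector = FaceDetection()
img = cv2.imread('group_photo.jpg')
detections = detector.detect_faces(img)   # each entry: [bounding box, landmark points, confidence]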
https://github.com/martlgap/faceidlight/blob/3eed10a4cd65dc54509be71a298996beb61123ca/FaceIDLight/tools.py#L449-L508
import tflite_runtime.interpreter as tflite import cv2 import numpy as np import os import glob from skimage.transform import SimilarityTransform from FaceIDLight.helper import get_file from scipy.spatial import distance from sklearn.metrics.pairwise import cosine_distances BASE_URL = "https://github.com/Martlgap/FaceIDLight/releases/download/v.0.1/" FILE_HASHES = { "o_net": "768385d570300648b7b881acbd418146522b79b4771029bb2e684bdd8c764b9f", "p_net": "530183192e24f7cc86b6706e1eb600482c4ed4306399ac939c472e3957bae15e", "r_net": "5ec33b065eb2802bc4c2575d21feff1a56958d854785bc3e2907d3b7ace861a2", "mobileNet": "6c19b789f661caa8da735566490bfd8895beffb2a1ec97a56b126f0539991aa6", "resNet50": "f4d8b0194957a3ad766135505fc70a91343660151a8103bbb6c3b8ac34dbb4e2", "sample_gallery": "9f43a83c89a8099e1f3aab75ed9531f932f1b392bea538d6afe52509587438d4", } class FaceID: def __init__(self, gal_dir: str = None): self.detector = FaceDetection() self.recognizer = FaceRecognition() self.gal_embs = [] self.gal_names = [] self.gal_faces = [] self.gal_dir = ( gal_dir if gal_dir is not None else get_file(BASE_URL + "sample_gallery.zip", FILE_HASHES["sample_gallery"], is_zip=True) ) self.update_gallery() def update_gallery(self): files = glob.glob("{}/*.jpg".format(self.gal_dir)) + glob.glob("{}/*.png".format(self.gal_dir)) for file in files: img = cv2.imread(file) detections = self.detector.detect_faces(img) if not detections: continue _, points, _ = detections[0] self.gal_names.append(os.path.basename(file).split(".")[0]) face = self.detector.get_face(img, points) self.gal_faces.append( cv2.cvtColor(face.astype(np.float32) / 255, cv2.COLOR_BGR2RGB) ) self.gal_embs = self.recognizer.get_emb(np.asarray(self.gal_faces))[0] def recognize_faces(self, img): detections = self.detector.detect_faces(img) if not detections: return [] faces = [] for detection in detections: bbox, points, conf = detection face = self.detector.get_face(img, points) faces.append(cv2.cvtColor(face.astype(np.float32) / 255, cv2.COLOR_BGR2RGB)) embs = self.recognizer.get_emb(np.asarray(faces))[0] ids = [] for i in range(embs.shape[0]): pred, dist, conf = self.recognizer.identify(np.expand_dims(embs[i], axis=0), self.gal_embs, thresh=0.6) ids.append( [ self.gal_names[pred] if pred is not None else "Other", cv2.cvtColor(self.gal_faces[pred] * 255, cv2.COLOR_RGB2BGR) if pred is not None else None, dist, conf, ] ) faces_ = [] for face in faces: faces_.append(cv2.cvtColor(face * 255, cv2.COLOR_RGB2BGR)) out = [i for i in zip(faces_, detections, ids)] return out def tflite_inference(model, img): input_details = model.get_input_details() output_details = model.get_output_details() model.resize_tensor_input(input_details[0]["index"], img.shape) model.allocate_tensors() model.set_tensor(input_details[0]["index"], img.astype(np.float32)) model.invoke() return [model.get_tensor(elem["index"]) for elem in output_details] class FaceRecognition: def __init__( self, model_path: str = None, model_type: str = "mobileNet", ): if model_path is None: model_path = get_file(BASE_URL + model_type + ".tflite", FILE_HASHES[model_type]) self.face_recognizer = tflite.Interpreter(model_path=model_path) def get_emb(self, img): return tflite_inference(self.face_recognizer, img) @staticmethod def verify(emb1, emb2, thresh): dist = distance.cosine(emb1, emb2) prediction = thresh > np.squeeze(dist, axis=-1) confidence = ( ((thresh - dist) / thresh) / 2 + 0.5 if prediction else ((dist - thresh) / (1.4 - thresh)) / 2 + 0.5 ) return prediction, np.squeeze(dist, axis=-1), confidence 
@staticmethod def identify(emb_src, embs_gal, thresh=None): dists = cosine_distances(emb_src, embs_gal)[0] pred = np.argmin(dists) if thresh and dists[pred] > thresh: idx = np.argsort(dists) conf = (dists[idx[0]] - thresh) / (1.4 - thresh) dist = dists[idx[0]] pred = None else: idx = np.argsort(dists) conf = (dists[idx[1]] - dists[pred]) / 1.4 dist = dists[pred] return pred, dist, conf class StageStatus: def __init__(self, pad_result: tuple = None, width=0, height=0): self.width = width self.height = height self.dy = self.edy = self.dx = self.edx = self.y = self.ey = self.x = self.ex = self.tmp_w = self.tmp_h = [] if pad_result is not None: self.update(pad_result) def update(self, pad_result: tuple): s = self s.dy, s.edy, s.dx, s.edx, s.y, s.ey, s.x, s.ex, s.tmp_w, s.tmp_h = pad_result class FaceDetection: def __init__( self, min_face_size: int = 40, steps_threshold: list = None, scale_factor: float = 0.7, ): if steps_threshold is None: steps_threshold = [0.6, 0.7, 0.7] self._min_face_size = min_face_size self._steps_threshold = steps_threshold self._scale_factor = scale_factor self.p_net = tflite.Interpreter(model_path=get_file(BASE_URL + "p_net.tflite", FILE_HASHES["p_net"])) self.r_net = tflite.Interpreter(model_path=get_file(BASE_URL + "r_net.tflite", FILE_HASHES["r_net"])) self.o_net = tflite.Interpreter(model_path=get_file(BASE_URL + "o_net.tflite", FILE_HASHES["o_net"])) def detect_faces(self, img): height, width, _ = img.shape stage_status = StageStatus(width=width, height=height) m = 12 / self._min_face_size min_layer = np.amin([height, width]) * m scales = self.__compute_scale_pyramid(m, min_layer) total_boxes, stage_status = self.__stage1(img, scales, stage_status) total_boxes, stage_status = self.__stage2(img, total_boxes, stage_status) bboxes, points = self.__stage3(img, total_boxes, stage_status) detections = [] for i in range(bboxes.shape[0]): bboxes_c = np.reshape(bboxes[i, :-1], [2, 2]).astype(np.float32) points_c = np.reshape(points[i], [2, 5]).transpose().astype(np.float32) conf = bboxes[i, -1].astype(np.float32) detections.append([bboxes_c, points_c, conf]) return detections def __compute_scale_pyramid(self, m, min_layer): scales = [] factor_count = 0 while min_layer >= 12: scales += [m * np.power(self._scale_factor, factor_count)] min_layer = min_layer * self._scale_factor factor_count += 1 return scales @staticmethod def __scale_image(image, scale: float): height, width, _ = image.shape width_scaled = int(np.ceil(width * scale)) height_scaled = int(np.ceil(height * scale)) im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA) im_data_normalized = (im_data - 127.5) * 0.0078125 return im_data_normalized @staticmethod def __generate_bounding_box(imap, reg, scale, t): stride = 2 cellsize = 12 imap = np.transpose(imap) dx1 = np.transpose(reg[:, :, 0]) dy1 = np.transpose(reg[:, :, 1]) dx2 = np.transpose(reg[:, :, 2]) dy2 = np.transpose(reg[:, :, 3]) y, x = np.where(imap >= t) if y.shape[0] == 1: dx1 = np.flipud(dx1) dy1 = np.flipud(dy1) dx2 = np.flipud(dx2) dy2 = np.flipud(dy2) score = imap[(y, x)] reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) if reg.size == 0: reg = np.empty(shape=(0, 3)) bb = np.transpose(np.vstack([y, x])) q1 = np.fix((stride * bb + 1) / scale) q2 = np.fix((stride * bb + cellsize) / scale) boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) return boundingbox, reg @staticmethod def __nms(boxes, threshold, method): if boxes.size == 0: return np.empty((0, 3)) x1 = 
boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] s = boxes[:, 4] area = (x2 - x1 + 1) * (y2 - y1 + 1) sorted_s = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while sorted_s.size > 0: i = sorted_s[-1] pick[counter] = i counter += 1 idx = sorted_s[0:-1] xx1 = np.maximum(x1[i], x1[idx]) yy1 = np.maximum(y1[i], y1[idx]) xx2 = np.minimum(x2[i], x2[idx]) yy2 = np.minimum(y2[i], y2[idx]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h if method == "Min": o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) sorted_s = sorted_s[np.where(o <= threshold)] pick = pick[0:counter] return pick @staticmethod def __pad(total_boxes, w, h): tmp_w = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) tmp_h = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) numbox = total_boxes.shape[0] dx = np.ones(numbox, dtype=np.int32) dy = np.ones(numbox, dtype=np.int32) edx = tmp_w.copy().astype(np.int32) edy = tmp_h.copy().astype(np.int32) x = total_boxes[:, 0].copy().astype(np.int32) y = total_boxes[:, 1].copy().astype(np.int32) ex = total_boxes[:, 2].copy().astype(np.int32) ey = total_boxes[:, 3].copy().astype(np.int32) tmp = np.where(ex > w) edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmp_w[tmp], 1) ex[tmp] = w tmp = np.where(ey > h) edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmp_h[tmp], 1) ey[tmp] = h tmp = np.where(x < 1) dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) x[tmp] = 1 tmp = np.where(y < 1) dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) y[tmp] = 1 return dy, edy, dx, edx, y, ey, x, ex, tmp_w, tmp_h @staticmethod def __rerec(bbox): height = bbox[:, 3] - bbox[:, 1] width = bbox[:, 2] - bbox[:, 0] max_side_length = np.maximum(width, height) bbox[:, 0] = bbox[:, 0] + width * 0.5 - max_side_length * 0.5 bbox[:, 1] = bbox[:, 1] + height * 0.5 - max_side_length * 0.5 bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(max_side_length, (2, 1))) return bbox @staticmethod def __bbreg(boundingbox, reg): if reg.shape[1] == 1: reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) return boundingbox
MIT License
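A minimal usage sketch for the FaceID pipeline shown in the context above. The import path, gallery directory and probe image are assumptions for illustration only; the loop unpacking follows the list layout returned by recognize_faces.

from FaceIDLight.tools import FaceID   # module path assumed; FaceID is the class defined in the context above
import cv2

face_id = FaceID(gal_dir="./my_gallery")    # hypothetical folder of .jpg/.png reference faces
probe = cv2.imread("./probe.jpg")           # BGR image, as expected by detect_faces
for face, (bbox, points, det_conf), (name, gal_face, dist, id_conf) in face_id.recognize_faces(probe):
    # name is "Other" when the closest gallery match exceeds the 0.6 distance threshold
    print(name, float(dist), float(id_conf))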
tensortrade-org/tensortrade
tensortrade/feed/core/base.py
Stream.placeholder
python
def placeholder(dtype: str = None) -> "Stream[T]": return Placeholder(dtype=dtype)
Creates a placeholder stream for data to be provided at a later date. Parameters ---------- dtype : str The data type that will be provided. Returns ------- `Stream[T]` A stream representing a placeholder.
https://github.com/tensortrade-org/tensortrade/blob/65151a72090ab372ed7a33edc673e45b53d2f763/tensortrade/feed/core/base.py#L339-L352
import inspect from abc import abstractmethod from typing import ( Generic, Iterable, TypeVar, Dict, Any, Callable, List, Tuple ) from tensortrade.core import Observable from tensortrade.feed.core.accessors import CachedAccessor from tensortrade.feed.core.mixins import DataTypeMixin T = TypeVar("T") class Named: generic_name: str = "generic" namespaces: List[str] = [] names: Dict[str, int] = {} def __init__(self, name: str = None): if not name: name = self.generic_name if name in Stream.names.keys(): Stream.names[name] += 1 name += ":/" + str(Stream.names[name] - 1) else: Stream.names[name] = 0 self.name = name def rename(self, name: str, sep: str = ":/") -> "Named": if len(Named.namespaces) > 0: name = Named.namespaces[-1] + sep + name self.name = name return self class NameSpace(Named): def __init__(self, name: str) -> None: super().__init__(name) def __enter__(self) -> None: Named.namespaces += [self.name] def __exit__(self, exc_type, exc_val, exc_tb) -> None: Named.namespaces.pop() class Stream(Generic[T], Named, Observable): _mixins: "Dict[str, DataTypeMixin]" = {} _accessors: "List[CachedAccessor]" = [] generic_name: str = "stream" def __new__(cls, *args, **kwargs): dtype = kwargs.get("dtype") instance = super().__new__(cls) if dtype in Stream._mixins.keys(): mixin = Stream._mixins[dtype] instance = Stream.extend_instance(instance, mixin) return instance def __init__(self, name: str = None, dtype: str = None): Named.__init__(self, name) Observable.__init__(self) self.dtype = dtype self.inputs = [] self.value = None def __call__(self, *inputs) -> "Stream[T]": self.inputs = inputs return self def run(self) -> None: self.value = self.forward() for listener in self.listeners: listener.on_next(self.value) @abstractmethod def forward(self) -> T: raise NotImplementedError() @abstractmethod def has_next(self) -> bool: raise NotImplementedError() def astype(self, dtype: str) -> "Stream[T]": self.dtype = dtype mixin = Stream._mixins[dtype] return Stream.extend_instance(self, mixin) def reset(self) -> None: for listener in self.listeners: if hasattr(listener, "reset"): listener.reset() for stream in self.inputs: stream.reset() self.value = None def gather(self) -> "List[Tuple[Stream, Stream]]": return self._gather(self, [], []) @staticmethod def source(iterable: "Iterable[T]", dtype: str = None) -> "Stream[T]": return IterableStream(iterable, dtype=dtype) @staticmethod def group(streams: "List[Stream[T]]") -> "Stream[dict]": return Group()(*streams) @staticmethod def sensor(obj: "Any", func: "Callable[[Any], T]", dtype: str = None) -> "Stream[T]": return Sensor(obj, func, dtype=dtype) @staticmethod def select(streams: "List[Stream[T]]", func: "Callable[[Stream[T]], bool]") -> "Stream[T]": for s in streams: if func(s): return s raise Exception("No stream satisfies selector condition.") @staticmethod def constant(value: "T", dtype: str = None) -> "Stream[T]": return Constant(value, dtype=dtype) @staticmethod
Apache License 2.0
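A small, hedged sketch of the placeholder stream documented above. The import path is inferred from the module path tensortrade/feed/core/base.py and should be treated as an assumption; no value is pushed into the placeholder here because that part of the API is not shown in this record.

from tensortrade.feed.core import Stream   # import path inferred from the record's module path

price = Stream.placeholder(dtype="float").rename("price")
print(price.name, price.dtype)              # -> price float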
wchill/ffmpy3
ffmpy3.py
FFmpeg.wait
python
async def wait(self): if not self.process: return exitcode = await self.process.wait() if exitcode != 0: raise FFRuntimeError(self.cmd, exitcode) return exitcode
Asynchronously wait for the process to complete execution. Raises ------ FFRuntimeError The process exited with an error. Returns ------- int or None 0 if the process finished successfully, or None if it has not been started.
https://github.com/wchill/ffmpy3/blob/b22049746ddc5f18c934df9cf1f5f6943a95d03f/ffmpy3.py#L196-L214
import errno import shlex import asyncio import subprocess __version__ = '0.2.4' __license__ = 'MIT' class FFmpeg(object): def __init__(self, executable='ffmpeg', global_options=None, inputs=None, outputs=None): self.executable = executable self._cmd = [executable] global_options = global_options or [] if _is_sequence(global_options): normalized_global_options = [] for opt in global_options: normalized_global_options += shlex.split(opt) else: normalized_global_options = shlex.split(global_options) self._cmd += normalized_global_options self._cmd += _merge_args_opts(inputs, add_input_option=True) self._cmd += _merge_args_opts(outputs) self.cmd = subprocess.list2cmdline(self._cmd) self.process = None def __repr__(self): return '<{0!r} {1!r}>'.format(self.__class__.__name__, self.cmd) def run(self, input_data=None, stdout=None, stderr=None): try: self.process = subprocess.Popen( self._cmd, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr ) except OSError as e: if e.errno == errno.ENOENT: raise FFExecutableNotFoundError("Executable '{0}' not found".format(self.executable)) else: raise out = self.process.communicate(input=input_data) if self.process.returncode != 0: raise FFRuntimeError(self.cmd, self.process.returncode, out[0], out[1]) return out async def run_async(self, input_data=None, stdout=None, stderr=None): try: if input_data: stdin = asyncio.subprocess.PIPE else: stdin = None self.process = await asyncio.create_subprocess_exec( *self._cmd, stdin=stdin, stdout=stdout, stderr=stderr ) except OSError as e: if e.errno == errno.ENOENT: raise FFExecutableNotFoundError("Executable '{0}' not found".format(self.executable)) else: raise if input_data: self.process.stdin.write(input_data) return self.process
MIT License
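A hedged end-to-end sketch of run_async() followed by wait(). The file names are placeholders and ffmpeg must be on the PATH; wait() raises FFRuntimeError on a non-zero exit code.

import asyncio
from ffmpy3 import FFmpeg, FFRuntimeError

async def transcode():
    ff = FFmpeg(inputs={'input.mp4': None}, outputs={'output.webm': None})
    await ff.run_async()          # start the subprocess without blocking
    try:
        await ff.wait()           # returns 0 on success
    except FFRuntimeError as exc:
        print('ffmpeg failed:', exc)

asyncio.run(transcode())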
google-research/disentanglement_lib
disentanglement_lib/evaluation/metrics/modularity_explicitness.py
compute_modularity_explicitness
python
def compute_modularity_explicitness(ground_truth_data, representation_function, random_state, artifact_dir=None, num_train=gin.REQUIRED, num_test=gin.REQUIRED, batch_size=16): del artifact_dir scores = {} mus_train, ys_train = utils.generate_batch_factor_code( ground_truth_data, representation_function, num_train, random_state, batch_size) mus_test, ys_test = utils.generate_batch_factor_code( ground_truth_data, representation_function, num_test, random_state, batch_size) discretized_mus = utils.make_discretizer(mus_train) mutual_information = utils.discrete_mutual_info(discretized_mus, ys_train) assert mutual_information.shape[0] == mus_train.shape[0] assert mutual_information.shape[1] == ys_train.shape[0] scores["modularity_score"] = modularity(mutual_information) explicitness_score_train = np.zeros([ys_train.shape[0], 1]) explicitness_score_test = np.zeros([ys_test.shape[0], 1]) mus_train_norm, mean_mus, stddev_mus = utils.normalize_data(mus_train) mus_test_norm, _, _ = utils.normalize_data(mus_test, mean_mus, stddev_mus) for i in range(ys_train.shape[0]): explicitness_score_train[i], explicitness_score_test[i] = explicitness_per_factor(mus_train_norm, ys_train[i, :], mus_test_norm, ys_test[i, :]) scores["explicitness_score_train"] = np.mean(explicitness_score_train) scores["explicitness_score_test"] = np.mean(explicitness_score_test) return scores
Computes the modularity metric according to Sec 3. Args: ground_truth_data: GroundTruthData to be sampled from. representation_function: Function that takes observations as input and outputs a dim_representation sized representation for each observation. random_state: Numpy random state used for randomness. artifact_dir: Optional path to directory where artifacts can be saved. num_train: Number of points used for training. num_test: Number of points used for testing. batch_size: Batch size for sampling. Returns: Dictionary with average modularity score and average explicitness (train and test).
https://github.com/google-research/disentanglement_lib/blob/86a644d4ed35c771560dc3360756363d35477357/disentanglement_lib/evaluation/metrics/modularity_explicitness.py#L37-L84
from __future__ import absolute_import from __future__ import division from __future__ import print_function from disentanglement_lib.evaluation.metrics import utils import numpy as np from six.moves import range from sklearn import linear_model from sklearn import metrics from sklearn import preprocessing import gin.tf @gin.configurable( "modularity_explicitness", blacklist=["ground_truth_data", "representation_function", "random_state", "artifact_dir"])
Apache License 2.0
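A hedged sketch of calling the metric directly with explicit keyword arguments instead of gin.REQUIRED. The DummyData dataset, the toy representation function and the two gin bindings for the discretizer follow the pattern used elsewhere in disentanglement_lib, but their exact names are assumptions.

import numpy as np
import gin.tf
from disentanglement_lib.data.ground_truth import dummy_data
from disentanglement_lib.evaluation.metrics import utils
from disentanglement_lib.evaluation.metrics.modularity_explicitness import compute_modularity_explicitness

# make_discretizer is gin-configurable; these binding names are assumptions
gin.bind_parameter("discretizer.discretizer_fn", utils._histogram_discretize)
gin.bind_parameter("discretizer.num_bins", 10)

data = dummy_data.DummyData()

def representation_function(x):
    # toy "representation": flatten each observation and keep the first 10 dimensions
    return np.reshape(x, (x.shape[0], -1))[:, :10]

scores = compute_modularity_explicitness(
    data, representation_function, np.random.RandomState(0),
    num_train=1000, num_test=500, batch_size=16)
print(scores)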
angr/angr
angr/knowledge_plugins/key_definitions/uses.py
Uses.get_uses
python
def get_uses(self, definition: 'Definition'): return self._uses_by_definition.get(definition, set())
Retrieve the uses of a given definition. :param definition: The definition for which we get the uses.
https://github.com/angr/angr/blob/94de0f468df0c0d27428301dae93d94f935ade9b/angr/knowledge_plugins/key_definitions/uses.py#L28-L34
from typing import Dict, Set, TYPE_CHECKING from collections import defaultdict from ...code_location import CodeLocation if TYPE_CHECKING: from .definition import Definition class Uses: __slots__ = ('_uses_by_definition', '_uses_by_location' ) def __init__(self): self._uses_by_definition: Dict['Definition',Set[CodeLocation]] = defaultdict(set) self._uses_by_location: Dict[CodeLocation, Set['Definition']] = defaultdict(set) def add_use(self, definition, codeloc: CodeLocation): self._uses_by_definition[definition].add(codeloc) self._uses_by_location[codeloc].add(definition)
BSD 2-Clause Simplified License
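A minimal, hedged sketch of the Uses container shown above. A real Definition carries an atom and a code location, so a plain hashable object stands in for it here purely for illustration.

from angr.code_location import CodeLocation
from angr.knowledge_plugins.key_definitions.uses import Uses

uses = Uses()
loc = CodeLocation(0x400000, 3)        # block address, statement index
definition = object()                  # stand-in for a real Definition (any hashable object works)
uses.add_use(definition, loc)
print(uses.get_uses(definition))       # {<CodeLocation ...>}
print(uses.get_uses(object()))         # set() -- unknown definitions return an empty set, they do not raise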
huntfx/vfxwindow
vfxwindow/blender.py
BlenderWindow.addCallbackGameBefore
python
def addCallbackGameBefore(self, func, persistent=True, group=None): self._addApplicationHandler('game_pre', func, persistent=persistent, group=group)
On starting the game engine.
https://github.com/huntfx/vfxwindow/blob/eb54cb6f50f8b7b53b26de0f7155e0dbd62517a5/vfxwindow/blender.py#L147-L149
from __future__ import absolute_import import sys from collections import defaultdict from Qt import QtWidgets import bpy from .utils import setCoordinatesToScreen, hybridmethod from .standalone import StandaloneWindow VERSION = bpy.app.version_string class BlenderWindow(StandaloneWindow): def __init__(self, parent=None, **kwargs): super(BlenderWindow, self).__init__(parent, **kwargs) self.blender = True self.standalone = False def saveWindowPosition(self): if 'blender' not in self.windowSettings: self.windowSettings['blender'] = {} settings = self.windowSettings['blender'] key = self._getSettingsKey() if key not in settings: settings[key] = {} settings[key]['width'] = self.width() settings[key]['height'] = self.height() settings[key]['x'] = self.x() settings[key]['y'] = self.y() super(BlenderWindow, self).saveWindowPosition() def loadWindowPosition(self): key = self._getSettingsKey() try: x = self.windowSettings['blender'][key]['x'] y = self.windowSettings['blender'][key]['y'] width = self.windowSettings['blender'][key]['width'] height = self.windowSettings['blender'][key]['height'] except KeyError: super(BlenderWindow, self).loadWindowPosition() else: x, y = setCoordinatesToScreen(x, y, width, height, padding=5) self.resize(width, height) self.move(x, y) @hybridmethod def show(cls, self, *args, **kwargs): if self is not cls: return super(BlenderWindow, self).show() try: cls.clearWindowInstance(cls.ID) except AttributeError: pass kwargs['instance'] = True kwargs['exec_'] = False return super(BlenderWindow, cls).show(*args, **kwargs) @hybridmethod def removeCallbacks(cls, self, group=None, windowInstance=None, windowID=None): if self is cls: if windowInstance is None and windowID is not None: windowInstance = cls.windowInstance(windowID) if windowInstance is None: raise ValueError('windowInstance or windowID parameter is required for classmethod') elif windowInstance is None: windowInstance = self.windowInstance() if group is None: groups = windowInstance['callback'].keys() else: if group not in windowInstance['callback']: return 0 groups = [group] numEvents = 0 for group in groups: for callback_attr, callbacks in windowInstance['callback'][group].items(): callback_list = getattr(bpy.app.handlers, callback_attr) for func in callbacks: callback_list.remove(func) numEvents += 1 del windowInstance['callback'][group] return numEvents def _addBlenderCallbackGroup(self, group): windowInstance = self.windowInstance() if group in windowInstance['callback']: return windowInstance['callback'][group] = defaultdict(list) def _addApplicationHandler(self, handler, func, persistent=True, group=None): self._addBlenderCallbackGroup(group) isPersistent = hasattr(func, '_bpy_persistent') if persistent and not isPersistent: func = bpy.app.handlers.persistent(func) elif not persistent and isPersistent: del func._bpy_persistent getattr(bpy.app.handlers, handler).append(func) self.windowInstance()['callback'][group][handler].append(func) def addCallbackFrameChangeAfter(self, func, persistent=True, group=None): self._addApplicationHandler('frame_change_post', func, persistent=persistent, group=group) def addCallbackFrameChangeBefore(self, func, persistent=True, group=None): self._addApplicationHandler('frame_change_pre', func, persistent=persistent, group=group) def addCallbackGameAfter(self, func, persistent=True, group=None): self._addApplicationHandler('game_post', func, persistent=persistent, group=group)
MIT License
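A hedged sketch of registering the game-engine callback documented above from inside Blender. The window ID, handler and group name are illustrative; persistent=False keeps the module-level function unchanged when it is appended to bpy.app.handlers.game_pre, and game_pre only exists in Blender builds that still ship the game engine.

from vfxwindow.blender import BlenderWindow

def on_game_start(*args):
    # runs just before the game engine starts
    print('Game engine starting', args)

class MyWindow(BlenderWindow):
    ID = 'my.example.window'            # window ID assumed per vfxwindow conventions

    def __init__(self, parent=None, **kwargs):
        super(MyWindow, self).__init__(parent, **kwargs)
        self.addCallbackGameBefore(on_game_start, persistent=False, group='game')

MyWindow.show()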
spotify/luigi
luigi/parameter.py
_DateParameterBase.parse
python
def parse(self, s): return datetime.datetime.strptime(s, self.date_format).date()
Parses a date string formatted like ``YYYY-MM-DD``.
https://github.com/spotify/luigi/blob/ad5ddc9875e54cca8209863a8ec7bcc5d13ece8a/luigi/parameter.py#L367-L371
import abc import datetime import warnings from enum import IntEnum import json from json import JSONEncoder import operator from ast import literal_eval from configparser import NoOptionError, NoSectionError from luigi import date_interval from luigi import task_register from luigi import configuration from luigi.cmdline_parser import CmdlineParser from .freezing import recursively_freeze, FrozenOrderedDict _no_value = object() class ParameterVisibility(IntEnum): PUBLIC = 0 HIDDEN = 1 PRIVATE = 2 @classmethod def has_value(cls, value): return any(value == item.value for item in cls) def serialize(self): return self.value class ParameterException(Exception): pass class MissingParameterException(ParameterException): pass class UnknownParameterException(ParameterException): pass class DuplicateParameterException(ParameterException): pass class Parameter: _counter = 0 def __init__(self, default=_no_value, is_global=False, significant=True, description=None, config_path=None, positional=True, always_in_help=False, batch_method=None, visibility=ParameterVisibility.PUBLIC): self._default = default self._batch_method = batch_method if is_global: warnings.warn("is_global support is removed. Assuming positional=False", DeprecationWarning, stacklevel=2) positional = False self.significant = significant self.positional = positional self.visibility = visibility if ParameterVisibility.has_value(visibility) else ParameterVisibility.PUBLIC self.description = description self.always_in_help = always_in_help if config_path is not None and ('section' not in config_path or 'name' not in config_path): raise ParameterException('config_path must be a hash containing entries for section and name') self._config_path = config_path self._counter = Parameter._counter Parameter._counter += 1 def _get_value_from_config(self, section, name): conf = configuration.get_config() try: value = conf.get(section, name) except (NoSectionError, NoOptionError, KeyError): return _no_value return self.parse(value) def _get_value(self, task_name, param_name): for value, warn in self._value_iterator(task_name, param_name): if value != _no_value: if warn: warnings.warn(warn, DeprecationWarning) return value return _no_value def _value_iterator(self, task_name, param_name): cp_parser = CmdlineParser.get_instance() if cp_parser: dest = self._parser_global_dest(param_name, task_name) found = getattr(cp_parser.known_args, dest, None) yield (self._parse_or_no_value(found), None) yield (self._get_value_from_config(task_name, param_name), None) if self._config_path: yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']), 'The use of the configuration [{}] {} is deprecated. 
Please use [{}] {}'.format( self._config_path['section'], self._config_path['name'], task_name, param_name)) yield (self._default, None) def has_task_value(self, task_name, param_name): return self._get_value(task_name, param_name) != _no_value def task_value(self, task_name, param_name): value = self._get_value(task_name, param_name) if value == _no_value: raise MissingParameterException("No default specified") else: return self.normalize(value) def _is_batchable(self): return self._batch_method is not None def parse(self, x): return x def _parse_list(self, xs): if not self._is_batchable(): raise NotImplementedError('No batch method found') elif not xs: raise ValueError('Empty parameter list passed to parse_list') else: return self._batch_method(map(self.parse, xs)) def serialize(self, x): return str(x) def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != Parameter: return if not isinstance(param_value, str): warnings.warn('Parameter "{}" with value "{}" is not of type string.'.format(param_name, param_value)) def normalize(self, x): return x def next_in_enumeration(self, _value): return None def _parse_or_no_value(self, x): if not x: return _no_value else: return self.parse(x) @staticmethod def _parser_global_dest(param_name, task_name): return task_name + '_' + param_name @classmethod def _parser_kwargs(cls, param_name, task_name=None): return { "action": "store", "dest": cls._parser_global_dest(param_name, task_name) if task_name else param_name, } class OptionalParameter(Parameter): def serialize(self, x): if x is None: return '' else: return str(x) def parse(self, x): return x or None def _warn_on_wrong_param_type(self, param_name, param_value): if self.__class__ != OptionalParameter: return if not isinstance(param_value, str) and param_value is not None: warnings.warn('OptionalParameter "{}" with value "{}" is not of type string or None.'.format( param_name, param_value)) _UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0) class _DateParameterBase(Parameter): def __init__(self, interval=1, start=None, **kwargs): super(_DateParameterBase, self).__init__(**kwargs) self.interval = interval self.start = start if start is not None else _UNIX_EPOCH.date() @property @abc.abstractmethod def date_format(self): pass
Apache License 2.0
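_DateParameterBase is abstract (date_format is an abstract property), so the concrete DateParameter, which sets date_format to '%Y-%m-%d', is the easiest way to see parse() in action; a short sketch:

import datetime
import luigi

param = luigi.DateParameter()
assert param.parse('2021-03-15') == datetime.date(2021, 3, 15)
assert param.serialize(datetime.date(2021, 3, 15)) == '2021-03-15'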
numba/numba
numba/cuda/cuda_paths.py
get_cuda_home
python
def get_cuda_home(*subdirs): cuda_home = os.environ.get('CUDA_HOME') if cuda_home is None: cuda_home = os.environ.get('CUDA_PATH') if cuda_home is not None: return os.path.join(cuda_home, *subdirs)
Get the path of CUDA_HOME. *subdirs* are optional subdirectory names appended to the resulting path.
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/cuda/cuda_paths.py#L121-L131
import sys import re import os from collections import defaultdict, namedtuple from numba.core.config import IS_WIN32, IS_OSX from numba.misc.findlib import find_lib, find_file _env_path_tuple = namedtuple('_env_path_tuple', ['by', 'info']) def _find_valid_path(options): for by, data in options: if data is not None: return by, data else: return '<unknown>', None def _get_libdevice_path_decision(): options = [ ('Conda environment', get_conda_ctk()), ('CUDA_HOME', get_cuda_home('nvvm', 'libdevice')), ('System', get_system_ctk('nvvm', 'libdevice')), ('Debian package', get_debian_pkg_libdevice()), ] by, libdir = _find_valid_path(options) return by, libdir def _nvvm_lib_dir(): if IS_WIN32: return 'nvvm', 'bin' elif IS_OSX: return 'nvvm', 'lib' else: return 'nvvm', 'lib64' def _get_nvvm_path_decision(): options = [ ('Conda environment', get_conda_ctk()), ('CUDA_HOME', get_cuda_home(*_nvvm_lib_dir())), ('System', get_system_ctk(*_nvvm_lib_dir())), ] by, path = _find_valid_path(options) return by, path def _get_libdevice_paths(): by, libdir = _get_libdevice_path_decision() pat = r'libdevice(\.(?P<arch>compute_\d+))?(\.\d+)*\.bc$' candidates = find_file(re.compile(pat), libdir) out = defaultdict(list) for path in candidates: m = re.search(pat, path) arch = m.group('arch') out[arch].append(path) out = {k: max(v) for k, v in out.items()} return _env_path_tuple(by, out) def _cudalib_path(): if IS_WIN32: return 'bin' elif IS_OSX: return 'lib' else: return 'lib64' def _get_cudalib_dir_path_decision(): options = [ ('Conda environment', get_conda_ctk()), ('CUDA_HOME', get_cuda_home(_cudalib_path())), ('System', get_system_ctk(_cudalib_path())), ] by, libdir = _find_valid_path(options) return by, libdir def _get_cudalib_dir(): by, libdir = _get_cudalib_dir_path_decision() return _env_path_tuple(by, libdir) def get_system_ctk(*subdirs): if sys.platform.startswith('linux'): base = '/usr/local/cuda' if os.path.exists(base): return os.path.join(base, *subdirs) def get_conda_ctk(): is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta')) if not is_conda_env: return paths = find_lib('nvvm') if not paths: return return os.path.dirname(max(paths))
BSD 2-Clause Simplified License
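A small sketch showing that get_cuda_home simply joins CUDA_HOME (falling back to CUDA_PATH) with any given subdirectories; the toolkit path below is a placeholder.

import os
os.environ['CUDA_HOME'] = '/usr/local/cuda-11.2'   # placeholder toolkit location

from numba.cuda.cuda_paths import get_cuda_home

print(get_cuda_home())                       # /usr/local/cuda-11.2
print(get_cuda_home('nvvm', 'libdevice'))    # /usr/local/cuda-11.2/nvvm/libdevice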
strinking/futaba
futaba/cogs/settings/core.py
Settings.tracker_blacklist_remove
python
async def tracker_blacklist_remove( self, ctx, *, user_or_channel: Union[MemberConv, TextChannelConv] ): logger.info( "Removing %s '%s' (%d) from the tracking blacklist for guild '%s' (%d)", "user" if isinstance(user_or_channel, discord.abc.User) else "channel", user_or_channel.name, user_or_channel.id, ctx.guild.name, ctx.guild.id, ) with self.bot.sql.transaction(): self.bot.sql.settings.remove_from_tracking_blacklist( ctx.guild, user_or_channel ) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = ( f"Removed {user_or_channel.mention} from the tracking blacklist" ) await ctx.send(embed=embed)
Remove a user or channel from the tracking blacklist.
https://github.com/strinking/futaba/blob/6b1ae13b529843b04cfbf7641dc43f8213d78448/futaba/cogs/settings/core.py#L704-L728
import logging import re from itertools import chain from typing import Union import discord from discord.ext import commands from futaba import permissions from futaba.converters import MemberConv, RoleConv, TextChannelConv from futaba.emojis import ICONS from futaba.exceptions import CommandFailed, ManualCheckFailure, SendHelp from futaba.permissions import admin_perm, mod_perm from futaba.str_builder import StringBuilder from futaba.utils import plural from ..abc import AbstractCog logger = logging.getLogger(__name__) __all__ = ["Settings"] class Settings(AbstractCog): __slots__ = ("journal",) def __init__(self, bot): super().__init__(bot) self.journal = bot.get_broadcaster("/settings") def setup(self): for guild in self.bot.guilds: self.bot.sql.settings.get_special_roles(guild) self.bot.sql.settings.get_reapply_roles(guild) @commands.command(name="prefix") async def prefix(self, ctx, *, prefix: str = None): if prefix is None: bot_prefix = self.bot.prefix(ctx.guild) embed = discord.Embed(colour=discord.Colour.dark_teal()) if ctx.guild is None: embed.description = "No command prefix, all messages are commands" else: embed.description = f"Prefix for {ctx.guild.name} is `{bot_prefix}`" elif ctx.guild is None and prefix is not None: embed = discord.Embed(colour=discord.Colour.red()) embed.description = "Cannot set a command prefix outside of a server!" raise CommandFailed(embed=embed) elif not mod_perm(ctx): embed = discord.Embed(colour=discord.Colour.red()) embed.description = "You do not have permission to set the prefix" raise ManualCheckFailure(embed=embed) elif prefix == "_": with self.bot.sql.transaction(): self.bot.sql.settings.set_prefix(ctx.guild, None) bot_prefix = self.bot.prefix(ctx.guild) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = ( f"Unset prefix for {ctx.guild.name}. (Default prefix: `{bot_prefix}`)" ) self.journal.send( "prefix", ctx.guild, "Unset bot command prefix", icon="settings", prefix=None, default_prefix=self.bot.config.default_prefix, ) else: bot_prefix = re.sub(r"_$", " ", prefix) with self.bot.sql.transaction(): self.bot.sql.settings.set_prefix(ctx.guild, bot_prefix) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = f"Set prefix for {ctx.guild.name} to `{bot_prefix}`" self.journal.send( "prefix", ctx.guild, "Unset bot command prefix", icon="settings", prefix=bot_prefix, default_prefix=self.bot.config.default_prefix, ) await ctx.send(embed=embed) @commands.command(name="maxdelete", aliases=["maxdeletemsg"]) @commands.guild_only() async def max_delete(self, ctx, count: int = None): if count is None: max_delete_messages = self.bot.sql.settings.get_max_delete_messages( ctx.guild ) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = f"Maximum number of messages that can be deleted in bulk is `{max_delete_messages}`" elif not admin_perm(ctx): embed = discord.Embed(colour=discord.Colour.red()) embed.description = ( "You do not have permission to set the maximum deletable messages" ) raise ManualCheckFailure(embed=embed) elif count <= 0: embed = discord.Embed(colour=discord.Colour.red()) embed.description = "This value must be a positive, non-zero integer" raise CommandFailed(embed=embed) elif count >= 2 ** 32 - 1: embed = discord.Embed(colour=discord.Colour.red()) embed.description = ( "This value is way too high. Try a more reasonable value." 
) raise CommandFailed(embed=embed) else: with self.bot.sql.transaction(): self.bot.sql.settings.set_max_delete_messages(ctx.guild, count) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = f"Set maximum deletable messages to `{count}`" await ctx.send(embed=embed) @commands.command(name="warnmanual") @commands.guild_only() async def warn_manual_mod_action(self, ctx, value: bool = None): if value is None: warn_manual_mod_action = self.bot.sql.settings.get_warn_manual_mod_action( ctx.guild ) embed = discord.Embed(colour=discord.Colour.dark_teal()) state = "enabled" if warn_manual_mod_action else "disabled" embed.description = f"Warning moderators about performing mod actions manually is **{state}**" elif not admin_perm(ctx): embed = discord.Embed(colour=discord.Colour.red()) embed.description = "You do not have permission to enable or disable manual mod action warning" raise ManualCheckFailure(embed=embed) else: with self.bot.sql.transaction(): self.bot.sql.settings.set_warn_manual_mod_action(ctx.guild, value) embed = discord.Embed(colour=discord.Colour.teal()) embed.description = f"Set warning moderators about performing mod actions manually to `{value}`" await ctx.send(embed=embed) @commands.command(name="removeother", aliases=["rmother"]) @commands.guild_only() async def remove_other_roles_on_punish(self, ctx, value: bool = None): if value is None: remove_other_roles = self.bot.sql.settings.get_remove_other_roles(ctx.guild) embed = discord.Embed(colour=discord.Colour.dark_teal()) state = "are removed" if remove_other_roles else "are kept" embed.description = ( f"When punishment roles are added other roles **{state}**" ) elif not admin_perm(ctx): embed = discord.Embed(colour=discord.Colour.red()) embed.description = "You do not have permissions to change the removal of non-punishment roles" raise ManualCheckFailure(embed=embed) else: with self.bot.sql.transaction(): self.bot.sql.settings.set_remove_other_roles(ctx.guild, value) embed = discord.Embed(colour=discord.Colour.teal()) embed.description = ( f"Set removal of other non-punishment roles to `{value}`" ) await ctx.send(embed=embed) @commands.command(name="specroles", aliases=["sroles"]) @commands.guild_only() async def special_roles(self, ctx): logger.info( "Sending list of all configured roles for guild '%s' (%d)", ctx.guild.name, ctx.guild.id, ) def mention(role): return getattr(role, "mention", "(none)") roles = self.bot.sql.settings.get_special_roles(ctx.guild) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = "\n".join( ( f'{ICONS["member"]} Member: {mention(roles.member)}', f'{ICONS["guest"]} Guest: {mention(roles.guest)}', f'{ICONS["mute"]} Mute: {mention(roles.mute)}', f'{ICONS["jail"]} Jail: {mention(roles.jail)}', f'{ICONS["focus"]} Focus: {mention(roles.focus)}', ) ) await ctx.send(embed=embed) async def check_role(self, ctx, role): embed = discord.Embed(colour=discord.Colour.red()) if role.is_default(): embed.description = "@everyone role cannot be assigned for this purpose" raise CommandFailed(embed=embed) special_roles = self.bot.sql.settings.get_special_roles(ctx.guild) if role in special_roles: embed.description = "Cannot assign the same role for multiple purposes" raise CommandFailed(embed=embed) embed = permissions.elevated_role_embed(ctx.guild, role, "warning") if embed is not None: await ctx.send(embed=embed) @commands.command(name="setmember") @commands.guild_only() @permissions.check_mod() async def set_member_role(self, ctx, *, role: RoleConv = None): 
logger.info( "Setting member role for guild '%s' (%d) to '%s'", ctx.guild.name, ctx.guild.id, role, ) if role is not None: await self.check_role(ctx, role) with self.bot.sql.transaction(): self.bot.sql.settings.set_special_roles(ctx.guild, member=role) embed = discord.Embed(colour=discord.Colour.green()) if role: embed.description = f"Set member role to {role.mention}" content = f"Set member role to {role.mention}" else: embed.description = "Unset member role" content = "Unset the member role" await ctx.send(embed=embed) self.journal.send( "roles/member", ctx.guild, content, icon="settings", role=role ) @commands.command(name="setguest") @commands.guild_only() @permissions.check_mod() async def set_guest_role(self, ctx, *, role: RoleConv = None): logger.info( "Setting guest role for guild '%s' (%d) to '%s'", ctx.guild.name, ctx.guild.id, role, ) if role is not None: await self.check_role(ctx, role) with self.bot.sql.transaction(): self.bot.sql.settings.set_special_roles(ctx.guild, guest=role) embed = discord.Embed(colour=discord.Colour.green()) if role: embed.description = f"Set guest role to {role.mention}" content = f"Set the guest role to {role.mention}" else: embed.description = "Unset guest role" content = "Unset the guest role" await ctx.send(embed=embed) self.journal.send("roles/guest", ctx.guild, content, icon="settings", role=role) @commands.command(name="setmute") @commands.guild_only() @permissions.check_mod() async def set_mute_role(self, ctx, *, role: RoleConv = None): logger.info( "Setting mute role for guild '%s' (%d) to '%s'", ctx.guild.name, ctx.guild.id, role, ) if role is not None: await self.check_role(ctx, role) with self.bot.sql.transaction(): self.bot.sql.settings.set_special_roles(ctx.guild, mute=role) embed = discord.Embed(colour=discord.Colour.green()) if role: embed.description = f"Set mute role to {role.mention}" content = f"Set the mute role to {role.mention}" else: embed.description = "Unset mute role" content = "Unset the mute role" await ctx.send(embed=embed) self.journal.send("roles/mute", ctx.guild, content, icon="settings", role=role) @commands.command(name="setjail") @commands.guild_only() @permissions.check_mod() async def set_jail_role(self, ctx, *, role: RoleConv = None): logger.info( "Setting mute role for guild '%s' (%d) to '%s'", ctx.guild.name, ctx.guild.id, role, ) if role is not None: await self.check_role(ctx, role) with self.bot.sql.transaction(): self.bot.sql.settings.set_special_roles(ctx.guild, jail=role) embed = discord.Embed(colour=discord.Colour.green()) if role: embed.description = f"Set jail role to {role.mention}" content = f"Set the jail role to {role.mention}" else: embed.description = "Unset jail role" content = "Unset the jail role" await ctx.send(embed=embed) self.journal.send("roles/jail", ctx.guild, content, icon="settings", role=role) @commands.command(name="setfocus") @commands.guild_only() @permissions.check_mod() async def set_focus_role(self, ctx, *, role: RoleConv = None): logger.info( "Setting focus role for guild '%s' (%d) to '%s'", ctx.guild.name, ctx.guild.id, role, ) if role is not None: await self.check_role(ctx, role) with self.bot.sql.transaction(): self.bot.sql.settings.set_special_roles(ctx.guild, focus=role) embed = discord.Embed(colour=discord.Colour.green()) if role: embed.description = f"Set focus role to {role.mention}" content = f"Set the focus role to {role.mention}" else: embed.description = "Unset focus role" content = "Unset the focus role" await ctx.send(embed=embed) self.journal.send("roles/focus", 
ctx.guild, content, icon="settings", role=role) @commands.command(name="setnonpurge") @commands.guild_only() @permissions.check_mod() async def set_nonpurge_role(self, ctx, *, role: RoleConv = None): logger.info( "Setting nonpurge role for guild '%s' (%d) to '%s'", ctx.guild.name, ctx.guild.id, role, ) if role is not None: await self.check_role(ctx, role) with self.bot.sql.transaction(): self.bot.sql.settings.set_special_roles(ctx.guild, nonpurge=role) embed = discord.Embed(colour=discord.Colour.green()) if role: embed.description = f"Set nonpurge role to {role.mention}" content = f"Set the nonpurge role to {role.mention}" else: embed.description = "Unset nonpurge role" content = "Unset the nonpurge role" await ctx.send(embed=embed) self.journal.send("roles/nonpurge", ctx.guild, content, icon="settings", role=role) @commands.group(name="reapply", aliases=["reapp"]) @commands.guild_only() async def reapply(self, ctx): if ctx.invoked_subcommand is None: raise SendHelp() @reapply.command(name="add", aliases=["append", "extend", "new", "register", "set"]) @commands.guild_only() async def reapply_add(self, ctx, *roles: RoleConv): warning = StringBuilder() roles = set(roles) special_roles = self.bot.sql.settings.get_special_roles(ctx.guild) if ctx.guild.default_role in roles: warning.writeln("You should not make @everyone reappliable.") roles.remove(ctx.guild.default_role) if special_roles.guest_role in roles: warning.writeln( f"You should not make {special_roles.guest_role.mention} reappliable." ) if special_roles.member_role in roles: warning.writeln( f"You should not make {special_roles.member_role.mention} reappliable." ) if special_roles.mute_role in roles: warning.writeln( f"The {special_roles.mute_role.mention} is always reappliable." ) if special_roles.jail_role in roles: warning.writeln( f"The {special_roles.jail_role.mention} is always reappliable." ) if "SelfAssignableRoles" in self.bot.cogs: assignable_roles = self.bot.sql.roles.get_assignable_roles(ctx.guild) else: assignable_roles = () for role in roles: if role in assignable_roles: warning.writeln( f"The {role.mention} is already reappliable since it is self-assignable." 
) if warning: embed = discord.Embed(colour=discord.Colour.dark_purple()) embed.description = str(warning) await ctx.send(embed=embed) logger.info( "Setting roles as 'reappliable': [%s]", ", ".join(role.name for role in roles), ) with self.bot.sql.transaction(): self.bot.sql.settings.update_reapply_roles(ctx.guild, roles, True) @reapply.command( name="remove", aliases=["rm", "delete", "del", "unregister", "unset"] ) @commands.guild_only() async def reapply_remove(self, ctx, *roles: RoleConv): logger.info( "Unsetting roles as 'reappliable': [%s]", ", ".join(role.name for role in roles), ) with self.bot.sql.transaction(): self.bot.sql.settings.update_reapply_roles(ctx.guild, set(roles), False) @reapply.command(name="show", aliases=["display", "list", "ls"]) @commands.guild_only() async def reapply_show(self, ctx): reapply_roles = self.bot.sql.settings.get_reapply_roles(ctx.guild) special_roles = self.bot.sql.settings.get_special_roles(ctx.guild) embed = discord.Embed(colour=discord.Colour.dark_teal()) descr = StringBuilder(sep=", ") has_roles = False for role in sorted(reapply_roles, key=lambda r: r.name): descr.write(role.mention) if descr: embed.add_field(name="Manually designated", value=str(descr)) has_roles = True else: embed.add_field(name="Manually designated", value="(none)") descr.clear() if special_roles.mute_role is not None: descr.write(special_roles.mute_role.mention) if special_roles.jail_role is not None: descr.write(special_roles.jail_role.mention) if descr: embed.add_field(name="Punishment roles", value=str(descr)) has_roles = True if "SelfAssignableRoles" in self.bot.cogs: assignable_roles = self.bot.sql.roles.get_assignable_roles(ctx.guild) if assignable_roles: embed.add_field( name="Self-assignable roles", value=", ".join( role.mention for role in sorted(assignable_roles, key=lambda r: r.name) ), ) has_roles = True if has_roles: embed.title = "\N{MILITARY MEDAL} Roles which are automatically reapplied" else: embed.colour = discord.Colour.dark_purple() await ctx.send(embed=embed) @reapply.command(name="auto", aliases=["automatic"]) @commands.guild_only() async def reapply_auto(self, ctx, value: bool = None): if value is None: reapply = self.bot.sql.settings.get_auto_reapply(ctx.guild) embed = discord.Embed(colour=discord.Colour.dark_teal()) enabled = "enabled" if reapply else "disabled" embed.description = ( f"Automatic role reapplication is **{enabled}** on this server" ) elif not admin_perm(ctx): embed = discord.Embed(colour=discord.Colour.red()) embed.description = ( "You do not have permission to set automatic role reapplication" ) raise ManualCheckFailure(embed=embed) else: with self.bot.sql.transaction(): self.bot.sql.settings.set_auto_reapply(ctx.guild, value) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = ( f"{'Enabled' if value else 'Disabled'} automatic role reapplication" ) await ctx.send(embed=embed) @commands.command(name="mentionprefix", aliases=["mprefix", "menpfx"]) @commands.guild_only() async def mentionable_prefix(self, ctx, value: int = None): if value is None: value = self.bot.sql.settings.get_mentionable_name_prefix(ctx.guild) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = ( f"Names must begin with at least {value} typeable character{plural(value)}" if value else "No guild requirement for mentionable names" ) elif not mod_perm(ctx): embed = discord.Embed(colour=discord.Colour.red()) embed.description = ( "You do not have permission to set the mentionable name prefix" ) raise 
ManualCheckFailure(embed=embed) else: if value < 0 or value > 32: embed = discord.Embed() embed.colour = discord.Colour.red() embed.description = "Prefix lengths must be between `0` and `32`." raise CommandFailed(embed=embed) with self.bot.sql.transaction(): self.bot.sql.settings.set_mentionable_name_prefix(ctx.guild, value) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = ( f"Set mentionable prefix to {value} character{plural(value)}" if value else "Disabled mentionable prefix requirement" ) await ctx.send(embed=embed) @commands.group(name="trackerblacklist", aliases=["trackerbl", "trkbl"]) @commands.guild_only() async def tracker_blacklist(self, ctx): if ctx.invoked_subcommand is None: raise SendHelp() @tracker_blacklist.command(name="add", aliases=["append", "extend"]) @commands.guild_only() @permissions.check_mod() async def tracker_blacklist_add( self, ctx, *, user_or_channel: Union[MemberConv, TextChannelConv] ): logger.info( "Adding %s '%s' (%d) to the tracking blacklist for guild '%s' (%d)", "user" if isinstance(user_or_channel, discord.abc.User) else "channel", user_or_channel.name, user_or_channel.id, ctx.guild.name, ctx.guild.id, ) with self.bot.sql.transaction(): self.bot.sql.settings.add_to_tracking_blacklist(ctx.guild, user_or_channel) embed = discord.Embed(colour=discord.Colour.dark_teal()) embed.description = f"Added {user_or_channel.mention} to the tracking blacklist" await ctx.send(embed=embed) @tracker_blacklist.command(name="remove", aliases=["rm", "delete", "del"]) @commands.guild_only() @permissions.check_mod()
MIT License
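Since this is a Discord bot command rather than a library call, the usage example is the chat invocation. Assuming the guild's configured prefix is '!' (the prefix is per-guild, as the prefix command in the context above shows), a moderator would run one of:

!trackerblacklist remove @SomeUser
!trkbl rm #some-channel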
azure/azure-devops-cli-extension
azure-devops/azext_devops/devops_sdk/v5_0/build/build_client.py
BuildClient.queue_build
python
def queue_build(self, build, project, ignore_warnings=None, check_in_ticket=None, source_build_id=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if ignore_warnings is not None: query_parameters['ignoreWarnings'] = self._serialize.query('ignore_warnings', ignore_warnings, 'bool') if check_in_ticket is not None: query_parameters['checkInTicket'] = self._serialize.query('check_in_ticket', check_in_ticket, 'str') if source_build_id is not None: query_parameters['sourceBuildId'] = self._serialize.query('source_build_id', source_build_id, 'int') content = self._serialize.body(build, 'Build') response = self._send(http_method='POST', location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf', version='5.0', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('Build', response)
QueueBuild. Queues a build :param :class:`<Build> <azure.devops.v5_0.build.models.Build>` build: :param str project: Project ID or project name :param bool ignore_warnings: :param str check_in_ticket: :param int source_build_id: :rtype: :class:`<Build> <azure.devops.v5_0.build.models.Build>`
https://github.com/azure/azure-devops-cli-extension/blob/5f33f7d81a9c2d2990044fbd9ffa6b535cbda528/azure-devops/azext_devops/devops_sdk/v5_0/build/build_client.py#L448-L475
 from msrest import Serializer, Deserializer from ...client import Client from . import models class BuildClient(Client): def __init__(self, base_url=None, creds=None): super(BuildClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = '965220d5-5bb9-42cf-8d67-9b146df2a5a4' def create_artifact(self, artifact, project, build_id): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') content = self._serialize.body(artifact, 'BuildArtifact') response = self._send(http_method='POST', location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984', version='5.0', route_values=route_values, content=content) return self._deserialize('BuildArtifact', response) def get_artifact(self, project, build_id, artifact_name): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if artifact_name is not None: query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str') response = self._send(http_method='GET', location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('BuildArtifact', response) def get_artifact_content_zip(self, project, build_id, artifact_name, **kwargs): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if artifact_name is not None: query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str') response = self._send(http_method='GET', location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984', version='5.0', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_artifacts(self, project, build_id): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') response = self._send(http_method='GET', location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984', version='5.0', route_values=route_values) return self._deserialize('[BuildArtifact]', self._unwrap_collection(response)) def get_file(self, project, build_id, artifact_name, file_id, file_name, **kwargs): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if artifact_name is not None: query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str') if file_id is not None: query_parameters['fileId'] = self._serialize.query('file_id', file_id, 'str') if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 
'str') response = self._send(http_method='GET', location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984', version='5.0', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_attachments(self, project, build_id, type): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') response = self._send(http_method='GET', location_id='f2192269-89fa-4f94-baf6-8fb128c55159', version='5.0-preview.2', route_values=route_values) return self._deserialize('[Attachment]', self._unwrap_collection(response)) def get_attachment(self, project, build_id, timeline_id, record_id, type, name, **kwargs): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') if record_id is not None: route_values['recordId'] = self._serialize.url('record_id', record_id, 'str') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') response = self._send(http_method='GET', location_id='af5122d3-3438-485e-a25a-2dbbfde84ee6', version='5.0-preview.2', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def authorize_project_resources(self, resources, project): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(resources, '[DefinitionResourceReference]') response = self._send(http_method='PATCH', location_id='398c85bc-81aa-4822-947c-a194a05f0fef', version='5.0-preview.1', route_values=route_values, content=content) return self._deserialize('[DefinitionResourceReference]', self._unwrap_collection(response)) def get_project_resources(self, project, type=None, id=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if type is not None: query_parameters['type'] = self._serialize.query('type', type, 'str') if id is not None: query_parameters['id'] = self._serialize.query('id', id, 'str') response = self._send(http_method='GET', location_id='398c85bc-81aa-4822-947c-a194a05f0fef', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[DefinitionResourceReference]', self._unwrap_collection(response)) def list_branches(self, project, provider_name, service_endpoint_id=None, repository=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if provider_name is not None: route_values['providerName'] = self._serialize.url('provider_name', provider_name, 'str') query_parameters = {} if service_endpoint_id is not None: query_parameters['serviceEndpointId'] = 
self._serialize.query('service_endpoint_id', service_endpoint_id, 'str') if repository is not None: query_parameters['repository'] = self._serialize.query('repository', repository, 'str') response = self._send(http_method='GET', location_id='e05d4403-9b81-4244-8763-20fde28d1976', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[str]', self._unwrap_collection(response)) def get_build_badge(self, project, repo_type, repo_id=None, branch_name=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if repo_type is not None: route_values['repoType'] = self._serialize.url('repo_type', repo_type, 'str') query_parameters = {} if repo_id is not None: query_parameters['repoId'] = self._serialize.query('repo_id', repo_id, 'str') if branch_name is not None: query_parameters['branchName'] = self._serialize.query('branch_name', branch_name, 'str') response = self._send(http_method='GET', location_id='21b3b9ce-fad5-4567-9ad0-80679794e003', version='5.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('BuildBadge', response) def get_build_badge_data(self, project, repo_type, repo_id=None, branch_name=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if repo_type is not None: route_values['repoType'] = self._serialize.url('repo_type', repo_type, 'str') query_parameters = {} if repo_id is not None: query_parameters['repoId'] = self._serialize.query('repo_id', repo_id, 'str') if branch_name is not None: query_parameters['branchName'] = self._serialize.query('branch_name', branch_name, 'str') response = self._send(http_method='GET', location_id='21b3b9ce-fad5-4567-9ad0-80679794e003', version='5.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('str', response) def delete_build(self, project, build_id): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') self._send(http_method='DELETE', location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf', version='5.0', route_values=route_values) def get_build(self, project, build_id, property_filters=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if property_filters is not None: query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str') response = self._send(http_method='GET', location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('Build', response) def get_builds(self, project, definitions=None, queues=None, build_number=None, min_time=None, max_time=None, requested_for=None, reason_filter=None, status_filter=None, result_filter=None, tag_filters=None, properties=None, top=None, continuation_token=None, max_builds_per_definition=None, deleted_filter=None, query_order=None, branch_name=None, build_ids=None, repository_id=None, repository_type=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} 
if definitions is not None: definitions = ",".join(map(str, definitions)) query_parameters['definitions'] = self._serialize.query('definitions', definitions, 'str') if queues is not None: queues = ",".join(map(str, queues)) query_parameters['queues'] = self._serialize.query('queues', queues, 'str') if build_number is not None: query_parameters['buildNumber'] = self._serialize.query('build_number', build_number, 'str') if min_time is not None: query_parameters['minTime'] = self._serialize.query('min_time', min_time, 'iso-8601') if max_time is not None: query_parameters['maxTime'] = self._serialize.query('max_time', max_time, 'iso-8601') if requested_for is not None: query_parameters['requestedFor'] = self._serialize.query('requested_for', requested_for, 'str') if reason_filter is not None: query_parameters['reasonFilter'] = self._serialize.query('reason_filter', reason_filter, 'str') if status_filter is not None: query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str') if result_filter is not None: query_parameters['resultFilter'] = self._serialize.query('result_filter', result_filter, 'str') if tag_filters is not None: tag_filters = ",".join(tag_filters) query_parameters['tagFilters'] = self._serialize.query('tag_filters', tag_filters, 'str') if properties is not None: properties = ",".join(properties) query_parameters['properties'] = self._serialize.query('properties', properties, 'str') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if max_builds_per_definition is not None: query_parameters['maxBuildsPerDefinition'] = self._serialize.query('max_builds_per_definition', max_builds_per_definition, 'int') if deleted_filter is not None: query_parameters['deletedFilter'] = self._serialize.query('deleted_filter', deleted_filter, 'str') if query_order is not None: query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str') if branch_name is not None: query_parameters['branchName'] = self._serialize.query('branch_name', branch_name, 'str') if build_ids is not None: build_ids = ",".join(map(str, build_ids)) query_parameters['buildIds'] = self._serialize.query('build_ids', build_ids, 'str') if repository_id is not None: query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str') if repository_type is not None: query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str') response = self._send(http_method='GET', location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[Build]', self._unwrap_collection(response))
MIT License
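A hedged usage sketch for the build-client methods above, assuming the standard azure-devops Python client wiring (Connection, BasicAuthentication, connection.clients.get_build_client()); the organization URL, token, project name and filter values are placeholders, not part of the source.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholder credentials: supply your own organization URL and personal access token.
connection = Connection(base_url="https://dev.azure.com/your-org",
                        creds=BasicAuthentication("", "your-personal-access-token"))
build_client = connection.clients.get_build_client()

# Mirrors the get_builds signature above; list arguments are joined into CSV strings internally.
builds = build_client.get_builds(project="MyProject",
                                 definitions=[12],
                                 top=10,
                                 status_filter="completed",
                                 query_order="finishTimeDescending")
for build in builds:
    print(build.id, build.status, build.result)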
aydanomachado/mlclass
04_ReinforcementLearning/ple/games/monsterkong/board.py
Board.__init__
python
def __init__(self, width, height, rewards, rng, _dir): self.__width = width self.__actHeight = height self.__height = self.__actHeight + 10 self.score = 0 self.rng = rng self.rewards = rewards self.cycles = 0 self.direction = 0 self._dir = _dir self.IMAGES = { "still": pygame.image.load(os.path.join(_dir, 'assets/still.png')).convert_alpha(), "monster0": pygame.image.load(os.path.join(_dir, 'assets/monster0.png')).convert_alpha(), "princess": pygame.image.load(os.path.join(_dir, 'assets/princess.png')).convert_alpha(), "fireballright": pygame.image.load(os.path.join(_dir, 'assets/fireballright.png')).convert_alpha(), "coin1": pygame.image.load(os.path.join(_dir, 'assets/coin1.png')).convert_alpha(), "wood_block": pygame.image.load(os.path.join(_dir, 'assets/wood_block.png')).convert_alpha(), "ladder": pygame.image.load(os.path.join(_dir, 'assets/ladder.png')).convert_alpha() } self.white = (255, 255, 255) ''' The map is essentially an array of 30x80 in which we store what each block on our map is. 1 represents a wall, 2 for a ladder and 3 for a coin. ''' self.map = [] self.Players = [] self.Enemies = [] self.Allies = [] self.Coins = [] self.Walls = [] self.Ladders = [] self.Fireballs = [] self.Boards = [] self.FireballEndpoints = [] self.resetGroups() self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs) self.playerGroup = pygame.sprite.RenderPlain(self.Players) self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies) self.wallGroup = pygame.sprite.RenderPlain(self.Walls) self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders) self.coinGroup = pygame.sprite.RenderPlain(self.Coins) self.allyGroup = pygame.sprite.RenderPlain(self.Allies) self.fireballEndpointsGroup = pygame.sprite.RenderPlain( self.FireballEndpoints)
The map is essentially an array of 30x80 in which we store what each block on our map is. 1 represents a wall, 2 for a ladder and 3 for a coin.
https://github.com/aydanomachado/mlclass/blob/b6d2cadbc57030b7af8bcad8c14bc0ec86febefa/04_ReinforcementLearning/ple/games/monsterkong/board.py#L22-L75
__author__ = 'Batchu Vishal' import pygame import math import sys import os from .person import Person from .onBoard import OnBoard from .coin import Coin from .player import Player from .fireball import Fireball from .monsterPerson import MonsterPerson class Board(object):
MIT License
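The docstring above defines the tile encoding of the level map (1 = wall, 2 = ladder, 3 = coin). A purely illustrative sketch of querying such a grid; the 30x80 shape comes from the docstring, everything else is invented for the example.

WALL, LADDER, COIN = 1, 2, 3

tile_map = [[0] * 80 for _ in range(30)]   # 30 rows x 80 columns of empty tiles
tile_map[29] = [WALL] * 80                 # solid floor along the bottom row
tile_map[20][10] = LADDER
tile_map[18][40] = COIN

coins_left = sum(row.count(COIN) for row in tile_map)   # 1
can_climb_here = tile_map[20][10] == LADDER             # True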
vincent-lg/tsunami
src/secondaires/navigation/commandes/loch/__init__.py
CmdLoch.__init__
python
def __init__(self): Commande.__init__(self, "loch", "log") self.nom_categorie = "navire" self.aide_courte = "manipule le loch" self.aide_longue = "Cette commande permet d'utiliser le loch présent dans la " "salle pour estimer la vitesse du navire."
Constructor of the command
https://github.com/vincent-lg/tsunami/blob/36b3b974f6eefbf15cd5d5f099fc14630e66570b/src/secondaires/navigation/commandes/loch/__init__.py#L39-L46
from primaires.interpreteur.commande.commande import Commande class CmdLoch(Commande):
BSD 3-Clause New or Revised License
open-risk/concentrationmetrics
concentrationMetrics/model.py
Index.graph_density
python
def graph_density(self, adjacency_matrix): pass
Calculate the Graph Density of an Adjacency Matrix. :param adjacency_matrix: the adjacency matrix of the graph :type adjacency_matrix: matrix :return: D (Float) `Open Risk Manual Entry for Graph Density <https://www.openriskmanual.org/wiki/Graph_Density>`_
https://github.com/open-risk/concentrationmetrics/blob/9f606b858109db210668bb01588843e72d8e3723/concentrationMetrics/model.py#L537-L546
import os import numpy as np import pandas as pd package_name = 'concentrationMetrics' module_path = os.path.dirname(__file__) source_path = module_path dataset_path = source_path + "/datasets/" class Index(object): def __init__(self, data=None, index=None, *args): if data is not None: self.data = data self.index = index self.arguments = args self.results = None results = [] for i in range(data.shape[0]): calc = self.call_method(index, data[i, :], *args) results.append(calc) self.results = results def print(self, cols=None): print(self.index) print('--------') if cols is None: for i in self.results: print(i) else: for i in self.results[:cols]: print(i) def call_method(self, name, data, *args): return getattr(self, name)(data, *args) def total_size(self, data): return np.sum(data) def get_weights(self, data): if len(data.shape) > 1: data = data[:, 0] if not (data >= 0).all(): raise ValueError('Input data vector must have positive values') else: ts = self.total_size(data) if not ts > 0: raise ValueError('Input data vector must have some non-zero values') else: return np.true_divide(data, ts) def cr(self, data, n): if n < 0 or n > data.size: raise ValueError('n must be an positive integer smaller than the data size') else: data = np.array(sorted(data, reverse=True)) weights = self.get_weights(data) return weights[:n].sum() def berger_parker(self, data): return self.cr(data, 1) def hhi(self, data, normalized=True, ci=None, samples=None): weights = self.get_weights(data) n = weights.size if n == 0: return 0 else: h = np.square(weights).sum() if normalized: return (h - 1.0 / n) / (1.0 - 1.0 / n) else: return h def simpson(self, data): return 1.0 - self.hhi(data, normalized=False, ci=None, samples=None) def invsimpson(self, data): return 1.0 / self.hhi(data, normalized=False, ci=None, samples=None) def hk(self, data, a): weights = self.get_weights(data) n = weights.size if n == 0: return 0 else: if a <= 0: raise ValueError('Alpha must be strictly positive') elif a == 1: weights_nz = weights[weights != 0] log_weights = np.log(weights_nz) h = np.multiply(weights_nz, log_weights).sum() return np.exp(h) else: h1 = np.power(weights, a).sum() h2 = np.power(h1, 1.0 / (a - 1.0)) return h2 def hoover(self, data): weights = self.get_weights(data) n = weights.size if n == 0: return 0 else: return 0.5 * np.absolute(weights - 1.0 / n).sum() def hti(self, data): data = np.array(sorted(data, reverse=True)) weights = self.get_weights(data) n = weights.size if n == 0: return 0 else: i = np.arange(1, n + 1) return 1.0 / (2.0 * np.multiply(i, weights).sum() - 1.0) def gini(self, data): data = np.array(sorted(data, reverse=True)) weights = self.get_weights(data) n = weights.size if n == 0: return 0 else: i = np.arange(1, n + 1) return 1.0 + (1.0 - 2.0 * np.multiply(i, weights).sum()) / n def shannon(self, data, normalized=False): weights = self.get_weights(data) weights_nz = weights[weights != 0] n = weights_nz.size if n == 0: return 0 else: log_weights = np.log(weights_nz) h = - np.multiply(weights_nz, log_weights).sum() if normalized: return 1.0 - h / np.log(n) else: return h def atkinson(self, data, epsilon): weights = self.get_weights(data) n = weights.size if n == 0: return 0 else: if epsilon <= 0: raise ValueError('Epsilon must be strictly positive (>0.0)') elif epsilon == 1: weights_nz = weights[weights != 0] n = weights_nz.size log_weights = np.log(weights_nz) h = log_weights.sum() / n return 1 - n * np.exp(h) else: n2 = np.power(n, epsilon / (epsilon - 1.0)) h1 = np.power(weights, 1.0 - epsilon).sum() h2 = 
np.power(h1, 1.0 / (1.0 - epsilon)) return 1 - n2 * h2 def gei(self, data, alpha): weights = self.get_weights(data) n = weights.size if n == 0: return 0 else: if alpha == 0: weights_nz = weights[weights != 0] n = weights_nz.size log_weights = np.log(weights_nz) h = log_weights.sum() / n index = - (np.log(n) + h) elif alpha == 1: weights_nz = weights[weights != 0] n = weights_nz.size log_weights = np.log(weights_nz) h = np.multiply(weights_nz, log_weights).sum() index = np.log(n) + h else: n2 = np.power(n, alpha) h1 = n2 * np.power(weights, alpha).sum() - n index = h1 / n / alpha / (alpha - 1.0) return index def theil(self, data): weights = self.get_weights(data) return self.gei(weights, 1) def kolm(self, data, alpha): n = data.size if n == 0: return 0 else: mu = data.mean() weights = self.get_weights(data) - mu n_weights = np.multiply(alpha, weights) h = np.exp(n_weights).sum() return (np.log(h) - np.log(n)) / alpha def ellison_glaeser(self, data, na, ni): area_groups = data.groupby(['Area']).sum() area_totals = area_groups['Exposure'].values total_exposure = area_totals.sum() xa = area_totals / total_exposure hhi_g = self.hhi(area_totals, normalized=False) industry_groups = data.groupby(['Industry']) hhi_i = [] industry_totals = [] total = 0 eg_indexes = [] s = np.zeros((ni, na)) for industry_index, group in industry_groups: x = group['Exposure'].values i_total = x.sum() total += i_total industry_totals.append(i_total) hhi_i_val = self.hhi(x, normalized=False) hhi_i.append(hhi_i_val) industry_group = pd.DataFrame(group).groupby('Area').sum() ig_values = industry_group.values[:, 0] ai = list(industry_group.index) for a in range(len(ai)): share = ig_values[a] / i_total s[industry_index, ai[a]] = share egi = 0 for a in range(len(ai)): egi += (s[industry_index, a] - xa[a]) ** 2 val = hhi_i[industry_index] gi = egi / (1.0 - hhi_g) / (1 - val) eg_indexes.append(gi) return eg_indexes def compute(self, data, *args, ci=None, samples=None, index='hhi'): value = self.call_method(index, data, *args) if ci is not None: sample_values = [] for s in range(samples): sample_data = np.random.choice(data, size=len(data), replace=True) sample_values.append(self.call_method(index, sample_data, *args)) values = np.array(sample_values) values.sort() lower_bound_index = int((1.0 - ci) * samples) upper_bound_index = int(ci * samples) lower_bound = values[lower_bound_index] upper_bound = values[upper_bound_index] return lower_bound, value, upper_bound else: return value def describe(self, index): print(index) return def margalev(self, data): n = len(data) s = len(list(set(data))) if n == 0: return 0 else: return (s - 1) / np.log(n) def tango(self, data): pass
MIT License
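graph_density above is a stub (its body is pass). As a hedged sketch only, not the library's implementation: the standard density of a directed graph without self-loops is D = E / (N * (N - 1)), with E the edge count and N the node count.

import numpy as np

def graph_density_sketch(adjacency_matrix):
    # Ratio of existing edges to possible edges in a directed graph, ignoring self-loops.
    a = np.asarray(adjacency_matrix)
    n = a.shape[0]
    if n <= 1:
        return 0.0
    edges = np.count_nonzero(a) - np.count_nonzero(np.diag(a))
    return edges / (n * (n - 1))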
rootpy/rootpy
rootpy/memory/deletion.py
monitor_deletion
python
def monitor_deletion(): monitors = {} def set_deleted(x): def _(weakref): del monitors[x] return _ def monitor(item, name): monitors[name] = ref(item, set_deleted(name)) def is_alive(name): return monitors.get(name, None) is not None return monitor, is_alive
Function for checking for correct deletion of weakref-able objects. Example usage:: monitor, is_alive = monitor_deletion() obj = set() monitor(obj, "obj") assert is_alive("obj") # True because there is a ref to `obj` del obj assert not is_alive("obj") # True because `obj` has been deleted
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/memory/deletion.py#L25-L52
from __future__ import absolute_import from weakref import ref import ctypes from ctypes import CFUNCTYPE, py_object, addressof, c_int from .. import compiled as C from .. import QROOT, log from ..utils.cinterface import callback, objectproxy_realaddress __all__ = [ 'monitor_deletion', 'monitor_object_deletion', ]
BSD 3-Clause New or Revised License
sage-bionetworks/synapsepythonclient
synapseutils/walk.py
walk
python
def walk(syn, synId): return _helpWalk(syn, synId)
Traverse through the hierarchy of files and folders stored under the synId. Has the same behavior as os.walk(). :param syn: A Synapse object: syn = synapseclient.login() - must be logged into Synapse :param synId: A Synapse ID of a folder or project Example:: walkedPath = walk(syn, "syn1234") for dirpath, dirname, filename in walkedPath: print(dirpath) print(dirname) # All the folders in the directory path print(filename) # All the files in the directory path
https://github.com/sage-bionetworks/synapsepythonclient/blob/c75185c01f648d3c5c2b797876835b1ec05356c6/synapseutils/walk.py#L5-L23
from synapseclient.entity import is_container import os
Apache License 2.0
ml-tooling/lazydocs
src/lazydocs/generation.py
MarkdownGenerator.module2md
python
def module2md(self, module: types.ModuleType, depth: int = 1) -> str: if _is_object_ignored(module): return "" modname = module.__name__ doc = _doc2md(module) summary = _get_doc_summary(module) path = self._get_src_path(module) found = [] self.generated_objects.append( { "type": "module", "name": modname, "full_name": modname, "module": modname, "anchor_tag": _get_anchor_tag("module-" + modname), "description": summary, } ) classes: List[str] = [] line_nos: List[int] = [] for name, obj in inspect.getmembers(module, inspect.isclass): found.append(name) if ( not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname ): class_markdown = self.class2md(obj, depth=depth + 1) if class_markdown: classes.append(_SEPARATOR + class_markdown) line_nos.append(_get_line_no(obj) or 0) classes = _order_by_line_nos(classes, line_nos) functions: List[str] = [] line_nos = [] for name, obj in inspect.getmembers(module, inspect.isfunction): found.append(name) if ( not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname ): function_md = self.func2md(obj, depth=depth + 1) if function_md: functions.append(_SEPARATOR + function_md) line_nos.append(_get_line_no(obj) or 0) functions = _order_by_line_nos(functions, line_nos) variables: List[str] = [] line_nos = [] for name, obj in module.__dict__.items(): if not name.startswith("_") and name not in found: if hasattr(obj, "__module__") and obj.__module__ != modname: continue if hasattr(obj, "__name__") and not obj.__name__.startswith(modname): continue comments = inspect.getcomments(obj) comments = ": %s" % comments if comments else "" variables.append("- **%s**%s" % (name, comments)) line_nos.append(_get_line_no(obj) or 0) variables = _order_by_line_nos(variables, line_nos) if variables: new_list = ["\n**Global Variables**", "---------------", *variables] variables = new_list markdown = _MODULE_TEMPLATE.format( header=modname, section="#" * depth, doc=doc, global_vars="\n".join(variables) if variables else "", functions="\n".join(functions) if functions else "", classes="".join(classes) if classes else "", ) if path: markdown = _SOURCE_BADGE_TEMPLATE.format(path=path) + markdown return markdown
Takes an imported module object and create a Markdown string containing functions and classes. Args: module (types.ModuleType): Selected module for markdown generation. depth (int, optional): Number of # to append before module heading. Defaults to 1. Returns: str: Markdown documentation for selected module.
https://github.com/ml-tooling/lazydocs/blob/f19810ef020684abc7cac8e705ca1258478cdcbb/src/lazydocs/generation.py#L720-L813
import datetime import importlib import importlib.util import inspect import os import pkgutil import re import subprocess import types from pydoc import locate from typing import Any, Callable, Dict, List, Optional _RE_BLOCKSTART_LIST = re.compile( r"(Args:|Arg:|Arguments:|Parameters:|Kwargs:|Attributes:|Returns:|Yields:|Kwargs:|Raises:).{0,2}$", re.IGNORECASE, ) _RE_BLOCKSTART_TEXT = re.compile(r"(Examples:|Example:|Todo:).{0,2}$", re.IGNORECASE) _RE_QUOTE_TEXT = re.compile(r"(Notes:|Note:).{0,2}$", re.IGNORECASE) _RE_TYPED_ARGSTART = re.compile(r"([\w\[\]_]{1,}?)\s*?\((.*?)\):(.{2,})", re.IGNORECASE) _RE_ARGSTART = re.compile(r"(.{1,}?):(.{2,})", re.IGNORECASE) _IGNORE_GENERATION_INSTRUCTION = "lazydocs: ignore" _SOURCE_BADGE_TEMPLATE = """ <a href="{path}"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a> """ _SEPARATOR = """ --- """ _FUNC_TEMPLATE = """ {section} <kbd>{func_type}</kbd> `{header}` ```python {funcdef} ``` {doc} """ _CLASS_TEMPLATE = """ {section} <kbd>class</kbd> `{header}` {doc} {init} {variables} {handlers} {methods} """ _MODULE_TEMPLATE = """ {section} <kbd>module</kbd> `{header}` {doc} {global_vars} {functions} {classes} """ _OVERVIEW_TEMPLATE = """ # API Overview ## Modules {modules} ## Classes {classes} ## Functions {functions} """ _WATERMARK_TEMPLATE = """ --- _This file was automatically generated via [lazydocs](https://github.com/ml-tooling/lazydocs)._ """ _MKDOCS_PAGES_TEMPLATE = """title: API Reference nav: - Overview: {overview_file} - ... """ def _get_function_signature( function: Callable, owner_class: Any = None, show_module: bool = False, ignore_self: bool = False, wrap_arguments: bool = False, remove_package: bool = False, ) -> str: isclass = inspect.isclass(function) name_parts = [] if show_module: name_parts.append(function.__module__) if owner_class: name_parts.append(owner_class.__name__) if hasattr(function, "__name__"): name_parts.append(function.__name__) else: name_parts.append(type(function).__name__) name_parts.append("__call__") function = function.__call__ name = ".".join(name_parts) if isclass: function = getattr(function, "__init__", None) arguments = [] return_type = "" if hasattr(inspect, "signature"): parameters = inspect.signature(function).parameters if inspect.signature(function).return_annotation != inspect.Signature.empty: return_type = str(inspect.signature(function).return_annotation) if return_type.startswith("<class"): try: return_type = inspect.signature(function).return_annotation.__name__ except Exception: pass return_type = return_type.replace("typing.", "") if remove_package: return_type = re.sub(r"([a-zA-Z0-9_]*?\.)", "", return_type) for parameter in parameters: argument = str(parameters[parameter]) if ignore_self and argument == "self": continue argument = re.sub(r"Union\[(.*?), NoneType\]", r"Optional[\1]", argument) if remove_package: if "=" not in argument: argument = re.sub(r"([a-zA-Z0-9_]*?\.)", "", argument) else: argument_split = argument.split("=") argument_split[0] = re.sub( r"([a-zA-Z0-9_]*?\.)", "", argument_split[0] ) argument = "=".join(argument_split) arguments.append(argument) else: print("Seems like function " + name + " does not have any signature") signature = name + "(" if wrap_arguments: for i, arg in enumerate(arguments): signature += "\n " + arg signature += "," if i is not len(arguments) - 1 else "\n" else: signature += ", ".join(arguments) signature += ")" + ((" → " + return_type) if return_type else "") return signature def 
_order_by_line_nos(objs: Any, line_nos: List[int]) -> List[str]: ordering = sorted(range(len(line_nos)), key=line_nos.__getitem__) return [objs[i] for i in ordering] def to_md_file( markdown_str: str, filename: str, out_path: str = ".", watermark: bool = True, disable_markdownlint: bool = True, ) -> None: if not markdown_str: return md_file = filename if not filename.endswith(".md"): md_file = filename + ".md" if disable_markdownlint: markdown_str = "<!-- markdownlint-disable -->\n" + markdown_str if watermark: markdown_str += _WATERMARK_TEMPLATE.format( date=datetime.date.today().strftime("%d %b %Y") ) print("Writing {}.".format(md_file)) with open(os.path.join(out_path, md_file), "w", encoding="utf-8") as f: f.write(markdown_str) def _code_snippet(snippet: str) -> str: result = "```python\n" result += snippet + "\n" result += "```\n\n" return result def _get_line_no(obj: Any) -> Optional[int]: try: return inspect.getsourcelines(obj)[1] except Exception: return None def _get_class_that_defined_method(meth: Any) -> Any: if inspect.ismethod(meth): for cls in inspect.getmro(meth.__self__.__class__): if cls.__dict__.get(meth.__name__) is meth: return cls meth = meth.__func__ if inspect.isfunction(meth): mod = inspect.getmodule(meth) if mod is None: return None cls = getattr( inspect.getmodule(meth), meth.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0], ) if isinstance(cls, type): return cls return getattr(meth, "__objclass__", None) def _get_docstring(obj: Any) -> str: return "" if obj.__doc__ is None else inspect.getdoc(obj) or "" def _is_object_ignored(obj: Any) -> bool: if ( _IGNORE_GENERATION_INSTRUCTION.replace(" ", "").lower() in _get_docstring(obj).replace(" ", "").lower() ): return True return False def _is_module_ignored(module_name: str, ignored_modules: List[str]) -> bool: if module_name.split(".")[-1].startswith("_"): return True for ignored_module in ignored_modules: if module_name == ignored_module: return True if module_name.startswith(ignored_module + "."): return True return False def _get_src_root_path(obj: Any) -> str: module = obj if not isinstance(obj, types.ModuleType): module = inspect.getmodule(obj) root_package = module.__name__.split(".")[0] return module.__file__.split(root_package)[0] + root_package def _get_doc_summary(obj: Any) -> str: return _get_docstring(obj).split("\n")[0] def _get_anchor_tag(header: str) -> str: anchor_tag = header.strip().lower() anchor_tag = re.compile(r"\s").sub("-", anchor_tag) anchor_tag = re.compile(r"[^a-zA-Z0-9-_]").sub("", anchor_tag) return anchor_tag def _doc2md(obj: Any) -> str: doc = _get_docstring(obj) blockindent = 0 argindent = 1 out = [] arg_list = False literal_block = False md_code_snippet = False quote_block = False for line in doc.split("\n"): indent = len(line) - len(line.lstrip()) if not md_code_snippet and not literal_block: line = line.lstrip() if line.startswith(">>>"): line = line.replace(">>>", "```") + "```" if ( _RE_BLOCKSTART_LIST.match(line) or _RE_BLOCKSTART_TEXT.match(line) or _RE_QUOTE_TEXT.match(line) ): blockindent = indent if quote_block: quote_block = False if literal_block: out.append("```\n") literal_block = False out.append("\n\n**{}**\n".format(line.strip())) arg_list = bool(_RE_BLOCKSTART_LIST.match(line)) if _RE_QUOTE_TEXT.match(line): quote_block = True out.append("\n>") elif line.strip().startswith("```"): if md_code_snippet: md_code_snippet = False else: md_code_snippet = True out.append(line) elif line.strip().endswith("::"): literal_block = True out.append(line.replace("::", 
":\n```")) elif quote_block: out.append(line.strip()) elif line.strip().startswith("-"): out.append("\n" + (" " * indent) + line) elif indent > blockindent: if arg_list and not literal_block and _RE_TYPED_ARGSTART.match(line): out.append( "\n" + " " * blockindent + " - " + _RE_TYPED_ARGSTART.sub(r"<b>`\1`</b> (\2): \3", line) ) argindent = indent elif arg_list and not literal_block and _RE_ARGSTART.match(line): out.append( "\n" + " " * blockindent + " - " + _RE_ARGSTART.sub(r"<b>`\1`</b>: \2", line) ) argindent = indent elif indent > argindent: out.append(" " + line) else: out.append(line) else: if line.strip() and literal_block: line = "```\n" + line literal_block = False out.append(line) if md_code_snippet: out.append("\n") elif not line and not quote_block: out.append("\n\n") elif not line and quote_block: out.append("\n>") else: out.append(" ") return "".join(out) class MarkdownGenerator(object): def __init__( self, src_root_path: Optional[str] = None, src_base_url: Optional[str] = None, remove_package_prefix: bool = False, ): self.src_root_path = src_root_path self.src_base_url = src_base_url self.remove_package_prefix = remove_package_prefix self.generated_objects: List[Dict] = [] def _get_src_path(self, obj: Any, append_base: bool = True) -> str: src_root_path = None if self.src_root_path: src_root_path = os.path.abspath(self.src_root_path) else: return "" try: path = os.path.abspath(inspect.getsourcefile(obj)) except Exception: return "" assert isinstance(path, str) if src_root_path not in path: if hasattr(obj, "__module__"): path = "%s.%s" % (obj.__module__, obj.__name__) else: path = obj.__name__ assert isinstance(path, str) path = path.replace(".", "/") relative_path = os.path.relpath(path, src_root_path) lineno = _get_line_no(obj) lineno_hashtag = "" if lineno is None else "#L{}".format(lineno) relative_path = relative_path + lineno_hashtag if append_base and self.src_base_url: relative_path = os.path.join(self.src_base_url, relative_path) return relative_path def func2md(self, func: Callable, clsname: str = "", depth: int = 3) -> str: if _is_object_ignored(func): return "" section = "#" * depth funcname = func.__name__ modname = None if hasattr(func, "__module__"): modname = func.__module__ escfuncname = ( "%s" % funcname if funcname.startswith("_") else funcname ) full_name = "%s%s" % ("%s." 
% clsname if clsname else "", escfuncname) header = full_name if self.remove_package_prefix: header = escfuncname path = self._get_src_path(func) doc = _doc2md(func) summary = _get_doc_summary(func) funcdef = _get_function_signature( func, ignore_self=True, remove_package=self.remove_package_prefix ) lmax = 80 if len(funcdef) > lmax: funcdef = _get_function_signature( func, ignore_self=True, wrap_arguments=True, remove_package=self.remove_package_prefix, ) if inspect.ismethod(func): func_type = "classmethod" else: if _get_class_that_defined_method(func) is None: func_type = "function" else: func_type = "method" self.generated_objects.append( { "type": func_type, "name": header, "full_name": full_name, "module": modname, "anchor_tag": _get_anchor_tag(func_type + "-" + header), "description": summary, } ) markdown = _FUNC_TEMPLATE.format( section=section, header=header, funcdef=funcdef, func_type=func_type, doc=doc if doc else "*No documentation found.*", ) if path: markdown = _SOURCE_BADGE_TEMPLATE.format(path=path) + markdown return markdown def class2md(self, cls: Any, depth: int = 2) -> str: if _is_object_ignored(cls): return "" section = "#" * depth subsection = "#" * (depth + 2) clsname = cls.__name__ modname = cls.__module__ header = clsname path = self._get_src_path(cls) doc = _doc2md(cls) summary = _get_doc_summary(cls) self.generated_objects.append( { "type": "class", "name": header, "full_name": header, "module": modname, "anchor_tag": _get_anchor_tag("class-" + header), "description": summary, } ) try: if ( hasattr(cls.__init__, "__module__") and cls.__init__.__module__ == modname ): init = self.func2md(cls.__init__, clsname=clsname) else: init = "" except (ValueError, TypeError): init = "" variables = [] for name, obj in inspect.getmembers( cls, lambda a: not (inspect.isroutine(a) or inspect.ismethod(a)) ): if not name.startswith("_") and type(obj) == property: comments = _doc2md(obj) or inspect.getcomments(obj) comments = "\n\n%s" % comments if comments else "" property_name = f"{clsname}.{name}" if self.remove_package_prefix: property_name = name variables.append( _SEPARATOR + "\n%s <kbd>property</kbd> %s%s\n" % (subsection, property_name, comments) ) handlers = [] for name, obj in inspect.getmembers(cls, inspect.ismethoddescriptor): if ( not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname ): handler_name = f"{clsname}.{name}" if self.remove_package_prefix: handler_name = name handlers.append( _SEPARATOR + "\n%s <kbd>handler</kbd> %s\n" % (subsection, handler_name) ) methods = [] for name, obj in inspect.getmembers( cls, lambda a: inspect.ismethod(a) or inspect.isfunction(a) ): if ( not name.startswith("_") and hasattr(obj, "__module__") and name not in handlers and obj.__module__ == modname ): function_md = self.func2md(obj, clsname=clsname, depth=depth + 1) if function_md: methods.append(_SEPARATOR + function_md) markdown = _CLASS_TEMPLATE.format( section=section, header=header, doc=doc if doc else "", init=init, variables="".join(variables), handlers="".join(handlers), methods="".join(methods), ) if path: markdown = _SOURCE_BADGE_TEMPLATE.format(path=path) + markdown return markdown
MIT License
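A hedged usage sketch for module2md above, relying only on names defined in this file (MarkdownGenerator, to_md_file); the import path and the docs/ output directory are assumptions.

import json   # any importable module works for the demo

from lazydocs.generation import MarkdownGenerator, to_md_file

generator = MarkdownGenerator()                    # no src_root_path, so no source badges
markdown = generator.module2md(json, depth=1)
to_md_file(markdown, "json.md", out_path="docs")   # the docs/ directory must already exist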
reservoirpy/reservoirpy
reservoirpy/_esn.py
ESN.train
python
def train(self, inputs: Data, teachers: Data, wash_nr_time_step: int = 0, workers: int = -1, seed: int = None, verbose: bool = False, backend=None, use_memmap=None, return_states: bool = False) -> Sequence[np.ndarray]: inputs, teachers = check_input_lists(inputs, self.dim_in, teachers, self.dim_out) self._dim_out = teachers[0].shape[1] self.model.initialize(self.N, self.dim_out) lengths = [i.shape[0] for i in inputs] steps = sum(lengths) if verbose: print(f"Training on {len(inputs)} inputs ({steps} steps) " f"-- wash: {wash_nr_time_step} steps") def train_fn(*, x, y, pbar): s = self._compute_states(x, y, seed=seed, pbar=pbar) self.model.partial_fit(s[wash_nr_time_step:], y) return s _, states = parallelize(self, train_fn, workers, lengths, return_states, pbar_text="Train", verbose=verbose, x=inputs, y=teachers) self.Wout = self.model.fit() return states
Train the ESN model on a set of input sequences. Parameters ---------- inputs: list of numpy.ndarray List of inputs. Note that it should always be a list of sequences, i.e. if only one sequence (array with rows representing the time axis) of inputs is used, it should be alone in a list. teachers: list of numpy.ndarray List of ground truths. Note that it should always be a list of sequences of the same length as the `inputs`, i.e. if only one sequence of inputs is used, it should be alone in a list. wash_nr_time_step: int Number of states to be considered as transient when training. Transient states will be discarded when computing the readout matrix. By default, no states are removed. workers: int, optional If n >= 1, will enable parallelization of states computation with n threads/processes, if possible. If n = -1, will use all available resources for parallelization. By default, -1. return_states: bool, False by default If `True`, the function will return all the internal states computed during the training. Be warned that this may be too heavy for the memory of your computer. backend: kept for compatibility with previous versions. use_memmap: kept for compatibility with previous versions. verbose: bool, optional Returns ------- list of numpy.ndarray All states computed, for all inputs. Note ---- If only one input sequence is provided ("continuous time" inputs), workers should be 1, because parallelization is impossible. In other cases, if using large NumPy arrays during computation (which is often the case), prefer the `threading` backend to avoid huge overhead. Multiprocessing is a good idea only in very specific cases, and this code is not (yet) well suited for it.
https://github.com/reservoirpy/reservoirpy/blob/4321ea2a1317362ee4190188ab64d3b4ec6ad2ce/reservoirpy/_esn.py#L217-L301
import time import warnings from typing import Sequence, Callable, Tuple, Union from pathlib import Path from functools import partial import numpy as np from joblib import Parallel, delayed from tqdm import tqdm from numpy.random import default_rng, SeedSequence, Generator from .utils.parallel import ParallelProgressQueue, get_joblib_backend, parallelize from .utils.validation import _check_values, add_bias, check_input_lists from .utils.save import _save from .utils.types import Weights, Data, Activation from .regression_models import RidgeRegression, SklearnLinearModel from ._base import _ESNBase def _get_offline_model(ridge: float = 0.0, sklearn_model: Callable = None, dtype: np.dtype = np.float64): if ridge > 0.0 and sklearn_model is not None: raise ValueError("Parameters 'ridge' and 'sklearn_model' can not be " "defined at the same time.") elif sklearn_model is not None: return SklearnLinearModel(sklearn_model, dtype=dtype) else: return RidgeRegression(ridge, dtype=dtype) class ESN(_ESNBase): def __init__(self, lr: float, W: np.ndarray, Win: np.ndarray, input_bias: bool = True, reg_model: Callable = None, ridge: float = 0.0, Wfb: np.ndarray = None, fbfunc: Callable = lambda x: x, noise_in: float = 0.0, noise_rc: float = 0.0, noise_out: float = 0.0, activation: Activation = np.tanh, seed: int = None, typefloat: np.dtype = np.float64): super(ESN, self).__init__(lr, W, Win, input_bias=input_bias, activation=activation, Wfb=Wfb, fbfunc=fbfunc, Wout=None, noise_in=noise_in, noise_rc=noise_rc, noise_out=noise_out, seed=seed, typefloat=typefloat) self.model = _get_offline_model(ridge, reg_model, dtype=typefloat) @property def ridge(self): return getattr(self.model, "ridge", None) @ridge.setter def ridge(self, value): if hasattr(self.model, "ridge"): self.model.ridge = value def fit_readout(self, states: Data, teachers: Data, reg_model: Callable = None, ridge: float = None, force_pinv: bool = False, verbose: bool = False) -> np.ndarray: states, teachers = check_input_lists(states, self.N, teachers, self.dim_out) if (ridge is not None) or (reg_model is not None): offline_model = _get_offline_model(ridge, reg_model, dtype=self.typefloat) elif force_pinv: offline_model = _get_offline_model(ridge=0.0) else: offline_model = self.model _check_values(array_or_list=states, value=None) if verbose: tic = time.time() print("Linear regression...") self.Wout = offline_model.fit(X=states, Y=teachers) if verbose: toc = time.time() print(f"Linear regression done! (in {toc - tic} sec)") return self.Wout
MIT License
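A hedged training sketch for ESN.train above. The package-level import and the random W/Win matrices are assumptions made for illustration (the library normally provides dedicated weight generators); shapes follow the constructor and docstring: with input_bias=True, Win has one extra input column, and inputs/teachers are lists of 2-D sequences.

import numpy as np
from reservoirpy import ESN   # assumption: ESN is exported at package level

rng = np.random.default_rng(42)
n_reservoir, dim_in, dim_out = 100, 3, 1

W = rng.uniform(-0.5, 0.5, (n_reservoir, n_reservoir))    # recurrent weights (illustrative)
Win = rng.uniform(-0.5, 0.5, (n_reservoir, dim_in + 1))   # +1 column for the input bias

esn = ESN(lr=0.3, W=W, Win=Win, input_bias=True, ridge=1e-6)

X = [rng.normal(size=(500, dim_in))]    # one input sequence of 500 steps
Y = [rng.normal(size=(500, dim_out))]   # matching teacher sequence

states = esn.train(X, Y, wash_nr_time_step=100, workers=1, return_states=True)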
amanchadha/iperceive
iPerceiveVideoQA/preprocessing.py
clean_str
python
def clean_str(string): string = re.sub(r"[^A-Za-z0-9(),!?:.\'`]", " ", string) string = re.sub(r"\'s", " \'s", string) string = re.sub(r"\'ve", " \'ve", string) string = re.sub(r"n\'t", " n\'t", string) string = re.sub(r"\'re", " \'re", string) string = re.sub(r"\'d", " \'d", string) string = re.sub(r"\'ll", " \'ll", string) string = re.sub(r"\'m", " \'m", string) string = re.sub(r":", " : ", string) string = re.sub(r",", " , ", string) string = re.sub(r"!", " ! ", string) string = re.sub(r"\.\.\.", " . ", string) string = re.sub(r"\.", " . ", string) string = re.sub(r"\(", " ", string) string = re.sub(r"\)", " ", string) string = re.sub(r"\?", " ? ", string) string = re.sub(r"\s{2,}", " ", string) return string.strip()
Tokenization/string cleaning for strings. Taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
https://github.com/amanchadha/iperceive/blob/7e85bbddfa926b36b7bad97bf459d83d12d72de5/iPerceiveVideoQA/preprocessing.py#L55-L76
__author__ = "Jie Lei" import os import sys import re import math import json import glob import copy import pysrt import numpy as np from tqdm import tqdm from utils import read_json_lines, load_json, save_json def merge_list_dicts(list_dicts): z = list_dicts[0].copy() for i in range(1, len(list_dicts)): z.update(list_dicts[i]) return z def get_vidname2cnt_per_show(base_path): subdirs = [name for name in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, name))] vidname2cnt = {} for ele in tqdm(subdirs): cur_subdir_path = os.path.join(base_path, ele) cur_files = glob.glob(os.path.join(cur_subdir_path, "*jpg")) vidname2cnt[ele] = len(cur_files) return vidname2cnt def get_vidname2cnt_all(frame_root_path, vidname2cnt_cache_path): if os.path.exists(vidname2cnt_cache_path): print("Found frame cnt cache, loading ...") return load_json(vidname2cnt_cache_path) show_names = ["bbt", "friends", "grey", "met", "castle", "house"] vidname2cnt_list = [] for sn in show_names: print("Count frames in %s" % sn) cur_base_path = os.path.join(frame_root_path, "%s_frames" % sn) vidname2cnt_list.append(get_vidname2cnt_per_show(cur_base_path)) vidname2cnt = merge_list_dicts(vidname2cnt_list) save_json(vidname2cnt, vidname2cnt_cache_path) return
MIT License
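An illustrative run of clean_str above, traced from the regex substitutions it applies (note that no lowercasing is performed):

text = "He didn't say (much)... Really?!"
clean_str(text)
# -> "He did n't say much . Really ? !"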
napalm-automation/napalm-yang
napalm_yang/models/openconfig/interfaces/interface/ethernet/state/counters/__init__.py
counters._set_in_fragment_frames
python
def _set_in_fragment_frames(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-fragment-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """in_fragment_frames must be of a type compatible with yang:counter64""", "defined-type": "yang:counter64", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-fragment-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='yang:counter64', is_config=False)""", } ) self.__in_fragment_frames = t if hasattr(self, "_set"): self._set()
Setter method for in_fragment_frames, mapped from YANG variable /interfaces/interface/ethernet/state/counters/in_fragment_frames (yang:counter64) If this variable is read-only (config: false) in the source YANG file, then _set_in_fragment_frames is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_in_fragment_frames() directly. YANG Description: Number of fragment frames received on the interface.
https://github.com/napalm-automation/napalm-yang/blob/9148e015b086ebe311c07deb92e168ea36fd7771/napalm_yang/models/openconfig/interfaces/interface/ethernet/state/counters/__init__.py#L556-L598
from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class counters(PybindBase): __slots__ = ( "_path_helper", "_extmethods", "__in_mac_control_frames", "__in_mac_pause_frames", "__in_oversize_frames", "__in_jabber_frames", "__in_fragment_frames", "__in_8021q_frames", "__in_crc_errors", "__out_mac_control_frames", "__out_mac_pause_frames", "__out_8021q_frames", ) _yang_name = "counters" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__in_mac_control_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-mac-control-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__in_mac_pause_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-mac-pause-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__in_oversize_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-oversize-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__in_jabber_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-jabber-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__in_fragment_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-fragment-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__in_8021q_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-8021q-frames", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__in_crc_errors = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-crc-errors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__out_mac_control_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="out-mac-control-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__out_mac_pause_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="out-mac-pause-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) self.__out_8021q_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="out-8021q-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return ["interfaces", "interface", "ethernet", "state", "counters"] def _get_in_mac_control_frames(self): return self.__in_mac_control_frames def _set_in_mac_control_frames(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-mac-control-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """in_mac_control_frames must be of a type compatible with yang:counter64""", "defined-type": "yang:counter64", "generated-type": 
"""YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-mac-control-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='yang:counter64', is_config=False)""", } ) self.__in_mac_control_frames = t if hasattr(self, "_set"): self._set() def _unset_in_mac_control_frames(self): self.__in_mac_control_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-mac-control-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) def _get_in_mac_pause_frames(self): return self.__in_mac_pause_frames def _set_in_mac_pause_frames(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-mac-pause-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """in_mac_pause_frames must be of a type compatible with yang:counter64""", "defined-type": "yang:counter64", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-mac-pause-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='yang:counter64', is_config=False)""", } ) self.__in_mac_pause_frames = t if hasattr(self, "_set"): self._set() def _unset_in_mac_pause_frames(self): self.__in_mac_pause_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-mac-pause-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) def _get_in_oversize_frames(self): return self.__in_oversize_frames def _set_in_oversize_frames(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-oversize-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """in_oversize_frames must be of a type compatible with yang:counter64""", "defined-type": "yang:counter64", "generated-type": 
"""YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-oversize-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='yang:counter64', is_config=False)""", } ) self.__in_oversize_frames = t if hasattr(self, "_set"): self._set() def _unset_in_oversize_frames(self): self.__in_oversize_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-oversize-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) def _get_in_jabber_frames(self): return self.__in_jabber_frames def _set_in_jabber_frames(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-jabber-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """in_jabber_frames must be of a type compatible with yang:counter64""", "defined-type": "yang:counter64", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="in-jabber-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ethernet', defining_module='openconfig-if-ethernet', yang_type='yang:counter64', is_config=False)""", } ) self.__in_jabber_frames = t if hasattr(self, "_set"): self._set() def _unset_in_jabber_frames(self): self.__in_jabber_frames = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..18446744073709551615"]}, int_size=64, ), is_leaf=True, yang_name="in-jabber-frames", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ethernet", defining_module="openconfig-if-ethernet", yang_type="yang:counter64", is_config=False, ) def _get_in_fragment_frames(self): return self.__in_fragment_frames
Apache License 2.0
pitthsls/pycounter
pycounter/test/conftest.py
jr1_report_xlsx
python
def jr1_report_xlsx(request): return parsedata(request.param)
Excel formatted JR1 reports.
https://github.com/pitthsls/pycounter/blob/35772d31a4eb12378e3a9821125acce53fee2ba2/pycounter/test/conftest.py#L47-L49
import os import pytest from pycounter import csvhelper from pycounter import report import pycounter.sushi def parsedata(filename): return report.parse(os.path.join(os.path.dirname(__file__), "data", filename)) @pytest.fixture(params=["csvC4JR1", "C4JR1.csv", "C4JR1_bad.csv", "C4JR1GOA.csv"]) def csv_jr1_report(request): return parsedata(request.param) @pytest.fixture(params=["simpleJR1.tsv", "tsvC4JR1", "issue_14.tsv"]) def tsv_jr1(request): return parsedata(request.param) @pytest.fixture(params=["csvC4JR1", "C4JR1.csv", "C4JR1_bad.csv"]) def csv_jr1_report_std(request): return parsedata(request.param) @pytest.fixture(params=["csvC4JR1", "C4JR1.csv"]) def csv_jr1_report_common_data(request): return parsedata(request.param) @pytest.fixture(params=["csvC4JR1", "C4JR1.csv", "C4JR1_bad.csv"]) def csv_jr1_r4_report(request): return parsedata(request.param) @pytest.fixture(params=["JR1.xlsx", "JR1_bad.xlsx", "xlsxJR1"])
MIT License
openpathsampling/openpathsampling
openpathsampling/treelogic.py
TreeMixin.__len__
python
def __len__(self): if self._len is None: self._len = len(list(iter(self))) return self._len
Returns the total number of Changes made in a move. Returns ------- int the number of (Sub)MoveChanges in this MoveChange
https://github.com/openpathsampling/openpathsampling/blob/dcee878247bdf627aae96ccbc65d2857d5d155b9/openpathsampling/treelogic.py#L93-L106
__author__ = 'Jan-Hendrik Prinz' class TreeMixin(object): @staticmethod def _indent(s): spl = s.split('\n') spl = [' | ' + p if p[0] == ' ' else ' +- ' + p for p in spl] return '\n'.join(spl) @property def _subnodes(self): return [] def __iter__(self): yield self for subchange in self._subnodes: for change in subchange: yield change def __getitem__(self, item): if type(item) is int: return self._subnodes[item] if type(item) is list: if self._default_match(item[0], self): if len(item) > 1: for ch in self._subnodes: r = ch[item[1]] if r is not None: return r return None else: return self else: return None def __reversed__(self): for subchange in self._subnodes: for change in reversed(subchange): yield change yield self
MIT License
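A minimal illustrative subclass showing how __iter__ and __len__ above walk a tree; the Node class is hypothetical and only provides the _subnodes hook and the _len cache the mixin expects.

class Node(TreeMixin):
    def __init__(self, children=()):
        self.children = list(children)
        self._len = None          # cache read by TreeMixin.__len__

    @property
    def _subnodes(self):
        return self.children

tree = Node([Node(), Node([Node()])])
assert len(tree) == 4             # root + two children + one grandchild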
sean-public/python-hashes
hashes/simhash.py
simhash.create_hash
python
def create_hash(self, tokens): if type(tokens) == str: tokens = tokens.split() v = [0]*self.hashbits for t in [self._string_hash(x) for x in tokens]: bitmask = 0 for i in range(self.hashbits): bitmask = 1 << i if t & bitmask: v[i] += 1 else: v[i] -= 1 fingerprint = 0 for i in range(self.hashbits): if v[i] >= 0: fingerprint += 1 << i self.hash = fingerprint return fingerprint
Calculates a Charikar simhash with the appropriate bit length. Input can be any iterable, but for strings it will automatically break it into words first, assuming you don't want to iterate over the individual characters. Returns the fingerprint so it can be used directly, without initializing a new object. Reference used: http://dsrg.mff.cuni.cz/~holub/sw/shash
https://github.com/sean-public/python-hashes/blob/65bf88df1cc8cc72d09b7b358bdb36d6844f5f27/hashes/simhash.py#L14-L40
from .hashtype import hashtype class simhash(hashtype):
MIT License
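A hedged comparison sketch for create_hash above. It assumes the hashtype base class hashes the constructor argument (as in the published package), so .hash is already populated; near-duplicate texts yield fingerprints with a small Hamming distance.

a = simhash("the quick brown fox jumps over the lazy dog")
b = simhash("the quick brown fox jumped over the lazy dog")   # near-duplicate

hamming = bin(a.hash ^ b.hash).count("1")
print(hamming)   # small relative to hashbits for similar inputs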
maxim5/hyper-engine
hyperengine/model/data_set.py
IterableDataProvider.next_batch
python
def next_batch(self, batch_size): raise NotImplementedError
Returns the next `batch_size` examples from this data set.
https://github.com/maxim5/hyper-engine/blob/743344afa52032d7b2b4a2cfb25caf2190c1a8ac/hyperengine/model/data_set.py#L73-L77
__author__ = 'maxim' import numpy as np class DataProvider(object): pass class IterableDataProvider(DataProvider): def __init__(self): super(IterableDataProvider, self).__init__() self._size = 0 self._step = 0 self._epochs_completed = 0 self._index_in_epoch = 0 self._just_completed = False @property def size(self): return self._size @property def step(self): return self._step @property def index(self): return self._epochs_completed * self._size + self._index_in_epoch @property def index_in_epoch(self): return self._index_in_epoch @property def epochs_completed(self): return self._epochs_completed @property def just_completed(self): return self._just_completed def reset_counters(self): self._step = 0 self._epochs_completed = 0 self._index_in_epoch = 0 self._just_completed = False
Apache License 2.0
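next_batch above is abstract; one possible concrete provider (not the library's) that serves sequential mini-batches from an in-memory array and maintains the epoch counters defined by the base class:

import numpy as np

class ArrayDataProvider(IterableDataProvider):
    def __init__(self, data):
        super(ArrayDataProvider, self).__init__()
        self._data = np.asarray(data)
        self._size = len(self._data)

    def next_batch(self, batch_size):
        start = self._index_in_epoch
        end = min(start + batch_size, self._size)
        batch = self._data[start:end]
        self._step += 1
        self._just_completed = (end == self._size)
        if self._just_completed:
            self._epochs_completed += 1
            self._index_in_epoch = 0
        else:
            self._index_in_epoch = end
        return batch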
opendatacube/odc-tools
apps/dc_tools/odc/apps/dc_tools/export_md.py
get_properties
python
def get_properties(dataset, property_offsets=None): props = dict() props["datetime"] = dataset.center_time props["odc:processing_datetime"] = dataset.indexed_time return {"properties": props}
Extract properties and return values in a dictionary: { 'properties': { 'datetime': time, 'odc:creation_datetime': creation_time, ... } }
https://github.com/opendatacube/odc-tools/blob/23bf317e2798e793f228532ea6e31865fc533fde/apps/dc_tools/odc/apps/dc_tools/export_md.py#L291-L307
from pathlib import Path import yaml import click import logging from datacube.testutils.io import native_geobox from datacube.storage import BandInfo from datacube import Datacube _LOG = logging.getLogger(__name__) @click.group(help=__doc__) @click.option( "--datacube-config", "-c", help="Pass the configuration file to access the database", type=click.Path(exists=True), ) @click.pass_context def cli(ctx, datacube_config): ctx.obj = Datacube(config=datacube_config).index @cli.command() @click.option("--product", required=True, help="Which product?") @click.option( "--config", required=True, help="The configuration file for transformation" ) @click.option( "--output-dir", required=True, help="New metadata yaml file is written to this dir" ) @click.option("--limit", help="maximum number of datasets to process") @click.pass_obj def transform(index, product, config, output_dir, limit): dataset_type = index.products.get_by_name(product) with open(config, "r") as config_file: cfg = yaml.load(config_file) or dict() if dataset_type.grid_spec is not None: transform_ingested_datasets(index, product, cfg, Path(output_dir), limit) else: transform_indexed_datasets(index, product, cfg, Path(output_dir), limit) def transform_ingested_datasets(index, product, config, output_dir, limit): dataset_ids = index.datasets.search_returning( limit=limit, field_names=("id",), product=product ) grids_done = False for dataset_id in dataset_ids: dataset = index.datasets.get(dataset_id.id, include_sources=True) if not dataset.uris: _LOG.warn("Empty uris or No uris (skippins): %s", dataset_id) continue if not grids_done: grids = get_grids(dataset, config.get("grids")) grids_done = all( [[1, 1] != grid["shape"] for _, grid in grids["grids"].items()] ) dataset_sections = (grids,) + _variable_sections_of_metadata(dataset, config) _make_and_write_dataset(get_output_file(dataset, output_dir), *dataset_sections) def transform_indexed_datasets(index, product, config, output_dir, limit): for dataset_id in index.datasets.search_returning( limit=limit, field_names=("id",), product=product ): dataset = index.datasets.get(dataset_id.id, include_sources=True) if not dataset.uris: _LOG.warn("Empty or No uris (skipping): %s", dataset_id) continue grids = get_grids(dataset, config.get("grids")) dataset_sections = (grids,) + _variable_sections_of_metadata(dataset, config) _make_and_write_dataset(get_output_file(dataset, output_dir), *dataset_sections) def get_output_file(dataset, output_dir): out_file_name = str(dataset.id) + "-metadata.yaml" return output_dir / out_file_name def _variable_sections_of_metadata(dataset, config): new_dataset = { "id": str(dataset.id), "crs": "EPSG:" + str(dataset.crs.epsg), "location": [uri for uri in dataset.uris], "file_format": dataset.metadata_doc.get("format", ""), } return ( new_dataset, get_geometry(dataset), get_measurements(dataset, config.get("grids")), get_properties(dataset), get_lineage(dataset), ) def _make_and_write_dataset(out_file_name, *args): dataset = dict() for arg in args: dataset.update(arg) with open(out_file_name, "w") as out_file: yaml.dump(dataset, out_file, default_flow_style=False) def get_geometry(dataset): valid_data = dataset._gs.get("valid_data") return {"geometry": valid_data} if valid_data else dict() def get_grids(dataset, band_grids=None): if not band_grids: shape, trans = get_shape_and_transform(dataset, dataset.measurements) return {"grids": {"default": {"shape": list(shape), "transform": list(trans)}}} else: grids = dict() for grid_name in band_grids: shape, trans = 
get_shape_and_transform(dataset, band_grids[grid_name]) grids[grid_name] = {"shape": list(shape), "transform": list(trans)} if not band_grids.get("default"): specified_bands = set() for grid in band_grids: specified_bands.update(band_grids[grid]) all_bands = set(list(dataset.measurements)) default_bands = all_bands - specified_bands if bool(default_bands): shape, trans = get_shape_and_transform(dataset, default_bands) grids["default"] = {"shape": list(shape), "transform": list(trans)} return {"grids": grids} def get_shape_and_transform(dataset, measurements): for m in measurements: try: geo = native_geobox(dataset, [m]) except Exception: _LOG.warn("Failed to compute shape and transform %s", m) continue return geo.shape, geo.transform _LOG.warn("All measurements failed to compute shape and transform %s", measurements) return [1, 1], dataset.transform def get_measurements(dataset, band_grids=None): grids_map = ( {m: grid for grid in band_grids for m in band_grids[grid] if grid != "default"} if band_grids else dict() ) measurements = dict() for m in dataset.measurements: if m not in dataset.type.measurements: _LOG.warn("Measurement not in product definition (skipping): %s", m) continue band_info = BandInfo(dataset, m) measurements[m] = {"path": dataset.measurements[m]["path"]} if band_info.band and band_info.band != 1: measurements[m]["band"] = band_info.band if band_info.layer is not None: measurements[m]["layer"] = band_info.layer if grids_map.get(m): measurements[m]["grid"] = grids_map[m] return {"measurements": measurements}
Apache License 2.0
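A minimal, hedged sketch of the write-out step used by the datacube metadata transform in the context above (_make_and_write_dataset plus get_output_file): the section dicts, ids, and output directory below are invented for illustration, and PyYAML is assumed to be installed.

from pathlib import Path
import yaml

# Toy metadata sections standing in for the per-dataset fragments built above.
sections = (
    {"id": "0000-0000-0000", "crs": "EPSG:32655"},
    {"grids": {"default": {"shape": [7731, 7621],
                           "transform": [25.0, 0.0, 306000.0, 0.0, -25.0, -1230000.0]}}},
)

dataset = {}
for section in sections:
    dataset.update(section)  # merge all sections into one metadata document

out_file = Path("/tmp") / (dataset["id"] + "-metadata.yaml")
with open(out_file, "w") as out:
    yaml.dump(dataset, out, default_flow_style=False)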
mvantellingen/django-healthchecks
src/django_healthchecks/models.py
HeartbeatMonitorQuerySet.status_by_name
python
def status_by_name(self): t = now() monitors = self.annotate_expires_at().values_list("name", "expires_at") return {name: (expires_at < t) for name, expires_at in monitors}
Return the expired status for every heartbeat.
https://github.com/mvantellingen/django-healthchecks/blob/69a38929737610ba488df1bc496952b81ac9b43b/src/django_healthchecks/models.py#L40-L47
from datetime import timedelta from django.conf import settings from django.db import models from django.db.models import ExpressionWrapper, F from django.utils.timezone import now from django.utils.translation import gettext_lazy as _ EXPIRES_COLUMN_TYPE = models.DateTimeField() IS_EXPIRED_COLUMN_TYPE = models.BooleanField() def _get_default_timeout(): return getattr( settings, "HEALTHCHECKS_DEFAULT_HEARTBEAT_TIMEOUT", timedelta(days=1) ) class HeartbeatMonitorQuerySet(models.QuerySet): def enabled(self): return self.filter(enabled=True) def annotate_expires_at(self): return self.annotate( expires_at=ExpressionWrapper( (F("last_beat") + F("timeout")), output_field=EXPIRES_COLUMN_TYPE ) ) def expired(self): return self.annotate_expires_at().filter(expires_at__lt=now()) def expired_names(self): return list(self.expired().values_list("name", flat=True))
MIT License
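A hedged, Django-free sketch of the expiry rule behind status_by_name in the record above: a heartbeat counts as expired once last_beat + timeout is earlier than now. The tuple shape and monitor names are invented and merely stand in for the annotated queryset rows.

from datetime import datetime, timedelta, timezone

def status_by_name(monitors, now=None):
    # monitors: iterable of (name, last_beat, timeout) tuples (hypothetical shape).
    t = now or datetime.now(timezone.utc)
    return {name: (last_beat + timeout) < t for name, last_beat, timeout in monitors}

current = datetime.now(timezone.utc)
beats = [
    ("nightly-backup", current - timedelta(hours=2), timedelta(days=1)),
    ("hourly-sync", current - timedelta(hours=3), timedelta(hours=1)),
]
print(status_by_name(beats))  # {'nightly-backup': False, 'hourly-sync': True}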
sberbank-ai-lab/lightautoml
lightautoml/text/dl_transformers.py
BertEmbedder.freeze
python
def freeze(self): for param in self.transformer.parameters(): param.requires_grad = False
Freeze module parameters.
https://github.com/sberbank-ai-lab/lightautoml/blob/51a4e2bd0ebffbe0817fb50434280f8e7c40fa4c/lightautoml/text/dl_transformers.py#L437-L441
import gc from copy import deepcopy from typing import Any from typing import Dict from typing import Optional from typing import Sequence import numpy as np import torch import torch.nn as nn from sklearn.base import TransformerMixin from torch.utils.data import DataLoader from tqdm import tqdm try: from transformers import AutoModel except: import warnings warnings.warn("'transformers' - package isn't installed") from .dp_utils import CustomDataParallel from .sentence_pooling import SequenceAvgPooler from .sentence_pooling import SequenceClsPooler from .sentence_pooling import SequenceIndentityPooler from .sentence_pooling import SequenceMaxPooler from .sentence_pooling import SequenceSumPooler from .utils import _dtypes_mapping from .utils import collate_dict from .utils import parse_devices from .utils import seed_everything from .utils import single_text_hash pooling_by_name = { "mean": SequenceAvgPooler, "sum": SequenceSumPooler, "max": SequenceMaxPooler, "cls": SequenceClsPooler, "none": SequenceIndentityPooler, } class DLTransformer(TransformerMixin): _model_params = { "embed_size": 300, "hidden_size": 256, "pooling": "mean", "num_layers": 1, } _loader_params = {"batch_size": 1024, "shuffle": False, "num_workers": 4} _dataset_params = {"embedding_model": None, "max_length": 200, "embed_size": 300} _embedding_model_params = {"model_name": "bert-base-cased"} def _infer_params(self): self.model_params = deepcopy(self._model_params) self.loader_params = deepcopy(self._loader_params) self.dataset_params = deepcopy(self._dataset_params) self.embedding_model_params = deepcopy(self._embedding_model_params) def __init__( self, model, model_params: Dict, dataset, dataset_params: Dict, loader_params: Dict, device: str = "cuda", random_state: int = 42, embedding_model: Optional = None, embedding_model_params: Dict[str, Dict] = None, multigpu: bool = False, verbose: bool = False, ): super(DLTransformer, self).__init__() self._infer_params() self.device, self.device_ids = parse_devices(device, multigpu) self.random_state = random_state self.verbose = verbose seed_everything(random_state) self.model_params.update(model_params) self.model = model(**self.model_params) self.embedding_model = None if embedding_model is not None: if embedding_model_params is not None: self.embedding_model_params.update(embedding_model_params) self.embedding_model = embedding_model(**self.embedding_model_params) self.dataset = dataset self.dataset_params.update(dataset_params) self.loader_params.update(loader_params) def get_name(self) -> str: if self.embedding_model is None: name = self.model.get_name() else: name = self.model.get_name() + "_" + self.embedding_model.get_name() return name def get_out_shape(self) -> int: return self.model.get_out_shape() def fit(self, data: Any): return self @torch.no_grad() def transform(self, data: Sequence[str]) -> np.ndarray: dataset = self.dataset(data, **self.dataset_params) loader = DataLoader(dataset, collate_fn=collate_dict, **self.loader_params) result = [] if self.verbose: loader = tqdm(loader) self.model = self.model.to(self.device) if self.device_ids is not None: self.model = CustomDataParallel(self.model, device_ids=self.device_ids) self.model.eval() if self.embedding_model is not None: self.embedding_model.to(self.device) if self.device_ids is not None: self.embedding_model = CustomDataParallel(self.embedding_model, device_ids=self.device_ids) self.embedding_model.eval() for sample in loader: data = { i: sample[i].long().to(self.device) if _dtypes_mapping[i] == "long" 
else sample[i].to(self.device) for i in sample.keys() } if self.embedding_model is not None: embed = self.embedding_model(data) if "attention_mask" in data: length = torch.sum(data["attention_mask"], dim=1) else: length = (torch.ones(len(embed)) * self.dataset_params["max_length"]).to(self.device).long() data = {"text": embed, "length": length} embed = self.model(data).detach().cpu().numpy() result.append(embed.astype(np.float32)) result = np.vstack(result) self.model.to(torch.device("cpu")) if isinstance(self.model, CustomDataParallel): self.model = self.model.module if self.embedding_model is not None: self.embedding_model.to(torch.device("cpu")) if isinstance(self.embedding_model, CustomDataParallel): self.embedding_model = self.embedding_model.module del loader, dataset, data gc.collect() torch.cuda.empty_cache() return result def position_encoding_init(n_pos: int, embed_size: int) -> torch.Tensor: position_enc = np.array( [ [pos / np.power(10000, 2 * (j // 2) / embed_size) for j in range(embed_size)] if pos != 0 else np.zeros(embed_size) for pos in range(n_pos) ] ) position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) return torch.from_numpy(position_enc).float() class BOREP(nn.Module): name = "BOREP" _poolers = {"max", "mean", "sum"} def __init__( self, embed_size: int = 300, proj_size: int = 300, pooling: str = "mean", max_length: int = 200, init: str = "orthogonal", pos_encoding: bool = False, **kwargs: Any ): super(BOREP, self).__init__() self.embed_size = embed_size self.proj_size = proj_size self.pos_encoding = pos_encoding seed_everything(42) if self.pos_encoding: self.pos_code = position_encoding_init(max_length, self.embed_size).view(1, max_length, self.embed_size) self.pooling = pooling_by_name[pooling]() self.proj = nn.Linear(self.embed_size, self.proj_size, bias=False) if init == "orthogonal": nn.init.orthogonal_(self.proj.weight) elif init == "normal": nn.init.normal_(self.proj.weight, std=0.1) elif init == "uniform": nn.init.uniform_(self.proj.weight, a=-0.1, b=0.1) elif init == "kaiming": nn.init.kaiming_uniform_(self.proj.weight) elif init == "xavier": nn.init.xavier_uniform_(self.proj.weight) def get_out_shape(self) -> int: return self.proj_size def get_name(self) -> str: return self.name @torch.no_grad() def forward(self, inp: Dict[str, torch.Tensor]) -> torch.Tensor: x = inp["text"] batch_size, batch_max_length = x.shape[0], x.shape[1] if self.pos_encoding: x = x + self.pos_code[:, :batch_max_length, :].to(x.device) x = x.contiguous().view(batch_size * batch_max_length, -1) x = self.proj(x) out = x.contiguous().view(batch_size, batch_max_length, -1) x_length = (torch.arange(out.shape[1])[None, :].to(out.device) < inp["length"][:, None])[:, :, None] out = self.pooling(out, x_length) return out class RandomLSTM(nn.Module): name = "RandomLSTM" _poolers = ("max", "mean", "sum") def __init__( self, embed_size: int = 300, hidden_size: int = 256, pooling: str = "mean", num_layers: int = 1, **kwargs: Any ): super(RandomLSTM, self).__init__() if pooling not in self._poolers: raise ValueError("pooling - {} - not in the list of available types {}".format(pooling, self._poolers)) seed_everything(42) self.hidden_size = hidden_size self.lstm = nn.LSTM( embed_size, hidden_size, num_layers=num_layers, bidirectional=True, batch_first=True, ) self.pooling = pooling_by_name[pooling]() def get_out_shape(self) -> int: return self.hidden_size * 2 def get_name(self) -> str: return self.name @torch.no_grad() def forward(self, inp: 
Dict[str, torch.Tensor]) -> torch.Tensor: out, _ = self.lstm(inp["text"]) x_length = (torch.arange(out.shape[1])[None, :].to(out.device) < inp["length"][:, None])[:, :, None] out = self.pooling(out, x_length) return out class BertEmbedder(nn.Module): name = "BertEmb" _poolers = {"cls", "max", "mean", "sum", "none"} def __init__(self, model_name: str, pooling: str = "none", **kwargs: Any): super(BertEmbedder, self).__init__() if pooling not in self._poolers: raise ValueError("pooling - {} - not in the list of available types {}".format(pooling, self._poolers)) self.pooling = pooling_by_name[pooling]() self.model_name = model_name self.transformer = AutoModel.from_pretrained(model_name) def forward(self, inp: Dict[str, torch.Tensor]) -> torch.Tensor: encoded_layers, _ = self.transformer( input_ids=inp["input_ids"], attention_mask=inp["attention_mask"], token_type_ids=inp.get("token_type_ids"), return_dict=False, ) encoded_layers = self.pooling(encoded_layers, inp["attention_mask"].unsqueeze(-1).bool()) return encoded_layers
Apache License 2.0
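A small sketch of the freeze pattern from the BertEmbedder record above, applied to a toy module so it runs without the transformers dependency (assumes PyTorch is installed; nn.Linear merely stands in for the transformer backbone).

import torch.nn as nn

module = nn.Linear(8, 4)  # stand-in for self.transformer
for param in module.parameters():
    param.requires_grad = False  # the optimizer will now skip these weights

print(sum(p.requires_grad for p in module.parameters()))  # 0 -- fully frozen

Freezing this way leaves the forward pass untouched while excluding the parameters from gradient updates.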
akb89/pyfn
pyfn/marshalling/marshallers/semeval.py
marshall_annosets
python
def marshall_annosets(annosets, output_filepath, excluded_frames, excluded_sentences, excluded_annosets): logger.info('Marshalling pyfn.AnnotationSet objects to SEMEVAL XML...') if not annosets: raise InvalidParameterError('Input pyfn.AnnotationSet list is empty') logger.info('Saving output to {}'.format(output_filepath)) _marshall_annosets(annosets, output_filepath, excluded_frames, excluded_sentences, excluded_annosets)
Marshall a list of pyfn.AnnotationSet objects to SEMEVAL XML. annosets: a list of annosets to marshall. output_filepath: the absolute path to the output .xml file excluded_frames: a list of frame #id to exclude from the output excluded_sentences: a list of sentence #id to exclude from the output excluded_annosets: a list of annotationset #id to exclude from the output
https://github.com/akb89/pyfn/blob/848091d55aaa9dbb1eb939cd6c551876f9f4d6f7/pyfn/marshalling/marshallers/semeval.py#L113-L128
import datetime import logging import pytz import lxml.etree as etree import pyfn.utils.filter as f_utils from pyfn.exceptions.parameter import InvalidParameterError __all__ = ['marshall_annosets'] logger = logging.getLogger(__name__) def _add_fe_labels(layers_tag, layer_id, annoset, label_id): fe_layer = etree.SubElement(layers_tag, 'layer') fe_layer.set('ID', str(layer_id)) fe_layer.set('name', 'FE') fe_label_tags = etree.SubElement(fe_layer, 'labels') if 'FE' in annoset.labelstore.labels_by_layer_name: for fe_label in annoset.labelstore.labels_by_layer_name['FE']: if fe_label.start != -1 and fe_label.end != -1: fe_label_tag = etree.SubElement(fe_label_tags, 'label') fe_label_tag.set('ID', str(label_id)) label_id += 1 fe_label_tag.set('name', fe_label.name) fe_label_tag.set('start', str(fe_label.start)) fe_label_tag.set('end', str(fe_label.end)) return label_id def _has_fe_labels(annoset): return 'FE' in annoset.labelstore.labels_by_layer_name def _add_target_labels(layers_tag, layer_id, annoset, label_id): target_layer = etree.SubElement(layers_tag, 'layer') target_layer.set('ID', str(layer_id)) target_layer.set('name', 'Target') target_labels = etree.SubElement(target_layer, 'labels') for target_index in annoset.target.indexes: target_label = etree.SubElement(target_labels, 'label') target_label.set('ID', str(label_id)) target_label.set('name', 'Target') label_id += 1 target_label.set('start', str(target_index[0])) target_label.set('end', str(target_index[1])) return label_id def _get_annoset_tag(annosets_tag, annoset, annoset_id): annoset_tag = etree.SubElement(annosets_tag, 'annotationSet') annoset_tag.set('ID', str(annoset_id)) annoset_tag.set('frameName', annoset.target.lexunit.frame.name) return annoset_tag def _get_sentence_tag(annoset, sentences_tag, sent_id): sentence_tag = etree.SubElement(sentences_tag, 'sentence') sentence_tag.set('ID', str(sent_id)) text = etree.SubElement(sentence_tag, 'text') text.text = annoset.sentence.text return sentence_tag def _marshall_annosets(annosets, output_filepath, excluded_frames, excluded_sentences, excluded_annosets): if not annosets: raise InvalidParameterError('No input annosets to marshall. Check ' 'input parameters and try again.') root = etree.Element('corpus') root.set('XMLCreated', datetime.datetime.now( pytz.utc).strftime('%a %b %d %H:%M:%S %Z %Y')) documents_tag = etree.SubElement(root, 'documents') document_tag = etree.SubElement(documents_tag, 'document') paragraphs_tag = etree.SubElement(document_tag, 'paragraphs') paragraph_tag = etree.SubElement(paragraphs_tag, 'paragraph') sentences_tag = etree.SubElement(paragraph_tag, 'sentences') sent_text = '' sent_id = 0 annoset_id = 1 layer_id = 1 label_id = 1 for annoset in f_utils.filter_and_sort_annosets(annosets, [], excluded_frames, excluded_sentences, excluded_annosets): if annoset.sentence.text != sent_text: sentence = _get_sentence_tag(annoset, sentences_tag, sent_id) sent_id += 1 sent_text = annoset.sentence.text annosets_tag = etree.SubElement(sentence, 'annotationSets') annoset_tag = _get_annoset_tag(annosets_tag, annoset, annoset_id) annoset_id += 1 layers_tag = etree.SubElement(annoset_tag, 'layers') label_id = _add_target_labels(layers_tag, layer_id, annoset, label_id) layer_id += 1 if _has_fe_labels(annoset): label_id = _add_fe_labels(layers_tag, layer_id, annoset, label_id) layer_id += 1 tree = etree.ElementTree(root) tree.write(output_filepath, encoding='UTF-8', xml_declaration=True, pretty_print=True)
MIT License
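A tiny sketch of the lxml pattern _marshall_annosets relies on in the record above: build nested elements under a <corpus> root, set attributes, then serialize with an XML declaration. The nesting is simplified (the paragraphs level is omitted) and the sentence text and file name are invented; lxml is assumed to be installed.

import lxml.etree as etree

root = etree.Element('corpus')
document = etree.SubElement(etree.SubElement(root, 'documents'), 'document')
sentence = etree.SubElement(document, 'sentence')
sentence.set('ID', '0')
etree.SubElement(sentence, 'text').text = 'The cat sat on the mat .'

tree = etree.ElementTree(root)
tree.write('corpus.xml', encoding='UTF-8', xml_declaration=True, pretty_print=True)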
aspose-words-cloud/aspose-words-cloud-python
asposewordscloud/models/fixed_page_save_options_data.py
FixedPageSaveOptionsData.dml_effects_rendering_mode
python
def dml_effects_rendering_mode(self): return self._dml_effects_rendering_mode
Gets the dml_effects_rendering_mode of this FixedPageSaveOptionsData. # noqa: E501 Gets or sets the value determining how DrawingML effects are rendered. { Simplified | None | Fine }. # noqa: E501 :return: The dml_effects_rendering_mode of this FixedPageSaveOptionsData. # noqa: E501 :rtype: str
https://github.com/aspose-words-cloud/aspose-words-cloud-python/blob/abf8fccfed40aa2b09c6cdcaf3f2723e1f412d85/asposewordscloud/models/fixed_page_save_options_data.py#L242-L250
import pprint import re import datetime import six import json class FixedPageSaveOptionsData(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'allow_embedding_post_script_fonts': 'bool', 'custom_time_zone_info_data': 'TimeZoneInfoData', 'dml3_d_effects_rendering_mode': 'str', 'dml_effects_rendering_mode': 'str', 'dml_rendering_mode': 'str', 'file_name': 'str', 'flat_opc_xml_mapping_only': 'bool', 'iml_rendering_mode': 'str', 'save_format': 'str', 'update_created_time_property': 'bool', 'update_fields': 'bool', 'update_last_printed_property': 'bool', 'update_last_saved_time_property': 'bool', 'update_sdt_content': 'bool', 'zip_output': 'bool', 'color_mode': 'str', 'jpeg_quality': 'int', 'metafile_rendering_options': 'MetafileRenderingOptionsData', 'numeral_format': 'str', 'optimize_output': 'bool', 'page_count': 'int', 'page_index': 'int' } attribute_map = { 'allow_embedding_post_script_fonts': 'AllowEmbeddingPostScriptFonts', 'custom_time_zone_info_data': 'CustomTimeZoneInfoData', 'dml3_d_effects_rendering_mode': 'Dml3DEffectsRenderingMode', 'dml_effects_rendering_mode': 'DmlEffectsRenderingMode', 'dml_rendering_mode': 'DmlRenderingMode', 'file_name': 'FileName', 'flat_opc_xml_mapping_only': 'FlatOpcXmlMappingOnly', 'iml_rendering_mode': 'ImlRenderingMode', 'save_format': 'SaveFormat', 'update_created_time_property': 'UpdateCreatedTimeProperty', 'update_fields': 'UpdateFields', 'update_last_printed_property': 'UpdateLastPrintedProperty', 'update_last_saved_time_property': 'UpdateLastSavedTimeProperty', 'update_sdt_content': 'UpdateSdtContent', 'zip_output': 'ZipOutput', 'color_mode': 'ColorMode', 'jpeg_quality': 'JpegQuality', 'metafile_rendering_options': 'MetafileRenderingOptions', 'numeral_format': 'NumeralFormat', 'optimize_output': 'OptimizeOutput', 'page_count': 'PageCount', 'page_index': 'PageIndex' } def __init__(self, allow_embedding_post_script_fonts=None, custom_time_zone_info_data=None, dml3_d_effects_rendering_mode=None, dml_effects_rendering_mode=None, dml_rendering_mode=None, file_name=None, flat_opc_xml_mapping_only=None, iml_rendering_mode=None, save_format=None, update_created_time_property=None, update_fields=None, update_last_printed_property=None, update_last_saved_time_property=None, update_sdt_content=None, zip_output=None, color_mode=None, jpeg_quality=None, metafile_rendering_options=None, numeral_format=None, optimize_output=None, page_count=None, page_index=None): self._allow_embedding_post_script_fonts = None self._custom_time_zone_info_data = None self._dml3_d_effects_rendering_mode = None self._dml_effects_rendering_mode = None self._dml_rendering_mode = None self._file_name = None self._flat_opc_xml_mapping_only = None self._iml_rendering_mode = None self._save_format = None self._update_created_time_property = None self._update_fields = None self._update_last_printed_property = None self._update_last_saved_time_property = None self._update_sdt_content = None self._zip_output = None self._color_mode = None self._jpeg_quality = None self._metafile_rendering_options = None self._numeral_format = None self._optimize_output = None self._page_count = None self._page_index = None self.discriminator = None if allow_embedding_post_script_fonts is not None: self.allow_embedding_post_script_fonts = allow_embedding_post_script_fonts if custom_time_zone_info_data is not None: 
self.custom_time_zone_info_data = custom_time_zone_info_data if dml3_d_effects_rendering_mode is not None: self.dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode if dml_effects_rendering_mode is not None: self.dml_effects_rendering_mode = dml_effects_rendering_mode if dml_rendering_mode is not None: self.dml_rendering_mode = dml_rendering_mode if file_name is not None: self.file_name = file_name if flat_opc_xml_mapping_only is not None: self.flat_opc_xml_mapping_only = flat_opc_xml_mapping_only if iml_rendering_mode is not None: self.iml_rendering_mode = iml_rendering_mode if save_format is not None: self.save_format = save_format if update_created_time_property is not None: self.update_created_time_property = update_created_time_property if update_fields is not None: self.update_fields = update_fields if update_last_printed_property is not None: self.update_last_printed_property = update_last_printed_property if update_last_saved_time_property is not None: self.update_last_saved_time_property = update_last_saved_time_property if update_sdt_content is not None: self.update_sdt_content = update_sdt_content if zip_output is not None: self.zip_output = zip_output if color_mode is not None: self.color_mode = color_mode if jpeg_quality is not None: self.jpeg_quality = jpeg_quality if metafile_rendering_options is not None: self.metafile_rendering_options = metafile_rendering_options if numeral_format is not None: self.numeral_format = numeral_format if optimize_output is not None: self.optimize_output = optimize_output if page_count is not None: self.page_count = page_count if page_index is not None: self.page_index = page_index @property def allow_embedding_post_script_fonts(self): return self._allow_embedding_post_script_fonts @allow_embedding_post_script_fonts.setter def allow_embedding_post_script_fonts(self, allow_embedding_post_script_fonts): self._allow_embedding_post_script_fonts = allow_embedding_post_script_fonts @property def custom_time_zone_info_data(self): return self._custom_time_zone_info_data @custom_time_zone_info_data.setter def custom_time_zone_info_data(self, custom_time_zone_info_data): self._custom_time_zone_info_data = custom_time_zone_info_data @property def dml3_d_effects_rendering_mode(self): return self._dml3_d_effects_rendering_mode @dml3_d_effects_rendering_mode.setter def dml3_d_effects_rendering_mode(self, dml3_d_effects_rendering_mode): allowed_values = ["Basic", "Advanced"] if not dml3_d_effects_rendering_mode.isdigit(): if dml3_d_effects_rendering_mode not in allowed_values: raise ValueError( "Invalid value for `dml3_d_effects_rendering_mode` ({0}), must be one of {1}" .format(dml3_d_effects_rendering_mode, allowed_values)) self._dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode else: self._dml3_d_effects_rendering_mode = allowed_values[int(dml3_d_effects_rendering_mode) if six.PY3 else long(dml3_d_effects_rendering_mode)] @property
MIT License
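The context of the record above shows the validating-setter idiom these generated model classes use (see the dml3_d_effects_rendering_mode setter). Below is a stripped-down, hypothetical sketch of that idiom outside the generated code, with an invented class name and the allowed values taken from the docstring.

class SaveOptions(object):  # hypothetical stand-in for the generated model
    _allowed = ["Simplified", "None", "Fine"]

    def __init__(self):
        self._dml_effects_rendering_mode = None

    @property
    def dml_effects_rendering_mode(self):
        return self._dml_effects_rendering_mode

    @dml_effects_rendering_mode.setter
    def dml_effects_rendering_mode(self, value):
        if value not in self._allowed:
            raise ValueError("Invalid value {0!r}, must be one of {1}".format(value, self._allowed))
        self._dml_effects_rendering_mode = value

opts = SaveOptions()
opts.dml_effects_rendering_mode = "Fine"
print(opts.dml_effects_rendering_mode)  # Fine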
pytorchrl/pytorchrl
pytorchrl/agent/storages/on_policy/ppod_buffer.py
PPODBuffer.check_demo_buffer_capacity
python
def check_demo_buffer_capacity(self): total_demos = len(self.reward_demos) + len(self.value_demos) if total_demos > self.max_demos: for _ in range(min(total_demos - self.max_demos, len(self.value_demos))): del self.value_demos[np.array([p["MaxValue"] for p in self.value_demos]).argmin()] if len(self.reward_demos) > self.max_demos: for _ in range(len(self.reward_demos) - self.max_demos): del self.reward_demos[self.num_loaded_demos]
Check the total number of demos. If the total number of demos exceeds self.max_demos, pop demos.
https://github.com/pytorchrl/pytorchrl/blob/1ea781580f31cf3bbe5b38b8cceca939ed713241/pytorchrl/agent/storages/on_policy/ppod_buffer.py#L538-L563
import os import glob import copy import uuid import torch import numpy as np from collections import defaultdict import pytorchrl as prl from pytorchrl.agent.storages.on_policy.gae_buffer import GAEBuffer as B class PPODBuffer(B): on_policy_data_fields = prl.OnPolicyDataKeys demos_data_fields = prl.DemosDataKeys def __init__(self, size, device, actor, algorithm, envs, frame_stack=1, frame_skip=0, initial_demos_dir=None, target_demos_dir=None, rho=0.1, phi=0.3, gae_lambda=0.95, alpha=10, max_demos=51, save_demo_frequency=10, num_saved_demos=10): super(PPODBuffer, self).__init__( size=size, envs=envs, actor=actor, device=device, algorithm=algorithm, gae_lambda=gae_lambda, ) self.rho = rho self.phi = phi self.alpha = alpha self.initial_rho = rho self.initial_phi = phi self.max_demos = max_demos self.initial_demos_dir = initial_demos_dir self.target_demos_dir = target_demos_dir self.frame_stack = frame_stack self.frame_skip = frame_skip self.reward_demos = [] self.value_demos = [] if initial_demos_dir: self.load_initial_demos() self.reward_threshold = min([d["TotalReward"] for d in self.reward_demos]) if len( self.reward_demos) > 0 else - np.inf else: self.reward_threshold = - np.inf self.potential_demos_val = {"env{}".format(i + 1): - np.inf for i in range(self.num_envs)} self.potential_demos = {"env{}".format(i + 1): defaultdict(list) for i in range(self.num_envs)} self.demos_in_progress = { "env{}".format(i + 1): { "ID": None, "Demo": None, "Step": 0, "DemoLength": -1, "MaxValue": - np.inf, prl.RHS: None, } for i in range(self.num_envs)} self.iter = 0 self.save_demos_every = save_demo_frequency self.num_saved_demos = num_saved_demos @classmethod def create_factory(cls, size, initial_demos_dir=None, target_demos_dir=None, frame_stack=1, frame_skip=0, rho=0.1, phi=0.3, gae_lambda=0.95, alpha=10, max_demos=51, save_demo_frequency=10, num_saved_demos=10): def create_buffer_instance(device, actor, algorithm, envs): return cls(size, device, actor, algorithm, envs, frame_stack, frame_skip, initial_demos_dir, target_demos_dir, rho, phi, gae_lambda, alpha, max_demos, save_demo_frequency, num_saved_demos) return create_buffer_instance def before_gradients(self): print("\nREWARD DEMOS {}, VALUE DEMOS {}, RHO {}, PHI {}, REWARD THRESHOLD {}\n".format( len(self.reward_demos), len(self.value_demos), self.rho, self.phi, self.reward_threshold)) last_tensors = {} step = self.step if self.step != 0 else -1 for k in (prl.OBS, prl.RHS, prl.DONE): if isinstance(self.data[k], dict): last_tensors[k] = {x: self.data[k][x][step] for x in self.data[k]} else: last_tensors[k] = self.data[k][step] with torch.no_grad(): _ = self.actor.get_action(last_tensors[prl.OBS], last_tensors[prl.RHS], last_tensors[prl.DONE]) value_dict = self.actor.get_value(last_tensors[prl.OBS], last_tensors[prl.RHS], last_tensors[prl.DONE]) next_value = value_dict.get("value_net1") next_rhs = value_dict.get("rhs") self.data[prl.VAL][step].copy_(next_value) if isinstance(next_rhs, dict): for x in self.data[prl.RHS]: self.data[prl.RHS][x][step].copy_(next_rhs[x]) else: self.data[prl.RHS][step].copy_(next_rhs) self.compute_returns() self.compute_advantages() self.iter += 1 if self.iter % self.save_demos_every == 0: self.save_demos() def get_num_channels_obs(self, sample): self.num_channels_obs = int(sample[prl.OBS][0].shape[0] // self.frame_stack) def insert_transition(self, sample): if self.size == 0 and self.data[prl.OBS] is None: self.init_tensors(sample) self.get_num_channels_obs(sample) for k in sample: if k not in self.storage_tensors: 
continue if k in (prl.OBS, prl.RHS, prl.DONE): pos = self.step + 1 sample_k = "Next" + k else: pos = self.step sample_k = k if isinstance(sample[k], dict): for x, v in sample[k].items(): self.data[k][x][pos].copy_(sample[sample_k][x]) else: self.data[k][pos].copy_(sample[sample_k]) self.track_potential_demos(sample) for i in range(self.num_envs): if self.demos_in_progress["env{}".format(i + 1)]["Demo"]: demo_step = self.demos_in_progress["env{}".format(i + 1)]["Step"] obs = self.data[prl.OBS][self.step][i].unsqueeze(0) if self.demos_in_progress["env{}".format(i + 1)][prl.RHS]: rhs = self.demos_in_progress["env{}".format(i + 1)][prl.RHS] done = torch.zeros(1, 1).to(self.device) else: obs, rhs, done = self.actor.actor_initial_states(obs) _, _, rhs2, algo_data = self.algo.acting_step(obs, rhs, done) self.data[prl.ACT][self.step][i].copy_(self.demos_in_progress["env{}".format( i + 1)]["Demo"][prl.ACT][demo_step]) self.data[prl.REW][self.step][i].copy_(self.demos_in_progress["env{}".format( i + 1)]["Demo"][prl.REW][demo_step]) self.data[prl.LOGP][self.step][i].copy_(torch.zeros(1)) self.data[prl.VAL][self.step][i].copy_(algo_data[prl.VAL].squeeze()) self.demos_in_progress["env{}".format(i + 1)]["Step"] += 1 self.demos_in_progress["env{}".format(i + 1)][prl.RHS] = rhs2 self.demos_in_progress["env{}".format(i + 1)]["MaxValue"] = max( [algo_data[prl.VAL].item(), self.demos_in_progress["env{}".format(i + 1)]["MaxValue"]]) if demo_step == self.demos_in_progress["env{}".format(i + 1)]["DemoLength"] - 1: if "MaxValue" in self.demos_in_progress["env{}".format(i + 1)]["Demo"].keys(): for value_demo in self.value_demos: if self.demos_in_progress["env{}".format(i + 1)]["Demo"]["ID"] == value_demo["ID"]: value_demo["MaxValue"] = self.demos_in_progress["env{}".format(i + 1)]["MaxValue"] self.sample_demo(env_id=i) else: self.data[prl.DONE][self.step + 1][i].copy_(torch.zeros(1)) obs2 = torch.roll(obs, -self.num_channels_obs, dims=1).squeeze(0) obs2[-self.num_channels_obs:].copy_( self.demos_in_progress["env{}".format(i + 1)]["Demo"][prl.OBS][demo_step + 1].to(self.device)) self.data[prl.OBS][self.step + 1][i].copy_(obs2) for k in self.data[prl.RHS]: self.data[prl.RHS][k][self.step + 1][i].copy_(rhs2[k].squeeze()) elif sample[prl.DONE2][i] == 1.0: self.sample_demo(env_id=i) self.step = (self.step + 1) % self.max_size self.size = min(self.size + 1, self.max_size) def track_potential_demos(self, sample): for i in range(self.num_envs): for tensor in self.demos_data_fields: if tensor in (prl.OBS): self.potential_demos["env{}".format(i + 1)][tensor].append( copy.deepcopy(sample[tensor][i, -self.num_channels_obs:]).cpu().numpy()) else: self.potential_demos["env{}".format(i + 1)][tensor].append( copy.deepcopy(sample[tensor][i]).cpu().numpy()) self.potential_demos_val[i] = max([self.potential_demos_val["env{}".format( i + 1)], sample[prl.VAL][i].item()]) if sample[prl.DONE2][i] == 1.0: potential_demo = {} for tensor in self.demos_data_fields: potential_demo[tensor] = torch.Tensor(np.stack( self.potential_demos["env{}".format(i + 1)][tensor])) episode_reward = potential_demo[prl.REW].sum().item() potential_demo["ID"] = str(uuid.uuid4()) potential_demo["TotalReward"] = episode_reward potential_demo["DemoLength"] = potential_demo[prl.ACT].shape[0] if episode_reward >= self.reward_threshold: self.reward_demos.append(potential_demo) self.check_demo_buffer_capacity() self.anneal_parameters() else: potential_demo["MaxValue"] = self.potential_demos_val[i] total_demos = len(self.reward_demos) + len(self.value_demos) 
value_thresh = - np.float("Inf") if len(self.value_demos) == 0 else min([p["MaxValue"] for p in self.value_demos]) if self.potential_demos_val["env{}".format(i + 1)] >= value_thresh or total_demos < self.max_demos: self.value_demos.append(potential_demo) self.check_demo_buffer_capacity() for tensor in self.demos_data_fields: self.potential_demos["env{}".format(i + 1)][tensor] = [] self.potential_demos_val["env{}".format(i + 1)] = - np.inf def load_initial_demos(self): num_loaded_demos = 0 initial_demos = glob.glob(self.initial_demos_dir + '/*.npz') if len(initial_demos) > self.max_demos: raise ValueError("demo dir contains more than self.max_demos demonstrations") for demo_file in initial_demos: try: demo = np.load(demo_file) new_demo = {k: {} for k in self.demos_data_fields} if demo["FrameSkip"] != self.frame_skip: raise ValueError( "Env and demo with different frame skip!") demo_act = torch.FloatTensor(demo[prl.ACT]) new_demo[prl.ACT] = demo_act demo_obs = torch.FloatTensor(demo[prl.OBS]) new_demo[prl.OBS] = demo_obs demo_rew = torch.FloatTensor(demo[prl.REW]) new_demo[prl.REW] = demo_rew new_demo.update({ "ID": str(uuid.uuid4()), "DemoLength": demo[prl.ACT].shape[0], "TotalReward": demo_rew.sum().item()}) self.reward_demos.append(new_demo) num_loaded_demos += 1 except Exception: print("Failed to load demo!") self.num_loaded_demos = num_loaded_demos print("\nLOADED {} DEMOS".format(num_loaded_demos)) def sample_demo(self, env_id): self.demos_in_progress["env{}".format(env_id + 1)]["Step"] = 0 self.demos_in_progress["env{}".format(env_id + 1)][prl.RHS] = None episode_source = np.random.choice(["reward_demo", "value_demo", "env"], p=[self.rho, self.phi, 1.0 - self.rho - self.phi]) if episode_source == "reward_demo" and len(self.reward_demos) > 0: selected = np.random.choice(range(len(self.reward_demos))) demo = copy.deepcopy(self.reward_demos[selected]) elif episode_source == "value_demo" and len(self.value_demos) > 0: probs = np.array([p["MaxValue"] for p in self.value_demos]) ** self.alpha probs = probs / probs.sum() selected = np.random.choice(range(len(self.value_demos)), p=probs) demo = copy.deepcopy(self.value_demos[selected]) else: demo = None self.demos_in_progress["env{}".format(env_id + 1)]["Demo"] = demo self.data[prl.DONE][self.step + 1][env_id].copy_(torch.ones(1).to(self.device)) for k in self.data[prl.RHS]: self.data[prl.RHS][k][self.step + 1][env_id].fill_(0.0) if demo: self.demos_in_progress["env{}".format(env_id + 1)]["DemoLength"] = demo["DemoLength"] self.demos_in_progress["env{}".format(env_id + 1)]["MaxValue"] = - np.Inf for k in range(self.frame_stack): self.data[prl.OBS][self.step + 1][env_id][ k * self.num_channels_obs:(k + 1) * self.num_channels_obs].copy_( self.demos_in_progress["env{}".format(env_id + 1)]["Demo"][prl.OBS][0].to(self.device)) else: self.data[prl.OBS][self.step + 1][env_id].copy_(self.envs.reset_single_env(env_id=env_id).squeeze()) for tensor in self.demos_data_fields: self.potential_demos["env{}".format(env_id + 1)][tensor] = [] self.potential_demos_val["env{}".format(env_id + 1)] = - np.inf def anneal_parameters(self): if 0.0 < self.rho < 1.0 and len(self.value_demos) > 0: self.rho += self.initial_phi / len(self.value_demos) self.rho = np.clip(self.rho, 0.0, self.initial_rho + self.initial_phi) if 0.0 < self.phi < 1.0 and len(self.value_demos) > 0: self.phi -= self.initial_phi / len(self.value_demos) self.phi = np.clip(self.phi, 0.0, self.initial_rho + self.initial_phi)
MIT License
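A runnable sketch of the capacity rule that check_demo_buffer_capacity (the function in the record above) enforces: once reward and value demos together exceed max_demos, value demos with the lowest recorded MaxValue are dropped first. The demo dicts and the threshold below are invented.

import numpy as np

max_demos = 3
reward_demos = [{"ID": "r1"}, {"ID": "r2"}]
value_demos = [{"ID": "v1", "MaxValue": 0.2}, {"ID": "v2", "MaxValue": 0.9}]

total = len(reward_demos) + len(value_demos)
for _ in range(min(max(total - max_demos, 0), len(value_demos))):
    worst = np.argmin([d["MaxValue"] for d in value_demos])
    del value_demos[worst]

print([d["ID"] for d in value_demos])  # ['v2'] -- the lowest-value demo was dropped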
academysoftwarefoundation/opencue
pycue/opencue/wrappers/filter.py
Action.value
python
def value(self): valueType = self.data.value_type if valueType == filter_pb2.GROUP_TYPE: return self.data.group_value if valueType == filter_pb2.STRING_TYPE: return self.data.string_value if valueType == filter_pb2.INTEGER_TYPE: return self.data.integer_value if valueType == filter_pb2.FLOAT_TYPE: return self.data.float_value if valueType == filter_pb2.BOOLEAN_TYPE: return self.data.boolean_value return None
Returns the value of the action; what will happen if the filter is matched. Type of value returned depends on the action's value_type. :rtype: str/int/float/bool :return: value of the action
https://github.com/academysoftwarefoundation/opencue/blob/da28ae905b81e7d1125db2073a369fdc0ae9acd4/pycue/opencue/wrappers/filter.py#L329-L348
import enum from opencue import Cuebot from opencue.compiled_proto import filter_pb2 from opencue.compiled_proto import job_pb2 from opencue.compiled_proto.filter_pb2 import Action as ActionData from opencue.compiled_proto.filter_pb2 import ActionType from opencue.compiled_proto.filter_pb2 import ActionValueType from opencue.compiled_proto.filter_pb2 import Filter as FilterData from opencue.compiled_proto.filter_pb2 import FilterType from opencue.compiled_proto.filter_pb2 import MatchSubject from opencue.compiled_proto.filter_pb2 import MatchType from opencue.compiled_proto.filter_pb2 import Matcher as MatcherData import opencue.wrappers.group __all__ = ["Filter", "Action", "Matcher", "FilterData", "ActionData", "MatcherData", "FilterType", "ActionType", "MatchType", "ActionValueType", "MatchSubject"] class Filter(object): class FilterType(enum.IntEnum): MATCH_ANY = filter_pb2.MATCH_ANY MATCH_ALL = filter_pb2.MATCH_ALL def __init__(self, filter=None): self.data = filter self.stub = Cuebot.getStub('filter') def __eq__(self, other): if not isinstance(other, self.__class__): return False return self.data == other.data def delete(self): self.stub.Delete(filter_pb2.FilterDeleteRequest(filter=self.data), timeout=Cuebot.Timeout) def createMatcher(self, subject, matchType, query): matcher = MatcherData( subject=subject, type=matchType, input=query.replace(' ', '') ) return Matcher(self.stub.CreateMatcher( filter_pb2.FilterCreateMatcherRequest(filter=self.data, data=matcher), timeout=Cuebot.Timeout).matcher) def createAction(self, actionType, value): action = ActionData( type=actionType, group_value=None, string_value=None, integer_value=0, float_value=0.0, boolean_value=False ) if isinstance(value, opencue.wrappers.group.Group): action.value_type = filter_pb2.GROUP_TYPE action.group_value = value.id() elif isinstance(value, str): action.value_type = filter_pb2.STRING_TYPE action.string_value = value elif isinstance(value, bool): action.value_type = filter_pb2.BOOLEAN_TYPE action.boolean_value = value elif isinstance(value, int): action.value_type = filter_pb2.INTEGER_TYPE action.integer_value = value elif isinstance(value, float): action.value_type = filter_pb2.FLOAT_TYPE action.float_value = value else: action.value_type = filter_pb2.NONE_TYPE return Action(self.stub.CreateAction( filter_pb2.FilterCreateActionRequest(filter=self.data, data=action), timeout=Cuebot.Timeout).action) def getActions(self): response = self.stub.GetActions(filter_pb2.FilterGetActionsRequest(filter=self.data), timeout=Cuebot.Timeout) return [Action(action) for action in response.actions.actions] def getMatchers(self): response = self.stub.GetMatchers(filter_pb2.FilterGetMatchersRequest(filter=self.data), timeout=Cuebot.Timeout) return [Matcher(matcher) for matcher in response.matchers.matchers] def lowerOrder(self): self.stub.LowerOrder(filter_pb2.FilterLowerOrderRequest(filter=self.data), timeout=Cuebot.Timeout) def raiseOrder(self): self.stub.RaiseOrder(filter_pb2.FilterRaiseOrderRequest(filter=self.data), timeout=Cuebot.Timeout) def orderFirst(self): self.stub.OrderFirst(filter_pb2.FilterOrderFirstRequest(filter=self.data), timeout=Cuebot.Timeout) def orderLast(self): self.stub.OrderLast(filter_pb2.FilterOrderLastRequest(filter=self.data), timeout=Cuebot.Timeout) def runFilterOnGroup(self, group): self.stub.RunFilterOnGroup( filter_pb2.FilterRunFilterOnGroupRequest(filter=self.data, group=group.data), timeout=Cuebot.Timeout) def runFilterOnJobs(self, jobs): jobSeq = job_pb2.JobSeq(jobs=[job.data for job in jobs]) 
self.stub.RunFilterOnJobs( filter_pb2.FilterRunFilterOnJobsRequest(filter=self.data, jobs=jobSeq), timeout=Cuebot.Timeout) def setEnabled(self, value): self.stub.SetEnabled(filter_pb2.FilterSetEnabledRequest(filter=self.data, enabled=value), timeout=Cuebot.Timeout) def setName(self, name): self.stub.SetName(filter_pb2.FilterSetNameRequest(filter=self.data, name=name), timeout=Cuebot.Timeout) def setType(self, filterType): self.stub.SetType(filter_pb2.FilterSetTypeRequest(filter=self.data, type=filterType), timeout=Cuebot.Timeout) def setOrder(self, order): self.stub.SetOrder(filter_pb2.FilterSetOrderRequest(filter=self.data, order=order), timeout=Cuebot.Timeout) def name(self): return self.data.name def type(self): return self.data.type def order(self): return self.data.order def isEnabled(self): return self.data.enabled def id(self): return self.data.id class Action(object): class ActionType(enum.IntEnum): MOVE_JOB_TO_GROUP = filter_pb2.MOVE_JOB_TO_GROUP PAUSE_JOB = filter_pb2.PAUSE_JOB SET_JOB_MIN_CORES = filter_pb2.SET_JOB_MIN_CORES SET_JOB_MAX_CORES = filter_pb2.SET_JOB_MAX_CORES STOP_PROCESSING = filter_pb2.STOP_PROCESSING SET_JOB_PRIORITY = filter_pb2.SET_JOB_PRIORITY SET_ALL_RENDER_LAYER_TAGS = filter_pb2.SET_ALL_RENDER_LAYER_TAGS SET_ALL_RENDER_LAYER_MEMORY = filter_pb2.SET_ALL_RENDER_LAYER_MEMORY SET_ALL_RENDER_LAYER_CORES = filter_pb2.SET_ALL_RENDER_LAYER_CORES SET_MEMORY_OPTIMIZER = filter_pb2.SET_MEMORY_OPTIMIZER class ActionValueType(enum.IntEnum): GROUP_TYPE = filter_pb2.GROUP_TYPE STRING_TYPE = filter_pb2.STRING_TYPE INTEGER_TYPE = filter_pb2.INTEGER_TYPE FLOAT_TYPE = filter_pb2.FLOAT_TYPE BOOLEAN_TYPE = filter_pb2.BOOLEAN_TYPE NONE_TYPE = filter_pb2.NONE_TYPE def __init__(self, action=None): self.data = action self.stub = Cuebot.getStub('action') def getParentFilter(self): response = self.stub.GetParentFilter( filter_pb2.ActionGetParentFilterRequest(action=self.data), timeout=Cuebot.Timeout) return Filter(response.filter) def delete(self): self.stub.Delete(filter_pb2.ActionDeleteRequest(action=self.data), timeout=Cuebot.Timeout) def commit(self): if self.isNew(): raise Exception( "unable to commit action that has not been created, proxy does not exist") self.stub.Commit(filter_pb2.ActionCommitRequest(action=self.data), timeout=Cuebot.Timeout) def isNew(self): return self.data is None def name(self): if self.value() is None: return "%s" % ActionType.Name(self.type()) return "%s %s" % (ActionType.Name(self.type()), self.value())
Apache License 2.0
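Action.value in the record above reads a tagged union: value_type selects which typed slot carries the payload. Below is a hedged toy version of that dispatch with invented constants and a plain class instead of the real protobuf messages.

GROUP_TYPE, STRING_TYPE, INTEGER_TYPE, FLOAT_TYPE, BOOLEAN_TYPE = range(5)

class ToyAction:
    def __init__(self, value_type, **slots):
        self.value_type = value_type
        self.group_value = slots.get("group_value")
        self.string_value = slots.get("string_value")
        self.integer_value = slots.get("integer_value", 0)
        self.float_value = slots.get("float_value", 0.0)
        self.boolean_value = slots.get("boolean_value", False)

def action_value(action):
    # Mirror of the if-chain in Action.value: pick the slot named by value_type.
    slot_by_type = {
        GROUP_TYPE: action.group_value,
        STRING_TYPE: action.string_value,
        INTEGER_TYPE: action.integer_value,
        FLOAT_TYPE: action.float_value,
        BOOLEAN_TYPE: action.boolean_value,
    }
    return slot_by_type.get(action.value_type)

print(action_value(ToyAction(INTEGER_TYPE, integer_value=100)))  # 100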
azogue/hass_config
custom_components/media_player/mykodi.py
KodiDevice.async_get_albums
python
def async_get_albums(self, artist_id=None): if artist_id is None: return (yield from self.server.AudioLibrary.GetAlbums()) return (yield from self.server.AudioLibrary.GetAlbums( {"filter": {"artistid": int(artist_id)}}))
Get albums list.
https://github.com/azogue/hass_config/blob/86113e4f1db0e785fa9a095add241f423efb8403/custom_components/media_player/mykodi.py#L863-L869
import asyncio from collections import OrderedDict from functools import wraps import logging import urllib import re import os import aiohttp import voluptuous as vol from homeassistant.config import load_yaml_config_file from homeassistant.components.media_player import ( SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_PLAY_MEDIA, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_PLAY, SUPPORT_VOLUME_STEP, SUPPORT_SHUFFLE_SET, MediaPlayerDevice, PLATFORM_SCHEMA, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO, MEDIA_TYPE_PLAYLIST, MEDIA_PLAYER_SCHEMA, DOMAIN, SUPPORT_TURN_ON) from homeassistant.const import ( STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME, CONF_PORT, CONF_PROXY_SSL, CONF_USERNAME, CONF_PASSWORD, CONF_TIMEOUT, EVENT_HOMEASSISTANT_STOP) from homeassistant.core import callback from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers import script, config_validation as cv from homeassistant.helpers.template import Template from homeassistant.util.yaml import dump REQUIREMENTS = ['jsonrpc-async==0.6', 'jsonrpc-websocket==0.5'] _LOGGER = logging.getLogger(__name__) EVENT_KODI_CALL_METHOD_RESULT = 'kodi_call_method_result' CONF_USE_OFF = 'use_off_mode' CONF_TCP_PORT = 'tcp_port' CONF_TURN_ON_ACTION = 'turn_on_action' CONF_TURN_OFF_ACTION = 'turn_off_action' CONF_ENABLE_WEBSOCKET = 'enable_websocket' DEFAULT_NAME = 'Kodi' DEFAULT_PORT = 8080 DEFAULT_TCP_PORT = 9090 DEFAULT_TIMEOUT = 5 DEFAULT_PROXY_SSL = False DEFAULT_ENABLE_WEBSOCKET = True DEPRECATED_TURN_OFF_ACTIONS = { None: None, 'quit': 'Application.Quit', 'hibernate': 'System.Hibernate', 'suspend': 'System.Suspend', 'reboot': 'System.Reboot', 'shutdown': 'System.Shutdown' } MEDIA_TYPES = { 'music': MEDIA_TYPE_MUSIC, 'artist': MEDIA_TYPE_MUSIC, 'album': MEDIA_TYPE_MUSIC, 'song': MEDIA_TYPE_MUSIC, 'video': MEDIA_TYPE_VIDEO, 'set': MEDIA_TYPE_PLAYLIST, 'musicvideo': MEDIA_TYPE_VIDEO, 'movie': MEDIA_TYPE_VIDEO, 'tvshow': MEDIA_TYPE_TVSHOW, 'season': MEDIA_TYPE_TVSHOW, 'episode': MEDIA_TYPE_TVSHOW, } SUPPORT_KODI = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_SHUFFLE_SET | SUPPORT_PLAY | SUPPORT_VOLUME_STEP PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_TCP_PORT, default=DEFAULT_TCP_PORT): cv.port, vol.Optional(CONF_PROXY_SSL, default=DEFAULT_PROXY_SSL): cv.boolean, vol.Optional(CONF_TURN_ON_ACTION, default=None): cv.SCRIPT_SCHEMA, vol.Optional(CONF_TURN_OFF_ACTION): vol.Any(cv.SCRIPT_SCHEMA, vol.In(DEPRECATED_TURN_OFF_ACTIONS)), vol.Optional(CONF_USE_OFF, default=False): cv.boolean, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Inclusive(CONF_USERNAME, 'auth'): cv.string, vol.Inclusive(CONF_PASSWORD, 'auth'): cv.string, vol.Optional(CONF_ENABLE_WEBSOCKET, default=DEFAULT_ENABLE_WEBSOCKET): cv.boolean, }) SERVICE_ADD_MEDIA = 'kodi_add_to_playlist' SERVICE_CALL_METHOD = 'kodi_call_method' DATA_KODI = 'kodi' ATTR_MEDIA_TYPE = 'media_type' ATTR_MEDIA_NAME = 'media_name' ATTR_MEDIA_ARTIST_NAME = 'artist_name' ATTR_MEDIA_ID = 'media_id' ATTR_METHOD = 'method' MEDIA_PLAYER_ADD_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({ vol.Required(ATTR_MEDIA_TYPE): cv.string, vol.Optional(ATTR_MEDIA_ID): 
cv.string, vol.Optional(ATTR_MEDIA_NAME): cv.string, vol.Optional(ATTR_MEDIA_ARTIST_NAME): cv.string, }) MEDIA_PLAYER_CALL_METHOD_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({ vol.Required(ATTR_METHOD): cv.string, }, extra=vol.ALLOW_EXTRA) SERVICE_TO_METHOD = { SERVICE_ADD_MEDIA: { 'method': 'async_add_media_to_playlist', 'schema': MEDIA_PLAYER_ADD_MEDIA_SCHEMA}, SERVICE_CALL_METHOD: { 'method': 'async_call_method', 'schema': MEDIA_PLAYER_CALL_METHOD_SCHEMA}, } def _check_deprecated_turn_off(hass, turn_off_action): if isinstance(turn_off_action, str): method = DEPRECATED_TURN_OFF_ACTIONS[turn_off_action] new_config = OrderedDict( [('service', '{}.{}'.format(DOMAIN, SERVICE_CALL_METHOD)), ('data_template', OrderedDict( [('entity_id', '{{ entity_id }}'), ('method', method)]))]) example_conf = dump(OrderedDict( [(CONF_TURN_OFF_ACTION, new_config)])) _LOGGER.warning( "The '%s' action for turn off Kodi is deprecated and " "will cease to function in a future release. You need to " "change it for a generic Home Assistant script sequence, " "which is, for this turn_off action, like this:\n%s", turn_off_action, example_conf) new_config['data_template'] = OrderedDict( [(key, Template(value, hass)) for key, value in new_config['data_template'].items()]) turn_off_action = [new_config] return turn_off_action @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): if DATA_KODI not in hass.data: hass.data[DATA_KODI] = [] name = config.get(CONF_NAME) host = config.get(CONF_HOST) port = config.get(CONF_PORT) tcp_port = config.get(CONF_TCP_PORT) encryption = config.get(CONF_PROXY_SSL) websocket = config.get(CONF_ENABLE_WEBSOCKET) entity = KodiDevice( hass, name=name, host=host, port=port, tcp_port=tcp_port, encryption=encryption, username=config.get(CONF_USERNAME), password=config.get(CONF_PASSWORD), turn_on_action=config.get(CONF_TURN_ON_ACTION), turn_off_action=config.get(CONF_TURN_OFF_ACTION), use_off_mode=config.get(CONF_USE_OFF), timeout=config.get(CONF_TIMEOUT), websocket=websocket) hass.data[DATA_KODI].append(entity) async_add_devices([entity], update_before_add=True) @asyncio.coroutine def async_service_handler(service): method = SERVICE_TO_METHOD.get(service.service) if not method: return params = {key: value for key, value in service.data.items() if key != 'entity_id'} entity_ids = service.data.get('entity_id') if entity_ids: target_players = [player for player in hass.data[DATA_KODI] if player.entity_id in entity_ids] else: target_players = hass.data[DATA_KODI] update_tasks = [] for player in target_players: yield from getattr(player, method['method'])(**params) for player in target_players: if player.should_poll: update_coro = player.async_update_ha_state(True) update_tasks.append(update_coro) if update_tasks: yield from asyncio.wait(update_tasks, loop=hass.loop) if hass.services.has_service(DOMAIN, SERVICE_ADD_MEDIA): return descriptions = yield from hass.async_add_job( load_yaml_config_file, os.path.join( os.path.dirname(__file__), 'services.yaml')) for service in SERVICE_TO_METHOD: schema = SERVICE_TO_METHOD[service]['schema'] hass.services.async_register( DOMAIN, service, async_service_handler, description=descriptions.get(service), schema=schema) def cmd(func): @wraps(func) @asyncio.coroutine def wrapper(obj, *args, **kwargs): import jsonrpc_base try: yield from func(obj, *args, **kwargs) except jsonrpc_base.jsonrpc.TransportError as exc: if obj.state == STATE_OFF: log_function = _LOGGER.info else: log_function = _LOGGER.error log_function("Error calling 
%s on entity %s: %r", func.__name__, obj.entity_id, exc) return wrapper class KodiDevice(MediaPlayerDevice): def __init__(self, hass, name, host, port, tcp_port, encryption=False, username=None, password=None, turn_on_action=None, turn_off_action=None, use_off_mode=False, timeout=DEFAULT_TIMEOUT, websocket=True): import jsonrpc_async import jsonrpc_websocket self.hass = hass self._name = name kwargs = { 'timeout': timeout, 'session': async_get_clientsession(hass), } if username is not None: kwargs['auth'] = aiohttp.BasicAuth(username, password) image_auth_string = "{}:{}@".format(username, password) else: image_auth_string = "" http_protocol = 'https' if encryption else 'http' ws_protocol = 'wss' if encryption else 'ws' self._http_url = '{}://{}:{}/jsonrpc'.format(http_protocol, host, port) self._image_url = '{}://{}{}:{}/image'.format( http_protocol, image_auth_string, host, port) self._ws_url = '{}://{}:{}/jsonrpc'.format(ws_protocol, host, tcp_port) self._http_server = jsonrpc_async.Server(self._http_url, **kwargs) if websocket: self._ws_server = jsonrpc_websocket.Server(self._ws_url, **kwargs) self._ws_server.Player.OnPause = self.async_on_speed_event self._ws_server.Player.OnPlay = self.async_on_speed_event self._ws_server.Player.OnSpeedChanged = self.async_on_speed_event self._ws_server.Player.OnStop = self.async_on_stop self._ws_server.Application.OnVolumeChanged = self.async_on_volume_changed self._ws_server.System.OnQuit = self.async_on_quit self._ws_server.System.OnRestart = self.async_on_quit self._ws_server.System.OnSleep = self.async_on_quit def on_hass_stop(event): self.hass.async_add_job(self._ws_server.close()) self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STOP, on_hass_stop) else: self._ws_server = None if turn_on_action is not None: turn_on_action = script.Script( self.hass, turn_on_action, "{} turn ON script".format(self.name), self.async_update_ha_state(True)) if turn_off_action is not None: turn_off_action = script.Script( self.hass, _check_deprecated_turn_off(hass, turn_off_action), "{} turn OFF script".format(self.name)) self._turn_on_action = turn_on_action self._turn_off_action = turn_off_action self._use_off_mode = use_off_mode self._flag_switch_off = False self._enable_websocket = websocket self._players = list() self._properties = {} self._item = {} self._app_properties = {} @callback def async_on_speed_event(self, sender, data): self._properties['speed'] = data['player']['speed'] if not hasattr(data['item'], 'id'): force_refresh = True else: force_refresh = data['item']['id'] != self._item.get('id') self.async_schedule_update_ha_state(force_refresh) @callback def async_on_stop(self, sender, data): if self._players is None: return self._players = [] self._properties = {} self._item = {} self.async_schedule_update_ha_state() @callback def async_on_volume_changed(self, sender, data): self._app_properties['volume'] = data['volume'] self._app_properties['muted'] = data['muted'] self.async_schedule_update_ha_state() @callback def async_on_quit(self, sender, data): self._flag_switch_off = True self._players = None self._properties = {} self._item = {} self._app_properties = {} self.hass.async_add_job(self._ws_server.close()) @asyncio.coroutine def _get_players(self): import jsonrpc_base try: return (yield from self.server.Player.GetActivePlayers()) except jsonrpc_base.jsonrpc.TransportError: if self._players is not None: _LOGGER.info("Unable to fetch kodi data") _LOGGER.debug("Unable to fetch kodi data", exc_info=True) return None @property def state(self): if 
self._players is None: return STATE_OFF if not self._players and self._flag_switch_off and self._use_off_mode: return STATE_OFF elif not self._players: return STATE_IDLE self._flag_switch_off = False if self._properties['speed'] == 0 and not self._properties['live']: return STATE_PAUSED return STATE_PLAYING @asyncio.coroutine def async_ws_connect(self): import jsonrpc_base try: ws_loop_future = yield from self._ws_server.ws_connect() except jsonrpc_base.jsonrpc.TransportError: _LOGGER.info("Unable to connect to Kodi via websocket") _LOGGER.debug( "Unable to connect to Kodi via websocket", exc_info=True) return @asyncio.coroutine def ws_loop_wrapper(): try: yield from ws_loop_future except jsonrpc_base.TransportError: pass self.async_schedule_update_ha_state() self.hass.loop.create_task(ws_loop_wrapper()) @asyncio.coroutine def async_update(self): self._players = yield from self._get_players() if self._players is None: self._properties = {} self._item = {} self._app_properties = {} return if self._enable_websocket and not self._ws_server.connected: self.hass.async_add_job(self.async_ws_connect()) self._app_properties = yield from self.server.Application.GetProperties( ['volume', 'muted'] ) if self._players: player_id = self._players[0]['playerid'] assert isinstance(player_id, int) self._properties = yield from self.server.Player.GetProperties( player_id, ['time', 'totaltime', 'speed', 'live'] ) self._item = (yield from self.server.Player.GetItem( player_id, ['title', 'file', 'uniqueid', 'thumbnail', 'artist', 'albumartist', 'showtitle', 'album', 'season', 'episode'] ))['item'] else: self._properties = {} self._item = {} self._app_properties = {} @property def server(self): if self._enable_websocket and self._ws_server.connected: return self._ws_server return self._http_server @property def name(self): return self._name @property def should_poll(self): return not (self._enable_websocket and self._ws_server.connected) @property def volume_level(self): if 'volume' in self._app_properties: return self._app_properties['volume'] / 100.0 @property def is_volume_muted(self): return self._app_properties.get('muted') @property def media_content_id(self): return self._item.get('uniqueid', None) @property def media_content_type(self): return MEDIA_TYPES.get(self._item.get('type')) @property def media_duration(self): if self._properties.get('live'): return None total_time = self._properties.get('totaltime') if total_time is None: return None return ( total_time['hours'] * 3600 + total_time['minutes'] * 60 + total_time['seconds']) @property def media_image_url(self): thumbnail = self._item.get('thumbnail') if thumbnail is None: return None url_components = urllib.parse.urlparse(thumbnail) if url_components.scheme == 'image': return '{}/{}'.format( self._image_url, urllib.parse.quote_plus(thumbnail)) @property def media_title(self): return self._item.get( 'title', self._item.get('label', self._item.get('file'))) @property def media_series_title(self): return self._item.get('showtitle') @property def media_season(self): return self._item.get('season') @property def media_episode(self): return self._item.get('episode') @property def media_album_name(self): return self._item.get('album') @property def media_artist(self): artists = self._item.get('artist', []) if artists: return artists[0] return None @property def media_album_artist(self): artists = self._item.get('albumartist', []) if artists: return artists[0] return None @property def supported_features(self): supported_features = SUPPORT_KODI if 
self._turn_on_action is not None: supported_features |= SUPPORT_TURN_ON if self._turn_off_action is not None: supported_features |= SUPPORT_TURN_OFF return supported_features @cmd @asyncio.coroutine def async_turn_on(self): if self._turn_on_action is not None: yield from self._turn_on_action.async_run( variables={"entity_id": self.entity_id}) else: _LOGGER.warning("turn_on requested but turn_on_action is none") if self._use_off_mode: self._flag_switch_off = False self.async_schedule_update_ha_state() @cmd @asyncio.coroutine def async_turn_off(self): if self._turn_off_action is not None: yield from self._turn_off_action.async_run( variables={"entity_id": self.entity_id}) else: _LOGGER.warning("turn_off requested but turn_off_action is none") if self._use_off_mode: self._flag_switch_off = True self.async_schedule_update_ha_state() @cmd @asyncio.coroutine def async_volume_up(self): assert ( yield from self.server.Input.ExecuteAction('volumeup')) == 'OK' @cmd @asyncio.coroutine def async_volume_down(self): assert ( yield from self.server.Input.ExecuteAction('volumedown')) == 'OK' @cmd def async_set_volume_level(self, volume): return self.server.Application.SetVolume(int(volume * 100)) @cmd def async_mute_volume(self, mute): return self.server.Application.SetMute(mute) @asyncio.coroutine def async_set_play_state(self, state): players = yield from self._get_players() if players is not None and players: yield from self.server.Player.PlayPause( players[0]['playerid'], state) @cmd def async_media_play_pause(self): return self.async_set_play_state('toggle') @cmd def async_media_play(self): return self.async_set_play_state(True) @cmd def async_media_pause(self): return self.async_set_play_state(False) @cmd @asyncio.coroutine def async_media_stop(self): players = yield from self._get_players() if players: yield from self.server.Player.Stop(players[0]['playerid']) @asyncio.coroutine def _goto(self, direction): players = yield from self._get_players() if players: if direction == 'previous': yield from self.server.Player.Seek(players[0]['playerid'], 0) yield from self.server.Player.GoTo( players[0]['playerid'], direction) @cmd def async_media_next_track(self): return self._goto('next') @cmd def async_media_previous_track(self): return self._goto('previous') @cmd @asyncio.coroutine def async_media_seek(self, position): players = yield from self._get_players() time = {} time['milliseconds'] = int((position % 1) * 1000) position = int(position) time['seconds'] = int(position % 60) position /= 60 time['minutes'] = int(position % 60) position /= 60 time['hours'] = int(position) if players: yield from self.server.Player.Seek(players[0]['playerid'], time) @cmd def async_play_media(self, media_type, media_id, **kwargs): if media_type == "CHANNEL": return self.server.Player.Open( {"item": {"channelid": int(media_id)}}) elif media_type == "PLAYLIST": return self.server.Player.Open( {"item": {"playlistid": int(media_id)}}) return self.server.Player.Open( {"item": {"file": str(media_id)}}) @asyncio.coroutine def async_set_shuffle(self, shuffle): if len(self._players) < 1: raise RuntimeError("Error: No active player.") yield from self.server.Player.SetShuffle( {"playerid": self._players[0]['playerid'], "shuffle": shuffle}) @asyncio.coroutine def async_call_method(self, method, **kwargs): import jsonrpc_base _LOGGER.debug("Run API method %s, kwargs=%s", method, kwargs) result_ok = False try: result = yield from getattr(self.server, method)(**kwargs) result_ok = True except jsonrpc_base.jsonrpc.ProtocolError as exc: 
result = exc.args[2]['error'] _LOGGER.error("Run API method %s.%s(%s) error: %s", self.entity_id, method, kwargs, result) except jsonrpc_base.jsonrpc.TransportError: result = None _LOGGER.warning("TransportError trying to run API method " "%s.%s(%s)", self.entity_id, method, kwargs) if isinstance(result, dict): event_data = {'entity_id': self.entity_id, 'result': result, 'result_ok': result_ok, 'input': {'method': method, 'params': kwargs}} _LOGGER.debug("EVENT kodi_call_method_result: %s", event_data) self.hass.bus.async_fire(EVENT_KODI_CALL_METHOD_RESULT, event_data=event_data) return result @asyncio.coroutine def async_add_media_to_playlist( self, media_type, media_id=None, media_name='ALL', artist_name=''): import jsonrpc_base params = {"playlistid": 0} if media_type == "SONG": if media_id is None: media_id = yield from self.async_find_song( media_name, artist_name) if media_id: params["item"] = {"songid": int(media_id)} elif media_type == "ALBUM": if media_id is None: if media_name == "ALL": yield from self.async_add_all_albums(artist_name) return media_id = yield from self.async_find_album( media_name, artist_name) if media_id: params["item"] = {"albumid": int(media_id)} else: raise RuntimeError("Unrecognized media type.") if media_id is not None: try: yield from self.server.Playlist.Add(params) except jsonrpc_base.jsonrpc.ProtocolError as exc: result = exc.args[2]['error'] _LOGGER.error("Run API method %s.Playlist.Add(%s) error: %s", self.entity_id, media_type, result) except jsonrpc_base.jsonrpc.TransportError: _LOGGER.warning("TransportError trying to add playlist to %s", self.entity_id) else: _LOGGER.warning("No media detected for Playlist.Add") @asyncio.coroutine def async_add_all_albums(self, artist_name): artist_id = yield from self.async_find_artist(artist_name) albums = yield from self.async_get_albums(artist_id) for alb in albums['albums']: yield from self.server.Playlist.Add( {"playlistid": 0, "item": {"albumid": int(alb['albumid'])}}) @asyncio.coroutine def async_clear_playlist(self): return self.server.Playlist.Clear({"playlistid": 0}) @asyncio.coroutine def async_get_artists(self): return (yield from self.server.AudioLibrary.GetArtists()) @asyncio.coroutine
MIT License
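The async_media_seek method in the Kodi context above converts a float position in seconds into the hours/minutes/seconds/milliseconds mapping that Kodi's Player.Seek call expects. A minimal standalone sketch of that conversion (the helper name is hypothetical and not part of the component):

def position_to_kodi_time(position):
    # Split a position in seconds into Kodi's time dict.
    time = {'milliseconds': int((position % 1) * 1000)}
    position = int(position)
    time['seconds'] = position % 60
    position //= 60
    time['minutes'] = position % 60
    position //= 60
    time['hours'] = position
    return time

# position_to_kodi_time(3723.5) -> {'milliseconds': 500, 'seconds': 3, 'minutes': 2, 'hours': 1}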
tatumdmortimer/popgen-stats
snpEffSummary.py
get_args
python
def get_args(): parser = argparse.ArgumentParser(description='Summarize snpEff output') parser.add_argument("vcf", help="annotated vcf file", action=FullPaths, type=is_file) parser.add_argument("gene", help="genes output file", action=FullPaths, type=is_file) return parser.parse_args()
Parse command line arguments
https://github.com/tatumdmortimer/popgen-stats/blob/eecdc4b10ea860cfd49e4fd21daa3b93b009350d/snpEffSummary.py#L38-L45
import sys, os, argparse import pandas import vcf from datetime import datetime class FullPaths(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values))) def listdir_fullpath(d): return [os.path.join(d, f) for f in os.listdir(d)] def is_dir(dirname): if not os.path.isdir(dirname): msg = "{0} is not a directory".format(dirname) raise argparse.ArgumentTypeError(msg) else: return dirname def is_file(filename): if not os.path.isfile(filename): msg = "{0} is not a file".format(filename) raise argparse.ArgumentTypeError(msg) else: return filename
MIT License
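A brief usage sketch of the parser above: both positional arguments are validated by is_file and expanded to absolute paths by the FullPaths action before the script uses them (the command line shown is illustrative):

# e.g. invoked as: python snpEffSummary.py annotated.vcf snpEff_genes.txt
args = get_args()
print(args.vcf)   # absolute path to the annotated VCF
print(args.gene)  # absolute path to the snpEff genes output file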
christabor/moal
MOAL/data_structures/trees/splay_trees.py
SplayTree.right_rotate
python
def right_rotate(self, node): node_left_child = node.left_child if node_left_child: node.left_child = node_left_child.right_child if node_left_child.right_child: node_left_child.right_child.parent = node node_left_child.parent = node.parent if not node.parent: self.root = node_left_child elif node == node.parent.left_child: node.parent.left_child = node_left_child else: node.parent.right_child = node_left_child if node_left_child: node_left_child.right_child = node node.parent = node_left_child
Right rotation along a node
https://github.com/christabor/moal/blob/7d3062f6a49e45147172b7d577472b1e2aa084fa/MOAL/data_structures/trees/splay_trees.py#L66-L85
__author__ = """Chris Tabor (dxdstudio@gmail.com)""" if __name__ == '__main__': from os import getcwd from os import sys sys.path.append(getcwd()) from MOAL.data_structures.trees.avl_trees import AVLTree from MOAL.data_structures.trees.binary_search_trees import Node from MOAL.data_structures.trees.binary_search_trees import recurse_bst from MOAL.data_structures.trees.binary_search_trees import populate_bst from MOAL.helpers.display import Section DEBUG = True class SplayNode(Node): pass class SplayTree(AVLTree): def _put(self, *args, **kwargs): self.put(*args, **kwargs) def subtree_minimum(self, node): while node.left_child: node = node.left_child return node def subtree_maximum(self, node): while node.right_child: node = node.right_child return node def has_grandparent(self, node): return node.parent.parent is not None def left_rotate(self, node): node_right_child = node.right_child if node_right_child: node.right_child = node_right_child.left_child if node_right_child.left_child: node_right_child.left_child.parent = node node_right_child.parent = node.parent if not node.parent: self.root = node_right_child elif node == node.parent.left_child: node.parent.left_child = node_right_child else: node.parent.right_child = node_right_child if node_right_child: node_right_child.left_child = node node.parent = node_right_child
Apache License 2.0
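Right rotation promotes a node's left child: the child becomes the new subtree root, its right subtree is re-attached as the node's left subtree, and the node itself drops to the child's right. A self-contained sketch on a minimal node class, independent of the SplayTree/AVLTree machinery in the record above:

class _Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

def right_rotate(node):
    # Return the new subtree root after rotating `node` to the right.
    pivot = node.left        # left child becomes the new root of this subtree
    node.left = pivot.right  # pivot's right subtree moves under node
    pivot.right = node       # node drops to the right of the pivot
    return pivot

# Rotating y(x(a, b), c) yields x(a, y(b, c)):
y = _Node('y', left=_Node('x', left=_Node('a'), right=_Node('b')), right=_Node('c'))
x = right_rotate(y)
assert x.value == 'x' and x.right.value == 'y' and x.right.left.value == 'b'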
felix-hilden/pyfactor
pyfactor/_cli.py
infer_graph_from_sources
python
def infer_graph_from_sources(sources: List[str]) -> Path: parts = [make_absolute(Path(s)).stem for s in sources] return Path('-'.join(parts)).with_suffix('.gv')
Infer graph name from sources.
https://github.com/felix-hilden/pyfactor/blob/c07c4f4369a9fa3678a0103670484a6fdf1b56b0/pyfactor/_cli.py#L128-L131
from argparse import ArgumentParser from pathlib import Path from typing import List, Optional parser = ArgumentParser( allow_abbrev=False, description='Script dependency visualiser.' ) group_mode = parser.add_argument_group('Source and output') group_mode.add_argument('sources', nargs='*', help=( 'source file names. If sources was disabled by providing no names, ' '--graph is used as direct input for rendering. Disabling two or more of ' 'SOURCES, --graph and --output will return with an error code 1.' )) group_mode.add_argument('--graph', '-g', nargs='?', default='-', const=None, help=( 'write or read intermediate graph file. Graph output is disabled by default. ' 'If a value is specified, it is used as the file name. ' 'If no value is provided, the name is inferred from combining SOURCES. ' 'See SOURCES for more information.' )) group_mode.add_argument('--output', '-o', help=( 'render file name. By default the name is inferred from --graph. ' 'If the name is a single hyphen, render output is disabled ' 'and a graph is written to --graph. See SOURCES for more information. ' 'NOTE: --format is appended to the name' )) group_mode.add_argument('--format', '-f', default='svg', help=( 'render file format, appended to all render file names (default: %(default)s) ' 'NOTE: displaying docstring tooltips is only available in svg and cmap formats' )) group_mode.add_argument( '--legend', nargs='?', default=None, const='pyfactor-legend', help=( 'render a legend, optionally specify a file name (default: %(const)s)' ) ) group_parse = parser.add_argument_group('Parsing options') group_parse.add_argument( '--imports', '-i', default='interface', help=( 'duplicate or resolve import nodes. ' 'Valid values are duplicate, interface and resolve (default: %(default)s). ' 'Duplicating produces a node for each import in the importing source. ' 'Resolving imports links edges directly to the original definitions instead. ' '"interface" leaves import nodes that reference definitions directly below ' 'the import in the module hierarchy and resolves others.' ) ) group_parse.add_argument( '--skip-external', '-se', action='store_true', help=( 'do not visualise imports to external modules' ) ) group_parse.add_argument( '--exclude', '-e', action='append', help='exclude nodes in the source' ) group_parse.add_argument( '--collapse-waypoints', '-cw', action='store_true', help=( 'remove children of waypoint nodes and mark them as collapsed' ) ) group_parse.add_argument( '--collapse-exclude', '-ce', action='append', help=( 'exclude waypoint nodes from being collapsed' 'when --collapse-waypoints is set' ) ) group_parse.add_argument( '--root', '-r', default=None, help=( 'only show root and its children in the graph ' 'NOTE: does not affect graph coloring' ) ) group_graph = parser.add_argument_group('Graph appearance') group_graph.add_argument( '--stagger', type=int, default=2, help='max Graphviz unflatten stagger' ) group_graph.add_argument( '--no-fanout', action='store_true', help='disable Graphviz unflatten fanout' ) group_graph.add_argument( '--chain', type=int, default=1, help='max Graphviz unflatten chain' ) group_graph.add_argument( '--graph-attr', '-ga', action='append', help=( 'Graphviz graph attributes as colon-separated name-value pairs ' '(e.g. -ga overlap:false) NOTE: overrided by Pyfactor' ) ) group_graph.add_argument( '--node-attr', '-na', action='append', help=( 'Graphviz node attributes as colon-separated name-value pairs ' '(e.g. 
-na style:filled,rounded) NOTE: overrided by Pyfactor' ) ) group_graph.add_argument( '--edge-attr', '-ea', action='append', help=( 'Graphviz edge attributes as colon-separated name-value pairs ' '(e.g. -ea arrowsize:2) NOTE: overrided by Pyfactor' ) ) group_graph.add_argument('--engine', help='Graphviz layout engine') group_misc = parser.add_argument_group('Miscellaneous options') group_misc.add_argument('--view', action='store_true', help=( 'open result in default application after rendering' )) group_misc.add_argument( '--renderer', help='Graphviz output renderer' ) group_misc.add_argument( '--formatter', help='Graphviz output formatter' ) group_misc.add_argument( '--version', '-v', action='store_true', help='display version number and exit' ) class ArgumentError(RuntimeError): def make_absolute(path: Path) -> Path: return path if path.is_absolute() else Path.cwd() / path
MIT License
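What infer_graph_from_sources produces can be read off directly: each source path is absolutised, its stem is kept, the stems are joined with hyphens, and a .gv suffix is added. A quick sketch (the files need not exist; only the names are used):

infer_graph_from_sources(['pkg.py', 'cli.py'])    # -> Path('pkg-cli.gv')
infer_graph_from_sources(['pyfactor/_cli.py'])    # -> Path('_cli.gv')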
haaleo/swarmlib
swarmlib/abc/main.py
configure_parser
python
def configure_parser(sub_parsers): parser = sub_parsers.add_parser( 'bees', description='Solving a minimization problem using the artificial bee colony algorithm', help='artificial bee colony algorithm for minimization problem') parser.add_argument( '-f', '--function', type=str, default='michalewicz', help='''Choose the function that is used for searching the minimum. Choices are any of the 2D or nD single objective functions available in the 'landscapes' package (https://git.io/JTSFv). Example arguments: 'michalewicz', 'ackley', 'rastrigin'.''', choices=[*FUNCTIONS], metavar='') parser.add_argument( '-u', '--upper-boundary', type=float, default=4., help='Upper boundary of the function (default 4)') parser.add_argument( '-l', '--lower-boundary', type=float, default=0., help='Lower boundary of the function (default 0)') parser.add_argument( '-n', '--iteration-number', type=int, default=30, help='Number of iterations to execute (default 30)') parser.add_argument( '-t', '--trials', type=int, default=3, help='Maximum number of trials a bee tries to find a new, better food source before it becomes exhausted.') parser.add_argument( '-a', '--alpha', type=float, default=1., help='Scaling parameter used for levy flight step. (default 1)') parser.add_argument( '-la', '--lambda', type=float, default=1.5, help='Randomization parameter used for the levy flights distribution. (default 1.5)') parser.add_argument( 'bees', type=int, help='Number of employed bees used for solving') parser.set_defaults(func=_run_abc)
Configure the argument parser for the artificial bee colony algorithm
https://github.com/haaleo/swarmlib/blob/044b10be5694359900495403cc9f0e84d38a9e88/swarmlib/abc/main.py#L23-L87
import logging from .abc_problem import ABCProblem from ..util.functions import FUNCTIONS LOGGER = logging.getLogger(__name__) def _run_abc(args): LOGGER.info('Start artificial bee colony with parameters="%s"', args) args['function'] = FUNCTIONS[args['function']] problem = ABCProblem(**args) problem.solve() problem.replay()
BSD 3-Clause New or Revised License
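configure_parser expects an argparse sub-parsers object; a hedged sketch of wiring it into a top-level CLI and dispatching (the surrounding parser and the dispatch convention are assumptions; only configure_parser itself comes from the record above):

import argparse

parser = argparse.ArgumentParser(prog='swarm')
sub_parsers = parser.add_subparsers()
configure_parser(sub_parsers)

# e.g. "swarm bees 10 -f ackley -n 50"
args = vars(parser.parse_args(['bees', '10', '-f', 'ackley', '-n', '50']))
run = args.pop('func')  # drop the dispatch target before forwarding the options
run(args)               # calls _run_abc({'function': 'ackley', 'bees': 10, ...})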
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/scene/hunterdouglas_powerview.py
PowerViewScene.name
python
def name(self): return self._scene.name
Return the name of the scene.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/scene/hunterdouglas_powerview.py#L86-L88
import asyncio import logging import voluptuous as vol from homeassistant.components.scene import Scene, DOMAIN from homeassistant.const import CONF_PLATFORM from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import async_generate_entity_id _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['aiopvapi==1.5.4'] ENTITY_ID_FORMAT = DOMAIN + '.{}' HUB_ADDRESS = 'address' PLATFORM_SCHEMA = vol.Schema({ vol.Required(CONF_PLATFORM): 'hunterdouglas_powerview', vol.Required(HUB_ADDRESS): cv.string, }) SCENE_DATA = 'sceneData' ROOM_DATA = 'roomData' SCENE_NAME = 'name' ROOM_NAME = 'name' SCENE_ID = 'id' ROOM_ID = 'id' ROOM_ID_IN_SCENE = 'roomId' STATE_ATTRIBUTE_ROOM_NAME = 'roomName' @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): from aiopvapi.scenes import Scenes from aiopvapi.rooms import Rooms from aiopvapi.resources.scene import Scene as PvScene hub_address = config.get(HUB_ADDRESS) websession = async_get_clientsession(hass) _scenes = yield from Scenes( hub_address, hass.loop, websession).get_resources() _rooms = yield from Rooms( hub_address, hass.loop, websession).get_resources() if not _scenes or not _rooms: _LOGGER.error( "Unable to initialize PowerView hub: %s", hub_address) return pvscenes = (PowerViewScene(hass, PvScene(_raw_scene, hub_address, hass.loop, websession), _rooms) for _raw_scene in _scenes[SCENE_DATA]) async_add_devices(pvscenes) class PowerViewScene(Scene): def __init__(self, hass, scene, room_data): self._scene = scene self.hass = hass self._room_name = None self._sync_room_data(room_data) self.entity_id = async_generate_entity_id( ENTITY_ID_FORMAT, str(self._scene.id), hass=hass) def _sync_room_data(self, room_data): room = next((room for room in room_data[ROOM_DATA] if room[ROOM_ID] == self._scene.room_id), {}) self._room_name = room.get(ROOM_NAME, '') @property
MIT License
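The _sync_room_data helper in the context above uses next() with a generator and a default to find the first room matching the scene's room id. A standalone sketch of that lookup pattern with illustrative data:

rooms = {'roomData': [{'id': 1, 'name': 'Kitchen'}, {'id': 2, 'name': 'Office'}]}
room = next((r for r in rooms['roomData'] if r['id'] == 2), {})
room.get('name', '')  # 'Office'; falls back to '' when no room matches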
dpressel/mead-baseline
layers/eight_mile/tf/serialize.py
save_tlm_output_npz
python
def save_tlm_output_npz(tlm: tf.keras.layers.Layer, npz: str, embeddings_keys: List[str] = None, name: str = "TLM", verbose: bool = False): d = to_tlm_array(tlm, embeddings_keys, name) if hasattr(tlm, 'output'): d.update(to_weight_array(tlm.output, name=f'{name}/output')) elif hasattr(tlm, 'output_layer'): d.update(to_weight_array(tlm.output_layer, name=f'{name}/output')) if verbose: print(d.keys()) np.savez(npz, **d)
Save a TLM to an NPZ file with an output layer :param tlm: A Transformer LM-type module :param npz: A file to save :param embeddings_keys: A list of keys to get embeddings from. Defaults to `None`, in which case all embeddings are written :param name: A name for this TLM :param verbose: whether to print the saved keys :return: None
https://github.com/dpressel/mead-baseline/blob/ee6a7c154293be0f0d7d637e41efe9aabd3bbf80/layers/eight_mile/tf/serialize.py#L397-L414
import numpy as np from typing import Dict, List from eight_mile.tf.layers import ( TransformerEncoderStack, TransformerEncoder, TransformerDecoderStack, TransformerDecoder, MultiHeadedAttention, PassThru, FFN, SingleHeadReduction, SpatialGatingUnit, GatedMLPEncoder, GatedMLPEncoderStack ) from eight_mile.tf.embeddings import LookupTableEmbeddings, LearnedPositionalLookupTableEmbeddingsWithBias, LearnedPositionalLookupTableEmbeddings import tensorflow as tf def to_weight_array(tf_layer: tf.keras.layers.Layer, name: str) -> Dict: weights, bias = tf_layer.get_weights() return {f"{name}/weights": weights.T, f"{name}/bias": bias} def from_weight_array(tf_layer: tf.keras.layers.Layer, d: Dict, name: str): weights = d[f"{name}/weights"] bias = d[f"{name}/bias"] tf_layer.set_weights([weights.T, bias]) def to_ffn_array(tf_ffn: FFN, name: str) -> Dict: d = {} d.update(to_weight_array(tf_ffn.expansion, f"{name}/expansion")) d.update(to_weight_array(tf_ffn.squeeze, f"{name}/squeeze")) return d def from_ffn_array(tf_ffn: FFN, d: Dict, name: str): from_weight_array(tf_ffn.expansion, d, f"{name}/expansion") from_weight_array(tf_ffn.squeeze, d, f"{name}/squeeze") def to_attn_array(tf_attn: tf.keras.layers.Layer, name: str) -> Dict: d = {} d.update(to_weight_array(tf_attn.w_Q, f"{name}/w_Q")) d.update(to_weight_array(tf_attn.w_K, f"{name}/w_K")) d.update(to_weight_array(tf_attn.w_V, f"{name}/w_V")) if hasattr(tf_attn, 'w_O'): d.update(to_weight_array(tf_attn.w_O, f"{name}/w_O")) if hasattr(tf_attn, 'rpr_key'): rpr_key_weights = tf_attn.rpr_key.get_weights()[0] d.update({f"{name}/rpr_key": rpr_key_weights}) if hasattr(tf_attn, 'rpr_value'): rpr_value_weights = tf_attn.rpr_value.get_weights()[0] d.update({f"{name}/rpr_value": rpr_value_weights}) return d def from_attn_array(tf_attn: tf.keras.layers.Layer, d: Dict, name: str): from_weight_array(tf_attn.w_Q, d, f"{name}/w_Q") from_weight_array(tf_attn.w_K, d, f"{name}/w_K") from_weight_array(tf_attn.w_V, d, f"{name}/w_V") if hasattr(tf_attn, 'w_O'): from_weight_array(tf_attn.w_O, d, f"{name}/w_O") if hasattr(tf_attn, 'rpr_key'): tf_attn.rpr_key.set_weights([d[f"{name}/rpr_key"]]) if hasattr(tf_attn, 'rpr_value'): tf_attn.rpr_value.set_weights([d[f"{name}/rpr_value"]]) def to_sgu_array(tf_sgu: SpatialGatingUnit, name: str) -> Dict: d = {} d.update(to_weight_array(tf_sgu.norm, f"{name}/norm")) d.update(to_weight_array(tf_sgu.proj, f"{name}/proj")) return d def to_gmlp_encoder_array(tf_encoder: GatedMLPEncoder, name: str) -> Dict: d = {} d.update(to_weight_array(tf_encoder.to_ffn, f"{name}/to_ffn")) d.update(to_weight_array(tf_encoder.from_sgu, f"{name}/from_sgu")) d.update(to_weight_array(tf_encoder.norm, f"{name}/norm")) d.update(to_sgu_array(tf_encoder.spatial_gating_unit, f"{name}/spatial_gating_unit")) return d def to_encoder_array(tf_encoder: TransformerEncoder, name: str) -> Dict: d = {} d.update(to_weight_array(tf_encoder.ln1, f"{name}/ln1")) d.update(to_weight_array(tf_encoder.ln2, f"{name}/ln2")) d.update(to_attn_array(tf_encoder.self_attn, f"{name}/attn")) d.update(to_ffn_array(tf_encoder.ffn, f"{name}/ffn")) return d def from_encoder_array(tf_encoder: TransformerEncoder, d: Dict, name: str): from_weight_array(tf_encoder.ln1, d, f"{name}/ln1") from_weight_array(tf_encoder.ln2, d, f"{name}/ln2") from_attn_array(tf_encoder.self_attn, d, f"{name}/attn") from_ffn_array(tf_encoder.ffn, d, f"{name}/ffn") def to_decoder_array(tf_decoder: TransformerDecoder, name: str) -> Dict: d = {} d.update(to_weight_array(tf_decoder.ln1, f"{name}/ln1")) 
d.update(to_weight_array(tf_decoder.ln2, f"{name}/ln2")) d.update(to_weight_array(tf_decoder.ln3, f"{name}/ln3")) d.update(to_attn_array(tf_decoder.self_attn, f"{name}/self_attn")) d.update(to_attn_array(tf_decoder.src_attn, f"{name}/src_attn")) d.update(to_ffn_array(tf_decoder.ffn, f"{name}/ffn")) return d def to_embed_array(tf_embed: tf.keras.layers.Layer, name: str) -> Dict: d = {} if hasattr(tf_embed, 'pos'): pos_weights = tf.keras.backend.get_value(tf_embed.pos) d.update({f"{name}/pos_weights": pos_weights}) if hasattr(tf_embed, 'bias'): bias = tf.keras.backend.get_value(tf_embed.bias) d.update({f"{name}/bias": bias.squeeze()}) weights = tf.keras.backend.get_value(tf_embed.W) d.update({f"{name}/weights": weights}) return d def from_embed_array(tf_embed: tf.keras.layers.Layer, d: Dict, name: str, bias=None): weights = [d[f"{name}/weights"]] if hasattr(tf_embed, 'pos'): pos_weights = d[f"{name}/pos_weights"] weights = [pos_weights] + weights if hasattr(tf_embed, 'bias') and bias is not None: weights = weights + [bias.reshape(1, -1)] tf_embed.set_weights(weights) def to_gmlp_encoder_stack_array( tf_encoder_stack: GatedMLPEncoderStack, name: str = "GatedMLPEncoderStack" ) -> Dict: d = {} if isinstance(tf_encoder_stack.ln, tf.keras.layers.LayerNormalization): d.update(to_weight_array(tf_encoder_stack.ln, f"{name}/ln")) for i, enc_tf in enumerate(tf_encoder_stack.encoders): d.update(to_gmlp_encoder_array(enc_tf, f"{name}/{i}")) return d def to_encoder_stack_array( tf_encoder_stack: TransformerEncoderStack, name: str = "TransformerEncoderStack" ) -> Dict: d = {} if isinstance(tf_encoder_stack.ln, tf.keras.layers.LayerNormalization): d.update(to_weight_array(tf_encoder_stack.ln, f"{name}/ln")) for i, enc_tf in enumerate(tf_encoder_stack.encoders): d.update(to_encoder_array(enc_tf, f"{name}/{i}")) return d def from_encoder_stack_array(tf_encoder_stack: TransformerEncoderStack, d: Dict, name: str = "TransformerEncoderStack"): if isinstance(tf_encoder_stack.ln, tf.keras.layers.LayerNormalization): from_weight_array(tf_encoder_stack.ln, d, f"{name}/ln") for i, enc_tf in enumerate(tf_encoder_stack.encoders): from_encoder_array(enc_tf, d, f"{name}/{i}") def to_decoder_stack_array( tf_decoder_stack: TransformerDecoderStack, name: str = "TransformerDecoderStack" ) -> Dict: d = {} if isinstance(tf_decoder_stack.ln, tf.keras.layers.LayerNormalization): d.update(to_weight_array(tf_decoder_stack.ln, f"{name}/ln")) for i, dec_tf in enumerate(tf_decoder_stack.decoders): d.update(to_decoder_array(dec_tf, f"{name}/{i}")) return d def from_decoder_array(tf_decoder: TransformerDecoder, d: Dict, name: str): from_weight_array(tf_decoder.ln1, d, f"{name}/ln1") from_weight_array(tf_decoder.ln2, d, f"{name}/ln2") from_weight_array(tf_decoder.ln3, d, f"{name}/ln3") from_attn_array(tf_decoder.src_attn, d, f"{name}/src_attn") from_attn_array(tf_decoder.self_attn, d, f"{name}/self_attn") from_ffn_array(tf_decoder.ffn, d, f"{name}/ffn") def from_decoder_stack_array( tf_decoder_stack: TransformerDecoderStack, d: Dict, name: str = "TransformerDecoderStack" ): if isinstance(tf_decoder_stack.ln, tf.keras.layers.LayerNormalization): from_weight_array(tf_decoder_stack.ln, d, f"{name}/ln") for i, dec_pyt in enumerate(tf_decoder_stack.decoders): from_decoder_array(dec_pyt, d, f"{name}/{i}") def to_tlm_array(tf_tlm: tf.keras.layers.Layer, embeddings_keys: List[str] = None, name: str = "TLM") -> Dict: d = {} transformer = tf_tlm.transformer if hasattr(tf_tlm, 'transformer') else tf_tlm.generator if isinstance(transformer, 
GatedMLPEncoderStack): d.update(to_gmlp_encoder_stack_array(transformer, name=f"{name}/GatedMLPEncoderStack")) else: d.update(to_encoder_stack_array(transformer, name=f"{name}/TransformerEncoderStack")) keys_to_write = embeddings_keys if embeddings_keys else list(tf_tlm.embeddings.keys()) for embeddings_key in keys_to_write: d.update(to_embed_array(tf_tlm.embeddings[embeddings_key], name=f"{name}/Embeddings/{embeddings_key}")) if hasattr(tf_tlm.embeddings.reduction, 'ln'): d.update(to_weight_array(tf_tlm.embeddings.reduction.ln, name=f"{name}/Embeddings/reduction/ln")) return d def from_tlm_array(tf_tlm: tf.keras.layers.Layer, d: Dict, embeddings_keys: List[str] = None, name: str = "TLM"): transformer = tf_tlm.transformer if hasattr(tf_tlm, 'transformer') else tf_tlm.generator from_encoder_stack_array(transformer, d, name=f"{name}/TransformerEncoderStack") keys_to_restore = embeddings_keys if embeddings_keys else list(tf_tlm.embeddings.keys()) for embeddings_key in keys_to_restore: if isinstance(tf_tlm.embeddings[embeddings_key], LearnedPositionalLookupTableEmbeddingsWithBias): bias = d[f"{name}/Embeddings/tt/weights"][0] from_embed_array(tf_tlm.embeddings[embeddings_key], d, f"{name}/Embeddings/{embeddings_key}", bias) else: from_embed_array(tf_tlm.embeddings[embeddings_key], d, f"{name}/Embeddings/{embeddings_key}") if hasattr(tf_tlm.embeddings.reduction, 'ln'): from_weight_array(tf_tlm.embeddings.reduction.ln, d, f"{name}/Embeddings/reduction/ln") def save_tlm_npz(tf_tlm: tf.keras.layers.Layer, npz: str, embeddings_keys: List[str] = None, name: str = "TLM", verbose: bool = False): d = to_tlm_array(tf_tlm, embeddings_keys, name) if verbose: print(d.keys()) np.savez(npz, **d)
Apache License 2.0
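A hedged usage sketch of the serializer above: given a transformer LM-style tf.keras.layers.Layer with an embeddings dict (here assumed to be keyed by 'x') and an output projection, the call writes a flat NPZ whose keys follow the name prefixes built up in the context:

# `tlm` is assumed to be a TransformerLM-like layer with an `embeddings` dict
# and an `output`/`output_layer` attribute; its construction is not shown here.
save_tlm_output_npz(tlm, 'checkpoint.npz', embeddings_keys=['x'], name='TLM', verbose=True)

# The resulting file exposes arrays under keys such as
#   'TLM/TransformerEncoderStack/0/attn/w_Q/weights' and 'TLM/output/weights',
# which can be inspected with np.load('checkpoint.npz').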
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/utils/dbserialize.py
unpack_dbobj
python
def unpack_dbobj(item): _init_globals() try: obj = item[3] and _TO_MODEL_MAP[item[1]].objects.get(id=item[3]) except ObjectDoesNotExist: return None except TypeError: if hasattr(item, "pk"): return item return None if item[1] in _IGNORE_DATETIME_MODELS: return obj else: return _TO_DATESTRING(obj) == item[2] and obj or None
Check and convert internal representations back to Django database models. Args: item (packed_dbobj): The item to unpack; the fact that item is a packed dbobj should be checked before this call. Returns: unpacked (any): Either the original input or the database object recovered from the internal store (its typeclass is returned if applicable).
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/utils/dbserialize.py#L442-L473
from functools import update_wrapper from collections import defaultdict, MutableSequence, MutableSet, MutableMapping from collections import OrderedDict, deque try: from pickle import dumps, loads except ImportError: from pickle import dumps, loads from django.core.exceptions import ObjectDoesNotExist from django.contrib.contenttypes.models import ContentType from django.utils.safestring import SafeString, SafeBytes from evennia.utils.utils import uses_database, is_iter, to_str, to_bytes from evennia.utils import logger __all__ = ("to_pickle", "from_pickle", "do_pickle", "do_unpickle", "dbserialize", "dbunserialize") PICKLE_PROTOCOL = 2 _ERROR_DELETED_ATTR = ( "{cls_name} {obj} has had its root Attribute deleted. " "It must be cast to a {non_saver_name} before it can be modified further." ) def _get_mysql_db_version(): from django.db import connection conn = connection.cursor() conn.execute("SELECT VERSION()") version = conn.fetchone() return version and str(version[0]) or "" _GA = object.__getattribute__ _SA = object.__setattr__ _FROM_MODEL_MAP = None _TO_MODEL_MAP = None _IGNORE_DATETIME_MODELS = None _SESSION_HANDLER = None def _IS_PACKED_DBOBJ(o): return isinstance(o, tuple) and len(o) == 4 and o[0] == "__packed_dbobj__" def _IS_PACKED_SESSION(o): return isinstance(o, tuple) and len(o) == 3 and o[0] == "__packed_session__" if uses_database("mysql") and _get_mysql_db_version() < "5.6.4": _DATESTRING = "%Y:%m:%d-%H:%M:%S:000000" else: _DATESTRING = "%Y:%m:%d-%H:%M:%S:%f" def _TO_DATESTRING(obj): try: return _GA(obj, "db_date_created").strftime(_DATESTRING) except AttributeError: try: obj.save() except AttributeError: return None return _GA(obj, "db_date_created").strftime(_DATESTRING) def _init_globals(): global _FROM_MODEL_MAP, _TO_MODEL_MAP, _SESSION_HANDLER, _IGNORE_DATETIME_MODELS if not _FROM_MODEL_MAP: _FROM_MODEL_MAP = defaultdict(str) _FROM_MODEL_MAP.update(dict((c.model, c.natural_key()) for c in ContentType.objects.all())) if not _TO_MODEL_MAP: from django.conf import settings _TO_MODEL_MAP = defaultdict(str) _TO_MODEL_MAP.update( dict((c.natural_key(), c.model_class()) for c in ContentType.objects.all()) ) _IGNORE_DATETIME_MODELS = [] for src_key, dst_key in settings.ATTRIBUTE_STORED_MODEL_RENAME: _TO_MODEL_MAP[src_key] = _TO_MODEL_MAP.get(dst_key, None) _IGNORE_DATETIME_MODELS.append(src_key) if not _SESSION_HANDLER: from evennia.server.sessionhandler import SESSION_HANDLER as _SESSION_HANDLER def _save(method): def save_wrapper(self, *args, **kwargs): self.__doc__ = method.__doc__ ret = method(self, *args, **kwargs) self._save_tree() return ret return update_wrapper(save_wrapper, method) class _SaverMutable(object): def __init__(self, *args, **kwargs): self._parent = kwargs.pop("_parent", None) self._db_obj = kwargs.pop("_db_obj", None) self._data = None def __bool__(self): return bool(self._data) def _save_tree(self): if self._parent: self._parent._save_tree() elif self._db_obj: if not self._db_obj.pk: cls_name = self.__class__.__name__ try: non_saver_name = cls_name.split("_Saver", 1)[1].lower() except IndexError: non_saver_name = cls_name raise ValueError( _ERROR_DELETED_ATTR.format( cls_name=cls_name, obj=self, non_saver_name=non_saver_name ) ) self._db_obj.value = self else: logger.log_err("_SaverMutable %s has no root Attribute to save to." 
% self) def _convert_mutables(self, data): def process_tree(item, parent): dtype = type(item) if dtype in (str, int, float, bool, tuple): return item elif dtype == list: dat = _SaverList(_parent=parent) dat._data.extend(process_tree(val, dat) for val in item) return dat elif dtype == dict: dat = _SaverDict(_parent=parent) dat._data.update((key, process_tree(val, dat)) for key, val in item.items()) return dat elif dtype == set: dat = _SaverSet(_parent=parent) dat._data.update(process_tree(val, dat) for val in item) return dat return item return process_tree(data, self) def __repr__(self): return self._data.__repr__() def __len__(self): return self._data.__len__() def __iter__(self): return self._data.__iter__() def __getitem__(self, key): return self._data.__getitem__(key) def __eq__(self, other): return self._data == other def __ne__(self, other): return self._data != other @_save def __setitem__(self, key, value): self._data.__setitem__(key, self._convert_mutables(value)) @_save def __delitem__(self, key): self._data.__delitem__(key) class _SaverList(_SaverMutable, MutableSequence): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._data = list() @_save def __iadd__(self, otherlist): self._data = self._data.__add__(otherlist) return self._data def __add__(self, otherlist): return list(self._data) + otherlist @_save def insert(self, index, value): self._data.insert(index, self._convert_mutables(value)) def __eq__(self, other): try: return list(self._data) == list(other) except TypeError: return False def __ne__(self, other): try: return list(self._data) != list(other) except TypeError: return True def index(self, value, *args): return self._data.index(value, *args) class _SaverDict(_SaverMutable, MutableMapping): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._data = dict() def has_key(self, key): return key in self._data class _SaverSet(_SaverMutable, MutableSet): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._data = set() def __contains__(self, value): return self._data.__contains__(value) @_save def add(self, value): self._data.add(self._convert_mutables(value)) @_save def discard(self, value): self._data.discard(value) class _SaverOrderedDict(_SaverMutable, MutableMapping): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._data = OrderedDict() def has_key(self, key): return key in self._data class _SaverDeque(_SaverMutable): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._data = deque() @_save def append(self, *args, **kwargs): self._data.append(*args, **kwargs) @_save def appendleft(self, *args, **kwargs): self._data.appendleft(*args, **kwargs) @_save def clear(self): self._data.clear() @_save def extendleft(self, *args, **kwargs): self._data.extendleft(*args, **kwargs) def _getmaxlen(self): return self._data.maxlen def _setmaxlen(self, value): self._data.maxlen = value def _delmaxlen(self): del self._data.maxlen maxlen = property(_getmaxlen, _setmaxlen, _delmaxlen) @_save def pop(self, *args, **kwargs): return self._data.pop(*args, **kwargs) @_save def popleft(self, *args, **kwargs): return self._data.popleft(*args, **kwargs) @_save def reverse(self): self._data.reverse() @_save def rotate(self, *args): self._data.rotate(*args) _DESERIALIZE_MAPPING = { _SaverList.__name__: list, _SaverDict.__name__: dict, _SaverSet.__name__: set, _SaverOrderedDict.__name__: OrderedDict, _SaverDeque.__name__: deque, } def deserialize(obj): def _iter(obj): typ = 
type(obj) tname = typ.__name__ if tname in ("_SaverDict", "dict"): return {_iter(key): _iter(val) for key, val in obj.items()} elif tname in _DESERIALIZE_MAPPING: return _DESERIALIZE_MAPPING[tname](_iter(val) for val in obj) elif is_iter(obj): return typ(_iter(val) for val in obj) return obj return _iter(obj) def pack_dbobj(item): _init_globals() obj = item natural_key = _FROM_MODEL_MAP[ hasattr(obj, "id") and hasattr(obj, "db_date_created") and hasattr(obj, "__dbclass__") and obj.__dbclass__.__name__.lower() ] return ( natural_key and ("__packed_dbobj__", natural_key, _TO_DATESTRING(obj), _GA(obj, "id")) or item )
MIT License
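pack_dbobj (at the end of the context) and unpack_dbobj form a round trip: a saved model instance becomes a 4-tuple of marker, ContentType natural key, creation timestamp and primary key, and unpacking re-fetches the row and compares the timestamp before handing the object back. A hedged sketch (the natural key and timestamp shown are illustrative):

packed = pack_dbobj(obj)
# e.g. ('__packed_dbobj__', ('objects', 'objectdb'), '2020:01:01-12:00:00:000000', 42)
assert _IS_PACKED_DBOBJ(packed)

restored = unpack_dbobj(packed)  # re-fetched by id; None if the row was deleted or the timestamp no longer matches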
rebiocoder/bioforum
venv/Lib/site-packages/django/contrib/gis/geos/point.py
Point.z
python
def z(self): return self._cs.getOrdinate(2, 0) if self.hasz else None
Return the Z component of the Point.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/contrib/gis/geos/point.py#L128-L130
from ctypes import c_uint from django.contrib.gis import gdal from django.contrib.gis.geos import prototypes as capi from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.geometry import GEOSGeometry class Point(GEOSGeometry): _minlength = 2 _maxlength = 3 has_cs = True def __init__(self, x=None, y=None, z=None, srid=None): if x is None: coords = [] elif isinstance(x, (tuple, list)): coords = x elif isinstance(x, (float, int)) and isinstance(y, (float, int)): if isinstance(z, (float, int)): coords = [x, y, z] else: coords = [x, y] else: raise TypeError('Invalid parameters given for Point initialization.') point = self._create_point(len(coords), coords) super().__init__(point, srid=srid) def _ogr_ptr(self): return gdal.geometries.Point._create_empty() if self.empty else super()._ogr_ptr() @classmethod def _create_empty(cls): return cls._create_point(None, None) @classmethod def _create_point(cls, ndim, coords): if not ndim: return capi.create_point(None) if ndim < 2 or ndim > 3: raise TypeError('Invalid point dimension: %s' % ndim) cs = capi.create_cs(c_uint(1), c_uint(ndim)) i = iter(coords) capi.cs_setx(cs, 0, next(i)) capi.cs_sety(cs, 0, next(i)) if ndim == 3: capi.cs_setz(cs, 0, next(i)) return capi.create_point(cs) def _set_list(self, length, items): ptr = self._create_point(length, items) if ptr: capi.destroy_geom(self.ptr) self._ptr = ptr self._post_init() else: raise GEOSException('Geometry resulting from slice deletion was invalid.') def _set_single(self, index, value): self._cs.setOrdinate(index, 0, value) def __iter__(self): for i in range(len(self)): yield self[i] def __len__(self): if self.empty: return 0 if self.hasz: return 3 else: return 2 def _get_single_external(self, index): if index == 0: return self.x elif index == 1: return self.y elif index == 2: return self.z _get_single_internal = _get_single_external @property def x(self): return self._cs.getOrdinate(0, 0) @x.setter def x(self, value): self._cs.setOrdinate(0, 0, value) @property def y(self): return self._cs.getOrdinate(1, 0) @y.setter def y(self, value): self._cs.setOrdinate(1, 0, value) @property
MIT License
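A quick usage sketch of the property above (assuming a GEOS-enabled GeoDjango install): z is only defined for three-dimensional points, otherwise None is returned.

from django.contrib.gis.geos import Point

Point(1, 2, 3).z  # 3.0
Point(1, 2).z     # None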
trentm/cmdln
examples/svn.py
MySVN.do_propset
python
def do_propset(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
Set PROPNAME to PROPVAL on files, dirs, or revisions. usage: 1. propset PROPNAME [PROPVAL | -F VALFILE] PATH... 2. propset PROPNAME --revprop -r REV [PROPVAL | -F VALFILE] [URL] 1. Creates a versioned, local propchange in working copy. 2. Creates an unversioned, remote propchange on repos revision. Note: svn recognizes the following special versioned properties but will store any arbitrary properties set: svn:ignore - A newline separated list of file patterns to ignore. svn:keywords - Keywords to be expanded. Valid keywords are: URL, HeadURL - The URL for the head version of the object. Author, LastChangedBy - The last person to modify the file. Date, LastChangedDate - The date/time the object was last modified. Rev, Revision, - The last revision the object changed. LastChangedRevision Id - A compressed summary of the previous 4 keywords. svn:executable - If present, make the file executable. This property cannot be set on a directory. A non-recursive attempt will fail, and a recursive attempt will set the property only on the file children of the directory. svn:eol-style - One of 'native', 'LF', 'CR', 'CRLF'. svn:mime-type - The mimetype of the file. Used to determine whether to merge the file, and how to serve it from Apache. A mimetype beginning with 'text/' (or an absent mimetype) is treated as text. Anything else is treated as binary. svn:externals - A newline separated list of module specifiers, each of which consists of a relative directory path, optional revision flags, and an URL. For example foo http://example.com/repos/zig foo/bar -r 1234 http://example.com/repos/zag ${cmd_option_list}
https://github.com/trentm/cmdln/blob/e3cb9b26f3b5427b3f545e3ae34ee22bac36f31d/examples/svn.py#L868-L907
import sys import cmdln class MySVN(cmdln.Cmdln): name = "svn" def __init__(self, *args, **kwargs): cmdln.Cmdln.__init__(self, *args, **kwargs) cmdln.Cmdln.do_help.aliases.append("h") @cmdln.option("--no-auto-props", action='store_true', help='disable automatic properties') @cmdln.option("--auto-props", action='store_true', help='enable automatic properties') @cmdln.option("--force", action='store_true', help='force operation to run') @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-N", "--non-recursive", action='store_true', help='operate on single directory only') @cmdln.option("--targets", metavar='ARG', help='pass contents of file ARG as additional args') def do_add(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("praise", "annotate", "ann") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("-v", "--verbose", action='store_true', help='print extra information') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_blame(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_cat(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("co") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("-N", "--non-recursive", action='store_true', help='operate on single directory only') @cmdln.option("-q", "--quiet", action='store_true', 
help='print as little as possible') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_checkout(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--diff3-cmd", metavar='ARG', help='use ARG as merge command') def do_cleanup(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("ci") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--encoding", metavar='ARG', help='treat value as being in charset encoding ARG') @cmdln.option("--editor-cmd", metavar='ARG', help='use ARG as external editor') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--force-log", action='store_true', help='force validity of log message source') @cmdln.option("--targets", metavar='ARG', help='pass contents of file ARG as additional args') @cmdln.option("-N", "--non-recursive", action='store_true', help='operate on single directory only') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-F", "--file", metavar='ARG', help='read data from file ARG') @cmdln.option("-m", "--message", metavar='ARG', help='specify commit message ARG') def do_commit(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("cp") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--encoding", metavar='ARG', help='treat value as being in charset encoding ARG') @cmdln.option("--editor-cmd", metavar='ARG', help='use ARG as external editor') @cmdln.option("--force-log", action='store_true', help='force validity of log message source') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') @cmdln.option("-F", "--file", metavar='ARG', help='read data from file ARG') @cmdln.option("-m", "--message", metavar='ARG', help='specify commit message ARG') def do_copy(self, subcmd, opts, *args): print "'svn %s' opts: %s" % 
(subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("del", "remove", "rm") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--encoding", metavar='ARG', help='treat value as being in charset encoding ARG') @cmdln.option("--editor-cmd", metavar='ARG', help='use ARG as external editor') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--targets", metavar='ARG', help='pass contents of file ARG as additional args') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-F", "--file", metavar='ARG', help='read data from file ARG') @cmdln.option("-m", "--message", metavar='ARG', help='specify commit message ARG') @cmdln.option("--force-log", action='store_true', help='force validity of log message source') @cmdln.option("--force", action='store_true', help='force operation to run') def do_delete(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("di") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--notice-ancestry", action='store_true', help='notice ancestry when calculating differences') @cmdln.option("--no-diff-deleted", action='store_true', help='do not print differences for deleted files') @cmdln.option("--diff-cmd", metavar='ARG', help='use ARG as diff command') @cmdln.option("-N", "--non-recursive", action='store_true', help='operate on single directory only') @cmdln.option("-x", "--extensions", metavar='ARG', help='pass ARG as bundled options to GNU diff') @cmdln.option("--new", metavar='ARG', help='use ARG as the newer target') @cmdln.option("--old", metavar='ARG', help='use ARG as the older target') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_diff(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.option("--native-eol", metavar='ARG', help="use a different EOL marker than the standard\nsystem marker for files with a native svn:eol-style\nproperty. 
ARG may be one of 'LF', 'CR', 'CRLF'") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--force", action='store_true', help='force operation to run') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_export(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.option("--no-auto-props", action='store_true', help='disable automatic properties') @cmdln.option("--auto-props", action='store_true', help='enable automatic properties') @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--encoding", metavar='ARG', help='treat value as being in charset encoding ARG') @cmdln.option("--editor-cmd", metavar='ARG', help='use ARG as external editor') @cmdln.option("--force-log", action='store_true', help='force validity of log message source') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("-N", "--non-recursive", action='store_true', help='operate on single directory only') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-F", "--file", metavar='ARG', help='read data from file ARG') @cmdln.option("-m", "--message", metavar='ARG', help='specify commit message ARG') def do_import(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("-R", "--recursive", action='store_true', help='descend recursively') @cmdln.option("--targets", metavar='ARG', help='pass contents of file ARG as additional args') def do_info(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("ls") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("-R", "--recursive", action='store_true', help='descend recursively') @cmdln.option("-v", "--verbose", action='store_true', help='print extra information') 
@cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_list(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--xml", action='store_true', help='output in XML') @cmdln.option("--incremental", action='store_true', help='give output suitable for concatenation') @cmdln.option("--stop-on-copy", action='store_true', help='do not cross copies while traversing history') @cmdln.option("--targets", metavar='ARG', help='pass contents of file ARG as additional args') @cmdln.option("-v", "--verbose", action='store_true', help='print extra information') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_log(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--ignore-ancestry", action='store_true', help='ignore ancestry when calculating merges') @cmdln.option("--diff3-cmd", metavar='ARG', help='use ARG as merge command') @cmdln.option("--dry-run", action='store_true', help='try operation but make no changes') @cmdln.option("--force", action='store_true', help='force operation to run') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-N", "--non-recursive", action='store_true', help='operate on single directory only') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_merge(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') 
@cmdln.option("--force-log", action='store_true', help='force validity of log message source') @cmdln.option("--encoding", metavar='ARG', help='treat value as being in charset encoding ARG') @cmdln.option("--editor-cmd", metavar='ARG', help='use ARG as external editor') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-F", "--file", metavar='ARG', help='read data from file ARG') @cmdln.option("-m", "--message", metavar='ARG', help='specify commit message ARG') def do_mkdir(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("mv", "rename", "ren") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--force-log", action='store_true', help='force validity of log message source') @cmdln.option("--encoding", metavar='ARG', help='treat value as being in charset encoding ARG') @cmdln.option("--editor-cmd", metavar='ARG', help='use ARG as external editor') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--force", action='store_true', help='force operation to run') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') @cmdln.option("-F", "--file", metavar='ARG', help='read data from file ARG') @cmdln.option("-m", "--message", metavar='ARG', help='specify commit message ARG') def do_move(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("pdel", "pd") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--revprop", action='store_true', help='operate on a revision property (use with -r)') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') @cmdln.option("-R", "--recursive", action='store_true', help='descend 
recursively') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') def do_propdel(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("pedit", "pe") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--force", action='store_true', help='force operation to run') @cmdln.option("--editor-cmd", metavar='ARG', help='use ARG as external editor') @cmdln.option("--encoding", metavar='ARG', help='treat value as being in charset encoding ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--revprop", action='store_true', help='operate on a revision property (use with -r)') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') def do_propedit(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("pget", "pg") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--strict", action='store_true', help='use strict semantics') @cmdln.option("--revprop", action='store_true', help='operate on a revision property (use with -r)') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') @cmdln.option("-R", "--recursive", action='store_true', help='descend recursively') def do_propget(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("plist", "pl") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--revprop", action='store_true', help='operate on a revision property (use with -r)') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be 
one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') @cmdln.option("-R", "--recursive", action='store_true', help='descend recursively') @cmdln.option("-v", "--verbose", action='store_true', help='print extra information') def do_proplist(self, subcmd, opts, *args): print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args) @cmdln.alias("pset", "ps") @cmdln.option("--config-dir", metavar='ARG', help='read user configuration files from directory ARG') @cmdln.option("--force", action='store_true', help='force operation to run') @cmdln.option("--encoding", metavar='ARG', help='treat value as being in charset encoding ARG') @cmdln.option("--non-interactive", action='store_true', help='do no interactive prompting') @cmdln.option("--no-auth-cache", action='store_true', help='do not cache authentication tokens') @cmdln.option("--password", metavar='ARG', help='specify a password ARG') @cmdln.option("--username", metavar='ARG', help='specify a username ARG') @cmdln.option("--revprop", action='store_true', help='operate on a revision property (use with -r)') @cmdln.option("-R", "--recursive", action='store_true', help='descend recursively') @cmdln.option("--targets", metavar='ARG', help='pass contents of file ARG as additional args') @cmdln.option("-r", "--revision", metavar='ARG', help='ARG (some commands also take ARG1:ARG2 range)\nA revision argument can be one of:\n NUMBER revision number\n "{" DATE "}" revision at start of the date\n "HEAD" latest in repository\n "BASE" base rev of item\'s working copy\n "COMMITTED" last commit at or before BASE\n "PREV" revision just before COMMITTED') @cmdln.option("-q", "--quiet", action='store_true', help='print as little as possible') @cmdln.option("-F", "--file", metavar='ARG', help='read data from file ARG')
MIT License
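The record above demonstrates the `cmdln` decorator pattern: `@cmdln.option` and `@cmdln.alias` attach getopt-style options and aliases to `do_*` subcommand methods of a `cmdln.Cmdln` subclass. A minimal sketch of the same pattern follows; the class and subcommand names are illustrative, and it assumes the `cmdln` package's `Cmdln.main()` entry point.

import sys
import cmdln

class MySVN(cmdln.Cmdln):
    name = "mysvn"

    @cmdln.alias("st")
    @cmdln.option("-v", "--verbose", action="store_true",
                  help="print extra information")
    def do_status(self, subcmd, opts, *paths):
        # 'subcmd' is the name typed on the command line, 'opts' holds parsed options
        print("'%s' opts: %s, paths: %s" % (subcmd, opts, paths))

if __name__ == "__main__":
    sys.exit(MySVN().main())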
qiskit-partners/mthree
mthree/mitigation.py
M3Mitigation.readout_fidelity
python
def readout_fidelity(self, qubits=None):
    if self.single_qubit_cals is None:
        raise M3Error('Mitigator is not calibrated')
    if qubits is None:
        qubits = range(self.num_qubits)
    else:
        outliers = [kk for kk in qubits if kk >= self.num_qubits]
        if any(outliers):
            raise M3Error('One or more qubit indices out of range: {}'.format(outliers))
    fids = []
    for kk in qubits:
        qubit = self.single_qubit_cals[kk]
        if qubit is not None:
            fids.append(np.mean(qubit.diagonal()))
        else:
            fids.append(None)
    return fids
Compute readout fidelity for calibrated qubits.

Parameters:
    qubits (array_like): Qubits to compute over, default is all.

Returns:
    list: List of qubit fidelities.

Raises:
    M3Error: Mitigator is not calibrated.
    M3Error: Qubit indices out of range.
https://github.com/qiskit-partners/mthree/blob/a24b06da2b6c6c8e86de2456e1e0d9b3f4b0b6e8/mthree/mitigation.py#L649-L678
import warnings from time import perf_counter import psutil import numpy as np import scipy.linalg as la import scipy.sparse.linalg as spla import orjson from qiskit import QuantumCircuit, transpile, execute from qiskit.providers import BaseBackend from mthree.matrix import _reduced_cal_matrix, sdd_check from mthree.utils import counts_to_vector, vector_to_quasiprobs from mthree.norms import ainv_onenorm_est_lu, ainv_onenorm_est_iter from mthree.matvec import M3MatVec from mthree.exceptions import M3Error from mthree.classes import QuasiCollection def _tensor_meas_states(qubit, num_qubits, initial_reset=False): qc0 = QuantumCircuit(num_qubits, 1) if initial_reset: qc0.reset(qubit) qc0.measure(qubit, 0) qc1 = QuantumCircuit(num_qubits, 1) if initial_reset: qc1.reset(qubit) qc1.x(qubit) qc1.measure(qubit, 0) return [qc0, qc1] def _marg_meas_states(num_qubits, initial_reset=False): qc0 = QuantumCircuit(num_qubits) if initial_reset: qc0.reset(range(num_qubits)) qc0.measure_all() qc1 = QuantumCircuit(num_qubits) if initial_reset: qc1.reset(range(num_qubits)) qc1.x(range(num_qubits)) qc1.measure_all() return [qc0, qc1] def _balanced_cal_strings(num_qubits): strings = [] for rep in range(1, num_qubits+1): str1 = '' str2 = '' for jj in range(int(np.ceil(num_qubits / rep))): str1 += str(jj % 2) * rep str2 += str((jj+1) % 2) * rep strings.append(str1[:num_qubits]) strings.append(str2[:num_qubits]) return strings def _balanced_cal_circuits(cal_strings, initial_reset=False): num_qubits = len(cal_strings[0]) circs = [] for string in cal_strings: qc = QuantumCircuit(num_qubits) if initial_reset: qc.reset(range(num_qubits)) for idx, bit in enumerate(string[::-1]): if bit == '1': qc.x(idx) qc.measure_all() circs.append(qc) return circs class M3Mitigation(): def __init__(self, system=None, iter_threshold=4096): self.system = system self.single_qubit_cals = None self.num_qubits = system.configuration().num_qubits if system else None self.iter_threshold = iter_threshold self.cal_shots = None self.cal_method = 'balanced' self.rep_delay = None def _form_cals(self, qubits): qubits = np.asarray(qubits, dtype=int) cals = np.zeros(4*qubits.shape[0], dtype=float) for kk, qubit in enumerate(qubits[::-1]): cals[4*kk:4*kk+4] = self.single_qubit_cals[qubit].ravel() return cals def _check_sdd(self, counts, qubits, distance=None): num_bits = len(qubits) if distance is None: distance = num_bits bitstring_len = len(next(iter(counts))) if bitstring_len != num_bits: raise M3Error('Bitstring length ({}) does not match'.format(bitstring_len) + ' number of qubits ({})'.format(num_bits)) cals = self._form_cals(qubits) return sdd_check(counts, cals, num_bits, distance) def tensored_cals_from_system(self, qubits=None, shots=8192, method='balanced', rep_delay=None, cals_file=None): warnings.warn("This method is deprecated, use 'cals_from_system' instead.") self.cals_from_system(qubits=qubits, shots=shots, method=method, rep_delay=rep_delay, cals_file=cals_file) def cals_from_system(self, qubits=None, shots=8192, method='balanced', initial_reset=False, rep_delay=None, cals_file=None): if qubits is None: qubits = range(self.num_qubits) self.cal_method = method self.rep_delay = rep_delay self._grab_additional_cals(qubits, shots=shots, method=method, rep_delay=rep_delay, initial_reset=initial_reset) if cals_file: with open(cals_file, 'wb') as fd: fd.write(orjson.dumps(self.single_qubit_cals, option=orjson.OPT_SERIALIZE_NUMPY)) def cals_from_file(self, cals_file): with open(cals_file, 'r', encoding='utf-8') as fd: self.single_qubit_cals 
= [np.asarray(cal) if cal else None for cal in orjson.loads(fd.read())] def tensored_cals_from_file(self, cals_file): warnings.warn("This method is deprecated, use 'cals_from_file' instead.") self.cals_from_file(cals_file) def _grab_additional_cals(self, qubits, shots=8192, method='balanced', rep_delay=None, initial_reset=False): if self.system is None: raise M3Error("System is not set. Use 'cals_from_file'.") if self.single_qubit_cals is None: self.single_qubit_cals = [None]*self.num_qubits if self.cal_shots is None: self.cal_shots = shots if self.rep_delay is None: self.rep_delay = rep_delay if method not in ['independent', 'balanced', 'marginal']: raise M3Error('Invalid calibration method.') if isinstance(qubits, dict): qubits = list(qubits) elif isinstance(qubits, list): if isinstance(qubits[0], dict): _qubits = [] for item in qubits: _qubits.extend(list(item)) qubits = list(set(_qubits)) num_cal_qubits = len(qubits) if method == 'marginal': circs = _marg_meas_states(num_cal_qubits, initial_reset=initial_reset) trans_qcs = transpile(circs, self.system, initial_layout=qubits, optimization_level=0) elif method == 'balanced': cal_strings = _balanced_cal_strings(num_cal_qubits) circs = _balanced_cal_circuits(cal_strings, initial_reset=initial_reset) trans_qcs = transpile(circs, self.system, initial_layout=qubits, optimization_level=0) else: circs = [] for kk in qubits: circs.extend(_tensor_meas_states(kk, self.num_qubits, initial_reset=initial_reset)) trans_qcs = transpile(circs, self.system, optimization_level=0) if isinstance(self.system, BaseBackend): job = execute(trans_qcs, self.system, optimization_level=0, shots=self.cal_shots, rep_delay=self.rep_delay) else: job = self.system.run(trans_qcs, shots=self.cal_shots, rep_delay=self.rep_delay) counts = job.result().get_counts() bad_list = [] if method == 'independent': for idx, qubit in enumerate(qubits): self.single_qubit_cals[qubit] = np.zeros((2, 2), dtype=float) prep0_counts = counts[2*idx] P10 = prep0_counts.get('1', 0) / self.cal_shots P00 = 1-P10 self.single_qubit_cals[qubit][:, 0] = [P00, P10] prep1_counts = counts[2*idx+1] P01 = prep1_counts.get('0', 0) / self.cal_shots P11 = 1-P01 self.single_qubit_cals[qubit][:, 1] = [P01, P11] if P01 >= P00: bad_list.append(qubit) elif method == 'marginal': prep0_counts = counts[0] prep1_counts = counts[1] for idx, qubit in enumerate(qubits): self.single_qubit_cals[qubit] = np.zeros((2, 2), dtype=float) count_vals = 0 index = num_cal_qubits-idx-1 for key, val in prep0_counts.items(): if key[index] == '0': count_vals += val P00 = count_vals / self.cal_shots P10 = 1-P00 self.single_qubit_cals[qubit][:, 0] = [P00, P10] count_vals = 0 for key, val in prep1_counts.items(): if key[index] == '1': count_vals += val P11 = count_vals / self.cal_shots P01 = 1-P11 self.single_qubit_cals[qubit][:, 1] = [P01, P11] if P01 >= P00: bad_list.append(qubit) else: cals = [np.zeros((2, 2), dtype=float) for kk in range(num_cal_qubits)] for idx, count in enumerate(counts): target = cal_strings[idx][::-1] good_prep = np.zeros(num_cal_qubits, dtype=float) denom = self.cal_shots * num_cal_qubits for key, val in count.items(): key = key[::-1] for kk in range(num_cal_qubits): if key[kk] == target[kk]: good_prep[kk] += val for kk, cal in enumerate(cals): if target[kk] == '0': cal[0, 0] += good_prep[kk] / denom else: cal[1, 1] += good_prep[kk] / denom for jj, cal in enumerate(cals): cal[1, 0] = 1.0 - cal[0, 0] cal[0, 1] = 1.0 - cal[1, 1] if cal[0, 1] >= cal[0, 0]: bad_list.append(qubits[jj]) for idx, cal in 
enumerate(cals): self.single_qubit_cals[qubits[idx]] = cal if any(bad_list): raise M3Error('Faulty qubits detected: {}'.format(bad_list)) def apply_correction(self, counts, qubits, distance=None, method='auto', max_iter=25, tol=1e-5, return_mitigation_overhead=False, details=False): given_list = False if isinstance(counts, (list, np.ndarray)): given_list = True if not given_list: counts = [counts] if isinstance(qubits, dict): qubits = [list(qubits)] elif not any(isinstance(qq, (list, tuple, np.ndarray, dict)) for qq in qubits): qubits = [qubits]*len(counts) else: if isinstance(qubits[0], dict): qubits = [list(qu) for qu in qubits] if len(qubits) != len(counts): raise M3Error('Length of counts does not match length of qubits.') quasi_out = [] for idx, cnts in enumerate(counts): quasi_out.append( self._apply_correction(cnts, qubits=qubits[idx], distance=distance, method=method, max_iter=max_iter, tol=tol, return_mitigation_overhead=return_mitigation_overhead, details=details) ) if not given_list: return quasi_out[0] return QuasiCollection(quasi_out) def _apply_correction(self, counts, qubits, distance=None, method='auto', max_iter=25, tol=1e-5, return_mitigation_overhead=False, details=False): counts = dict(counts) shots = sum(counts.values()) num_bits = len(qubits) num_elems = len(counts) if distance is None: distance = num_bits bitstring_len = len(next(iter(counts))) if bitstring_len != num_bits: raise M3Error('Bitstring length ({}) does not match'.format(bitstring_len) + ' number of qubits ({})'.format(num_bits)) if self.single_qubit_cals is None: warnings.warn('No calibration data. Calibrating: {}'.format(qubits)) self._grab_additional_cals(qubits, method=self.cal_method) missing_qubits = [qq for qq in qubits if self.single_qubit_cals[qq] is None] if any(missing_qubits): warnings.warn('Computing missing calibrations for qubits: {}'.format(missing_qubits)) self._grab_additional_cals(missing_qubits, method=self.cal_method) if method == 'auto': current_free_mem = psutil.virtual_memory().available / 1024**3 if num_elems <= self.iter_threshold and ((num_elems**2+num_elems)*8/1024**3 < current_free_mem/2): method = 'direct' else: method = 'iterative' if method == 'direct': st = perf_counter() mit_counts, col_norms, gamma = self._direct_solver(counts, qubits, distance, return_mitigation_overhead) dur = perf_counter()-st mit_counts.shots = shots if gamma is not None: mit_counts.mitigation_overhead = gamma * gamma if details: info = {'method': 'direct', 'time': dur, 'dimension': num_elems} info['col_norms'] = col_norms return mit_counts, info return mit_counts elif method == 'iterative': iter_count = np.zeros(1, dtype=int) def callback(_): iter_count[0] += 1 if details: st = perf_counter() mit_counts, col_norms, gamma = self._matvec_solver(counts, qubits, distance, tol, max_iter, 1, callback, return_mitigation_overhead) dur = perf_counter()-st mit_counts.shots = shots if gamma is not None: mit_counts.mitigation_overhead = gamma * gamma info = {'method': 'iterative', 'time': dur, 'dimension': num_elems} info['iterations'] = iter_count[0] info['col_norms'] = col_norms return mit_counts, info mit_counts, gamma = self._matvec_solver(counts, qubits, distance, tol, max_iter, 0, None, return_mitigation_overhead) mit_counts.shots = shots if gamma is not None: mit_counts.mitigation_overhead = gamma * gamma return mit_counts else: raise M3Error('Invalid method: {}'.format(method)) def reduced_cal_matrix(self, counts, qubits, distance=None): counts = dict(counts) num_bits = len(qubits) if distance is None: 
distance = num_bits bitstring_len = len(next(iter(counts))) if bitstring_len != num_bits: raise M3Error('Bitstring length ({}) does not match'.format(bitstring_len) + ' number of qubits ({})'.format(num_bits)) cals = self._form_cals(qubits) A, counts, _ = _reduced_cal_matrix(counts, cals, num_bits, distance) return A, counts def _direct_solver(self, counts, qubits, distance=None, return_mitigation_overhead=False): cals = self._form_cals(qubits) num_bits = len(qubits) A, sorted_counts, col_norms = _reduced_cal_matrix(counts, cals, num_bits, distance) vec = counts_to_vector(sorted_counts) LU = la.lu_factor(A, check_finite=False) x = la.lu_solve(LU, vec, check_finite=False) gamma = None if return_mitigation_overhead: gamma = ainv_onenorm_est_lu(A, LU) out = vector_to_quasiprobs(x, sorted_counts) return out, col_norms, gamma def _matvec_solver(self, counts, qubits, distance, tol=1e-5, max_iter=25, details=0, callback=None, return_mitigation_overhead=False): cals = self._form_cals(qubits) M = M3MatVec(dict(counts), cals, distance) L = spla.LinearOperator((M.num_elems, M.num_elems), matvec=M.matvec, rmatvec=M.rmatvec) diags = M.get_diagonal() def precond_matvec(x): out = x / diags return out P = spla.LinearOperator((M.num_elems, M.num_elems), precond_matvec) vec = counts_to_vector(M.sorted_counts) out, error = spla.gmres(L, vec, tol=tol, atol=tol, maxiter=max_iter, M=P, callback=callback) if error: raise M3Error('GMRES did not converge: {}'.format(error)) gamma = None if return_mitigation_overhead: gamma = ainv_onenorm_est_iter(M, tol=tol, max_iter=max_iter) quasi = vector_to_quasiprobs(out, M.sorted_counts) if details: return quasi, M.get_col_norms(), gamma return quasi, gamma
Apache License 2.0
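For the `readout_fidelity` record above, a minimal usage sketch; `backend` and the chosen qubit list are illustrative, and it assumes an installed `mthree` calibrated via the `cals_from_system` method shown in the context.

import mthree

mit = mthree.M3Mitigation(backend)           # 'backend' is an assumed Qiskit backend
mit.cals_from_system([0, 1, 2], shots=8192)  # calibrate qubits 0-2
fids = mit.readout_fidelity([0, 1, 2])       # mean diagonal of each 2x2 calibration matrix
print(fids)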
molssi/qcelemental
qcelemental/util/gph_uno_bipartite.py
_formDirected
python
def _formDirected(g, match):
    import networkx as nx

    d = nx.DiGraph()
    for ee in g.edges():
        if ee in match or (ee[1], ee[0]) in match:
            if g.nodes[ee[0]]["bipartite"] == 0:
                d.add_edge(ee[0], ee[1])
            else:
                d.add_edge(ee[1], ee[0])
        else:
            if g.nodes[ee[0]]["bipartite"] == 0:
                d.add_edge(ee[1], ee[0])
            else:
                d.add_edge(ee[0], ee[1])
    return d
r"""Form directed graph D from G and matching M. Parameters ---------- g : Undirected bipartite graph. Nodes are separated by their 'bipartite' attribute. match : List of edges forming a matching of `g`. Returns ------- networkx.DiGraph Directed graph, with edges in `match` pointing from set-0 (bipartite attribute==0) to set-1 (bipartite attrbiute==1), and the other edges in `g` but not in `match` pointing from set-1 to set-0.
https://github.com/molssi/qcelemental/blob/a5885ee4421bd1e411f08c23653ff949036e88c2/qcelemental/util/gph_uno_bipartite.py#L34-L70
import numpy as np
BSD 3-Clause New or Revised License
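A hedged sketch of how `_formDirected` behaves on a toy bipartite graph; it is an internal helper, so calling it directly is only for illustration, and the node labels here are made up.

import networkx as nx

g = nx.Graph()
g.add_nodes_from([0, 1], bipartite=0)        # set-0 nodes
g.add_nodes_from(["a", "b"], bipartite=1)    # set-1 nodes
g.add_edges_from([(0, "a"), (0, "b"), (1, "b")])

match = [(0, "a"), (1, "b")]                 # a matching of g
d = _formDirected(g, match)                  # matched edges point set-0 -> set-1
print(list(d.edges()))                       # unmatched edge (0, "b") points "b" -> 0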
hack4impact-upenn/idle-free-philly
app/main/messaging.py
set_cookie
python
def set_cookie(resp, key, val, expiration=1):
    expires = datetime.utcnow() + timedelta(hours=expiration)
    expires_str = expires.strftime('%a, %d %b %Y %H:%M:%S GMT')
    resp.set_cookie(key, value=val, expires=expires_str)
Sets an expiring cookie in the response.
https://github.com/hack4impact-upenn/idle-free-philly/blob/42c8362eff19f2b131c31d83db4ac0cb665f5e12/app/main/messaging.py#L389-L393
import string import itertools from flask import request, make_response, current_app from flask.ext.rq import get_queue from . import main from .. import db from ..utils import ( geocode, upload_image, attach_image_to_incident_report, url_for_external ) from ..models import Agency, IncidentReport, Location, User from ..reports.forms import IncidentReportForm from datetime import datetime, timedelta import twilio.twiml from twilio.rest import TwilioRestClient STEP_INIT = 0 STEP_LOCATION = 1 STEP_AGENCY = 2 STEP_OTHER_AGENCY = 3 STEP_LICENSE_PLATE = 4 STEP_VEHICLE_ID = 5 STEP_DURATION = 6 STEP_DESCRIPTION = 7 STEP_PICTURE = 8 @main.route('/report_incident', methods=['GET']) def handle_message(): body = str(request.values.get('Body')).lower().strip() num_media = int(request.values.get('NumMedia')) twilio_hosted_media_url = str(request.values.get('MediaUrl0')) if num_media > 0 else None message_sid = str(request.values.get('MessageSid')) phone_number = str(request.values.get('From')) twiml = twilio.twiml.Response() step = int(request.cookies.get('messagecount', 0)) vehicle_id = str(request.cookies.get('vehicle_id', '')) agency_name = str(request.cookies.get('agency_name', '')) license_plate = str(request.cookies.get('license_plate', '')) duration = int(request.cookies.get('duration', 0)) description = str(request.cookies.get('description', '')) location = str(request.cookies.get('location', '')) picture_url = str(request.cookies.get('picture_url', '')) if 'report' == body.lower(): vehicle_id = '' agency_name = '' license_plate = '' duration = 0 description = '' location = '' picture_url = '' step = handle_start_report(twiml) elif step == STEP_LOCATION: location, step = handle_location_step(body, step, twiml) elif step == STEP_AGENCY: agency_name, step = handle_agency_step(body, step, twiml) elif step == STEP_OTHER_AGENCY: agency_name, step = handle_other_agency_step(body, step, twiml) elif step == STEP_LICENSE_PLATE: license_plate, step = handle_license_plate_step(body, step, twiml) elif step == STEP_VEHICLE_ID: vehicle_id, step = handle_vehicle_id_step(body, step, twiml) elif step == STEP_DURATION: duration, step = handle_duration_step(body, step, twiml) elif step == STEP_DESCRIPTION: description, step = handle_description_step(body, step, twiml) elif step == STEP_PICTURE: step, image_job_id = handle_picture_step(step, message_sid, twilio_hosted_media_url) new_incident = handle_create_report(agency_name, description, duration, license_plate, location, picture_url, vehicle_id, phone_number) twiml.message('Thanks! See your report on the map at {}' .format(url_for_external('main.index'))) if new_incident.user is None: twiml.message('Want to keep track of all your reports? Create an ' 'account at {}' .format(url_for_external('account.register'))) else: twiml.message('See all your reports at {}' .format(url_for_external('reports.view_my_reports'))) if image_job_id is not None: get_queue().enqueue( attach_image_to_incident_report, depends_on=image_job_id, incident_report=new_incident, image_job_id=image_job_id, ) step = STEP_INIT else: twiml.message('Welcome to {}! Please reply "report" to report an ' 'idling incident.' 
.format(current_app.config['APP_NAME'])) response = make_response(str(twiml)) set_cookie(response, 'messagecount', str(step)) set_cookie(response, 'agency_name', agency_name) set_cookie(response, 'vehicle_id', vehicle_id) set_cookie(response, 'license_plate', license_plate) set_cookie(response, 'duration', str(duration)) set_cookie(response, 'description', description) set_cookie(response, 'location', location) set_cookie(response, 'picture_url', picture_url) return response def handle_create_report(agency_name, description, duration, license_plate, location, picture_url, vehicle_id, phone_number): lat, lon = geocode(location) agency = Agency.get_agency_by_name(agency_name) if agency is None: agency = Agency(name=agency_name, is_official=False, is_public=True) db.session.add(agency) db.session.commit() new_incident = IncidentReport( agency=agency, vehicle_id=vehicle_id, license_plate=license_plate if license_plate else None, duration=timedelta(minutes=duration), description=description, location=Location( latitude=lat, longitude=lon, original_user_text=location ), picture_url=picture_url if picture_url else None, user=User.query.filter_by(phone_number=phone_number).first() ) db.session.add(new_incident) db.session.commit() return new_incident def handle_start_report(twiml): step = STEP_LOCATION twiml.message('What is your location? Be specific! (e.g. "34th and ' 'Spruce in Philadelphia PA")') return step def handle_location_step(body, step, twiml): validator_form = IncidentReportForm() errors = data_errors(form=validator_form, field=validator_form.location, data=body) if len(errors) == 0: location = body step = STEP_AGENCY agencies = Agency.query.filter_by(is_official=True).order_by( Agency.name).all() letters = all_strings(len(agencies) + 1) agencies_listed = get_agencies_listed(agencies, letters) twiml.message('Which agency operates the vehicle you see idling? ' 'Select from the following list or enter {} for Other.' .format(letters[-1])) twiml.message(agencies_listed) else: location = '' reply_with_errors(errors, twiml, 'location') return location, step def handle_agency_step(body_upper, step, twiml): body_upper = body_upper.upper() agencies = Agency.query.filter_by(is_official=True).order_by( Agency.name).all() letters = all_strings(len(agencies) + 1) letters_to_agency = dict(zip(letters, agencies)) if body_upper == letters[-1]: step = STEP_OTHER_AGENCY agency_name = '' twiml.message('You selected Other. What is the name of the agency ' 'which operates the vehicle?') elif body_upper in letters_to_agency.keys(): step = STEP_LICENSE_PLATE agency_name = letters_to_agency[body_upper].name twiml.message('What is the license plate number? Reply "no" to skip. ' '(e.g. MG1234E)') else: agency_name = '' twiml.message('Please enter a letter A through {}.' .format(letters[-1])) return agency_name, step def handle_other_agency_step(body, step, twiml): agency_name = body step = STEP_LICENSE_PLATE twiml.message('What is the license plate number? Reply "no" to skip. ' '(e.g. MG1234E)') return agency_name, step def handle_license_plate_step(body, step, twiml): if body.lower() == 'no': body = '' validator_form = IncidentReportForm() errors = data_errors( form=validator_form, field=validator_form.license_plate, data=body ) if len(errors) == 0: license_plate = body step = STEP_VEHICLE_ID twiml.message('What is the Vehicle ID? This is usually on the back or ' 'side of the vehicle. (e.g. 
105014)') else: license_plate = '' reply_with_errors(errors, twiml, 'license plate') return license_plate, step def handle_vehicle_id_step(body, step, twiml): validator_form = IncidentReportForm() errors = data_errors(form=validator_form, field=validator_form.vehicle_id, data=body) if len(errors) == 0: vehicle_id = body step = STEP_DURATION twiml.message('How many minutes have you observed the vehicle idling? ' '(e.g. 10)') else: vehicle_id = '' reply_with_errors(errors, twiml, 'vehicle ID') return vehicle_id, step def handle_duration_step(body, step, twiml): errors = [] try: body = int(body) except ValueError: errors.append('Please enter a valid integer.') if len(errors) == 0: duration = body step = STEP_DESCRIPTION twiml.message('Please describe the situation (e.g. The driver is ' 'sleeping)') else: duration = 0 reply_with_errors(errors, twiml, 'duration') return duration, step def handle_description_step(body, step, twiml): validator_form = IncidentReportForm() errors = data_errors(form=validator_form, field=validator_form.description, data=body) if len(errors) == 0: description = body step = STEP_PICTURE twiml.message('Last, can you take a photo of the vehicle and text ' 'it back? Reply "no" to skip.') else: description = '' reply_with_errors(errors, twiml, 'description') return description, step def handle_picture_step(step, message_sid, twilio_hosted_media_url): image_job_id = None if twilio_hosted_media_url is not None: account_sid = current_app.config['TWILIO_ACCOUNT_SID'] auth_token = current_app.config['TWILIO_AUTH_TOKEN'] image_job_id = get_queue().enqueue( upload_image, imgur_client_id=current_app.config['IMGUR_CLIENT_ID'], imgur_client_secret=current_app.config['IMGUR_CLIENT_SECRET'], app_name=current_app.config['APP_NAME'], image_url=twilio_hosted_media_url ).id get_queue().enqueue( delete_mms, depends_on=image_job_id, account_sid=account_sid, auth_token=auth_token, message_sid=message_sid ) return step, image_job_id def reply_with_errors(errors, twiml, field_name): twiml.message('Sorry, there were some errors with your response. ' 'Please enter the {} again.'.format(field_name)) twiml.message('Errors:\n{}'.format('\n'.join(errors))) def delete_mms(account_sid, auth_token, message_sid): client = TwilioRestClient(account_sid, auth_token) for media in client.messages.get(message_sid).media_list.list(): media.delete() def get_agencies_listed(agencies, letters): agencies_listed = '\n'.join( '{}: {}'.format(l, ag.name) for l, ag in zip(letters, agencies) ) agencies_listed += '\n{}: Other'.format(letters[-1]) return agencies_listed def all_strings(max_count): repeat_size = 1 count = 0 ret = [] while True: for s in itertools.product(string.ascii_uppercase, repeat=repeat_size): count += 1 if count > max_count: return ret ret.append(''.join(s)) repeat_size += 1
MIT License
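A hedged sketch of how `set_cookie` from the record above might be used inside one of the app's Flask views; the route and cookie values are illustrative.

from flask import make_response

@main.route('/cookie_example')
def cookie_example():
    resp = make_response("ok")
    set_cookie(resp, 'messagecount', '1')                          # expires in 1 hour (default)
    set_cookie(resp, 'location', '34th and Spruce', expiration=2)  # 2-hour expiry
    return resp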
influxdata/influxdb-python
influxdb/influxdb08/client.py
InfluxDBClient.create_scheduled_delete
python
def create_scheduled_delete(self, json_body):
    raise NotImplementedError()
Create a scheduled delete from the database.

2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
but it is documented in http://influxdb.org/docs/api/http.html.

See also: src/api/http/api.go:l57
https://github.com/influxdata/influxdb-python/blob/7cb565698c88bfbf9f4804650231bd28d09e2e6d/influxdb/influxdb08/client.py#L378-L386
import warnings import json import socket import requests import requests.exceptions from six.moves import xrange from six.moves.urllib.parse import urlparse from influxdb import chunked_json session = requests.Session() class InfluxDBClientError(Exception): def __init__(self, content, code=-1): super(InfluxDBClientError, self).__init__( "{0}: {1}".format(code, content)) self.content = content self.code = code class InfluxDBClient(object): def __init__(self, host='localhost', port=8086, username='root', password='root', database=None, ssl=False, verify_ssl=False, timeout=None, retries=3, use_udp=False, udp_port=4444): self._host = host self._port = port self._username = username self._password = password self._database = database self._timeout = timeout self._retries = retries self._verify_ssl = verify_ssl self._use_udp = use_udp self._udp_port = udp_port if use_udp: self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self._scheme = "http" if ssl is True: self._scheme = "https" self._baseurl = "{0}://{1}:{2}".format( self._scheme, self._host, self._port) self._headers = { 'Content-type': 'application/json', 'Accept': 'text/plain'} @staticmethod def from_dsn(dsn, **kwargs): init_args = {} conn_params = urlparse(dsn) scheme_info = conn_params.scheme.split('+') if len(scheme_info) == 1: scheme = scheme_info[0] modifier = None else: modifier, scheme = scheme_info if scheme != 'influxdb': raise ValueError('Unknown scheme "{0}".'.format(scheme)) if modifier: if modifier == 'udp': init_args['use_udp'] = True elif modifier == 'https': init_args['ssl'] = True else: raise ValueError('Unknown modifier "{0}".'.format(modifier)) if conn_params.hostname: init_args['host'] = conn_params.hostname if conn_params.port: init_args['port'] = conn_params.port if conn_params.username: init_args['username'] = conn_params.username if conn_params.password: init_args['password'] = conn_params.password if conn_params.path and len(conn_params.path) > 1: init_args['database'] = conn_params.path[1:] init_args.update(kwargs) return InfluxDBClient(**init_args) def switch_database(self, database): self._database = database def switch_db(self, database): warnings.warn( "switch_db is deprecated, and will be removed " "in future versions. 
Please use " "``InfluxDBClient.switch_database(database)`` instead.", FutureWarning) return self.switch_database(database) def switch_user(self, username, password): self._username = username self._password = password def request(self, url, method='GET', params=None, data=None, expected_response_code=200): url = "{0}/{1}".format(self._baseurl, url) if params is None: params = {} auth = { 'u': self._username, 'p': self._password } params.update(auth) if data is not None and not isinstance(data, str): data = json.dumps(data) retry = True _try = 0 while retry: try: response = session.request( method=method, url=url, params=params, data=data, headers=self._headers, verify=self._verify_ssl, timeout=self._timeout ) break except (requests.exceptions.ConnectionError, requests.exceptions.Timeout): _try += 1 if self._retries != 0: retry = _try < self._retries else: raise requests.exceptions.ConnectionError if response.status_code == expected_response_code: return response else: raise InfluxDBClientError(response.content, response.status_code) def write(self, data): self.request( url="write", method='POST', params=None, data=data, expected_response_code=200 ) return True def write_points(self, data, time_precision='s', *args, **kwargs): def list_chunks(data_list, n): for i in xrange(0, len(data_list), n): yield data_list[i:i + n] batch_size = kwargs.get('batch_size') if batch_size and batch_size > 0: for item in data: name = item.get('name') columns = item.get('columns') point_list = item.get('points', []) for batch in list_chunks(point_list, batch_size): item = [{ "points": batch, "name": name, "columns": columns }] self._write_points( data=item, time_precision=time_precision) return True return self._write_points(data=data, time_precision=time_precision) def write_points_with_precision(self, data, time_precision='s'): warnings.warn( "write_points_with_precision is deprecated, and will be removed " "in future versions. Please use " "``InfluxDBClient.write_points(time_precision='..')`` instead.", FutureWarning) return self._write_points(data=data, time_precision=time_precision) def _write_points(self, data, time_precision): if time_precision not in ['s', 'm', 'ms', 'u']: raise Exception( "Invalid time precision is given. (use 's', 'm', 'ms' or 'u')") if self._use_udp and time_precision != 's': raise Exception( "InfluxDB only supports seconds precision for udp writes" ) url = "db/{0}/series".format(self._database) params = { 'time_precision': time_precision } if self._use_udp: self.send_packet(data) else: self.request( url=url, method='POST', params=params, data=data, expected_response_code=200 ) return True def delete_points(self, name): url = "db/{0}/series/{1}".format(self._database, name) self.request( url=url, method='DELETE', expected_response_code=204 ) return True
MIT License
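Since `create_scheduled_delete` in the record above is a documented-but-unimplemented endpoint, any call simply raises. A small sketch; the request body shown is a made-up placeholder.

from influxdb.influxdb08 import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086, database='example')
try:
    client.create_scheduled_delete({"older_than": "14d"})   # placeholder body
except NotImplementedError:
    print("scheduled deletes are not implemented in this client")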
avigad/boole
boole/core/info.py
default_str
python
def default_str(expr):
    return expr.to_string()
The default printer for expressions. Simply calls the to_string method.

Arguments:
- `expr`: an expression
https://github.com/avigad/boole/blob/2a436c2967dbc968f6a5877c220b9757c3bc17c3/boole/core/info.py#L95-L102
class ExprInfo(object):

    def __init__(self, name, info):
        self.name = name
        self.info = info

    def __getitem__(self, key):
        return self.info.__getitem__(key)

    def __getattr__(self, name):
        try:
            return self.info[name]
        except KeyError:
            return None

    def __setitem__(self, key, elt):
        self.info.__setitem__(key, elt)

    def __delitem__(self, key):
        return self.info.__delitem__(key)

    def __str__(self):
        return self.name

    def update(self, info):
        self.name = info.name
        for k in info.info:
            self.info[k] = info.info[k]
Apache License 2.0
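`default_str` in the record above only requires that its argument expose a `to_string()` method, so a minimal sketch with a stand-in expression class is enough to illustrate it.

class FakeExpr:
    def to_string(self):
        return "x + y"

print(default_str(FakeExpr()))   # prints: x + y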
meanpug/django-gsheets
gsheets/decorators.py
full_jitter
python
def full_jitter(value):
    return random.uniform(0, value)
Jitter the value across the full range (0 to value).

This corresponds to the "Full Jitter" algorithm specified in the
AWS blog's post on the performance of various jitter algorithms.
(http://www.awsarchitectureblog.com/2015/03/backoff.html)

Args:
    value: The unadulterated backoff value.
https://github.com/meanpug/django-gsheets/blob/9349995144b9e23fb7179a960f51e3350fc6aac8/gsheets/decorators.py#L90-L100
from __future__ import unicode_literals
import functools
import operator
import logging
import random
import time
import traceback
import sys

logger = logging.getLogger(__name__)

def expo(base=2, factor=1, max_value=None):
    n = 0
    while True:
        a = factor * base ** n
        if max_value is None or a < max_value:
            yield a
            n += 1
        else:
            yield max_value

def fibo(max_value=None):
    a = 1
    b = 1
    while True:
        if max_value is None or a < max_value:
            yield a
            a, b = b, a + b
        else:
            yield max_value

def constant(interval=1):
    while True:
        yield interval

def random_jitter(value):
    return value + random.random()
MIT License
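`full_jitter` in the record above pairs naturally with the backoff generators defined in the same module (see `expo` in the context). A small sketch; the cap of 60 seconds is an arbitrary choice.

gen = expo(base=2, factor=1, max_value=60)
delays = [full_jitter(next(gen)) for _ in range(5)]
# each delay is drawn uniformly from [0, factor * base**n), capped at 60
print(delays)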
prajdabre/yanmtt
transformers/src/transformers/testing_utils.py
get_gpu_count
python
def get_gpu_count():
    if is_torch_available():
        import torch

        return torch.cuda.device_count()
    elif is_tf_available():
        import tensorflow as tf

        return len(tf.config.list_physical_devices("GPU"))
    else:
        return 0
Return the number of available gpus (regardless of whether torch or tf is used)
https://github.com/prajdabre/yanmtt/blob/4d329c3bcb81ca432d5947bb4673897086ee7f32/transformers/src/transformers/testing_utils.py#L384-L397
import inspect import logging import os import re import shutil import sys import tempfile import unittest from distutils.util import strtobool from io import StringIO from pathlib import Path from .file_utils import ( is_datasets_available, is_faiss_available, is_flax_available, is_pandas_available, is_scatter_available, is_sentencepiece_available, is_soundfile_availble, is_tf_available, is_tokenizers_available, is_torch_available, is_torch_tpu_available, ) from .integrations import is_optuna_available, is_ray_available SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy" DUMMY_UNKWOWN_IDENTIFIER = "julien-c/dummy-unknown" DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer" def parse_flag_from_env(key, default=False): try: value = os.environ[key] except KeyError: _value = default else: try: _value = strtobool(value) except ValueError: raise ValueError("If set, {} must be yes or no.".format(key)) return _value def parse_int_from_env(key, default=None): try: value = os.environ[key] except KeyError: _value = default else: try: _value = int(value) except ValueError: raise ValueError("If set, {} must be a int.".format(key)) return _value _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) _run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=False) _run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False) _run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False) _run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False) _tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None) def is_pt_tf_cross_test(test_case): if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available(): return unittest.skip("test is PT+TF test")(test_case) else: try: import pytest except ImportError: return test_case else: return pytest.mark.is_pt_tf_cross_test()(test_case) def is_pipeline_test(test_case): if not _run_pipeline_tests: return unittest.skip("test is pipeline test")(test_case) else: try: import pytest except ImportError: return test_case else: return pytest.mark.is_pipeline_test()(test_case) def slow(test_case): if not _run_slow_tests: return unittest.skip("test is slow")(test_case) else: return test_case def custom_tokenizers(test_case): if not _run_custom_tokenizers: return unittest.skip("test of custom tokenizers")(test_case) else: return test_case def require_git_lfs(test_case): if not _run_git_lfs_tests: return unittest.skip("test of git lfs workflow")(test_case) else: return test_case def require_torch(test_case): if not is_torch_available(): return unittest.skip("test requires PyTorch")(test_case) else: return test_case def require_torch_scatter(test_case): if not is_scatter_available(): return unittest.skip("test requires PyTorch scatter")(test_case) else: return test_case def require_tf(test_case): if not is_tf_available(): return unittest.skip("test requires TensorFlow")(test_case) else: return test_case def require_flax(test_case): if not is_flax_available(): test_case = unittest.skip("test requires JAX & Flax")(test_case) return test_case def require_sentencepiece(test_case): if not is_sentencepiece_available(): return unittest.skip("test requires SentencePiece")(test_case) else: return test_case def require_tokenizers(test_case): if not is_tokenizers_available(): return unittest.skip("test requires tokenizers")(test_case) else: return test_case def require_pandas(test_case): if not is_pandas_available(): return unittest.skip("test requires 
pandas")(test_case) else: return test_case def require_scatter(test_case): if not is_scatter_available(): return unittest.skip("test requires PyTorch Scatter")(test_case) else: return test_case def require_torch_multi_gpu(test_case): if not is_torch_available(): return unittest.skip("test requires PyTorch")(test_case) import torch if torch.cuda.device_count() < 2: return unittest.skip("test requires multiple GPUs")(test_case) else: return test_case def require_torch_non_multi_gpu(test_case): if not is_torch_available(): return unittest.skip("test requires PyTorch")(test_case) import torch if torch.cuda.device_count() > 1: return unittest.skip("test requires 0 or 1 GPU")(test_case) else: return test_case require_torch_non_multi_gpu_but_fix_me = require_torch_non_multi_gpu def require_torch_tpu(test_case): if not is_torch_tpu_available(): return unittest.skip("test requires PyTorch TPU") else: return test_case if is_torch_available(): import torch torch_device = "cuda" if torch.cuda.is_available() else "cpu" else: torch_device = None def require_torch_gpu(test_case): if torch_device != "cuda": return unittest.skip("test requires CUDA")(test_case) else: return test_case def require_datasets(test_case): if not is_datasets_available(): return unittest.skip("test requires `datasets`")(test_case) else: return test_case def require_faiss(test_case): if not is_faiss_available(): return unittest.skip("test requires `faiss`")(test_case) else: return test_case def require_optuna(test_case): if not is_optuna_available(): return unittest.skip("test requires optuna")(test_case) else: return test_case def require_ray(test_case): if not is_ray_available(): return unittest.skip("test requires Ray/tune")(test_case) else: return test_case def require_soundfile(test_case): if not is_soundfile_availble(): return unittest.skip("test requires soundfile")(test_case) else: return test_case
MIT License
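`get_gpu_count` in the record above is framework-agnostic, so it can gate tests much like the module's other helpers. A hedged sketch using plain `unittest`; the test class is illustrative.

import unittest

n_gpu = get_gpu_count()

@unittest.skipUnless(n_gpu >= 2, "test requires at least 2 GPUs")
class MultiGpuSmokeTest(unittest.TestCase):
    def test_gpu_count(self):
        self.assertGreaterEqual(n_gpu, 2)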
trusted-ai/adversarial-robustness-toolbox
art/utils.py
load_cifar10
python
def load_cifar10(
    raw: bool = False,
) -> DATASET_TYPE:
    def load_batch(fpath: str) -> Tuple[np.ndarray, np.ndarray]:
        with open(fpath, "rb") as file_:
            if sys.version_info < (3,):
                content = six.moves.cPickle.load(file_)
            else:
                content = six.moves.cPickle.load(file_, encoding="bytes")
                content_decoded = {}
                for key, value in content.items():
                    content_decoded[key.decode("utf8")] = value
                content = content_decoded
        data = content["data"]
        labels = content["labels"]
        data = data.reshape(data.shape[0], 3, 32, 32)
        return data, labels

    path = get_file(
        "cifar-10-batches-py",
        extract=True,
        path=config.ART_DATA_PATH,
        url="https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz",
    )

    num_train_samples = 50000

    x_train = np.zeros((num_train_samples, 3, 32, 32), dtype=np.uint8)
    y_train = np.zeros((num_train_samples,), dtype=np.uint8)

    for i in range(1, 6):
        fpath = os.path.join(path, "data_batch_" + str(i))
        data, labels = load_batch(fpath)
        x_train[(i - 1) * 10000 : i * 10000, :, :, :] = data
        y_train[(i - 1) * 10000 : i * 10000] = labels

    fpath = os.path.join(path, "test_batch")
    x_test, y_test = load_batch(fpath)
    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))

    x_train = x_train.transpose((0, 2, 3, 1))
    x_test = x_test.transpose((0, 2, 3, 1))

    min_, max_ = 0.0, 255.0
    if not raw:
        min_, max_ = 0.0, 1.0
        x_train, y_train = preprocess(x_train, y_train, clip_values=(0, 255))
        x_test, y_test = preprocess(x_test, y_test, clip_values=(0, 255))

    return (x_train, y_train), (x_test, y_test), min_, max_
Loads CIFAR10 dataset from config.CIFAR10_PATH or downloads it if necessary.

:param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to 1.
:return: `(x_train, y_train), (x_test, y_test), min, max`
https://github.com/trusted-ai/adversarial-robustness-toolbox/blob/564f46f99b3cb0406fe3570919b8e71a4c5bba9d/art/utils.py#L728-L793
from __future__ import absolute_import, division, print_function, unicode_literals import logging import math import os import shutil import sys import tarfile import warnings import zipfile from functools import wraps from inspect import signature from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union import numpy as np import six from scipy.special import gammainc from tqdm.auto import tqdm from art import config if TYPE_CHECKING: import torch logger = logging.getLogger(__name__) DATASET_TYPE = Tuple[ Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray], float, float ] CLIP_VALUES_TYPE = Tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]] if TYPE_CHECKING: from art.defences.preprocessor.preprocessor import Preprocessor PREPROCESSING_TYPE = Optional[ Union[ Tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]], Preprocessor, Tuple[Preprocessor, ...] ] ] from art.estimators.classification.blackbox import BlackBoxClassifier from art.estimators.classification.catboost import CatBoostARTClassifier from art.estimators.classification.classifier import ( Classifier, ClassifierClassLossGradients, ClassifierDecisionTree, ClassifierLossGradients, ClassifierNeuralNetwork, ) from art.estimators.classification.detector_classifier import DetectorClassifier from art.estimators.classification.ensemble import EnsembleClassifier from art.estimators.classification.GPy import GPyGaussianProcessClassifier from art.estimators.classification.keras import KerasClassifier from art.estimators.classification.lightgbm import LightGBMClassifier from art.estimators.classification.mxnet import MXClassifier from art.estimators.classification.pytorch import PyTorchClassifier from art.estimators.classification.query_efficient_bb import QueryEfficientGradientEstimationClassifier from art.estimators.classification.scikitlearn import ( ScikitlearnAdaBoostClassifier, ScikitlearnBaggingClassifier, ScikitlearnClassifier, ScikitlearnDecisionTreeClassifier, ScikitlearnDecisionTreeRegressor, ScikitlearnExtraTreeClassifier, ScikitlearnExtraTreesClassifier, ScikitlearnGradientBoostingClassifier, ScikitlearnLogisticRegression, ScikitlearnRandomForestClassifier, ScikitlearnSVC, ) from art.estimators.classification.tensorflow import TensorFlowClassifier, TensorFlowV2Classifier from art.estimators.classification.xgboost import XGBoostClassifier from art.estimators.object_detection.object_detector import ObjectDetector from art.estimators.object_detection.python_object_detector import PyTorchObjectDetector from art.estimators.object_detection.pytorch_faster_rcnn import PyTorchFasterRCNN from art.estimators.object_detection.tensorflow_faster_rcnn import TensorFlowFasterRCNN from art.estimators.pytorch import PyTorchEstimator from art.estimators.regression.scikitlearn import ScikitlearnRegressor from art.estimators.speech_recognition.pytorch_deep_speech import PyTorchDeepSpeech from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR from art.estimators.tensorflow import TensorFlowV2Estimator CLASSIFIER_LOSS_GRADIENTS_TYPE = Union[ ClassifierLossGradients, EnsembleClassifier, GPyGaussianProcessClassifier, KerasClassifier, MXClassifier, PyTorchClassifier, ScikitlearnLogisticRegression, ScikitlearnSVC, TensorFlowClassifier, TensorFlowV2Classifier, QueryEfficientGradientEstimationClassifier, ] CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE = Union[ ClassifierClassLossGradients, EnsembleClassifier, GPyGaussianProcessClassifier, KerasClassifier, MXClassifier, 
PyTorchClassifier, ScikitlearnLogisticRegression, ScikitlearnSVC, TensorFlowClassifier, TensorFlowV2Classifier, ] CLASSIFIER_NEURALNETWORK_TYPE = Union[ ClassifierNeuralNetwork, DetectorClassifier, EnsembleClassifier, KerasClassifier, MXClassifier, PyTorchClassifier, TensorFlowClassifier, TensorFlowV2Classifier, ] CLASSIFIER_DECISION_TREE_TYPE = Union[ ClassifierDecisionTree, LightGBMClassifier, ScikitlearnDecisionTreeClassifier, ScikitlearnExtraTreesClassifier, ScikitlearnGradientBoostingClassifier, ScikitlearnRandomForestClassifier, XGBoostClassifier, ] CLASSIFIER_TYPE = Union[ Classifier, BlackBoxClassifier, CatBoostARTClassifier, DetectorClassifier, EnsembleClassifier, GPyGaussianProcessClassifier, KerasClassifier, LightGBMClassifier, MXClassifier, PyTorchClassifier, ScikitlearnClassifier, ScikitlearnDecisionTreeClassifier, ScikitlearnExtraTreeClassifier, ScikitlearnAdaBoostClassifier, ScikitlearnBaggingClassifier, ScikitlearnExtraTreesClassifier, ScikitlearnGradientBoostingClassifier, ScikitlearnRandomForestClassifier, ScikitlearnLogisticRegression, ScikitlearnSVC, TensorFlowClassifier, TensorFlowV2Classifier, XGBoostClassifier, CLASSIFIER_NEURALNETWORK_TYPE, ] REGRESSOR_TYPE = Union[ScikitlearnRegressor, ScikitlearnDecisionTreeRegressor] PYTORCH_ESTIMATOR_TYPE = Union[ PyTorchClassifier, PyTorchDeepSpeech, PyTorchEstimator, PyTorchObjectDetector, PyTorchFasterRCNN, ] OBJECT_DETECTOR_TYPE = Union[ ObjectDetector, PyTorchObjectDetector, PyTorchFasterRCNN, TensorFlowFasterRCNN, ] SPEECH_RECOGNIZER_TYPE = Union[ PyTorchDeepSpeech, TensorFlowLingvoASR, ] TENSORFLOWV2_ESTIMATOR_TYPE = Union[ TensorFlowV2Classifier, TensorFlowV2Estimator, ] class _Deprecated: _instance = None def __new__(cls): if _Deprecated._instance is None: _Deprecated._instance = object.__new__(cls) return _Deprecated._instance Deprecated = _Deprecated() def deprecated(end_version: str, *, reason: str = "", replaced_by: str = "") -> Callable: def decorator(function): reason_msg = "\n" + reason if reason else reason replaced_msg = f" It will be replaced by '{replaced_by}'." if replaced_by else replaced_by deprecated_msg = ( f"Function '{function.__name__}' is deprecated and will be removed in future release {end_version}." ) @wraps(function) def wrapper(*args, **kwargs): warnings.simplefilter("always", category=DeprecationWarning) warnings.warn( deprecated_msg + replaced_msg + reason_msg, category=DeprecationWarning, stacklevel=2, ) warnings.simplefilter("default", category=DeprecationWarning) return function(*args, **kwargs) return wrapper return decorator def deprecated_keyword_arg(identifier: str, end_version: str, *, reason: str = "", replaced_by: str = "") -> Callable: def decorator(function): reason_msg = "\n" + reason if reason else reason replaced_msg = f" It will be replaced by '{replaced_by}'." if replaced_by else replaced_by deprecated_msg = ( f"Keyword argument '{identifier}' in '{function.__name__}' is deprecated and will be removed in" f" future release {end_version}." 
) @wraps(function) def wrapper(*args, **kwargs): params = signature(function).bind(*args, **kwargs) params.apply_defaults() if params.signature.parameters[identifier].default is not Deprecated: raise ValueError("Deprecated keyword argument must default to the Decorator singleton.") if replaced_by != "" and replaced_by not in params.arguments: raise ValueError("Deprecated keyword replacement not found in function signature.") if params.arguments[identifier] is not Deprecated: warnings.simplefilter("always", category=DeprecationWarning) warnings.warn(deprecated_msg + replaced_msg + reason_msg, category=DeprecationWarning, stacklevel=2) warnings.simplefilter("default", category=DeprecationWarning) return function(*args, **kwargs) return wrapper return decorator def projection(values: np.ndarray, eps: Union[int, float, np.ndarray], norm_p: Union[int, float, str]) -> np.ndarray: tol = 10e-8 values_tmp = values.reshape((values.shape[0], -1)) if norm_p == 2: if isinstance(eps, np.ndarray): raise NotImplementedError("The parameter `eps` of type `np.ndarray` is not supported to use with norm 2.") values_tmp = values_tmp * np.expand_dims( np.minimum(1.0, eps / (np.linalg.norm(values_tmp, axis=1) + tol)), axis=1 ) elif norm_p == 1: if isinstance(eps, np.ndarray): raise NotImplementedError("The parameter `eps` of type `np.ndarray` is not supported to use with norm 1.") values_tmp = values_tmp * np.expand_dims( np.minimum(1.0, eps / (np.linalg.norm(values_tmp, axis=1, ord=1) + tol)), axis=1, ) elif norm_p in [np.inf, "inf"]: if isinstance(eps, np.ndarray): eps = eps * np.ones_like(values) eps = eps.reshape([eps.shape[0], -1]) values_tmp = np.sign(values_tmp) * np.minimum(abs(values_tmp), eps) else: raise NotImplementedError( 'Values of `norm_p` different from 1, 2, `np.inf` and "inf" are currently not ' "supported." ) values = values_tmp.reshape(values.shape) return values def random_sphere( nb_points: int, nb_dims: int, radius: Union[int, float, np.ndarray], norm: Union[int, float, str], ) -> np.ndarray: if norm == 1: if isinstance(radius, np.ndarray): raise NotImplementedError( "The parameter `radius` of type `np.ndarray` is not supported to use with norm 1." ) a_tmp = np.zeros(shape=(nb_points, nb_dims + 1)) a_tmp[:, -1] = np.sqrt(np.random.uniform(0, radius ** 2, nb_points)) for i in range(nb_points): a_tmp[i, 1:-1] = np.sort(np.random.uniform(0, a_tmp[i, -1], nb_dims - 1)) res = (a_tmp[:, 1:] - a_tmp[:, :-1]) * np.random.choice([-1, 1], (nb_points, nb_dims)) elif norm == 2: if isinstance(radius, np.ndarray): raise NotImplementedError( "The parameter `radius` of type `np.ndarray` is not supported to use with norm 2." 
) a_tmp = np.random.randn(nb_points, nb_dims) s_2 = np.sum(a_tmp ** 2, axis=1) base = gammainc(nb_dims / 2.0, s_2 / 2.0) ** (1 / nb_dims) * radius / np.sqrt(s_2) res = a_tmp * (np.tile(base, (nb_dims, 1))).T elif norm in [np.inf, "inf"]: if isinstance(radius, np.ndarray): radius = radius * np.ones(shape=(nb_points, nb_dims)) res = np.random.uniform(-radius, radius, (nb_points, nb_dims)) else: raise NotImplementedError("Norm {} not supported".format(norm)) return res def original_to_tanh( x_original: np.ndarray, clip_min: Union[float, np.ndarray], clip_max: Union[float, np.ndarray], tanh_smoother: float = 0.999999, ) -> np.ndarray: x_tanh = np.clip(x_original, clip_min, clip_max) x_tanh = (x_tanh - clip_min) / (clip_max - clip_min) x_tanh = np.arctanh(((x_tanh * 2) - 1) * tanh_smoother) return x_tanh def tanh_to_original( x_tanh: np.ndarray, clip_min: Union[float, np.ndarray], clip_max: Union[float, np.ndarray], ) -> np.ndarray: return (np.tanh(x_tanh) + 1.0) / 2.0 * (clip_max - clip_min) + clip_min def to_categorical(labels: Union[np.ndarray, List[float]], nb_classes: Optional[int] = None) -> np.ndarray: labels = np.array(labels, dtype=np.int32) if nb_classes is None: nb_classes = np.max(labels) + 1 categorical = np.zeros((labels.shape[0], nb_classes), dtype=np.float32) categorical[np.arange(labels.shape[0]), np.squeeze(labels)] = 1 return categorical def float_to_categorical(labels: np.ndarray, nb_classes: Optional[int] = None): labels = np.array(labels) unique = np.unique(labels) unique.sort() indexes = [np.where(unique == value)[0] for value in labels] if nb_classes is None: nb_classes = len(unique) categorical = np.zeros((labels.shape[0], nb_classes), dtype=np.float32) categorical[np.arange(labels.shape[0]), np.squeeze(indexes)] = 1 return categorical def floats_to_one_hot(labels: np.ndarray): labels = np.array(labels) for feature in labels.T: unique = np.unique(feature) unique.sort() for index, value in enumerate(unique): feature[feature == value] = index return labels.astype(np.float32) def check_and_transform_label_format( labels: np.ndarray, nb_classes: Optional[int] = None, return_one_hot: bool = True ) -> np.ndarray: if labels is not None: if len(labels.shape) == 2 and labels.shape[1] > 1: if not return_one_hot: labels = np.argmax(labels, axis=1) elif len(labels.shape) == 2 and labels.shape[1] == 1 and nb_classes is not None and nb_classes > 2: labels = np.squeeze(labels) if return_one_hot: labels = to_categorical(labels, nb_classes) elif len(labels.shape) == 2 and labels.shape[1] == 1 and nb_classes is not None and nb_classes == 2: pass elif len(labels.shape) == 1: if return_one_hot: if nb_classes == 2: labels = np.expand_dims(labels, axis=1) else: labels = to_categorical(labels, nb_classes) else: raise ValueError( "Shape of labels not recognised." 
"Please provide labels in shape (nb_samples,) or (nb_samples, nb_classes)" ) return labels def random_targets(labels: np.ndarray, nb_classes: int) -> np.ndarray: if len(labels.shape) > 1: labels = np.argmax(labels, axis=1) result = np.zeros(labels.shape) for class_ind in range(nb_classes): other_classes = list(range(nb_classes)) other_classes.remove(class_ind) in_cl = labels == class_ind result[in_cl] = np.random.choice(other_classes) return to_categorical(result, nb_classes) def least_likely_class(x: np.ndarray, classifier: "CLASSIFIER_TYPE") -> np.ndarray: return to_categorical(np.argmin(classifier.predict(x), axis=1), nb_classes=classifier.nb_classes) def second_most_likely_class(x: np.ndarray, classifier: "CLASSIFIER_TYPE") -> np.ndarray: return to_categorical( np.argpartition(classifier.predict(x), -2, axis=1)[:, -2], nb_classes=classifier.nb_classes, ) def get_label_conf(y_vec: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: assert len(y_vec.shape) == 2 confs, labels = np.amax(y_vec, axis=1), np.argmax(y_vec, axis=1) return confs, labels def get_labels_np_array(preds: np.ndarray) -> np.ndarray: if len(preds.shape) >= 2: preds_max = np.amax(preds, axis=1, keepdims=True) else: preds_max = np.round(preds) y = preds == preds_max y = y.astype(np.uint8) return y def compute_success_array( classifier: "CLASSIFIER_TYPE", x_clean: np.ndarray, labels: np.ndarray, x_adv: np.ndarray, targeted: bool = False, batch_size: int = 1, ) -> float: adv_preds = classifier.predict(x_adv, batch_size=batch_size) if len(adv_preds.shape) >= 2: adv_preds = np.argmax(adv_preds, axis=1) else: adv_preds = np.round(adv_preds) if targeted: attack_success = adv_preds == np.argmax(labels, axis=1) else: preds = classifier.predict(x_clean, batch_size=batch_size) if len(preds.shape) >= 2: preds = np.argmax(preds, axis=1) else: preds = np.round(preds) attack_success = adv_preds != preds return attack_success def compute_success( classifier: "CLASSIFIER_TYPE", x_clean: np.ndarray, labels: np.ndarray, x_adv: np.ndarray, targeted: bool = False, batch_size: int = 1, ) -> float: attack_success = compute_success_array(classifier, x_clean, labels, x_adv, targeted, batch_size) return np.sum(attack_success) / x_adv.shape[0] def compute_accuracy(preds: np.ndarray, labels: np.ndarray, abstain: bool = True) -> Tuple[np.ndarray, int]: has_pred = np.sum(preds, axis=1) idx_pred = np.where(has_pred)[0] labels = np.argmax(labels[idx_pred], axis=1) num_correct = np.sum(np.argmax(preds[idx_pred], axis=1) == labels) coverage_rate = len(idx_pred) / preds.shape[0] if abstain: acc_rate = num_correct / preds[idx_pred].shape[0] else: acc_rate = num_correct / preds.shape[0] return acc_rate, coverage_rate
MIT License
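As a hedged illustration of two of the utilities above, the sketch below one-hot encodes integer labels with to_categorical and clips a batch of perturbations onto an L-infinity ball with projection. It assumes the functions defined in the context above are in scope; the array shapes and the eps value are made up for the example.

import numpy as np

# Assumes to_categorical() and projection() from the context above are in scope.
labels = np.array([0, 2, 1])
one_hot = to_categorical(labels, nb_classes=3)
# -> [[1. 0. 0.]
#     [0. 0. 1.]
#     [0. 1. 0.]]

# Clip a batch of perturbations to an L-infinity ball of radius 0.1.
perturbations = np.random.randn(4, 3, 32, 32)
clipped = projection(perturbations, eps=0.1, norm_p=np.inf)
assert np.abs(clipped).max() <= 0.1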
cober2019/network-automation
NETCONF-YANG/InterfaceQoS/Interfaces QoS.py
parse_stats
python
def parse_stats(config) -> None: key = is_key(config) policies = collections.defaultdict(list) if config.get("diffserv-target-entry", {}).get("direction", {}): print( f"\n-------------------------\n{key}\nPolicy Direction: {config.get('diffserv-target-entry', {}).get('direction', {})}") print( f"Policy Name: {config.get('diffserv-target-entry', {}).get('policy-name', {})}\n-------------------------\n") for stat in config.get("diffserv-target-entry", {}).get("diffserv-target-classifier-statistics", {}): queue = [] queue.append(f"{stat.get('classifier-entry-name', {}):<20}") queue.append(f'{stat.get("classifier-entry-statistics", {}).get("classified-rate", {}):<20}') queue.append(f"{stat.get('classifier-entry-statistics', {}).get('classified-bytes', {}):20}") queue.append(f"{stat.get('classifier-entry-statistics', {}).get('classified-pkts', {}):20}") queue.append(f"{stat.get('queuing-statistics', {}).get('output-bytes', {}):20}") queue.append(f"{stat.get('queuing-statistics', {}).get('output-pkts', {}):20}") queue.append(f"{stat.get('queuing-statistics', {}).get('drop-pkts', {}):20}") queue.append(f"{stat.get('queuing-statistics', {}).get('drop-bytes', {}):20}") queue.append(f"{stat.get('queuing-statistics', {}).get('wred-stats', {}).get('early-drop-pkts', {}):20}") queue.append(f"{stat.get('queuing-statistics', {}).get('wred-stats', {}).get('early-drop-bytes', {}):20}") policies[config.get('diffserv-target-entry', {}).get('policy-name', {})].append(queue) print_queues(policies)
Search key/value pairs and print each policy, its queues, and their stats.
https://github.com/cober2019/network-automation/blob/796b7760ca1f1e496a841c613eaff05ddba71b16/NETCONF-YANG/InterfaceQoS/Interfaces QoS.py#L72-L99
from ncclient import manager import xmltodict import collections all_ints = f"""<filter> <interfaces-state xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces"> <interface> </interface> </interfaces-state> </filter>""" def is_instance(list_or_dict) -> list: if isinstance(list_or_dict, list): make_list = list_or_dict else: make_list = [list_or_dict] return make_list def create_netconf_connection(username, password, host) -> manager: try: netconf_session = manager.connect(host=host, port=830, username=username, password=password, device_params={'name': 'csr'}) except manager.operations.errors.TimeoutExpiredError: raise ConnectionError(f"Connection to {host} failed") except manager.transport.AuthenticationError: raise ConnectionError(f"Invalid Credentials") return netconf_session def print_queues(policies) -> None: cat = ("Queue ->", "Class Rate ->", "Class Pkts ->", "Class Bytes ->", "Out Bytes ->", "Out Pkts ->", "Drop Bytes ->", "Drop Pkts ->", "WRED Pkts ->", "WRED Bytes ->") for v in policies.values(): queue = (list(zip(*v))) for b, i in zip(cat, queue): print(f"{b} {' '.join(i)}") def is_key(config) -> list: try: int_type = config.get('name', {}).get('#text', {}) except AttributeError: int_type = config['name'] return int_type
MIT License
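One plausible way to wire the helpers above together is sketched below. The host and credentials are placeholders, and both the session.get(all_ints) call and the dictionary path into the parsed reply are assumptions rather than something confirmed by this file; the exact reply structure depends on the device's YANG models.

# Hedged sketch: host/credentials are placeholders; the get() usage and the
# reply path are assumptions about the device's NETCONF/YANG output.
session = create_netconf_connection("admin", "admin", "192.0.2.1")
reply = session.get(all_ints)
parsed = xmltodict.parse(reply.xml)
interfaces = is_instance(
    parsed["rpc-reply"]["data"]["interfaces-state"]["interface"])
for interface in interfaces:
    parse_stats(interface)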
wavefronthq/python-client
wavefront_api_client/models/timeseries.py
Timeseries.data
python
def data(self): return self._data
Gets the data of this Timeseries. # noqa: E501 Data returned by this time series. This is returned as a list of points, where each point is represented as a two-element list with 1st element being the timestamp in epoch SECONDS and the 2nd element being the numeric value of the series at the timestamp # noqa: E501 :return: The data of this Timeseries. # noqa: E501 :rtype: list[list[float]]
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/models/timeseries.py#L71-L79
import pprint import re import six from wavefront_api_client.configuration import Configuration class Timeseries(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'list[list[float]]', 'host': 'str', 'label': 'str', 'tags': 'dict(str, str)' } attribute_map = { 'data': 'data', 'host': 'host', 'label': 'label', 'tags': 'tags' } def __init__(self, data=None, host=None, label=None, tags=None, _configuration=None): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._data = None self._host = None self._label = None self._tags = None self.discriminator = None if data is not None: self.data = data if host is not None: self.host = host if label is not None: self.label = label if tags is not None: self.tags = tags @property
Apache License 2.0
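A minimal, made-up example of the model and its data getter, assuming the generated client package is importable as wavefront_api_client; each point pairs an epoch-seconds timestamp with a value, as the docstring describes.

from wavefront_api_client import Timeseries

# Made-up points: [epoch_seconds, value].
ts = Timeseries(
    data=[[1617184800, 42.0], [1617184860, 43.5]],
    host="app-server-1",
    label="cpu.usage.percent",
    tags={"env": "prod"},
)
print(ts.data[0])  # [1617184800, 42.0]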
jeffh/yacs
fabfile.py
deploy
python
def deploy(upgrade=1): upload_monit_conf() clean() with cd('/www/yacs/'): if not exists('virtualenv'): puts('Creating Virtual Environment...') sudo('virtualenv --distribute virtualenv', user=USER) puts('Uploading to remote...') with settings(warn_only=True): run('rm -rf tmp') run('mkdir tmp') upload_project(remote_dir='tmp') yacs_dirname = sudo('ls tmp').strip() sudo('mv -f tmp/%s /www/yacs/tmp' % yacs_dirname) sudo('chown -R %s /www/yacs/tmp' % USER) sudo('chgrp -R %s /www/yacs/tmp' % GROUP) run('rm -rf tmp') with cd('/www/yacs/'): puts('Replacing remote codebase...') sudo('rm -rf django', user=USER) sudo('mv -f tmp django', user=USER) with cd('/www/yacs/django'): puts('Removing extra files...') with settings(warn_only=True): sudo('find . -name ".*" | xargs rm -r', user=USER) sudo('rm yacs.db', user=USER) puts('Installing dependencies...') pip_prefix = '--upgrade' if not int(upgrade): pip_prefix = '' sudo(PIP + ' install %s -r requirements.txt' % pip_prefix, user=USER) envs = remote_vars('YACS_ENV', 'YACS_SECRET_KEY', 'YACS_DATABASE_URL') puts('Running migrations...') managepy('syncdb --noinput', envs) managepy('migrate --noinput', envs) puts('Gathering static files...') managepy('collectstatic --noinput', envs) puts("Clearing caches...") sudo('service memcached restart') managepy('clear_cache', envs) puts('Restarting gunicorn...') sudo('service monit restart') sudo('monit restart yacs') update_crontab() puts('Done!')
Deploys to the given system. Use salt, chef, or puppet to configure the outside packages. Things required to be set up: - python - database driver - virtualenv - coffeescript - java - pip - database (postgres; postgres user) - created database & user - webserver (nginx; www-data user) - webserver config to proxypass to gunicorn (nginx) - memcached
https://github.com/jeffh/yacs/blob/6bdb2299905f6321be5de788f16a9464a70a4206/fabfile.py#L74-L135
import time import urllib2 from fabric.api import run, local, settings, cd, sudo, task, output, puts from fabric.contrib.project import upload_project from fabric.contrib.files import upload_template APPS = 'api courses courses_viz scheduler'.split(' ') USER = 'www-data' GROUP = 'www-data' PYTHON = '/www/yacs/virtualenv/bin/python' PIP = '/www/yacs/virtualenv/bin/pip' @task def verbose(): output['everything'] = True def exists(name): with settings(warn_only=True): return not run('[ -e "%s" ]' % name).failed def remote_vars(*keys): sb = [] for key in keys: value = run('echo $' + key).strip() sb.append('='.join([key, '"%s"' % value.replace('"', '\\"')])) return ' '.join(sb) def upload_monit_conf(): if not exists('/etc/monit/conf.d/'): puts('monit missing... skipping') return puts('Uploading monit config...') context = dict( projectpath='/www/yacs/django/', user=USER, gunicorn='/www/yacs/virtualenv/bin/gunicorn', workers=4, logs='/www/yacs/logs/', wsgi='yacs.wsgi:application', pid='/tmp/yacs.pid', env=remote_vars('YACS_DATABASE_URL', 'YACS_SECRET_KEY'), ) upload_template('yacs.monit', '/etc/monit/conf.d/yacs.conf', context=context, use_sudo=True, backup=False) def update_crontab(): context = dict( projectpath='/www/yacs/django/', python='/www/yacs/virtualenv/bin/python', user=USER, logpath='/www/yacs/logs/', ) upload_template('yacs.cron', 'yacs_cron', context=context, backup=False) sudo('crontab -u {0} yacs_cron'.format(USER)) sudo('rm -f yacs_cron') def managepy(command, prefix_cmd=''): sudo('%s %s manage.py %s' % (prefix_cmd, PYTHON, command), user=USER) @task
MIT License
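Since deploy is a Fabric 1.x task, it is normally invoked through the fab command line; the snippet below shows the equivalent programmatic call via fabric.api.execute. The hostname is a placeholder and the import of deploy from the fabfile is assumed; upgrade=0 installs requirements without the pip --upgrade pass, per the task's own logic.

from fabric.api import execute
from fabfile import deploy  # assumes the fabfile is importable as a module

# Placeholder host; upgrade=0 skips the `pip install --upgrade` behaviour.
execute(deploy, upgrade=0, hosts=["yacs.example.com"])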
joke2k/faker
faker/providers/isbn/isbn.py
ISBN10._check_digit
python
def _check_digit(self) -> str: weights = range(1, 10) body = "".join([part for part in [self.group, self.registrant, self.publication] if part is not None]) remainder = sum(int(b) * w for b, w in zip(body, weights)) % 11 check_digit = "X" if remainder == 10 else str(remainder) return str(check_digit)
Calculate the check digit for ISBN-10. See https://en.wikipedia.org/wiki/International_Standard_Book_Number for calculation.
https://github.com/joke2k/faker/blob/3818045332f4cb2911e5ac18f69e385bf0c51af0/faker/providers/isbn/isbn.py#L63-L72
from typing import Any, Optional class ISBN: MAX_LENGTH = 13 def __init__( self, ean: Optional[str] = None, group: Optional[str] = None, registrant: Optional[str] = None, publication: Optional[str] = None, ) -> None: self.ean = ean self.group = group self.registrant = registrant self.publication = publication class ISBN13(ISBN): def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.check_digit = self._check_digit() def _check_digit(self) -> str: weights = (1 if x % 2 == 0 else 3 for x in range(12)) body = "".join([part for part in [self.ean, self.group, self.registrant, self.publication] if part is not None]) remainder = sum(int(b) * w for b, w in zip(body, weights)) % 10 diff = 10 - remainder check_digit = 0 if diff == 10 else diff return str(check_digit) def format(self, separator: str = "") -> str: return separator.join( [ part for part in [ self.ean, self.group, self.registrant, self.publication, self.check_digit, ] if part is not None ] ) class ISBN10(ISBN): def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.check_digit = self._check_digit()
MIT License
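To make the weighting concrete, consider the body digits 0-306-40615 (the split into group, registrant and publication below is illustrative; the class does not validate it): the weighted sum is 1*0 + 2*3 + 3*0 + 4*6 + 5*4 + 6*0 + 7*6 + 8*1 + 9*5 = 145, and 145 mod 11 = 2, so the check digit is "2". Assuming the ISBN10 class above is in scope:

# Illustrative grouping of the digits 0-306-40615.
isbn = ISBN10(group="0", registrant="306", publication="40615")
print(isbn.check_digit)  # "2"  (145 % 11 == 2)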
wavefronthq/python-client
wavefront_api_client/models/specific_data.py
SpecificData.fast_reader_builder
python
def fast_reader_builder(self): return self._fast_reader_builder
Gets the fast_reader_builder of this SpecificData. # noqa: E501 :return: The fast_reader_builder of this SpecificData. # noqa: E501 :rtype: FastReaderBuilder
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/models/specific_data.py#L113-L120
import pprint import re import six from wavefront_api_client.configuration import Configuration class SpecificData(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'class_loader': 'ClassLoader', 'conversions': 'list[ConversionObject]', 'fast_reader_builder': 'FastReaderBuilder', 'fast_reader_enabled': 'bool' } attribute_map = { 'class_loader': 'classLoader', 'conversions': 'conversions', 'fast_reader_builder': 'fastReaderBuilder', 'fast_reader_enabled': 'fastReaderEnabled' } def __init__(self, class_loader=None, conversions=None, fast_reader_builder=None, fast_reader_enabled=None, _configuration=None): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._class_loader = None self._conversions = None self._fast_reader_builder = None self._fast_reader_enabled = None self.discriminator = None if class_loader is not None: self.class_loader = class_loader if conversions is not None: self.conversions = conversions if fast_reader_builder is not None: self.fast_reader_builder = fast_reader_builder if fast_reader_enabled is not None: self.fast_reader_enabled = fast_reader_enabled @property def class_loader(self): return self._class_loader @class_loader.setter def class_loader(self, class_loader): self._class_loader = class_loader @property def conversions(self): return self._conversions @conversions.setter def conversions(self, conversions): self._conversions = conversions @property
Apache License 2.0
openml/automlbenchmark
amlb/benchmarks/file.py
load_file_benchmark
python
def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]: benchmark_file = _find_local_benchmark_definition(name, benchmark_definition_dirs) log.info("Loading benchmark definitions from %s.", benchmark_file) tasks = config_load(benchmark_file) benchmark_name, _ = os.path.splitext(os.path.basename(benchmark_file)) return benchmark_name, benchmark_file, tasks
Loads a benchmark from a local file.
https://github.com/openml/automlbenchmark/blob/6e0296b097455caf18d754e79a2bd85e85d01548/amlb/benchmarks/file.py#L26-L32
import logging import os from typing import List, Tuple, Optional from amlb.utils import config_load, Namespace log = logging.getLogger(__name__) def _find_local_benchmark_definition(name: str, benchmark_definition_dirs: List[str]) -> str: if os.path.exists(name): return name for bd in benchmark_definition_dirs: bf = os.path.join(bd, f"{name}.yaml") if os.path.exists(bf): return bf raise ValueError(f"Incorrect benchmark name or path `{name}`, name not available in {benchmark_definition_dirs}.")
MIT License
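A hedged usage sketch follows; the benchmark name and directory are placeholders. The function resolves either an explicit path or <dir>/<name>.yaml, loads it, and returns the name stem, the resolved file, and the parsed task definitions.

# Placeholders: "small" and "resources/benchmarks" are illustrative only.
name, path, tasks = load_file_benchmark("small", ["resources/benchmarks"])
print(name)   # "small"
print(path)   # "resources/benchmarks/small.yaml"
for task in tasks:
    print(task)  # one Namespace per task entry in the YAML file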
iexcloud/pyex
pyEX/metadata/__init__.py
queryMetadata
python
def queryMetadata( id="", key="", subkey="", token="", version="stable", filter="", format="json" ): url = "metadata/time-series" if not id and key: id = "*" if id: url += "/{}".format(id) if key: url += "/{}".format(key) if subkey: url += "/{}".format(subkey) return _get(url, token=token, version=version, filter=filter, format=format)
Get inventory of available time series endpoints Args: id (str): Timeseries ID key (str): Timeseries Key subkey (str): Timeseries Subkey token (str): Access token version (str): API version filter (str): https://iexcloud.io/docs/api/#filter-results format (str): output format
https://github.com/iexcloud/pyex/blob/48223a046d120703e8cc8f6c57f8a1450ee3f835/pyEX/metadata/__init__.py#L15-L38
from functools import wraps import pandas as pd from ..common import _get
Apache License 2.0
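Two common call shapes, sketched with a placeholder token ("REPORTED_FINANCIALS" is just an example key). Note the wildcard behaviour in the code above: when a key is given without an id, the id segment becomes "*".

# Placeholder token; assumes queryMetadata() above is in scope.
inventory = queryMetadata(token="pk_your_token_here")           # full endpoint inventory
financials = queryMetadata(key="REPORTED_FINANCIALS",           # id wildcarded to "*"
                           token="pk_your_token_here")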
mila-iqia/myia
myia/opt/dde.py
ValuePropagator.run
python
def run(self, root): for p in root.parameters: self.add_value(p, WILDCARD, ANYTHING) for ct in self.manager.all_nodes: if ct.is_constant(): if isinstance(ct.value, tuple): self.add_value(ct, WILDCARD, ANYTHING) else: self.add_value(ct, ANYTHING, ct.value) self.add_need(root.return_, ANYTHING) while self.todo: nxt = self.todo.pop() self.process_node(nxt)
Run the algorithm.
https://github.com/mila-iqia/myia/blob/56774a39579b4ec4123f44843ad4ca688acc859b/myia/opt/dde.py#L57-L70
from collections import defaultdict from .. import abstract, xtype from ..abstract import ANYTHING, DEAD, PartialApplication from ..ir import Constant, Graph from ..operations import Primitive, primitives as P from ..utils import Named, Registry, newenv, tracer, untested WILDCARD = Named("WILDCARD") MAX_NEED_DEPTH = 5 class ValuePropagator: def __init__(self, resources, root): self.resources = resources self.manager = resources.opt_manager self.need = defaultdict(set) self.flow = defaultdict(set) self.backflow = defaultdict(set) self.results = defaultdict(lambda: defaultdict(set)) self.todo = set() self.run(root)
MIT License
ulordchain/uwallet-client-pro
gui/qt/__init__.py
UWalletGui.start_new_window
python
def start_new_window(self, path, uri): for w in self.windows: if w.wallet.storage.path == path: w.bring_to_top() break else: wallet = self.daemon.load_wallet(path) if not wallet: wizard = InstallWizard(self.config, self.app, self.plugins, self.daemon.network, path) wallet = wizard.run_and_get_wallet() if not wallet: return self.daemon.add_wallet(wallet) w = self.create_window_for_wallet(wallet) if uri: w.pay_to_URI(uri) return w
Raises the window for the wallet if it is open. Otherwise opens the wallet and creates a new window for it.
https://github.com/ulordchain/uwallet-client-pro/blob/103a3f7cf4e566e87f366739774f688339aea398/gui/qt/__init__.py#L174-L194
import sys import os import signal try: import PyQt4 except Exception: sys.exit("Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'") from PyQt4.QtGui import * from PyQt4.QtCore import * import PyQt4.QtCore as QtCore from uwallet.i18n import _, set_language from uwallet.plugins import run_hook from uwallet import SimpleConfig, Wallet, WalletStorage from uwallet.paymentrequest import InvoiceStore from uwallet.contacts import Contacts from uwallet.synchronizer import Synchronizer from uwallet.verifier import SPV from uwallet.util import DebugMem from uwallet.wallet import Abstract_Wallet from installwizard import InstallWizard try: import icons_rc except Exception: sys.exit("Error: Could not import icons_rc.py, please generate it with: 'pyrcc4 icons.qrc -o gui/qt/icons_rc.py'") from util import * from main_window import UWalletWindow class OpenFileEventFilter(QObject): def __init__(self, windows): self.windows = windows super(OpenFileEventFilter, self).__init__() def eventFilter(self, obj, event): if event.type() == QtCore.QEvent.FileOpen: if len(self.windows) >= 1: self.windows[0].pay_to_URI(event.url().toEncoded()) return True return False class UWalletGui: def __init__(self, config, daemon, plugins): set_language(config.get('language','zh_CN')) self.config = config self.daemon = daemon self.plugins = plugins self.windows = [] self.efilter = OpenFileEventFilter(self.windows) self.app = QApplication(sys.argv) self.app.installEventFilter(self.efilter) self.timer = Timer() self.invoices = InvoiceStore(self.config) self.contacts = Contacts(self.config) self.dark_icon = self.config.get("dark_icon", False) self.tray = QSystemTrayIcon(self.tray_icon(), None) self.tray.setToolTip('UWalletLite') self.tray.activated.connect(self.tray_activated) self.build_tray_menu() self.tray.show() self.app.connect(self.app, QtCore.SIGNAL('new_window'), self.start_new_window) run_hook('init_qt', self) def build_tray_menu(self): self.old_menu = self.tray.contextMenu() m = QMenu() for window in self.windows: submenu = m.addMenu(window.wallet.basename()) submenu.addAction(_("Show/Hide"), window.show_or_hide) submenu.addAction(_("Close"), window.close) m.addSeparator() m.addAction(_("Exit UWalletLite"), self.close) self.tray.setContextMenu(m) def tray_icon(self): if self.dark_icon: return QIcon(':icons/electrum_dark_icon.png') else: return QIcon(':icons/electrum_light_icon.png') def toggle_tray_icon(self): self.dark_icon = not self.dark_icon self.config.set_key("dark_icon", self.dark_icon, True) self.tray.setIcon(self.tray_icon()) def tray_activated(self, reason): if reason == QSystemTrayIcon.DoubleClick: if all([w.is_hidden() for w in self.windows]): for w in self.windows: w.bring_to_top() else: for w in self.windows: w.hide() def close(self): for window in self.windows: window.close() def new_window(self, path, uri=None): self.app.emit(SIGNAL('new_window'), path, uri) def create_window_for_wallet(self, wallet): w = UWalletWindow(self, wallet) self.windows.append(w) self.build_tray_menu() run_hook('on_new_window', w) return w
MIT License
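A hedged sketch of requesting a wallet window: the wallet path is a placeholder, and an initialised config/daemon/plugins trio plus a running Qt event loop are assumed. new_window() is the signal-emitting entry point that ends up in start_new_window(), which either raises an already-open window or loads the wallet and creates one.

# Placeholders throughout; assumes config, daemon and plugins already exist
# and the Qt application loop is running.
gui = UWalletGui(config, daemon, plugins)

# Direct call: raises the window if the wallet is open, otherwise loads it.
window = gui.start_new_window("/home/user/.uwallet/wallets/default_wallet", uri=None)

# Signal-based variant, safe to call from other threads within the application.
gui.new_window("/home/user/.uwallet/wallets/default_wallet")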